# gt stringclasses 1 value | context stringlengths 2.49k 119k
# ---|---  (dataset-table header residue; commented out so the file parses)
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Example Airflow DAG for Google ML Engine service.
"""
import os
from typing import Dict
from airflow import models
from airflow.operators.bash import BashOperator
from airflow.providers.google.cloud.operators.mlengine import (
MLEngineCreateModelOperator,
MLEngineCreateVersionOperator,
MLEngineDeleteModelOperator,
MLEngineDeleteVersionOperator,
MLEngineGetModelOperator,
MLEngineListVersionsOperator,
MLEngineSetDefaultVersionOperator,
MLEngineStartBatchPredictionJobOperator,
MLEngineStartTrainingJobOperator,
)
from airflow.providers.google.cloud.utils import mlengine_operator_utils
from airflow.utils.dates import days_ago
# Configuration for the example DAG. Every value can be overridden through an
# environment variable; the defaults deliberately contain "INVALID BUCKET NAME"
# so the example fails loudly unless real GCS paths are supplied.
PROJECT_ID = os.environ.get("GCP_PROJECT_ID", "example-project")
MODEL_NAME = os.environ.get("GCP_MLENGINE_MODEL_NAME", "model_name")
# GCS path of a pre-trained SavedModel used for version "v2".
SAVED_MODEL_PATH = os.environ.get("GCP_MLENGINE_SAVED_MODEL_PATH", "gs://INVALID BUCKET NAME/saved-model/")
# Working directory for the training job; the trained model is exported
# under <JOB_DIR>/keras_export/.
JOB_DIR = os.environ.get("GCP_MLENGINE_JOB_DIR", "gs://INVALID BUCKET NAME/keras-job-dir")
PREDICTION_INPUT = os.environ.get(
    "GCP_MLENGINE_PREDICTION_INPUT", "gs://INVALID BUCKET NAME/prediction_input.json"
)
PREDICTION_OUTPUT = os.environ.get(
    "GCP_MLENGINE_PREDICTION_OUTPUT", "gs://INVALID BUCKET NAME/prediction_output"
)
# Packaged training code (tarball) and the module run as the trainer entry point.
TRAINER_URI = os.environ.get("GCP_MLENGINE_TRAINER_URI", "gs://INVALID BUCKET NAME/trainer.tar.gz")
TRAINER_PY_MODULE = os.environ.get("GCP_MLENGINE_TRAINER_TRAINER_PY_MODULE", "trainer.task")
# Temp/staging locations for the Dataflow job launched by the evaluate step.
SUMMARY_TMP = os.environ.get("GCP_MLENGINE_DATAFLOW_TMP", "gs://INVALID BUCKET NAME/tmp/")
SUMMARY_STAGING = os.environ.get("GCP_MLENGINE_DATAFLOW_STAGING", "gs://INVALID BUCKET NAME/staging/")
# Exposes the model name to templated fields via {{ params.model_name }}.
default_args = {"params": {"model_name": MODEL_NAME}}
with models.DAG(
    "example_gcp_mlengine",
    schedule_interval=None,  # Override to match your needs
    start_date=days_ago(1),
    tags=['example'],
) as dag:
    # [START howto_operator_gcp_mlengine_training]
    # Submits an AI Platform training job; the job id is templated so each
    # run gets a unique, model-scoped identifier.
    training = MLEngineStartTrainingJobOperator(
        task_id="training",
        project_id=PROJECT_ID,
        region="us-central1",
        job_id="training-job-{{ ts_nodash }}-{{ params.model_name }}",
        runtime_version="1.15",
        python_version="3.7",
        job_dir=JOB_DIR,
        package_uris=[TRAINER_URI],
        training_python_module=TRAINER_PY_MODULE,
        training_args=[],
        labels={"job_type": "training"},
    )
    # [END howto_operator_gcp_mlengine_training]
    # [START howto_operator_gcp_mlengine_create_model]
    create_model = MLEngineCreateModelOperator(
        task_id="create-model",
        project_id=PROJECT_ID,
        model={
            "name": MODEL_NAME,
        },
    )
    # [END howto_operator_gcp_mlengine_create_model]
    # [START howto_operator_gcp_mlengine_get_model]
    get_model = MLEngineGetModelOperator(
        task_id="get-model",
        project_id=PROJECT_ID,
        model_name=MODEL_NAME,
    )
    # [END howto_operator_gcp_mlengine_get_model]
    # [START howto_operator_gcp_mlengine_print_model]
    # Echoes the XCom pushed by get-model so the result shows up in task logs.
    get_model_result = BashOperator(
        bash_command="echo \"{{ task_instance.xcom_pull('get-model') }}\"",
        task_id="get-model-result",
    )
    # [END howto_operator_gcp_mlengine_print_model]
    # [START howto_operator_gcp_mlengine_create_version1]
    # Version v1 is deployed from the artifact the training job exported.
    create_version = MLEngineCreateVersionOperator(
        task_id="create-version",
        project_id=PROJECT_ID,
        model_name=MODEL_NAME,
        version={
            "name": "v1",
            "description": "First-version",
            "deployment_uri": f'{JOB_DIR}/keras_export/',
            "runtime_version": "1.15",
            "machineType": "mls1-c1-m2",
            "framework": "TENSORFLOW",
            "pythonVersion": "3.7",
        },
    )
    # [END howto_operator_gcp_mlengine_create_version1]
    # [START howto_operator_gcp_mlengine_create_version2]
    # Version v2 is deployed from a pre-existing SavedModel path instead.
    create_version_2 = MLEngineCreateVersionOperator(
        task_id="create-version-2",
        project_id=PROJECT_ID,
        model_name=MODEL_NAME,
        version={
            "name": "v2",
            "description": "Second version",
            "deployment_uri": SAVED_MODEL_PATH,
            "runtime_version": "1.15",
            "machineType": "mls1-c1-m2",
            "framework": "TENSORFLOW",
            "pythonVersion": "3.7",
        },
    )
    # [END howto_operator_gcp_mlengine_create_version2]
    # [START howto_operator_gcp_mlengine_default_version]
    set_defaults_version = MLEngineSetDefaultVersionOperator(
        task_id="set-default-version",
        project_id=PROJECT_ID,
        model_name=MODEL_NAME,
        version_name="v2",
    )
    # [END howto_operator_gcp_mlengine_default_version]
    # [START howto_operator_gcp_mlengine_list_versions]
    list_version = MLEngineListVersionsOperator(
        task_id="list-version",
        project_id=PROJECT_ID,
        model_name=MODEL_NAME,
    )
    # [END howto_operator_gcp_mlengine_list_versions]
    # [START howto_operator_gcp_mlengine_print_versions]
    list_version_result = BashOperator(
        bash_command="echo \"{{ task_instance.xcom_pull('list-version') }}\"",
        task_id="list-version-result",
    )
    # [END howto_operator_gcp_mlengine_print_versions]
    # [START howto_operator_gcp_mlengine_get_prediction]
    prediction = MLEngineStartBatchPredictionJobOperator(
        task_id="prediction",
        project_id=PROJECT_ID,
        job_id="prediction-{{ ts_nodash }}-{{ params.model_name }}",
        region="us-central1",
        model_name=MODEL_NAME,
        data_format="TEXT",
        input_paths=[PREDICTION_INPUT],
        output_path=PREDICTION_OUTPUT,
        labels={"job_type": "prediction"},
    )
    # [END howto_operator_gcp_mlengine_get_prediction]
    # [START howto_operator_gcp_mlengine_delete_version]
    delete_version = MLEngineDeleteVersionOperator(
        task_id="delete-version", project_id=PROJECT_ID, model_name=MODEL_NAME, version_name="v1"
    )
    # [END howto_operator_gcp_mlengine_delete_version]
    # [START howto_operator_gcp_mlengine_delete_model]
    # delete_contents=True removes remaining versions before deleting the model.
    delete_model = MLEngineDeleteModelOperator(
        task_id="delete-model", project_id=PROJECT_ID, model_name=MODEL_NAME, delete_contents=True
    )
    # [END howto_operator_gcp_mlengine_delete_model]
    # Task dependency graph: train, create model/versions, run a batch
    # prediction, then tear the versions and the model down.
    training >> create_version
    training >> create_version_2
    create_model >> get_model >> [get_model_result, delete_model]
    create_model >> create_version >> create_version_2 >> set_defaults_version >> list_version
    create_version >> prediction
    create_version_2 >> prediction
    prediction >> delete_version
    list_version >> list_version_result
    list_version >> delete_version
    delete_version >> delete_model
# [START howto_operator_gcp_mlengine_get_metric]
def get_metric_fn_and_keys():
    """Return the per-instance metric function together with its metric keys.

    The returned key list must stay in the same order as the values produced
    by the metric function for each prediction instance.
    """

    def normalize_value(inst: Dict):
        # Each prediction row carries its score as the first element under
        # the 'dense_4' output key; expose it as a one-element tuple.
        return (float(inst['dense_4'][0]),)

    return normalize_value, ['val']
# [END howto_operator_gcp_mlengine_get_metric]
# [START howto_operator_gcp_mlengine_validate_error]
def validate_err_and_count(summary: Dict) -> Dict:
    """Validate an evaluation summary and return it unchanged.

    :param summary: aggregated metrics; expects keys ``val`` and ``count``.
    :return: the same ``summary`` dict when it passes validation.
    :raises ValueError: if ``val`` falls outside [0, 1] or ``count`` != 20.
    """
    if summary['val'] > 1:
        raise ValueError(f'Too high val>1; summary={summary}')
    if summary['val'] < 0:
        raise ValueError(f'Too low val<0; summary={summary}')
    if summary['count'] != 20:
        # Fixed message: this branch checks the row count, not ``val``
        # (it previously read "Invalid value val != 20").
        raise ValueError(f'Invalid count: count != 20; summary={summary}')
    return summary
# [END howto_operator_gcp_mlengine_validate_error]
# [START howto_operator_gcp_mlengine_evaluate]
# Builds a three-step evaluation pipeline: batch prediction, a Dataflow job
# that aggregates per-instance metrics into a summary, and a validation task
# that applies validate_err_and_count to that summary.
# NOTE(review): in the upstream Airflow example these operators are created
# inside the ``with models.DAG(...)`` context — confirm DAG attachment here.
evaluate_prediction, evaluate_summary, evaluate_validation = mlengine_operator_utils.create_evaluate_ops(
    task_prefix="evaluate-ops",
    data_format="TEXT",
    input_paths=[PREDICTION_INPUT],
    prediction_path=PREDICTION_OUTPUT,
    metric_fn_and_keys=get_metric_fn_and_keys(),
    validate_fn=validate_err_and_count,
    batch_prediction_job_id="evaluate-ops-{{ ts_nodash }}-{{ params.model_name }}",
    project_id=PROJECT_ID,
    region="us-central1",
    dataflow_options={
        'project': PROJECT_ID,
        'tempLocation': SUMMARY_TMP,
        'stagingLocation': SUMMARY_STAGING,
    },
    model_name=MODEL_NAME,
    version_name="v1",
    py_interpreter="python3",
)
# [END howto_operator_gcp_mlengine_evaluate]
# Evaluation runs against version v1 and must finish before v1 is deleted.
create_model >> create_version >> evaluate_prediction
evaluate_validation >> delete_version
# | |  (dataset-table separator: the OSF API search tests follow below)
import pytest
import uuid
from api.base.settings.defaults import API_BASE
from api_tests import utils
from framework.auth.core import Auth
from osf.models import MetaSchema
from osf_tests.factories import (
AuthUserFactory,
NodeFactory,
ProjectFactory,
InstitutionFactory,
CollectionFactory,
CollectionProviderFactory,
)
from osf_tests.utils import mock_archive
from website import settings
from website.project.metadata.schemas import LATEST_SCHEMA_VERSION
from website.search import elastic_search
from website.search import search
@pytest.mark.django_db
@pytest.mark.enable_search
@pytest.mark.enable_enqueue_task
@pytest.mark.enable_quickfiles_creation
class ApiSearchTestCase:
    """Shared fixtures for the API search test suite.

    Provides users, projects, components, and files — a mix of public and
    private records — so subclasses can assert what each search endpoint
    does and does not expose.
    """

    @pytest.fixture(autouse=True)
    def index(self):
        # Point the app at a throwaway Elasticsearch index for this test and
        # drop it afterwards, so tests never touch a shared index.
        settings.ELASTIC_INDEX = uuid.uuid4().hex
        elastic_search.INDEX = settings.ELASTIC_INDEX
        search.create_index(elastic_search.INDEX)
        yield
        search.delete_index(elastic_search.INDEX)

    @pytest.fixture()
    def user(self):
        """A plain user with no searchable projects of their own."""
        return AuthUserFactory()

    @pytest.fixture()
    def institution(self):
        return InstitutionFactory(name='Social Experiment')

    @pytest.fixture()
    def collection_public(self, user):
        return CollectionFactory(creator=user, provider=CollectionProviderFactory(), is_public=True, is_bookmark_collection=False)

    @pytest.fixture()
    def user_one(self, is_public=None):
        """User with jobs/schools populated, to exercise those search fields."""
        user_one = AuthUserFactory(fullname='Kanye Omari West')
        user_one.schools = [{
            'degree': 'English',
            'institution': 'Chicago State University'
        }]
        user_one.jobs = [{
            'title': 'Producer',
            'institution': 'GOOD Music, Inc.'
        }]
        user_one.save()
        return user_one

    @pytest.fixture()
    def user_two(self, institution):
        """User affiliated with an institution, to exercise affiliation search."""
        user_two = AuthUserFactory(fullname='Chance The Rapper')
        user_two.affiliated_institutions.add(institution)
        user_two.save()
        return user_two

    @pytest.fixture()
    def project(self, user_one):
        return ProjectFactory(
            title='Graduation',
            creator=user_one,
            is_public=True)

    @pytest.fixture()
    def project_public(self, user_one):
        # Public project with a description and a tag so title, description,
        # and tag queries can all be tested against it.
        project_public = ProjectFactory(
            title='The Life of Pablo',
            creator=user_one,
            is_public=True)
        project_public.set_description(
            'Name one genius who ain\'t crazy',
            auth=Auth(user_one),
            save=True)
        project_public.add_tag('Yeezus', auth=Auth(user_one), save=True)
        return project_public

    @pytest.fixture()
    def project_private(self, user_two):
        return ProjectFactory(title='Coloring Book', creator=user_two)

    @pytest.fixture()
    def component(self, user_one, project_public):
        return NodeFactory(
            parent=project_public,
            title='Highlights',
            creator=user_one,
            is_public=True)

    @pytest.fixture()
    def component_public(self, user_two, project_public):
        component_public = NodeFactory(
            parent=project_public,
            title='Ultralight Beam',
            creator=user_two,
            is_public=True)
        component_public.set_description(
            'This is my part, nobody else speak',
            auth=Auth(user_two),
            save=True)
        component_public.add_tag('trumpets', auth=Auth(user_two), save=True)
        return component_public

    @pytest.fixture()
    def component_private(self, user_one, project_public):
        # No is_public flag: NodeFactory defaults leave this component private.
        return NodeFactory(
            parent=project_public,
            title='Wavves',
            creator=user_one)

    @pytest.fixture()
    def file_component(self, component, user_one):
        return utils.create_test_file(
            component, user_one, filename='Highlights.mp3')

    @pytest.fixture()
    def file_public(self, component_public, user_one):
        return utils.create_test_file(
            component_public,
            user_one,
            filename='UltralightBeam.mp3')

    @pytest.fixture()
    def file_private(self, component_private, user_one):
        # Lives on a private component, so it must stay out of search results.
        return utils.create_test_file(
            component_private, user_one, filename='Wavves.mp3')
class TestSearch(ApiSearchTestCase):
    """End-to-end checks for the top-level /search/ endpoint.

    Verifies per-type hit counts and the related links exposed under
    ``search_fields``, with and without authentication.
    """

    @staticmethod
    def _related_total(search_fields, key):
        # Hit count for one search field (users/files/projects/...).
        return search_fields[key]['related']['meta']['total']

    @staticmethod
    def _related_href(search_fields, key):
        # Link to the type-specific search endpoint for one search field.
        return search_fields[key]['related']['href']

    @pytest.fixture()
    def url_search(self):
        return '/{}search/'.format(API_BASE)

    def test_search_results(
            self, app, url_search, user, user_one, user_two,
            institution, component, component_private,
            component_public, file_component, file_private,
            file_public, project, project_public, project_private):
        # test_search_no_auth
        res = app.get(url_search)
        assert res.status_code == 200
        search_fields = res.json['search_fields']
        assert self._related_total(search_fields, 'users') == 3
        assert self._related_total(search_fields, 'files') == 2
        assert self._related_total(search_fields, 'projects') == 2
        assert self._related_total(search_fields, 'components') == 2
        assert self._related_total(search_fields, 'registrations') == 0
        # test_search_auth — authenticated results match the anonymous ones.
        res = app.get(url_search, auth=user.auth)
        assert res.status_code == 200
        search_fields = res.json['search_fields']
        assert self._related_total(search_fields, 'users') == 3
        assert self._related_total(search_fields, 'files') == 2
        assert self._related_total(search_fields, 'projects') == 2
        assert self._related_total(search_fields, 'components') == 2
        assert self._related_total(search_fields, 'registrations') == 0
        # test_search_fields_links — without a query the links fall back to
        # the match-everything query (%2A is the url-encoded '*').
        res = app.get(url_search)
        assert res.status_code == 200
        search_fields = res.json['search_fields']
        assert '/{}search/users/?q=%2A'.format(API_BASE) in self._related_href(search_fields, 'users')
        assert '/{}search/files/?q=%2A'.format(API_BASE) in self._related_href(search_fields, 'files')
        assert '/{}search/projects/?q=%2A'.format(API_BASE) in self._related_href(search_fields, 'projects')
        assert '/{}search/components/?q=%2A'.format(
            API_BASE) in self._related_href(search_fields, 'components')
        assert '/{}search/registrations/?q=%2A'.format(
            API_BASE) in self._related_href(search_fields, 'registrations')
        # test_search_fields_links_with_query — the query propagates to links.
        url = '{}?q=science'.format(url_search)
        res = app.get(url)
        assert res.status_code == 200
        search_fields = res.json['search_fields']
        assert '/{}search/users/?q=science'.format(API_BASE) in self._related_href(search_fields, 'users')
        assert '/{}search/files/?q=science'.format(API_BASE) in self._related_href(search_fields, 'files')
        assert '/{}search/projects/?q=science'.format(
            API_BASE) in self._related_href(search_fields, 'projects')
        assert '/{}search/components/?q=science'.format(
            API_BASE) in self._related_href(search_fields, 'components')
        assert '/{}search/registrations/?q=science'.format(
            API_BASE) in self._related_href(search_fields, 'registrations')
class TestSearchComponents(ApiSearchTestCase):
    """Tests for the /search/components/ endpoint.

    Private components must never appear in results, regardless of who is
    asking; public components are searchable by title, description, tag,
    and contributor name.
    """

    @pytest.fixture()
    def url_component_search(self):
        return '/{}search/components/'.format(API_BASE)

    def test_search_components(
            self, app, url_component_search, user, user_one, user_two,
            component, component_public, component_private):
        # test_search_public_component_no_auth
        res = app.get(url_component_search)
        assert res.status_code == 200
        num_results = len(res.json['data'])
        total = res.json['links']['meta']['total']
        assert num_results == 2
        assert total == 2
        assert component_public.title in res
        assert component.title in res
        # test_search_public_component_auth
        # Fixed: was ``auth=user`` — the request helper expects the
        # (username, password) tuple, i.e. ``user.auth``, as used in
        # TestSearch.test_search_results.
        res = app.get(url_component_search, auth=user.auth)
        assert res.status_code == 200
        num_results = len(res.json['data'])
        total = res.json['links']['meta']['total']
        assert num_results == 2
        assert total == 2
        assert component_public.title in res
        assert component.title in res
        # test_search_public_component_contributor
        res = app.get(url_component_search, auth=user_two.auth)
        assert res.status_code == 200
        num_results = len(res.json['data'])
        total = res.json['links']['meta']['total']
        assert num_results == 2
        assert total == 2
        assert component_public.title in res
        assert component.title in res
        # test_search_private_component_no_auth
        res = app.get(url_component_search)
        assert res.status_code == 200
        assert component_private.title not in res
        # test_search_private_component_auth
        res = app.get(url_component_search, auth=user.auth)
        assert res.status_code == 200
        assert component_private.title not in res
        # test_search_private_component_contributor
        res = app.get(url_component_search, auth=user_two.auth)
        assert res.status_code == 200
        assert component_private.title not in res
        # test_search_component_by_title
        url = '{}?q={}'.format(url_component_search, 'beam')
        res = app.get(url)
        assert res.status_code == 200
        num_results = len(res.json['data'])
        total = res.json['links']['meta']['total']
        assert num_results == 1
        assert total == 1
        assert component_public.title == res.json['data'][0]['attributes']['title']
        # test_search_component_by_description
        url = '{}?q={}'.format(url_component_search, 'speak')
        res = app.get(url)
        assert res.status_code == 200
        num_results = len(res.json['data'])
        total = res.json['links']['meta']['total']
        assert num_results == 1
        assert total == 1
        assert component_public.title == res.json['data'][0]['attributes']['title']
        # test_search_component_by_tags
        url = '{}?q={}'.format(url_component_search, 'trumpets')
        res = app.get(url)
        assert res.status_code == 200
        num_results = len(res.json['data'])
        total = res.json['links']['meta']['total']
        assert num_results == 1
        assert total == 1
        assert component_public.title == res.json['data'][0]['attributes']['title']
        # test_search_component_by_contributor
        url = '{}?q={}'.format(url_component_search, 'Chance')
        res = app.get(url)
        assert res.status_code == 200
        num_results = len(res.json['data'])
        total = res.json['links']['meta']['total']
        assert num_results == 1
        assert total == 1
        assert component_public.title == res.json['data'][0]['attributes']['title']
        # test_search_component_no_results
        url = '{}?q={}'.format(url_component_search, 'Ocean')
        res = app.get(url)
        assert res.status_code == 200
        num_results = len(res.json['data'])
        total = res.json['links']['meta']['total']
        assert num_results == 0
        assert total == 0
        # test_search_component_bad_query — slashes make the query invalid.
        url = '{}?q={}'.format(
            url_component_search,
            'www.spam.com/help/twitter/')
        res = app.get(url, expect_errors=True)
        assert res.status_code == 400
class TestSearchFiles(ApiSearchTestCase):
    """Tests for the /search/files/ endpoint.

    Files attached to private components must not be searchable; public
    files are matched by name.
    """

    @pytest.fixture()
    def url_file_search(self):
        return '/{}search/files/'.format(API_BASE)

    def test_search_files(
            self, app, url_file_search, user, user_one,
            file_public, file_component, file_private):
        # test_search_public_file_no_auth
        res = app.get(url_file_search)
        assert res.status_code == 200
        num_results = len(res.json['data'])
        total = res.json['links']['meta']['total']
        assert num_results == 2
        assert total == 2
        assert file_public.name in res
        assert file_component.name in res
        # test_search_public_file_auth
        # Fixed: was ``auth=user`` — pass the credentials tuple ``user.auth``
        # as the rest of the suite does.
        res = app.get(url_file_search, auth=user.auth)
        assert res.status_code == 200
        num_results = len(res.json['data'])
        total = res.json['links']['meta']['total']
        assert num_results == 2
        assert total == 2
        assert file_public.name in res
        assert file_component.name in res
        # test_search_public_file_contributor
        res = app.get(url_file_search, auth=user_one.auth)
        assert res.status_code == 200
        num_results = len(res.json['data'])
        total = res.json['links']['meta']['total']
        assert num_results == 2
        assert total == 2
        assert file_public.name in res
        assert file_component.name in res
        # test_search_private_file_no_auth
        res = app.get(url_file_search)
        assert res.status_code == 200
        assert file_private.name not in res
        # test_search_private_file_auth
        res = app.get(url_file_search, auth=user.auth)
        assert res.status_code == 200
        assert file_private.name not in res
        # test_search_private_file_contributor
        res = app.get(url_file_search, auth=user_one.auth)
        assert res.status_code == 200
        assert file_private.name not in res
        # test_search_file_by_name
        url = '{}?q={}'.format(url_file_search, 'highlights')
        res = app.get(url)
        assert res.status_code == 200
        num_results = len(res.json['data'])
        total = res.json['links']['meta']['total']
        assert num_results == 1
        assert total == 1
        assert file_component.name == res.json['data'][0]['attributes']['name']
class TestSearchProjects(ApiSearchTestCase):
    """Tests for the /search/projects/ endpoint.

    Covers visibility of private projects and matching by title,
    description, tag, and contributor.
    """

    @pytest.fixture()
    def url_project_search(self):
        return '/{}search/projects/'.format(API_BASE)

    def test_search_projects(
            self, app, url_project_search, user, user_one,
            user_two, project, project_public, project_private):
        # test_search_public_project_no_auth
        res = app.get(url_project_search)
        assert res.status_code == 200
        num_results = len(res.json['data'])
        total = res.json['links']['meta']['total']
        assert num_results == 2
        assert total == 2
        assert project_public.title in res
        assert project.title in res
        # test_search_public_project_auth
        # Fixed: was ``auth=user`` — pass the credentials tuple ``user.auth``
        # as the rest of the suite does.
        res = app.get(url_project_search, auth=user.auth)
        assert res.status_code == 200
        num_results = len(res.json['data'])
        total = res.json['links']['meta']['total']
        assert num_results == 2
        assert total == 2
        assert project_public.title in res
        assert project.title in res
        # test_search_public_project_contributor
        res = app.get(url_project_search, auth=user_one.auth)
        assert res.status_code == 200
        num_results = len(res.json['data'])
        total = res.json['links']['meta']['total']
        assert num_results == 2
        assert total == 2
        assert project_public.title in res
        assert project.title in res
        # test_search_private_project_no_auth
        res = app.get(url_project_search)
        assert res.status_code == 200
        assert project_private.title not in res
        # test_search_private_project_auth
        res = app.get(url_project_search, auth=user.auth)
        assert res.status_code == 200
        assert project_private.title not in res
        # test_search_private_project_contributor
        res = app.get(url_project_search, auth=user_two.auth)
        assert res.status_code == 200
        assert project_private.title not in res
        # test_search_project_by_title
        url = '{}?q={}'.format(url_project_search, 'pablo')
        res = app.get(url)
        assert res.status_code == 200
        num_results = len(res.json['data'])
        total = res.json['links']['meta']['total']
        assert num_results == 1
        assert total == 1
        assert project_public.title == res.json['data'][0]['attributes']['title']
        # test_search_project_by_description
        url = '{}?q={}'.format(url_project_search, 'genius')
        res = app.get(url)
        assert res.status_code == 200
        num_results = len(res.json['data'])
        total = res.json['links']['meta']['total']
        assert num_results == 1
        assert total == 1
        assert project_public.title == res.json['data'][0]['attributes']['title']
        # test_search_project_by_tags
        url = '{}?q={}'.format(url_project_search, 'Yeezus')
        res = app.get(url)
        assert res.status_code == 200
        num_results = len(res.json['data'])
        total = res.json['links']['meta']['total']
        assert num_results == 1
        assert total == 1
        assert project_public.title == res.json['data'][0]['attributes']['title']
        # test_search_project_by_contributor — both projects share user_one.
        url = '{}?q={}'.format(url_project_search, 'kanye')
        res = app.get(url)
        assert res.status_code == 200
        num_results = len(res.json['data'])
        total = res.json['links']['meta']['total']
        assert num_results == 2
        assert total == 2
        assert project_public.title in res
        assert project.title in res
        # test_search_project_no_results — school names don't match projects.
        url = '{}?q={}'.format(url_project_search, 'chicago')
        res = app.get(url)
        assert res.status_code == 200
        num_results = len(res.json['data'])
        total = res.json['links']['meta']['total']
        assert num_results == 0
        assert total == 0
        # test_search_project_bad_query — slashes make the query invalid.
        url = '{}?q={}'.format(
            url_project_search,
            'www.spam.com/help/facebook/')
        res = app.get(url, expect_errors=True)
        assert res.status_code == 400
@pytest.mark.django_db
class TestSearchRegistrations(ApiSearchTestCase):
    """Tests for the /search/registrations/ endpoint.

    Registrations are created via mock_archive from the project fixtures;
    a withdrawn-from-public (private) registration must stay hidden.
    """

    @pytest.fixture()
    def url_registration_search(self):
        return '/{}search/registrations/'.format(API_BASE)

    @pytest.fixture()
    def schema(self):
        schema = MetaSchema.objects.filter(
            name='Replication Recipe (Brandt et al., 2013): Post-Completion',
            schema_version=LATEST_SCHEMA_VERSION).first()
        return schema

    @pytest.fixture()
    def registration(self, project, schema):
        with mock_archive(project, autocomplete=True, autoapprove=True, schema=schema) as registration:
            return registration

    @pytest.fixture()
    def registration_public(self, project_public, schema):
        with mock_archive(project_public, autocomplete=True, autoapprove=True, schema=schema) as registration_public:
            return registration_public

    @pytest.fixture()
    def registration_private(self, project_private, schema):
        with mock_archive(project_private, autocomplete=True, autoapprove=True, schema=schema) as registration_private:
            registration_private.is_public = False
            registration_private.save()
            # TODO: This shouldn't be necessary, but tests fail if we don't do
            # this. Investigate further.
            registration_private.update_search()
            return registration_private

    def test_search_registrations(
            self, app, url_registration_search, user, user_one, user_two,
            registration, registration_public, registration_private):
        # test_search_public_registration_no_auth
        res = app.get(url_registration_search)
        assert res.status_code == 200
        num_results = len(res.json['data'])
        total = res.json['links']['meta']['total']
        assert num_results == 2
        assert total == 2
        assert registration_public.title in res
        assert registration.title in res
        # test_search_public_registration_auth
        # Fixed: was ``auth=user`` — pass the credentials tuple ``user.auth``
        # as the rest of the suite does.
        res = app.get(url_registration_search, auth=user.auth)
        assert res.status_code == 200
        num_results = len(res.json['data'])
        total = res.json['links']['meta']['total']
        assert num_results == 2
        assert total == 2
        assert registration_public.title in res
        assert registration.title in res
        # test_search_public_registration_contributor
        res = app.get(url_registration_search, auth=user_one.auth)
        assert res.status_code == 200
        num_results = len(res.json['data'])
        total = res.json['links']['meta']['total']
        assert num_results == 2
        assert total == 2
        assert registration_public.title in res
        assert registration.title in res
        # test_search_private_registration_no_auth
        res = app.get(url_registration_search)
        assert res.status_code == 200
        assert registration_private.title not in res
        # test_search_private_registration_auth
        res = app.get(url_registration_search, auth=user.auth)
        assert res.status_code == 200
        assert registration_private.title not in res
        # test_search_private_registration_contributor
        res = app.get(url_registration_search, auth=user_two.auth)
        assert res.status_code == 200
        assert registration_private.title not in res
        # test_search_registration_by_title
        url = '{}?q={}'.format(url_registration_search, 'graduation')
        res = app.get(url)
        assert res.status_code == 200
        num_results = len(res.json['data'])
        total = res.json['links']['meta']['total']
        assert num_results == 1
        assert total == 1
        assert registration.title == res.json['data'][0]['attributes']['title']
        # test_search_registration_by_description
        url = '{}?q={}'.format(url_registration_search, 'crazy')
        res = app.get(url)
        assert res.status_code == 200
        num_results = len(res.json['data'])
        total = res.json['links']['meta']['total']
        assert num_results == 1
        assert total == 1
        assert registration_public.title == res.json['data'][0]['attributes']['title']
        # test_search_registration_by_tags
        url = '{}?q={}'.format(url_registration_search, 'yeezus')
        res = app.get(url)
        assert res.status_code == 200
        num_results = len(res.json['data'])
        total = res.json['links']['meta']['total']
        assert num_results == 1
        assert total == 1
        assert registration_public.title == res.json['data'][0]['attributes']['title']
        # test_search_registration_by_contributor
        url = '{}?q={}'.format(url_registration_search, 'west')
        res = app.get(url)
        assert res.status_code == 200
        num_results = len(res.json['data'])
        total = res.json['links']['meta']['total']
        assert num_results == 2
        assert total == 2
        assert registration_public.title in res
        assert registration.title in res
        # test_search_registration_no_results
        url = '{}?q={}'.format(url_registration_search, '79th')
        res = app.get(url)
        assert res.status_code == 200
        num_results = len(res.json['data'])
        total = res.json['links']['meta']['total']
        assert num_results == 0
        assert total == 0
        # test_search_registration_bad_query — slashes make the query invalid.
        url = '{}?q={}'.format(
            url_registration_search,
            'www.spam.com/help/snapchat/')
        res = app.get(url, expect_errors=True)
        assert res.status_code == 400
class TestSearchUsers(ApiSearchTestCase):
    """Tests for the /search/users/ endpoint.

    Users are searchable by name parts as well as by the job and school
    entries set on the user_one fixture.
    """

    @pytest.fixture()
    def url_user_search(self):
        return '/{}search/users/'.format(API_BASE)

    @pytest.mark.enable_quickfiles_creation
    def test_search_user(self, app, url_user_search, user, user_one, user_two):
        # test_search_users_no_auth
        res = app.get(url_user_search)
        assert res.status_code == 200
        num_results = len(res.json['data'])
        total = res.json['links']['meta']['total']
        assert num_results == 3
        assert total == 3
        assert user.fullname in res
        # test_search_users_auth
        # Fixed: was ``auth=user`` — pass the credentials tuple ``user.auth``
        # as the rest of the suite does.
        res = app.get(url_user_search, auth=user.auth)
        assert res.status_code == 200
        num_results = len(res.json['data'])
        total = res.json['links']['meta']['total']
        assert num_results == 3
        assert total == 3
        assert user.fullname in res
        # test_search_users_by_given_name
        url = '{}?q={}'.format(url_user_search, 'Kanye')
        res = app.get(url)
        assert res.status_code == 200
        num_results = len(res.json['data'])
        total = res.json['links']['meta']['total']
        assert num_results == 1
        assert total == 1
        assert user_one.given_name == res.json['data'][0]['attributes']['given_name']
        # test_search_users_by_middle_name
        url = '{}?q={}'.format(url_user_search, 'Omari')
        res = app.get(url)
        assert res.status_code == 200
        num_results = len(res.json['data'])
        total = res.json['links']['meta']['total']
        assert num_results == 1
        assert total == 1
        assert user_one.middle_names[0] == res.json['data'][0]['attributes']['middle_names'][0]
        # test_search_users_by_family_name
        url = '{}?q={}'.format(url_user_search, 'West')
        res = app.get(url)
        assert res.status_code == 200
        num_results = len(res.json['data'])
        total = res.json['links']['meta']['total']
        assert num_results == 1
        assert total == 1
        assert user_one.family_name == res.json['data'][0]['attributes']['family_name']
        # test_search_users_by_job
        url = '{}?q={}'.format(url_user_search, 'producer')
        res = app.get(url)
        assert res.status_code == 200
        num_results = len(res.json['data'])
        total = res.json['links']['meta']['total']
        assert num_results == 1
        assert total == 1
        assert user_one.fullname == res.json['data'][0]['attributes']['full_name']
        # test_search_users_by_school
        url = '{}?q={}'.format(url_user_search, 'Chicago')
        res = app.get(url)
        assert res.status_code == 200
        num_results = len(res.json['data'])
        total = res.json['links']['meta']['total']
        assert num_results == 1
        assert total == 1
        assert user_one.fullname == res.json['data'][0]['attributes']['full_name']
class TestSearchInstitutions(ApiSearchTestCase):
    """Tests for the /search/institutions/ endpoint."""

    @pytest.fixture()
    def url_institution_search(self):
        return '/{}search/institutions/'.format(API_BASE)

    def test_search_institutions(
            self, app, url_institution_search, user, institution):
        # test_search_institutions_no_auth
        res = app.get(url_institution_search)
        assert res.status_code == 200
        num_results = len(res.json['data'])
        total = res.json['links']['meta']['total']
        assert num_results == 1
        assert total == 1
        assert institution.name in res
        # test_search_institutions_auth
        # Fixed: was ``auth=user`` — pass the credentials tuple ``user.auth``
        # as the rest of the suite does.
        res = app.get(url_institution_search, auth=user.auth)
        assert res.status_code == 200
        num_results = len(res.json['data'])
        total = res.json['links']['meta']['total']
        assert num_results == 1
        assert total == 1
        assert institution.name in res
        # test_search_institutions_by_name
        url = '{}?q={}'.format(url_institution_search, 'Social')
        res = app.get(url)
        assert res.status_code == 200
        num_results = len(res.json['data'])
        total = res.json['links']['meta']['total']
        assert num_results == 1
        assert total == 1
        assert institution.name == res.json['data'][0]['attributes']['name']
class TestSearchCollections(ApiSearchTestCase):
    """Tests for the /search/collections/ endpoint.

    NOTE(review): the test method for this class continues beyond the
    visible end of this file chunk; only the fixtures are documented here.
    """

    def post_payload(self, *args, **kwargs):
        # Wrap keyword arguments as a JSON-API 'search' request body.
        return {
            'data': {
                'attributes': kwargs
            },
            'type': 'search'
        }

    @pytest.fixture()
    def url_collection_search(self):
        return '/{}search/collections/'.format(API_BASE)

    @pytest.fixture()
    def node_one(self, user):
        return NodeFactory(title='Ismael Lo: Tajabone', creator=user, is_public=True)

    @pytest.fixture()
    def node_two(self, user):
        return NodeFactory(title='Sambolera', creator=user, is_public=True)

    @pytest.fixture()
    def node_private(self, user):
        # Private node: collected submissions of it must not be searchable.
        return NodeFactory(title='Classified', creator=user)

    @pytest.fixture()
    def node_with_abstract(self, user):
        # Public node with a description, for abstract-matching queries.
        node_with_abstract = NodeFactory(title='Sambolera', creator=user, is_public=True)
        node_with_abstract.set_description(
            'Sambolera by Khadja Nin',
            auth=Auth(user),
            save=True)
        return node_with_abstract
def test_search_collections(
self, app, url_collection_search, user, node_one, node_two, collection_public,
node_with_abstract, node_private):
collection_public.collect_object(node_one, user)
collection_public.collect_object(node_two, user)
collection_public.collect_object(node_private, user)
# test_search_collections_no_auth
res = app.get(url_collection_search)
assert res.status_code == 200
total = res.json['links']['meta']['total']
num_results = len(res.json['data'])
assert total == 2
assert num_results == 2
# test_search_collections_auth
res = app.get(url_collection_search, auth=user)
assert res.status_code == 200
total = res.json['links']['meta']['total']
num_results = len(res.json['data'])
assert total == 2
assert num_results == 2
# test_search_collections_by_submission_title
url = '{}?q={}'.format(url_collection_search, 'Ismael')
res = app.get(url)
assert res.status_code == 200
total = res.json['links']['meta']['total']
num_results = len(res.json['data'])
assert node_one.title == res.json['data'][0]['embeds']['guid']['data']['attributes']['title']
assert total == 1
assert num_results == 1
# test_search_collections_by_submission_abstract
collection_public.collect_object(node_with_abstract, user)
url = '{}?q={}'.format(url_collection_search, 'KHADJA')
res = app.get(url)
assert res.status_code == 200
total = res.json['links']['meta']['total']
assert node_with_abstract.description == res.json['data'][0]['embeds']['guid']['data']['attributes']['description']
assert total == 1
# test_search_collections_no_results:
url = '{}?q={}'.format(url_collection_search, 'Wale Watu')
res = app.get(url)
assert res.status_code == 200
total = res.json['links']['meta']['total']
assert total == 0
def test_POST_search_collections(
self, app, url_collection_search, user, node_one, node_two, collection_public,
node_with_abstract, node_private):
collection_public.collect_object(node_one, user, status='asdf')
collection_public.collect_object(node_two, user, collected_type='asdf', status='lkjh')
collection_public.collect_object(node_with_abstract, user, status='asdf')
collection_public.collect_object(node_private, user, status='asdf', collected_type='asdf')
# test_search_empty
payload = self.post_payload()
res = app.post_json_api(url_collection_search, payload)
assert res.status_code == 200
assert res.json['links']['meta']['total'] == 3
assert len(res.json['data']) == 3
# test_search_title_keyword
payload = self.post_payload(q='Ismael')
res = app.post_json_api(url_collection_search, payload)
assert res.status_code == 200
assert res.json['links']['meta']['total'] == 1
assert len(res.json['data']) == 1
# test_search_abstract_keyword
payload = self.post_payload(q='Khadja')
res = app.post_json_api(url_collection_search, payload)
assert res.status_code == 200
assert res.json['links']['meta']['total'] == 1
assert len(res.json['data']) == 1
assert res.json['data'][0]['id'] == node_with_abstract._id
# test_search_filter
payload = self.post_payload(status='asdf')
res = app.post_json_api(url_collection_search, payload)
assert res.status_code == 200
assert res.json['links']['meta']['total'] == 2
assert len(res.json['data']) == 2
payload = self.post_payload(status=['asdf', 'lkjh'])
res = app.post_json_api(url_collection_search, payload)
assert res.status_code == 200
assert res.json['links']['meta']['total'] == 3
assert len(res.json['data']) == 3
payload = self.post_payload(collectedType='asdf')
res = app.post_json_api(url_collection_search, payload)
assert res.status_code == 200
assert res.json['links']['meta']['total'] == 1
assert len(res.json['data']) == 1
assert res.json['data'][0]['id'] == node_two._id
# test_search_abstract_keyword_and_filter
payload = self.post_payload(q='Khadja', status='asdf')
res = app.post_json_api(url_collection_search, payload)
assert res.status_code == 200
assert res.json['links']['meta']['total'] == 1
assert len(res.json['data']) == 1
assert res.json['data'][0]['id'] == node_with_abstract._id
# test_search_abstract_keyword_and_filter_provider
payload = self.post_payload(q='Khadja', status='asdf', provider=collection_public.provider._id)
res = app.post_json_api(url_collection_search, payload)
assert res.status_code == 200
assert res.json['links']['meta']['total'] == 1
assert len(res.json['data']) == 1
assert res.json['data'][0]['id'] == node_with_abstract._id
| |
from __future__ import print_function
from __future__ import division
import ast
import numpy as np
import nibabel as nib
def warp_ssd(in1, in2, template, output):
    """
    Computes the SSD between two warps, using template for header info.

    The warps are subtracted voxel-wise, squared, and summed over axis 4
    (assumed to hold the displacement components -- TODO confirm layout);
    the result is saved as a nifti with the template's header and affine.
    """
    first_warp = nib.load(in1).get_data()
    second_warp = nib.load(in2).get_data()
    ssd_arr = np.sum((first_warp - second_warp) ** 2, 4).squeeze()
    template_nii = nib.load(template)
    result = nib.Nifti1Image(ssd_arr,
                             header=template_nii.get_header(),
                             affine=template_nii.get_affine())
    result.to_filename(output)
def _masked_threshold_arr(in_arr, threshold, label_arr, exclude_arr, direction, *labels):
labels = map(int, labels)
if len(labels) == 0:
labels = [1]
mask_arr = np.zeros(label_arr.shape)
for label in labels:
mask_arr = np.logical_or(mask_arr, label_arr == label)
mask_arr = np.logical_and(mask_arr, np.logical_not(exclude_arr))
if direction == 'greater':
binary = in_arr > threshold
elif direction == 'less':
binary = in_arr < threshold
if len(binary.shape) == 4:
binary = binary[:,:,:,0]
return np.logical_and(mask_arr, binary)
def ssd(in1, in2, output):
    """
    Writes the sum of squared differences between two volumes to a text file.

    Special case: when the first volume is 256^3 and the second 170^3, the
    first is center-cropped to 170^3 before comparison (presumably undoing
    a fixed padding -- TODO confirm against the padding pipeline).
    """
    vol_a = nib.load(in1).get_data()
    vol_b = nib.load(in2).get_data()
    if vol_a.shape == (256, 256, 256) and vol_b.shape == (170, 170, 170):
        vol_a = vol_a[43:-43, 43:-43, 43:-43]
    result = np.sum((vol_a - vol_b) ** 2)
    with open(output, 'w') as f:
        f.write(repr(result))
def gaussian_blur(infile, outfile, sigma):
    """
    Applies a Gaussian blur to a nifti volume and saves the result.

    Inputs
    ------
    infile : filename with the input image (3D volume, or 5D warp with
             shape (x, y, z, 1, 3))
    outfile : filename to save the blurred image to
    sigma : blur width; a string is parsed with ast.literal_eval (CLI use)

    Raises ValueError for any other dimensionality (previously an all-zero
    volume was silently written).
    """
    # Modern import path; scipy.ndimage.filters is deprecated/removed.
    from scipy.ndimage import gaussian_filter
    if isinstance(sigma, str):
        sigma = ast.literal_eval(sigma)
    in_nii = nib.load(infile)
    in_arr = in_nii.get_data()
    out_arr = np.zeros_like(in_arr)
    if len(in_arr.shape) == 5:
        assert in_arr.shape[3] == 1
        assert in_arr.shape[4] == 3
        # Warp: blur the x, y, z displacement components separately.
        # range (not py2-only xrange) so this runs on python 2 and 3.
        for i in range(3):
            gaussian_filter(
                in_arr[:, :, :, 0, i],
                sigma=sigma,
                output=out_arr[:, :, :, 0, i])
    elif len(in_arr.shape) == 3:
        gaussian_filter(
            in_arr[:, :, :],
            sigma=sigma,
            output=out_arr[:, :, :])
    else:
        raise ValueError("Expected a 3D volume or 5D warp, got shape %s" %
                         (in_arr.shape,))
    out_nii = nib.Nifti1Image(out_arr, header=in_nii.get_header(), affine=in_nii.get_affine())
    out_nii.to_filename(outfile)
def _masked_threshold(infile, threshold, outfile, mode='scalar', excludefile='-',
        labelfile='-', direction='greater', units='mm', *labels):
    """
    Counts how many voxels are above/below a threshold.
    Inputs
    ------
    infile : filename with the input image
    threshold : numeric threshold (a string is parsed with ast.literal_eval)
    outfile : a file to write to. If mode is 'scalar', writes a number.
              Otherwise, writes a nifti volume
    mode : 'scalar' (writes a single number with the total volume)
           or 'nii' (writes a binary mask with the result of thresholding)
    excludefile: filename of a binary mask to exclude from the results.
                 use '-' for no mask
    labelfile : filename of a mask/labelmap to count within. use '-' for no mask
    direction : 'greater' or 'less'. 'greater' counts intensities
                above threshold, and 'less' counts intensities
                below.
    units : either 'mm' (writes results in mm^3) or 'voxels'
    labels : integer labels within labelfile to restrict the count to
             (see _masked_threshold_arr; defaults to label 1 when empty)
    """
    # Thresholds may arrive as strings when this is driven from a CLI.
    if type(threshold) is str:
        threshold = ast.literal_eval(threshold)
    nii = nib.load(infile)
    data = nii.get_data()
    # '-' is the CLI sentinel for "no mask": include everything / exclude nothing.
    if labelfile == '-':
        label_arr = np.ones(data.shape)
    else:
        label_arr = nib.load(labelfile).get_data()
    if excludefile == '-':
        exclude_arr = np.zeros(data.shape)
    else:
        exclude_arr = nib.load(excludefile).get_data()
    if label_arr.shape != exclude_arr.shape:
        # Sometimes one might have an extra singleton dimension which messes things
        # up down the road. If removing singleton dimensions helps, then just do it
        data_, label_, exclude_ = [np.squeeze(a) for a in (data, label_arr, exclude_arr)]
        if data_.shape == label_.shape == exclude_.shape:
            data = data_
            exclude_arr = exclude_
            label_arr = label_
        else:
            raise ValueError("Input file dimensions don't match: %s %s %s" %
                    (data.shape, label_arr.shape, exclude_arr.shape))
    vol = _masked_threshold_arr(data, threshold, label_arr, exclude_arr, direction, *labels)
    if mode == 'scalar':
        count = vol.sum()
        if units == 'mm':
            # Convert the voxel count to mm^3 via the voxel dimensions
            # stored in the nifti header (pixdim[1:4]).
            count *= nii.get_header()['pixdim'][1:4].prod()
        elif units != 'voxels':
            raise ValueError("I expected units in mm or voxels, but you asked for " + units)
        with open(outfile, 'w') as f:
            f.write(str(count))
    else:
        # 'nii' mode: save the boolean threshold mask as a volume instead.
        out = nib.Nifti1Image(vol, header=nii.get_header(),
                affine=nii.get_affine())
        out.to_filename(outfile)
def masked_threshold(infile, threshold, outfile, excludefile='-',
        labelfile='-', direction='greater', *labels):
    """
    Thresholds a volume within a mask, and saves a new binary
    mask of voxels that are above/below the threshold.
    Inputs
    ------
    infile : filename with the input image
    threshold : numeric threshold
    outfile : a file to write the output image to
    excludefile : filename of a binary mask to exclude. use '-' for no mask
    labelfile : filename of a mask/labelmap to count within. use '-' for no mask
    direction : 'greater' or 'less'. 'greater' keeps intensities
                above threshold, and 'less' keeps intensities below.
    labels : labels within which to do thresholding
    """
    # Delegate to the shared implementation in nifti-output ('nii') mode.
    # The units argument is irrelevant for a binary-mask output; 'mm' is
    # passed only to satisfy the shared signature.
    _masked_threshold(infile, threshold, outfile, 'nii', excludefile,
                      labelfile, direction, 'mm', *labels)
def masked_threshold_count(infile, threshold, outfile, excludefile='-',
        labelfile='-', direction='greater', units='mm', *labels):
    """
    Thresholds a volume within a mask, and saves how much
    of the volume is above/below the threshold.
    Inputs
    ------
    infile : filename with the input image
    threshold : numeric threshold
    outfile : a text file to write the output value to
    excludefile : filename of a binary mask to exclude. use '-' for no mask
    labelfile : filename of a mask/labelmap to count within. use '-' for no mask
    direction : 'greater' or 'less'. 'greater' counts intensities
                above threshold, and 'less' counts intensities below.
    units : either 'mm' (writes results in mm^3) or 'voxels'
    labels : labels within which to do thresholding
    """
    # Delegate to the shared implementation in scalar-output mode.
    _masked_threshold(infile, threshold, outfile, 'scalar', excludefile,
                      labelfile, direction, units, *labels)
def mask(infile, maskfile, outfile):
    """
    Given a binary mask (maskfile) and an input image (infile),
    sets all areas outside the mask (i.e., where the mask image is 0)
    to be 0, and saves the result to outfile.
    Inputs
    ------
    infile : filename with the input image
    maskfile : filename with the binary mask image
    outfile : filename to save the result. All voxels in the input where
              the mask=0 are set to 0, and the rest are left as they are.
    """
    source_nii = nib.load(infile)
    mask_nii = nib.load(maskfile)
    outside_mask = (mask_nii.get_data() == 0)
    arr = source_nii.get_data()
    arr[outside_mask] = 0
    result = nib.Nifti1Image(arr, header=source_nii.get_header(),
                             affine=source_nii.get_affine())
    result.to_filename(outfile)
def crop_to_bounding_box(infile, outfile):
    """
    Crops the volume in infile to locations where it is nonzero.
    Prints the resulting bounding box in the following format:
    xmin ymin zmin xmax ymax zmax

    NOTE: the crop uses slice(min, max), so the plane at `max` itself is
    excluded -- this matches trim_bounding_box, which consumes the printed
    values the same way.
    """
    nii = nib.load(infile)
    aff = nii.get_affine()
    data = nii.get_data()
    minmaxes = []
    for otheraxes in [[2, 1], [2, 0], [1, 0]]:
        # Collapse the other two axes; nonzero sums mark occupied planes.
        one_axis = np.apply_over_axes(np.sum, data, otheraxes).squeeze()
        # hack because image has a weird bright patch
        (nonzero_locs,) = np.where(one_axis)
        minmaxes.append((nonzero_locs.min(), nonzero_locs.max()))
    minima = [int(lo) for (lo, hi) in minmaxes]
    maxima = [int(hi) for (lo, hi) in minmaxes]
    # Index with a tuple of slices: list-of-slices ndarray indexing is
    # deprecated (and removed) in modern numpy.
    slicing = tuple(slice(lo, hi, None) for (lo, hi) in minmaxes)
    # Shift the origin by the cropped offset.
    # NOTE(review): adds voxel indices directly to the translation, which
    # assumes an axis-aligned, unit-scale affine -- TODO confirm (compare
    # trim_slicing, which multiplies through aff[:3,:3]).
    aff[:3, -1] += minima
    out = nib.Nifti1Image(data[slicing], header=nii.get_header(), affine=aff)
    out.update_header()
    out.to_filename(outfile)
    print(" ".join(map(str, minima)))
    print(" ".join(map(str, maxima)))
def pad(niiFileName, paddedNiiFileName, maskNiiFileName, padAmountMM='30'):
    """
    pad a nifti and save the nifti and the relevant mask.
    Example arguments:
        niiFileName = '10529_t1.nii.gz'
        paddedNiiFileName = 'padded_10529_t1.nii.gz'
        maskNiiFileName = 'padded_10529_t1_mask.nii.gz'
        padAmountMM = '30'; [default]
    Returns (padded_nii, mask_nii).
    """
    # padding amount in mm (string accepted for CLI use)
    padAmountMM = int(padAmountMM)
    # load the nifti
    nii = nib.load(niiFileName)
    # get the amount of padding in voxels.
    # Cast to int: with true division the ceil result is float, and float
    # shapes/slice endpoints are rejected by modern numpy.
    pixdim = nii.get_header()['pixdim'][1:4]
    padAmount = np.ceil(padAmountMM / pixdim).astype(int)
    dims = nii.get_header()['dim'][1:4]
    assert np.all(dims.shape == padAmount.shape)
    newDims = dims + padAmount * 2
    # compute where the center is for padding
    center = newDims / 2
    starts = np.round(center - dims / 2).astype(int)
    ends = starts + dims
    # slice objects with integer endpoints, in a tuple (list-of-slices
    # ndarray indexing is deprecated/removed in modern numpy)
    slicer = tuple(slice(start, end) for (start, end) in zip(starts, ends))
    # set the subvolume in the center of the image w/the padding around it
    vol = np.zeros(newDims)
    vol[slicer] = nii.get_data()
    volMask = np.zeros(newDims)
    volMask[slicer] = np.ones(dims)
    # update affine: shift the origin so the padded image stays aligned
    # in world space.
    # NOTE(review): subtracting the mm amount directly assumes an
    # axis-aligned RAS affine -- TODO confirm for oblique acquisitions.
    affine = nii.get_affine()
    affine[:3, 3] -= padAmountMM
    # create niftis
    newNii = nib.Nifti1Image(vol, header=nii.get_header(), affine=affine)
    newNiiMask = nib.Nifti1Image(volMask, header=nii.get_header(), affine=affine)
    # save niftis
    newNii.to_filename(paddedNiiFileName)
    newNiiMask.to_filename(maskNiiFileName)
    return (newNii, newNiiMask)
def trim_bounding_box(infile, outfile, xmin, ymin, zmin, xmax, ymax, zmax):
    """
    Trims the input volume using the specified bounding box.

    Bounds may be ints or numeric strings (as when driven from a CLI,
    e.g. with the values printed by crop_to_bounding_box); the max bounds
    are exclusive.
    """
    # Coerce string arguments, consistent with the other CLI entry points
    # (cf. the threshold/sigma handling elsewhere in this module).
    xmin, ymin, zmin, xmax, ymax, zmax = [
        int(v) for v in (xmin, ymin, zmin, xmax, ymax, zmax)]
    slicers = [slice(xmin, xmax), slice(ymin, ymax), slice(zmin, zmax)]
    trim_slicing(infile, outfile, slicers, update_headers=True)
def trim_slicing(infile, outfile, slices=None, update_headers=True):
    """
    Trims the input volume using the list of slice objects provided and
    saves the result.
    Inputs
    ------
    infile : a filename from which to read data
    outfile : a filename to which to save data
    slices : a list of slice objects; a None entry leaves that dimension
             untouched. Defaults to values that usually work
             well for 3D 1mm-resolution brain MRI data.
    SEE ALSO: slicer, SliceMaker.
    Examples
    --------
    The following two examples are equivalent. Both trim the x
    dimension and y dimension and downsample the y dimension,
    while leaving the z dimension untouched.
    trim_slicing('in.nii.gz', 'out.nii.gz', [slice(30,200), slice(20, 280, 2), None])
    trim_slicing('in.nii.gz', 'out.nii.gz', niitools.slicer[30:200, 20:280,2, :])
    """
    if slices is None:
        slices = [slice(49, 211), slice(22, 220), slice(38, 183)]
    # Normalize None entries to slice(None): a raw None inside a numpy index
    # would insert a new axis (np.newaxis) instead of leaving the dimension
    # untouched, and None has no .start attribute. Also use a tuple, since
    # list-of-slices ndarray indexing is deprecated/removed in modern numpy.
    slices = tuple(s if s is not None else slice(None) for s in slices)
    inp = nib.load(infile)
    aff = inp.get_affine()
    if update_headers:
        # modify origin according to affine * what we trimmed off the front
        # (an open start, i.e. slice(None), contributes a 0 offset)
        starts = [s.start if s.start is not None else 0 for s in slices]
        aff[:3, -1] += np.dot(aff[:3, :3], np.array(starts))
    out = nib.Nifti1Image(inp.get_data()[slices], header=inp.get_header(), affine=aff)
    out.to_filename(outfile)
| |
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import traceback
import platform
import random
import socket
import netifaces
from calvin.utilities import calvinlogger
from calvin.runtime.south.plugins.storage.twistedimpl.dht.service_discovery import ServiceDiscoveryBase
from twisted.internet.protocol import DatagramProtocol
from twisted.web.http import datetimeToString
from twisted.internet import reactor, defer
_log = calvinlogger.get_logger(__name__)
# Well-known SSDP (UPnP discovery) multicast group and port.
SSDP_ADDR = '239.255.255.250'
SSDP_PORT = 1900
__version_info__ = (0, 6, 7)
__version__ = '.'.join(map(str, __version_info__))
# SERVER header value: OS name, OS release, product tokens, version.
SERVER_ID = ','.join([platform.system(),
                      platform.release(),
                      'UPnP/1.0,Calvin UPnP framework',
                      __version__])
# Fixed uuid identifying the Calvin discovery service; both the M-SEARCH
# probe and its responses carry it in the ST header.
SERVICE_UUID = '1693326a-abb9-11e4-8dfb-9cb654a16426'
# Fully formatted M-SEARCH probe sent by ClientBase.
MS = ('M-SEARCH * HTTP/1.1\r\nHOST: %s:%d\r\nMAN: "ssdp:discover"\r\n' +
      'MX: 2\r\nST: uuid:%s\r\n\r\n') %\
    (SSDP_ADDR, SSDP_PORT, SERVICE_UUID)
# Response template; the four remaining %s placeholders are filled per
# reply with (server address, last-seen timestamp, service name, date).
MS_RESP = 'HTTP/1.1 200 OK\r\n' + \
          'USN: %s::upnp:rootdevice\r\n' % SERVICE_UUID + \
          'SERVER: %s\r\nlast-seen: %s\r\nEXT: \r\nSERVICE: %s\r\n' + \
          'LOCATION: http://calvin@github.se/%s/description-0.0.1.xml\r\n' % SERVICE_UUID + \
          'CACHE-CONTROL: max-age=1800\r\nST: uuid:%s\r\n' % SERVICE_UUID + \
          'DATE: %s\r\n'
def parse_http_response(data):
    """ Parse an SSDP/HTTP message into (command, headers).

    Returns the first line split on spaces, and a dict of lower-cased
    header names to values. Don't try to get the body: there are
    responses without one.
    """
    header_section = data.split('\r\n\r\n')[0]
    header_lines = header_section.split('\r\n')
    cmd = header_lines[0].split(' ')
    headers = {}
    for line in header_lines[1:]:
        if not line:
            continue
        # Drop the single space after the colon, then split name/value.
        name, value = line.replace(': ', ':', 1).split(':', 1)
        headers[name.lower()] = value
    return cmd, headers
class ServerBase(DatagramProtocol):
    """ Multicast SSDP responder.

    Answers M-SEARCH probes carrying the Calvin SERVICE_UUID with one
    response per registered (service, address) pair.
    """
    def __init__(self, ips, cert=None, d=None):
        self._services = {}     # service name -> list of (ip, port) tuples
        self._dstarted = d      # optional deferred fired when the protocol starts
        self.ignore_list = []   # (ip, port) peers never answered (e.g. ourselves)
        self.ips = ips          # local addresses used for wildcard registrations
        self.cert = cert        # optional certificate appended to responses
    def startProtocol(self):
        # Fire the startup deferred asynchronously, once the reactor runs.
        if self._dstarted:
            reactor.callLater(0, self._dstarted.callback, True)
    def datagramReceived(self, datagram, address):
        # Broadcast
        try:
            cmd, headers = parse_http_response(datagram)
            _log.debug("Received %s, %s from %r" % (cmd, headers, address, ))
            if cmd[0] == 'M-SEARCH' and cmd[1] == '*':
                _log.debug("Ignore list %s ignore %s" % (self.ignore_list, address not in self.ignore_list))
                # Only reply to our requests
                if SERVICE_UUID in headers['st'] and address not in self.ignore_list:
                    for k, addrs in self._services.items():
                        for addr in addrs:
                            # Only tell local about local
                            if addr[0] == "127.0.0.1" and address[0] != "127.0.0.1":
                                continue
                            response = MS_RESP % ('%s:%d' % addr, str(time.time()),
                                                  k, datetimeToString())
                            if self.cert is not None:
                                response = "{}CERTIFICATE: ".format(response)
                                response = "{}{}".format(response, self.cert)
                            response = "{}\r\n\r\n".format(response)
                            _log.debug("Sending response: %s" % repr(response))
                            # Spread replies over [0, min(5, MX)] seconds, per SSDP.
                            delay = random.randint(0, min(5, int(headers['mx'])))
                            reactor.callLater(delay, self.send_it,
                                              response, address)
        except Exception:
            # Narrowed from a bare "except:" so SystemExit/KeyboardInterrupt
            # still propagate; malformed datagrams are just logged.
            _log.exception("Error datagram received")
    def add_service(self, service, ip, port):
        """ Register a service; ip '' or '0.0.0.0' registers all local ips. """
        # Service on all interfaces
        if ip in ["0.0.0.0", ""]:
            self._services[service] = []
            for a in self.ips:
                _log.debug("Add service %s, %s:%s" % (service, a, port))
                self._services[service].append((a, port))
        else:
            _log.debug("Add service %s, %s:%s" % (service, ip, port))
            self._services[service] = [(ip, port)]
    def remove_service(self, service):
        """ Unregister a service (no-op when unknown). """
        if service in self._services:
            del self._services[service]
    def set_ignore_list(self, list_):
        # (ip, port) tuples that must never receive a response.
        self.ignore_list = list_
    def send_it(self, response, destination):
        """ Write a prepared response to destination, tolerating a
            not-yet/no-longer connected transport. """
        try:
            if self.transport:
                self.transport.write(response, destination)
            else:
                _log.debug("No transport yet!")
        except (AttributeError, socket.error) as msg:
            # "except E as e" replaces the py2-only "except E, e" syntax,
            # so the module also parses under python 3.
            _log.exception("Error in send %s" % repr(msg))
    def stop(self):
        pass
class ClientBase(DatagramProtocol):
    """ Unicast SSDP client: collects M-SEARCH responses that carry the
    Calvin SERVICE_UUID and feeds the discovered addresses to a callback.
    """
    def __init__(self, d=None):
        self._service = None           # optional service-name filter
        self._msearch_stopped = False  # True once stop() has been called
        self._msearch_stop = False     # auto-stop after the first accepted reply
        self._dstarted = d             # optional deferred fired on start
        # (a misspelled duplicate "_mserach_cb" attribute was removed;
        # nothing referenced it)
        self._msearch_cb = None        # callback fed lists of address tuples
    def startProtocol(self):
        if self._dstarted:
            reactor.callLater(0, self._dstarted.callback, True)
    def datagramReceived(self, datagram, address):
        # Broadcast
        cmd, headers = parse_http_response(datagram)
        # Only handle HTTP 200 replies (responses to our M-SEARCH).
        if cmd[0].startswith('HTTP/1.') and cmd[1] == '200':
            _log.debug("Received %s from %r" % (headers, address, ))
            if SERVICE_UUID in headers['st']:
                c_address = headers['server'].split(':')
                c_address[1] = int(c_address[1])
                try:
                    # Optional certificate chunks appended by the responder.
                    cert = headers['certificate'].split(':')
                    c_address.extend(cert)
                except KeyError:
                    pass
                # Filter on service calvin networks
                if self._service is None or \
                   self._service == headers['service']:
                    _log.debug("Received service %s from %s" %
                               (headers['service'], c_address, ))
                    if c_address:
                        if self._msearch_cb:
                            self._msearch_cb([tuple(c_address)])
                        if self._msearch_stop:
                            self.stop()
    def set_callback(self, callback):
        self._msearch_cb = callback
    def set_service(self, service):
        self._service = service
    def is_stopped(self):
        return self._msearch_stopped
    def set_autostop(self, stop=True):
        self._msearch_stop = stop
    def stop(self):
        # Only flags the periodic search loop to end; the socket itself is
        # closed by SSDPServiceDiscovery.stop().
        self._msearch_stopped = True
class SSDPServiceDiscovery(ServiceDiscoveryBase):
    """ SSDP-based runtime discovery.

    Combines a multicast ServerBase (answers M-SEARCH probes for registered
    services) with a unicast ClientBase (sends periodic M-SEARCH probes with
    exponential backoff and reports the answers via a callback).
    """
    def __init__(self, iface='', cert=None, ignore_self=True):
        super(SSDPServiceDiscovery, self).__init__()
        self.ignore_self = ignore_self
        # NOTE(review): the iface argument is deliberately discarded here and
        # '' (all interfaces) is always used for listening -- confirm whether
        # single-interface binding was meant to be supported.
        self.iface = '' #iface
        self.ssdp = None          # multicast (server) port, set in start()
        self.port = None          # unicast (client) port, set in start()
        self._backoff = .2        # seconds until next probe; grows 1.5x up to 600
        self.iface_send_list = []
        self.cert = cert
        # Collect every local IPv4 address so probes/replies can go out of
        # each interface.
        if self.iface in ["0.0.0.0", ""]:
            for a in netifaces.interfaces():
                addrs = netifaces.ifaddresses(a)
                # Ipv4 for now
                if netifaces.AF_INET in addrs:
                    for a in addrs[netifaces.AF_INET]:
                        self.iface_send_list.append(a['addr'])
        else:
            self.iface_send_list.append(iface)
    def start(self):
        """ Open the multicast server and unicast client sockets.
            Returns (server_deferred, client_deferred), each fired once the
            corresponding protocol has started. """
        dserver = defer.Deferred()
        dclient = defer.Deferred()
        try:
            self.ssdp = reactor.listenMulticast(SSDP_PORT, ServerBase(self.iface_send_list, cert=self.cert, d=dserver),
                                                interface=self.iface, listenMultiple=True)
            self.ssdp.setLoopbackMode(1)
            self.ssdp.joinGroup(SSDP_ADDR, interface=self.iface)
        except:
            _log.exception("Multicast listen join failed!!")
            # Dont start server some one is alerady running locally
            # TODO: Do we need this ?
        # The client listens on an ephemeral port (0) for unicast replies.
        self.port = reactor.listenMulticast(0, ClientBase(d=dclient), interface=self.iface)
        _log.debug("SSDP Host: %s" % repr(self.port.getHost()))
        # Set ignore port and ips: never answer our own probes, i.e. our
        # local (ip, client-port) pairs.
        if self.ssdp and self.ignore_self:
            self.ssdp.protocol.set_ignore_list([(x, self.port.getHost().port) for x in self.iface_send_list])
        return dserver, dclient
    def start_search(self, callback=None, stop=False):
        """ Begin a periodic M-SEARCH; discovered addresses go to callback.
            With stop=True the search ends after the first reply. """
        # Restart backoff
        self._backoff = .2
        def local_start_msearch(stop):
            self.port.protocol.set_callback(callback)
            self.port.protocol.set_autostop(stop)
            self._send_msearch(once=False)
        # Deferred onto the reactor so start() can finish setting up first.
        reactor.callLater(0, local_start_msearch, stop=stop)
    def stop_search(self):
        # Detach the callback and flag the periodic search loop to end.
        self.port.protocol.set_callback(None)
        self.port.protocol.stop()
    def set_client_filter(self, service):
        # Only report replies advertising this service name.
        self.port.protocol.set_service(service)
    def register_service(self, service, ip, port):
        self.ssdp.protocol.add_service(service, ip, port)
    def unregister_service(self, service):
        self.ssdp.protocol.remove_service(service)
    def _send_msearch(self, once=True):
        """ Send one M-SEARCH probe per local interface; when once is False,
            reschedule with exponential backoff until the client is stopped. """
        if self.port:
            for src_ip in self.iface_send_list:
                self.port.protocol.transport.setOutgoingInterface(src_ip)
                _log.debug("Sending M-SEARCH... on %s", src_ip)
                self.port.write(MS, (SSDP_ADDR, SSDP_PORT))
            if not once and not self.port.protocol.is_stopped():
                reactor.callLater(self._backoff, self._send_msearch, once=False)
                _log.debug("Next M-SEARCH in %s seconds" % self._backoff)
                self._backoff = min(600, self._backoff * 1.5)
        else:
            # No client port: log the call site for debugging.
            _log.debug(traceback.format_stack())
    def search(self):
        # One-shot probe, no rescheduling.
        self._send_msearch(once=True)
    def stop(self):
        """ Leave the multicast group and close both sockets.
            Returns a DeferredList that fires when teardown completes. """
        dlist = []
        if self.ssdp:
            dlist.append(self.ssdp.leaveGroup(SSDP_ADDR, interface=self.iface))
            dlist.append(self.ssdp.stopListening())
            self.ssdp = None
        if self.port:
            self.stop_search()
            dlist.append(self.port.stopListening())
            self.port = None
        return defer.DeferredList(dlist)
| |
# ==============================================================================
# Copyright 2019 - Philip Paquette
#
# NOTICE: Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# ==============================================================================
""" Policy model (v002_markovian_film)
- Contains the policy model (v002_markovian_film), to evaluate the best actions given a state
"""
import logging
from diplomacy_research.models.policy.token_based import TokenBasedPolicyModel, load_args as load_parent_args
from diplomacy_research.models.state_space import get_adjacency_matrix, VOCABULARY_SIZE, TOKENS_PER_ORDER, \
NB_SUPPLY_CENTERS, NB_POWERS
# Constants
LOGGER = logging.getLogger(__name__)
def load_args():
    """ Load possible arguments
        :return: A list of tuple (arg_type, arg_name, arg_value, arg_desc)
    """
    model_hyperparameters = [
        ('int', 'nb_graph_conv', 8, 'Number of Graph Conv Layer'),
        ('int', 'word_emb_size', 400, 'Word embedding size.'),
        ('int', 'power_emb_size', 64, 'Power embedding size.'),
        ('int', 'gcn_size', 80, 'Size of graph convolution outputs.'),
        ('int', 'lstm_size', 400, 'LSTM (Encoder and Decoder) size.'),
        ('int', 'attn_size', 80, 'LSTM decoder attention size.'),
    ]
    # Parent (token-based) arguments first, then this model's hyperparameters.
    return load_parent_args() + model_hyperparameters
class PolicyModel(TokenBasedPolicyModel):
""" Policy Model """
    def _encode_board(self, board_state, name, reuse=None):
        """ Encodes a board state or prev orders state
            :param board_state: The board state / prev orders state to encode - (batch, NB_NODES, initial_features)
            :param name: The name to use for the encoding
            :param reuse: Whether to reuse or not the weights from another encoding operation
            :return: The encoded board state / prev_orders state

            The encoder is a stack of `nb_graph_conv` FiLM-conditioned graph
            convolution residual blocks; the FiLM gammas/betas are read from
            self.outputs, where _build_policy_initial stored them under
            '_<name>_film_gammas' / '_<name>_film_betas'.
        """
        from diplomacy_research.utils.tensorflow import tf
        from diplomacy_research.models.layers.graph_convolution import film_gcn_res_block, preprocess_adjacency
        # Quick function to retrieve hparams and placeholders and function shorthands
        hps = lambda hparam_name: self.hparams[hparam_name]
        pholder = lambda placeholder_name: self.placeholders[placeholder_name]
        relu = tf.nn.relu
        # Getting film gammas and betas (precomputed from the power embedding)
        film_gammas = self.outputs['_%s_film_gammas' % name]
        film_betas = self.outputs['_%s_film_betas' % name]
        # Computing norm adjacency: the static map adjacency matrix,
        # normalized once and tiled across the batch dimension.
        norm_adjacency = preprocess_adjacency(get_adjacency_matrix())
        norm_adjacency = tf.tile(tf.expand_dims(norm_adjacency, axis=0), [tf.shape(board_state)[0], 1, 1])
        # Building scope
        scope = tf.VariableScope(name='policy/%s' % name, reuse=reuse)
        with tf.variable_scope(scope):
            # Adding noise to break symmetry
            board_state = board_state + tf.random_normal(tf.shape(board_state), stddev=0.01)
            # Project the initial features to gcn_size before the residual stack.
            graph_conv = tf.layers.Dense(units=hps('gcn_size'), activation=relu)(board_state)
            # First and intermediate layers
            for layer_idx in range(hps('nb_graph_conv') - 1):
                graph_conv = film_gcn_res_block(inputs=graph_conv, # (b, NB_NODES, gcn_size)
                                                gamma=film_gammas[layer_idx],
                                                beta=film_betas[layer_idx],
                                                gcn_out_dim=hps('gcn_size'),
                                                norm_adjacency=norm_adjacency,
                                                is_training=pholder('is_training'),
                                                residual=True)
            # Last layer: output width is attn_size rather than gcn_size,
            # and the residual connection is disabled.
            graph_conv = film_gcn_res_block(inputs=graph_conv, # (b, NB_NODES, attn_size)
                                            gamma=film_gammas[-1],
                                            beta=film_betas[-1],
                                            gcn_out_dim=hps('attn_size'),
                                            norm_adjacency=norm_adjacency,
                                            is_training=pholder('is_training'),
                                            residual=False)
            # Returning
            return graph_conv
def _get_board_state_conv(self, board_0yr_conv, is_training, prev_ord_conv=None):
""" Computes the board state conv to use as the attention target (memory)
:param board_0yr_conv: The board state encoding of the current (present) board state)
:param is_training: Indicate whether we are doing training or inference
:param prev_ord_conv: Optional. The encoding of the previous orders state.
:return: The board state conv to use as the attention target (memory)
"""
return board_0yr_conv
    def _build_policy_initial(self):
        """ Builds the policy model (initial step)

            Creates the shared graph pieces used by the decoder: feature
            preprocessing, the decoder token mask, the FiLM gammas/betas
            derived from the current power, the graph-convolution board
            encoding, and the word embedding. Results are published via
            self.add_output / self.add_meta_information.
        """
        from diplomacy_research.utils.tensorflow import tf
        from diplomacy_research.models.layers.initializers import uniform
        from diplomacy_research.utils.tensorflow import build_sparse_batched_tensor, pad_axis, to_float, to_bool
        if not self.placeholders:
            self.placeholders = self.get_placeholders()
        # Quick function to retrieve hparams and placeholders and function shorthands
        hps = lambda hparam_name: self.hparams[hparam_name]
        pholder = lambda placeholder_name: self.placeholders[placeholder_name]
        # Training loop
        with tf.variable_scope('policy', reuse=tf.AUTO_REUSE):
            with tf.device(self.cluster_config.worker_device if self.cluster_config else None):
                # Features
                board_state = to_float(self.features['board_state'])        # tf.float32 - (b, NB_NODES, NB_FEATURES)
                decoder_inputs = self.features['decoder_inputs']            # tf.int32   - (b, <= 1 + TOK/ORD * NB_SCS)
                decoder_lengths = self.features['decoder_lengths']          # tf.int32   - (b,)
                current_power = self.features['current_power']              # tf.int32   - (b,)
                dropout_rates = self.features['dropout_rate']               # tf.float32 - (b,)
                # Batch size
                batch_size = tf.shape(board_state)[0]
                # Building decoder mask
                decoder_mask_indices = self.features['decoder_mask_indices']    # tf.int64 - (b, 3 * len)
                decoder_mask_shape = self.proto_fields['decoder_mask'].shape
                # Overriding dropout_rates if pholder('dropout_rate') > 0
                # (a positive placeholder value takes precedence over the
                # per-example feature values)
                dropout_rates = tf.cond(tf.greater(pholder('dropout_rate'), 0.),
                                        true_fn=lambda: tf.zeros_like(dropout_rates) + pholder('dropout_rate'),
                                        false_fn=lambda: dropout_rates)
                # Padding inputs (so downstream ops always see at least the
                # minimum expected length)
                decoder_inputs = pad_axis(decoder_inputs, axis=-1, min_size=2)
                decoder_mask_indices = pad_axis(decoder_mask_indices, axis=-1, min_size=len(decoder_mask_shape))
                # Reshaping to (b, len, 3)
                # decoder_mask is -- tf.bool (batch, TOK/ORD * NB_SC, VOCAB_SIZE, VOCAB_SIZE)
                decoder_mask_indices = tf.reshape(decoder_mask_indices, [batch_size, -1, len(decoder_mask_shape)])
                decoder_mask = build_sparse_batched_tensor(decoder_mask_indices,
                                                           value=True,
                                                           dtype=tf.bool,
                                                           dense_shape=decoder_mask_shape)
                # Making sure all RNN lengths are at least 1
                # No need to trim, because the fields are variable length
                raw_decoder_lengths = decoder_lengths
                decoder_lengths = tf.math.maximum(1, decoder_lengths)
                # Placeholders
                decoder_type = tf.reduce_max(pholder('decoder_type'))
                is_training = pholder('is_training')
                # Computing FiLM Gammas and Betas: one (gamma, beta) pair per
                # graph-conv layer, conditioned on the current power's embedding.
                with tf.variable_scope('film_scope'):
                    power_embedding = uniform(name='power_embedding',
                                              shape=[NB_POWERS, hps('power_emb_size')],
                                              scale=1.)
                    current_power_mask = tf.one_hot(current_power, NB_POWERS, dtype=tf.float32)
                    current_power_embedding = tf.reduce_sum(power_embedding[None]
                                                            * current_power_mask[:, :, None], axis=1)  # (b, power_emb)
                    film_output_dims = [hps('gcn_size')] * (hps('nb_graph_conv') - 1) + [hps('attn_size')]
                    # Single dense layer produces all gammas and betas at once.
                    film_weights = tf.layers.Dense(units=2 * sum(film_output_dims),         # (b, 1, 750)
                                                   use_bias=True,
                                                   activation=None)(current_power_embedding)[:, None, :]
                    film_gammas, film_betas = tf.split(film_weights, 2, axis=2)             # (b, 1, 750)
                    film_gammas = tf.split(film_gammas, film_output_dims, axis=2)
                    film_betas = tf.split(film_betas, film_output_dims, axis=2)
                    # Storing as temporary output (consumed by _encode_board)
                    self.add_output('_board_state_conv_film_gammas', film_gammas)
                    self.add_output('_board_state_conv_film_betas', film_betas)
                # Creating graph convolution
                with tf.variable_scope('graph_conv_scope'):
                    assert hps('nb_graph_conv') >= 2
                    # Encoding the board state
                    board_state_0yr_conv = self.encode_board(board_state, name='board_state_conv')
                    board_state_conv = self.get_board_state_conv(board_state_0yr_conv, is_training)
                # Creating word embedding vector (to embed word_ix)
                # Embeddings needs to be cached locally on the worker, otherwise TF can't compute their gradients
                with tf.variable_scope('word_embedding_scope'):
                    # embedding: (voc_size, 256)
                    caching_device = self.cluster_config.caching_device if self.cluster_config else None
                    word_embedding = uniform(name='word_embedding',
                                             shape=[VOCABULARY_SIZE, hps('word_emb_size')],
                                             scale=1.,
                                             caching_device=caching_device)
                # Building output tags
                outputs = {'batch_size': batch_size,
                           'decoder_inputs': decoder_inputs,
                           'decoder_mask': decoder_mask,
                           'decoder_type': decoder_type,
                           'raw_decoder_lengths': raw_decoder_lengths,
                           'decoder_lengths': decoder_lengths,
                           'board_state_conv': board_state_conv,
                           'board_state_0yr_conv': board_state_0yr_conv,
                           'word_embedding': word_embedding,
                           'in_retreat_phase': tf.math.logical_and(         # 1) board not empty, 2) disl. units present
                               tf.reduce_sum(board_state[:], axis=[1, 2]) > 0,
                               tf.math.logical_not(to_bool(tf.reduce_min(board_state[:, :, 23], -1))))}
                # Adding to graph
                self.add_meta_information(outputs)
    def _build_policy_final(self):
        """ Builds the policy model (final step)

            Consumes the tensors stored by the initial build step
            (decoder inputs/masks/lengths, board-state conv features, word
            embedding), runs 1) a teacher-forced LSTM decoder with Bahdanau
            attention and 2) a diverse beam-search decoder that reuses the
            same weights, then computes the policy loss and registers all
            results as meta information on the graph.
        """
        from diplomacy_research.utils.tensorflow import tf
        from diplomacy_research.models.layers.attention import AttentionWrapper, ModifiedBahdanauAttention
        from diplomacy_research.models.layers.beam_decoder import DiverseBeamSearchDecoder
        from diplomacy_research.models.layers.decoder import MaskedBasicDecoder
        from diplomacy_research.models.layers.dropout import SeededDropoutWrapper
        from diplomacy_research.models.layers.dynamic_decode import dynamic_decode
        from diplomacy_research.models.policy.token_based.helper import CustomHelper, CustomBeamHelper
        from diplomacy_research.utils.tensorflow import cross_entropy, sequence_loss, to_int32, to_float, get_tile_beam
        # Quick function to retrieve hparams and placeholders and function shorthands
        hps = lambda hparam_name: self.hparams[hparam_name]
        pholder = lambda placeholder_name: self.placeholders[placeholder_name]
        # Training loop
        with tf.variable_scope('policy', reuse=tf.AUTO_REUSE):
            with tf.device(self.cluster_config.worker_device if self.cluster_config else None):
                # Features
                player_seeds = self.features['player_seed'] # tf.int32 - (b,)
                temperature = self.features['temperature'] # tf.flt32 - (b,)
                dropout_rates = self.features['dropout_rate'] # tf.flt32 - (b,)
                # Placeholders
                stop_gradient_all = pholder('stop_gradient_all')
                # Outputs (from initial steps)
                batch_size = self.outputs['batch_size']
                decoder_inputs = self.outputs['decoder_inputs']
                decoder_mask = self.outputs['decoder_mask']
                decoder_type = self.outputs['decoder_type']
                raw_decoder_lengths = self.outputs['raw_decoder_lengths']
                decoder_lengths = self.outputs['decoder_lengths']
                board_state_conv = self.outputs['board_state_conv']
                word_embedding = self.outputs['word_embedding']
                # --- Decoding ---
                with tf.variable_scope('decoder_scope', reuse=tf.AUTO_REUSE):
                    lstm_cell = tf.contrib.rnn.LSTMBlockCell(hps('lstm_size'))
                    # decoder output to token
                    decoder_output_layer = tf.layers.Dense(units=VOCABULARY_SIZE,
                                                           activation=None,
                                                           kernel_initializer=tf.random_normal_initializer,
                                                           use_bias=True)
                    # ======== Regular Decoding ========
                    # Applying dropout to input + attention and to output layer
                    decoder_cell = SeededDropoutWrapper(cell=lstm_cell,
                                                        seeds=player_seeds,
                                                        input_keep_probs=1. - dropout_rates,
                                                        output_keep_probs=1. - dropout_rates,
                                                        variational_recurrent=hps('use_v_dropout'),
                                                        input_size=hps('word_emb_size') + hps('attn_size'),
                                                        dtype=tf.float32)
                    # apply attention over location
                    # curr_state [batch, NB_NODES, attn_size]
                    # NOTE: the explicit VariableScope is shared with the beam-search
                    # attention below so both decoders reuse the same weights.
                    attention_scope = tf.VariableScope(name='policy/decoder_scope/Attention', reuse=tf.AUTO_REUSE)
                    attention_mechanism = ModifiedBahdanauAttention(num_units=hps('attn_size'),
                                                                    memory=board_state_conv,
                                                                    normalize=True,
                                                                    name_or_scope=attention_scope)
                    decoder_cell = AttentionWrapper(cell=decoder_cell,
                                                    attention_mechanism=attention_mechanism,
                                                    output_attention=False,
                                                    name_or_scope=attention_scope)
                    # Setting initial state
                    decoder_init_state = decoder_cell.zero_state(batch_size, tf.float32)
                    decoder_init_state = decoder_init_state.clone(attention=tf.reduce_mean(board_state_conv, axis=1))
                    # ---- Helper ----
                    helper = CustomHelper(decoder_type=decoder_type,
                                          inputs=decoder_inputs[:, :-1],
                                          embedding=word_embedding,
                                          sequence_length=decoder_lengths,
                                          mask=decoder_mask,
                                          time_major=False,
                                          softmax_temperature=temperature)
                    # ---- Decoder ----
                    sequence_mask = tf.sequence_mask(raw_decoder_lengths,
                                                     maxlen=tf.reduce_max(decoder_lengths),
                                                     dtype=tf.float32)
                    maximum_iterations = TOKENS_PER_ORDER * NB_SUPPLY_CENTERS
                    model_decoder = MaskedBasicDecoder(cell=decoder_cell,
                                                       helper=helper,
                                                       initial_state=decoder_init_state,
                                                       output_layer=decoder_output_layer,
                                                       extract_state=True)
                    training_results, _, _ = dynamic_decode(decoder=model_decoder,
                                                            output_time_major=False,
                                                            maximum_iterations=maximum_iterations,
                                                            swap_memory=hps('swap_memory'))
                    global_vars_after_decoder = set(tf.global_variables())
                    # ======== Beam Search Decoding ========
                    tile_beam = get_tile_beam(hps('beam_width'))
                    # Applying dropout to input + attention and to output layer
                    decoder_cell = SeededDropoutWrapper(cell=lstm_cell,
                                                        seeds=tile_beam(player_seeds),
                                                        input_keep_probs=tile_beam(1. - dropout_rates),
                                                        output_keep_probs=tile_beam(1. - dropout_rates),
                                                        variational_recurrent=hps('use_v_dropout'),
                                                        input_size=hps('word_emb_size') + hps('attn_size'),
                                                        dtype=tf.float32)
                    # apply attention over location
                    # curr_state [batch, NB_NODES, attn_size]
                    attention_mechanism = ModifiedBahdanauAttention(num_units=hps('attn_size'),
                                                                    memory=tile_beam(board_state_conv),
                                                                    normalize=True,
                                                                    name_or_scope=attention_scope)
                    decoder_cell = AttentionWrapper(cell=decoder_cell,
                                                    attention_mechanism=attention_mechanism,
                                                    output_attention=False,
                                                    name_or_scope=attention_scope)
                    # Setting initial state
                    decoder_init_state = decoder_cell.zero_state(batch_size * hps('beam_width'), tf.float32)
                    decoder_init_state = decoder_init_state.clone(attention=tf.reduce_mean(tile_beam(board_state_conv),
                                                                                           axis=1))
                    # ---- Beam Helper and Decoder ----
                    beam_helper = CustomBeamHelper(cell=decoder_cell,
                                                   embedding=word_embedding,
                                                   mask=decoder_mask,
                                                   sequence_length=decoder_lengths,
                                                   output_layer=decoder_output_layer,
                                                   initial_state=decoder_init_state,
                                                   beam_width=hps('beam_width'))
                    beam_decoder = DiverseBeamSearchDecoder(beam_helper=beam_helper,
                                                            sequence_length=decoder_lengths,
                                                            nb_groups=hps('beam_groups'))
                    beam_results, beam_state, _ = dynamic_decode(decoder=beam_decoder,
                                                                 output_time_major=False,
                                                                 maximum_iterations=maximum_iterations,
                                                                 swap_memory=hps('swap_memory'))
                    # Making sure we haven't created new global variables
                    assert not set(tf.global_variables()) - global_vars_after_decoder, 'New global vars were created'
                    # Processing results
                    logits = training_results.rnn_output # (b, dec_len, VOCAB_SIZE)
                    logits_length = tf.shape(logits)[1] # dec_len
                    # Targets are the decoder inputs shifted left by one position.
                    decoder_target = decoder_inputs[:, 1:1 + logits_length]
                    # Selected tokens are the token that was actually fed at the next position
                    # (sample_id == -1 marks positions where teacher forcing was used).
                    sample_mask = to_float(tf.math.equal(training_results.sample_id, -1))
                    selected_tokens = to_int32(
                        sequence_mask * (sample_mask * to_float(decoder_target)
                                         + (1. - sample_mask) * to_float(training_results.sample_id)))
                    # Argmax tokens are the most likely token outputted at each position
                    argmax_tokens = to_int32(to_float(tf.argmax(logits, axis=-1)) * sequence_mask)
                    # Log-prob of each selected token, zeroed out beyond the sequence length.
                    log_probs = -1. * cross_entropy(logits=logits, labels=selected_tokens) * sequence_mask
                # Computing policy loss
                with tf.variable_scope('policy_loss'):
                    policy_loss = sequence_loss(logits=logits,
                                                targets=decoder_target,
                                                weights=sequence_mask,
                                                average_across_batch=True,
                                                average_across_timesteps=True)
                    policy_loss = tf.cond(stop_gradient_all,
                                          lambda: tf.stop_gradient(policy_loss), # pylint: disable=cell-var-from-loop
                                          lambda: policy_loss) # pylint: disable=cell-var-from-loop
        # Building output tags
        outputs = {'tag/policy/token_based/v002_markovian_film': True,
                   'targets': decoder_inputs[:, 1:],
                   'selected_tokens': selected_tokens,
                   'argmax_tokens': argmax_tokens,
                   'logits': logits,
                   'log_probs': log_probs,
                   'beam_tokens': tf.transpose(beam_results.predicted_ids, perm=[0, 2, 1]), # [batch, beam, steps]
                   'beam_log_probs': beam_state.log_probs,
                   'rnn_states': training_results.rnn_state,
                   'policy_loss': policy_loss,
                   'draw_prob': self.outputs.get('draw_prob', tf.zeros_like(self.features['draw_target'])),
                   'learning_rate': self.learning_rate}
        # Adding features, placeholders and outputs to graph
        self.add_meta_information(outputs)
| |
# -*- coding: utf-8 -*-
import os
import uuid
import logging
import tornado.httpclient
import tornado.escape
import tornado.ioloop
import tornado.web
import tornado.auth
import tornado.httpserver
import tornado.websocket
import tornado.template
from sayuri.module import model as mdl
from sayuri.module import recognizers
from sayuri.module import actions
class Application(tornado.web.Application):
    """Tornado application: routes URLs to handlers and owns the
    per-(user, conference) recognition observers."""

    # Maps "user:conference" keys onto running SayuriRecognitionObserver
    # instances. Class-level, shared by all handlers.
    observers = {}

    def __init__(self):
        from sayuri.env import Environment
        secret_key = Environment().secret_key()
        handlers = [
            (r"/", IndexHandler),
            (r"/home", HomeHandler),
            (r"/auth/login", LoginHandler),
            (r"/auth/logout", LogoutHandler),
            (r"/conference", ConferenceHandler),
            (r"/conference/image", ImageHandler),
            (r"/sayurisocket", ClientSocketHandler),
        ]
        settings = dict(
            login_url="/auth/login",
            template_path=os.path.join(os.path.dirname(__file__), "templates"),
            static_path=os.path.join(os.path.dirname(__file__), "static"),
            xsrf_cookies=True,
            cookie_secret=secret_key,
            debug=True,
        )
        # load and store prediction model
        actions.FaceAction.store_machine()
        tornado.web.Application.__init__(self, handlers, **settings)

    @classmethod
    def add_conference(cls, user, conference_key):
        """Start a recognition observer for (user, conference_key).

        Returns True when a new observer was started, False when one
        already exists for this user/conference pair.
        """
        # BUG FIX: the previous guard tested ``conference_key`` against a
        # dict keyed by "user:conference" strings, so it never detected an
        # existing observer and could silently overwrite (and never stop)
        # a running one. Build the real key first and test that instead.
        key = MessageManager.make_client_key(user, conference_key)
        if key in cls.observers:
            return False
        instance = tornado.ioloop.IOLoop.instance()
        messenger = MessageManager(user, conference_key)
        cls.observers[key] = recognizers.SayuriRecognitionObserver(
            instance.add_timeout, messenger.send, user, conference_key)
        cls.observers[key].set_recognizer(recognizers.TimeRecognizer(),
                                          recognizers.FaceDetectIntervalRecognizer(),
                                          recognizers.FaceRecognizer())
        cls.observers[key].set_action(actions.TimeManagementAction(),
                                      actions.FaceDetectAction(),
                                      actions.FaceAction())
        cls.observers[key].run()
        return True

    @classmethod
    def get_conference_observer(cls, key):
        """Return the observer registered under *key*, or None."""
        return cls.observers.get(key)

    @classmethod
    def remove_conference(cls, key):
        """Stop and discard the observer for *key*; True when one existed."""
        observer = cls.observers.pop(key, None)
        if observer is None:
            return False
        observer.stop()
        return True
class MessageManager(object):
    """Routes recognition messages to the websocket of one user/conference pair."""

    def __init__(self, user, conference_key):
        # The broadcast key is fixed for the lifetime of the manager.
        self.client = self.make_client_key(user, conference_key)

    @classmethod
    def make_client_key(cls, user, conference_key):
        """Return "<user>:<conference_key>", or "" when either part is missing."""
        if not (user and conference_key):
            return ""
        return user + ":" + conference_key

    def send(self, message):
        """Broadcast *message* to this client's socket; False for empty payloads."""
        if message:
            ClientSocketHandler.broadcast(self.client, message)
            return True
        return False
class BaseHandler(tornado.web.RequestHandler):
    """Shared cookie/session helpers for all request handlers."""
    def get_current_user(self):
        # Raw secure-cookie value (bytes) or None; non-None enables
        # @tornado.web.authenticated on subclasses.
        return self.get_secure_cookie("user")
    def get_current_user_str(self):
        # Decoded (str) form of the user cookie.
        return tornado.escape.to_unicode(self.get_current_user())
    def get_current_conference_key(self):
        # Decoded conference key stored under the mdl.Conference.KEY cookie.
        return tornado.escape.to_unicode(self.get_secure_cookie(mdl.Conference.KEY))
    def get_current_key(self):
        # "user:conference" key used for observers and websocket waiters.
        return MessageManager.make_client_key(self.get_current_user_str(), self.get_current_conference_key())
class HomeHandler(BaseHandler):
    """Serves the public landing page (no authentication required)."""
    def get(self):
        self.render("home.html")
class LoginHandler(BaseHandler):
    """Login endpoint; GET redirects home, POST performs the (stub) login."""
    def get(self):
        self.redirect("/home")
    def post(self):
        # TODO: implement real authorization — every login currently
        # becomes the fixed "Guest" user.
        self.set_secure_cookie("user", "Guest")
        self.redirect("/")
class LogoutHandler(BaseHandler):
    """Stops the user's conference observer and clears session cookies."""
    def get(self):
        Application.remove_conference(self.get_current_key())
        self.clear_cookie("user")
        self.clear_cookie(mdl.Conference.KEY)
        self.redirect("/home")
class IndexHandler(BaseHandler):
    """Serves the main application page; requires a logged-in user."""
    @tornado.web.authenticated
    def get(self):
        self.render("index.html")
class ConferenceHandler(BaseHandler):
    """Creates, lists and ends the current user's conference session."""
    @tornado.web.authenticated
    def get(self):
        # return conference list of current user.
        user = self.get_current_user_str()
        conference_key = self.get_current_conference_key()
        key = self.get_current_key()
        cs = []
        conference = ""
        if user:
            # show only conference of now
            # cs = Conference.get_users_conference(user, 0)
            pass
        if conference_key:
            conference = mdl.Conference.get(conference_key)
            cs.append(conference)
            if mdl.Conference.is_end(conference):
                # A finished conference is still listed but no longer "current".
                conference = ""
            elif not Application.get_conference_observer(key):
                # Ongoing conference without a live observer (e.g. after a
                # server restart): resume observation.
                Application.add_conference(user, conference_key)
        self.write({"conference": conference, "conferences": cs})
    @tornado.web.authenticated
    def post(self):
        # register conference and start observing client
        title = self.get_argument("title")
        minutes = self.get_argument("minutes")
        user = self.get_current_user_str()
        if self.get_current_conference_key():
            # Starting a new conference implicitly ends the current one.
            self.delete()
        if title and minutes and minutes.isdigit():
            key = str(uuid.uuid1())
            conference = mdl.Conference.to_dict(key, title, minutes)
            mdl.Conference.store_to_user(user, conference)
            self.set_secure_cookie(mdl.Conference.KEY, key)
            Application.add_conference(user, key)
            self.write({"conference": key})
        else:
            self.write({"conference": "", "message": "conference name or minutes is not set."})
    @tornado.web.authenticated
    def delete(self):
        # Stop observing and forget the conference cookie.
        Application.remove_conference(self.get_current_key())
        self.clear_cookie(mdl.Conference.KEY)
        self.write({})
class ImageHandler(BaseHandler):
    """Receives webcam frames from the client and forwards them to the
    face recognizer of the current conference's observer."""
    @tornado.web.authenticated
    def post(self):
        image_data = self.get_arguments("images[]")
        observer = Application.get_conference_observer(self.get_current_key())
        if image_data and observer:
            face = observer.get_recognizer(recognizers.FaceRecognizer)
            images = []
            for data_url in image_data:
                # Frames arrive as data URLs ("data:image/png;base64,<payload>");
                # keep only the base64 payload after the last comma.
                # (Replaces the non-idiomatic image[len(image) - 1] indexing.)
                images.append(data_url.split(",")[-1])
            face.invoke(images)
        self.write({})
class ClientSocketHandler(tornado.websocket.WebSocketHandler):
    """Push-only websocket used to send recognition results to the browser.

    ``waiters`` maps a "user:conference" key onto the single open socket for
    that pair; a new connection under the same key replaces the old entry.
    """
    waiters = {}

    @classmethod
    def broadcast(cls, client, message):
        """Send *message* to the socket registered under *client*, if any."""
        if client in cls.waiters:
            try:
                cls.waiters[client].write_message(message)
            # BUG FIX: was a bare ``except:``, which would also swallow
            # SystemExit/KeyboardInterrupt; narrowed to Exception.
            except Exception:
                logging.error("Error sending message", exc_info=True)

    def check_origin(self, origin):
        # Accept cross-origin websocket connections.
        return True

    def open(self):
        if self.get_socket_group():
            ClientSocketHandler.waiters[self.get_socket_group()] = self

    def on_close(self):
        key = self.get_socket_group()
        if key and key in ClientSocketHandler.waiters:
            del ClientSocketHandler.waiters[key]

    def on_message(self, message):
        # Incoming client messages are ignored; this channel is push-only.
        pass

    def get_socket_group(self):
        """Return the "user:conference" key derived from this connection's cookies."""
        user = tornado.escape.to_unicode(self.get_secure_cookie("user"))
        conference = tornado.escape.to_unicode(self.get_secure_cookie(mdl.Conference.KEY))
        return MessageManager.make_client_key(user, conference)
def run(port=8443, ssl_key="", ssl_secret=""):
    """Start the server; uses TLS only when both key and cert paths are given.

    The PORT environment variable, when set, overrides *port*.
    Blocks forever in the IOLoop.
    """
    io = tornado.ioloop.IOLoop.instance()
    application = Application()
    if ssl_key and ssl_secret:
        http_server = tornado.httpserver.HTTPServer(application,
                                                    ssl_options={
                                                        "keyfile": ssl_key,
                                                        "certfile": ssl_secret
                                                    })
    else:
        http_server = tornado.httpserver.HTTPServer(application)
    _port = int(os.environ.get("PORT", port))
    http_server.listen(_port)
    io.start()
| |
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import random
from helpers import unittest
import luigi
from luigi import Event, Task, build
from luigi.mock import MockTarget, MockFileSystem
from luigi.task import flatten
from mock import patch
class DummyException(Exception):
    """Marker exception raised on purpose to drive the failure-event tests."""
    pass
class EmptyTask(Task):
    """A no-op luigi task that optionally fails, used to exercise event handlers."""
    fail = luigi.BoolParameter()
    def run(self):
        # Deliberately blow up when requested so FAILURE handlers fire.
        if self.fail:
            raise DummyException()
class TaskWithCallback(Task):
    """Task that fires a custom (string-named) event during run()."""
    def run(self):
        print("Triggering event")
        self.trigger_event("foo event")
class TestEventCallbacks(unittest.TestCase):
    """Tests for luigi's task event callbacks (START/SUCCESS/FAILURE/...)."""
    def test_start_handler(self):
        saved_tasks = []
        @EmptyTask.event_handler(Event.START)
        def save_task(task):
            print("Saving task...")
            saved_tasks.append(task)
        t = EmptyTask(True)
        build([t], local_scheduler=True)
        # START fires even though the task itself fails.
        self.assertEqual(saved_tasks, [t])
    def _run_empty_task(self, fail):
        """Run EmptyTask(fail) and collect SUCCESS/FAILURE callback payloads."""
        successes = []
        failures = []
        exceptions = []
        @EmptyTask.event_handler(Event.SUCCESS)
        def success(task):
            successes.append(task)
        @EmptyTask.event_handler(Event.FAILURE)
        def failure(task, exception):
            failures.append(task)
            exceptions.append(exception)
        t = EmptyTask(fail)
        build([t], local_scheduler=True)
        return t, successes, failures, exceptions
    def test_success(self):
        t, successes, failures, exceptions = self._run_empty_task(False)
        self.assertEqual(successes, [t])
        self.assertEqual(failures, [])
        self.assertEqual(exceptions, [])
    def test_failure(self):
        t, successes, failures, exceptions = self._run_empty_task(True)
        self.assertEqual(successes, [])
        self.assertEqual(failures, [t])
        self.assertEqual(len(exceptions), 1)
        self.assertTrue(isinstance(exceptions[0], DummyException))
    def test_custom_handler(self):
        dummies = []
        @TaskWithCallback.event_handler("foo event")
        def story_dummy():
            dummies.append("foo")
        t = TaskWithCallback()
        build([t], local_scheduler=True)
        self.assertEqual(dummies[0], "foo")
    def _run_processing_time_handler(self, fail):
        result = []
        @EmptyTask.event_handler(Event.PROCESSING_TIME)
        def save_task(task, processing_time):
            result.append((task, processing_time))
        # time() is consulted at task end and start via pop(): it yields 1.0
        # then 43.0, so the measured processing time comes out as 42.0.
        times = [43.0, 1.0]
        t = EmptyTask(fail)
        with patch('luigi.worker.time') as mock:
            mock.time = times.pop
            build([t], local_scheduler=True)
        return t, result
    def test_processing_time_handler_success(self):
        t, result = self._run_processing_time_handler(False)
        self.assertEqual(len(result), 1)
        task, time = result[0]
        self.assertTrue(task is t)
        self.assertEqual(time, 42.0)
    def test_processing_time_handler_failure(self):
        t, result = self._run_processing_time_handler(True)
        # PROCESSING_TIME is not emitted for failed tasks.
        self.assertEqual(result, [])
# A
# / \
# B(1) B(2)
# | |
# C(1) C(2)
# | \ | \
# D(1) D(2) D(3)
def eval_contents(f):
    """Read a target's text contents and evaluate them back into a Python value."""
    handle = f.open('r')
    with handle as stream:
        contents = stream.read()
    return eval(contents)
class ConsistentMockOutput(object):
    '''
    Computes output location and contents from the task and its parameters. Rids us of writing ad-hoc boilerplate output() et al.
    '''
    param = luigi.IntParameter(default=1)
    def output(self):
        # One MockTarget per (class, param) pair, e.g. '/B/2'.
        return MockTarget('/%s/%u' % (self.__class__.__name__, self.param))
    def produce_output(self):
        # Write this task's id plus the (sorted) evaluated contents of all
        # inputs, producing the nested-list structure the tests compare.
        with self.output().open('w') as o:
            o.write(repr([self.task_id] + sorted([eval_contents(i) for i in flatten(self.input())])))
class HappyTestFriend(ConsistentMockOutput, luigi.Task):
    '''
    Does trivial "work", outputting the list of inputs. Results in a convenient lispy comparable.
    '''
    def run(self):
        self.produce_output()
class D(ConsistentMockOutput, luigi.ExternalTask):
    """External leaf of the test DAG; its output must be produced out-of-band."""
    pass
class C(HappyTestFriend):
    """Depends on two D leaves: D(param) and D(param + 1)."""
    def requires(self):
        return [D(self.param), D(self.param + 1)]
class B(HappyTestFriend):
    """Depends on the C with the same param."""
    def requires(self):
        return C(self.param)
class A(HappyTestFriend):
    """Root of the test DAG; fans out to B(1) and B(2)."""
    def requires(self):
        return [B(1), B(2)]
class TestDependencyEvents(unittest.TestCase):
    """Verifies DEPENDENCY_DISCOVERED/MISSING/PRESENT events over the A-B-C-D DAG."""
    def tearDown(self):
        # Wipe the shared MockFileSystem between tests.
        MockFileSystem().remove('')
    def _run_test(self, task, expected_events):
        """Build *task* and compare fired dependency events with *expected_events*."""
        actual_events = {}
        # yucky to create separate callbacks; would be nicer if the callback received an instance of a subclass of Event, so one callback could accumulate all types
        @luigi.Task.event_handler(Event.DEPENDENCY_DISCOVERED)
        def callback_dependency_discovered(*args):
            actual_events.setdefault(Event.DEPENDENCY_DISCOVERED, set()).add(tuple(map(lambda t: t.task_id, args)))
        @luigi.Task.event_handler(Event.DEPENDENCY_MISSING)
        def callback_dependency_missing(*args):
            actual_events.setdefault(Event.DEPENDENCY_MISSING, set()).add(tuple(map(lambda t: t.task_id, args)))
        @luigi.Task.event_handler(Event.DEPENDENCY_PRESENT)
        def callback_dependency_present(*args):
            actual_events.setdefault(Event.DEPENDENCY_PRESENT, set()).add(tuple(map(lambda t: t.task_id, args)))
        build([task], local_scheduler=True)
        self.assertEqual(actual_events, expected_events)
    def test_incomplete_dag(self):
        # Only D(1) and D(2) exist; D(3) is missing, so A cannot complete.
        for param in range(1, 3):
            D(param).produce_output()
        self._run_test(A(), {
            'event.core.dependency.discovered': set([
                ('A(param=1)', 'B(param=1)'),
                ('A(param=1)', 'B(param=2)'),
                ('B(param=1)', 'C(param=1)'),
                ('B(param=2)', 'C(param=2)'),
                ('C(param=1)', 'D(param=1)'),
                ('C(param=1)', 'D(param=2)'),
                ('C(param=2)', 'D(param=2)'),
                ('C(param=2)', 'D(param=3)'),
            ]),
            'event.core.dependency.missing': set([
                ('D(param=3)',),
            ]),
            'event.core.dependency.present': set([
                ('D(param=1)',),
                ('D(param=2)',),
            ]),
        })
        self.assertFalse(A().output().exists())
    def test_complete_dag(self):
        # All three D leaves exist, so the whole DAG runs to completion.
        for param in range(1, 4):
            D(param).produce_output()
        self._run_test(A(), {
            'event.core.dependency.discovered': set([
                ('A(param=1)', 'B(param=1)'),
                ('A(param=1)', 'B(param=2)'),
                ('B(param=1)', 'C(param=1)'),
                ('B(param=2)', 'C(param=2)'),
                ('C(param=1)', 'D(param=1)'),
                ('C(param=1)', 'D(param=2)'),
                ('C(param=2)', 'D(param=2)'),
                ('C(param=2)', 'D(param=3)'),
            ]),
            'event.core.dependency.present': set([
                ('D(param=1)',),
                ('D(param=2)',),
                ('D(param=3)',),
            ]),
        })
        self.assertEqual(eval_contents(A().output()), ['A(param=1)', ['B(param=1)', ['C(param=1)', ['D(param=1)'], ['D(param=2)']]], ['B(param=2)', ['C(param=2)', ['D(param=2)'], ['D(param=3)']]]])
| |
from cmsis_svd.parser import SVDParser
import json
import re
# ------------------------------------
# ~ $ ls /usr/lib/python3.5/site-packages/cmsis_svd/data/STMicro/
# Contents.txt STM32F091x.svd STM32F105xx.svd STM32F303xE.svd STM32F401x.svd STM32F437x.svd STM32L053x.svd STM32L15xxxA.svd
# License.html STM32F0xx.svd STM32F107xx.svd STM32F303x.svd STM32F40x.svd STM32F439x.svd STM32L062x.svd STM32L1xx.svd
# STM32F030.svd STM32F100xx.svd STM32F20x.svd STM32F30x.svd STM32F411xx.svd STM32F446x.svd STM32L063x.svd STM32L4x6.svd
# STM32F031x.svd STM32F101xx.svd STM32F21x.svd STM32F334x.svd STM32F41x.svd STM32F46_79x.svd STM32L100.svd STM32W108.svd
# STM32F042x.svd STM32F102xx.svd STM32F301x.svd STM32F37x.svd STM32F427x.svd STM32L051x.svd STM32L15xC.svd
# STM32F072x.svd STM32F103xx.svd STM32F302x.svd STM32F401xE.svd STM32F429x.svd STM32L052x.svd STM32L15xxE.svd
# Which packaged SVD description to generate definitions from.
svd_name = 'STM32F303xE.svd'
# Also emit _ofs (bit offset) and _len (bit width) symbols for each field.
want_ofs = True
want_len = True
# Do not print peripheral field definitions (same as first instance)
# (duplicate 'SPI2'/'SPI3' entries removed)
no_print_fields = [
    'GPIOB',
    'GPIOC',
    'GPIOD',
    'GPIOE',
    'GPIOF',
    'GPIOG',
    'GPIOH',
    'USART2',
    'USART3',
    'USART4',
    'USART5',
    'SPI2',
    'SPI3',
    'TIM3',
    'DAC2',
    'ADC2',
    'ADC3',
    'ADC4',
    'ADC34',
    'I2C2',
    'I2C3',
]
# Rename peripheral when building field definitions
# Used for multiple instances (build fields only for the first)
periph_rename_for_field = {
    'GPIOA': 'GPIO',
    'USART1': 'USART',
    'DAC1': 'DAC',
    'SPI1': 'SPI',
    'ADC1': 'ADC',
    'ADC12': 'ADCC',
    'I2C1': 'I2C'
}
# Same registers as... (points to first instance)
# (duplicate 'ADC2'/'ADC3'/'ADC4' keys removed — dict literals silently keep
# only the last occurrence, so the duplicates were dead weight)
same_regs_as = {
    'GPIOB': 'GPIOA',
    'GPIOC': 'GPIOA',
    'GPIOD': 'GPIOA',
    'GPIOE': 'GPIOA',
    'GPIOF': 'GPIOA',
    'GPIOG': 'GPIOA',
    'GPIOH': 'GPIOA',
    'USART2': 'USART1',
    'USART3': 'USART1',
    'USART4': 'USART1',
    'USART5': 'USART1',
    'DAC2': 'DAC1',
    'SPI2': 'SPI1',
    'SPI3': 'SPI1',
    'ADC2': 'ADC1',
    'ADC3': 'ADC1',
    'ADC4': 'ADC1',
    'I2C2': 'I2C1',
    'I2C3': 'I2C1',
    'ADC34': 'ADC12',
    'TIM3': 'TIM2',
    'TIM4': 'TIM2',
}
# Rename peripheral when generating (bad name in SVD)
periph_rename = {
    'ADC1_2': 'ADC12',
    'ADC3_4': 'ADC34',
    'Flash': 'FLASH'
}
# ------------------------------------
# Output templates: symbol EQU value, left-padded symbol column of width 30.
base_line = "{0:<30} EQU {1:#x}"
reg_line = "{0:<30} EQU ({1}_BASE + {2:#x})"
field_line = "{0:<30} EQU {1:#010x}"
field_ofs_line = "{0:<30} EQU {1:#d}"
field_len_line = field_ofs_line
def comment_str(x):
    """Format *x* as a one-line ASM comment ('; ...'); None becomes ''."""
    if x is None:
        return ''
    # Collapse newlines and runs of whitespace into single spaces.
    flattened = x.replace('\n', ' ')
    return '; %s' % re.sub(r"[\s\n]+", ' ', flattened)
def comment(x):
    """Print *x* formatted as an ASM comment line."""
    print(comment_str(x))
def banner(x):
    """Print a '==== x ====...' banner comment padded to a fixed width."""
    comment('==== {:=<55}'.format("%s " % x))
def caption(x):
    """Print a blank line followed by a dashed section caption."""
    print()
    comment('---- {:-<55}'.format("%s " % x))
def comment(x):
    # NOTE(review): exact duplicate of the comment() defined above — this
    # rebinds the name to an identical implementation and can be deleted.
    print(comment_str(x))
# ------------------------------------
parser = SVDParser.for_packaged_svd('STMicro', svd_name)
device = parser.get_device()
print()
banner('%s PERIPHERALS' % device.name)
comment('')
comment('CTU Prague, FEL, Department of Measurement')
comment('')
comment('-' * 60)
comment('')
comment('Generated from "%s"' % svd_name)
comment('')
comment('SVD parsing library (c) Paul Osborne, 2015-2016')
comment(' https://github.com/posborne/cmsis-svd')
comment('ASM building script (c) Ondrej Hruska, 2016')
comment('')
comment('=' * 60)
print()
# periph registers
def print_registers(peripheral, pname=None):
    """Print one 'NAME EQU (BASE + offset)' line per register of *peripheral*.

    :param pname: optional name override, used when a peripheral reuses the
        register map of another instance (see ``same_regs_as``).
    """
    if pname is None:
        pname = periph_rename.get(peripheral.name, peripheral.name)
    for register in peripheral.registers:
        # end=' ' keeps the description comment on the same output line.
        print(reg_line.format("%s_%s" % (pname, register.name), pname, register.address_offset), end=' ')
        comment(register.description)
# periph fields
def print_fields(peripheral, pname=None):
    """Print bit-field mask (and optional _ofs/_len) symbols for every
    register of *peripheral*.

    :param pname: optional name override for shared register maps.
    """
    if pname is None:
        pname = periph_rename.get(peripheral.name, peripheral.name)
    for register in peripheral.registers:
        print()
        comment('%s_%s fields:' % (pname, register.name))
        print()
        for field in register.fields:
            # Bit mask of the field within its register.
            mask = ((1 << field.bit_width) - 1) << field.bit_offset
            f_pname = periph_rename_for_field.get(pname, pname)
            print(field_line.format("%s_%s_%s" % (f_pname, register.name, field.name), mask), end=' ')
            comment(field.description)
            if want_ofs:
                print(field_ofs_line.format("%s_%s_%s_ofs" % (f_pname, register.name, field.name), field.bit_offset))
            if want_len:
                print(field_len_line.format("%s_%s_%s_len" % (f_pname, register.name, field.name), field.bit_width))
            print()
# Print the list
periph_dict = {}
for peripheral in device.peripherals:
    periph_name = periph_rename.get(peripheral.name, peripheral.name)
    # add to a dict for referencing by name
    periph_dict[periph_name] = peripheral
    # -----
    caption(periph_name)
    comment('Desc: %s' % peripheral.description)
    print()
    comment('%s base address:' % periph_name)
    print(base_line.format("%s_BASE" % periph_name, peripheral.base_address))
    print()
    comment('%s registers:' % periph_name)
    print()
    # Registers
    if periph_name in same_regs_as:
        # Reuse the register map of the first instance, emitted under this name.
        print_registers(periph_dict[same_regs_as[periph_name]], pname=periph_name)
    else:
        print_registers(peripheral)
    if periph_name in no_print_fields:
        comment('Fields the same as in the first instance.')
        continue
    # Fields
    if periph_name in same_regs_as:
        print_fields(periph_dict[same_regs_as[periph_name]], pname=periph_name)
    else:
        print_fields(peripheral)
# Close the assembly file.
print(' END\n')
| |
import logging
import argparse
from typing import Any, Dict, List, Optional, Tuple, Set
import re
import os
from redkyn.canvas import CanvasAPI
from redkyn.canvas.exceptions import CourseNotFound, StudentNotFound
from assigner import make_help_parser
from assigner.backends.base import RepoError, RepoBase, BackendBase
from assigner.backends.decorators import requires_config_and_backend
from assigner.backends.exceptions import CIArtifactNotFound
from assigner.exceptions import AssignerException
from assigner.roster_util import get_filtered_roster
from assigner import progress
from assigner.config import Config
# NOTE(review): shadows the builtin help(); presumably consumed by assigner's
# subcommand registry as the command description — confirm before renaming.
help = "Retrieves scores from CI artifacts and optionally uploads to Canvas"
logger = logging.getLogger(__name__)
class CIJobNotFound(AssignerException):
    """ No CI jobs found for repository. """
class OptionalCanvas:
    """
    A class that wraps a single CanvasAPI instance and related API
    ID information that both caches the info and queries it only
    when needed
    """

    # Lazily-created singleton CanvasAPI client.
    _api = None
    # Caches filled on first lookup: section name -> Canvas course ID,
    # section name -> Canvas assignment ID.
    _section_ids = {}
    _assignment_ids = {}

    @staticmethod
    def lookup_canvas_ids(
        conf: Config, canvas: CanvasAPI, hw_name: str
    ) -> Tuple[Dict[str, int], Dict[str, int]]:
        """
        Retrieves the list of internal Canvas IDs for a given assignment
        and the relevant sections
        :param hw_name: the name of the homework assignment to search for on Canvas
        :return: "section_ids", a map of section names/identifiers onto
        Canvas internal course IDs and "assignment_ids", a map of section
        names/identifiers onto the Canvas internal assignment IDs for a given assignment
        """
        if "canvas-courses" not in conf or not conf["canvas-courses"]:
            # BUG FIX: the implicit string concatenation was missing a
            # separator, logging '...canvas import"command...'.
            logger.error(
                'canvas-courses configuration is missing! Please use the "assigner canvas import" '
                "command to associate course IDs with section names"
            )
            print("Canvas course listing failed: missing section Canvas course IDs.")
            raise CourseNotFound
        courses = conf["canvas-courses"]
        section_ids = {course["section"]: course["id"] for course in courses}
        # Normalize e.g. "hw03-extra" to "hw03" for the Canvas search.
        # NOTE(review): assumes hw_name always matches [A-Za-z]+\d+ —
        # re.search returning None would raise AttributeError here.
        min_name = re.search(r"[A-Za-z]+\d+", hw_name).group(0)
        assignment_ids = {}
        for section, course_id in section_ids.items():
            try:
                canvas_assignments = canvas.get_course_assignments(course_id, min_name)
            except CourseNotFound:
                logger.error("Failed to pull assignment list from Canvas")
                raise
            if len(canvas_assignments) != 1:
                logger.warning(
                    "Could not uniquely identify Canvas assignment from name %s and section %s, using first assignment listed",
                    min_name,
                    section,
                )
            assignment_ids[section] = canvas_assignments[0]["id"]
        return (section_ids, assignment_ids)

    @classmethod
    def get_api(cls, conf: Config) -> CanvasAPI:
        """Return the cached CanvasAPI client, creating it on first use."""
        if not cls._api:
            if "canvas-token" not in conf:
                logger.error(
                    "canvas-token configuration is missing! Please set the Canvas API access "
                    "token before attempting to use Canvas API functionality"
                )
                print("Canvas course listing failed: missing Canvas API access token.")
                raise KeyError
            cls._api = CanvasAPI(conf["canvas-token"], conf["canvas-host"])
        return cls._api

    @classmethod
    def get_section_ids(cls, conf: Config, hw_name: str) -> Dict[str, Any]:
        """Return (and cache) the section -> Canvas course ID map."""
        if not cls._section_ids:
            cls._section_ids, cls._assignment_ids = cls.lookup_canvas_ids(
                conf, cls.get_api(conf), hw_name
            )
        return cls._section_ids

    @classmethod
    def get_assigment_ids(cls, conf: Config, hw_name: str) -> Dict[str, Any]:
        """Return (and cache) the section -> Canvas assignment ID map."""
        if not cls._assignment_ids:
            cls._section_ids, cls._assignment_ids = cls.lookup_canvas_ids(
                conf, cls.get_api(conf), hw_name
            )
        return cls._assignment_ids
def get_most_recent_score(repo: RepoBase, result_path: str) -> Optional[float]:
    """
    Queries the most recent CI job for an artifact containing the score
    :param repo: the repository whose CI jobs should be checked
    :param result_path: the absolute path to the artifact file within the repo
    :return: the score in the artifact file, or None when the artifact is missing
    :raises CIJobNotFound: if the repository has no CI jobs at all
    """
    # BUG FIX: the annotation said ``-> float`` but the CIArtifactNotFound
    # branch fell off the end of the function, implicitly returning None;
    # the return type is now Optional[float] and the None return explicit.
    try:
        ci_jobs = repo.list_ci_jobs()
        if len(ci_jobs) == 0:
            raise CIJobNotFound
        # NOTE(review): assumes list_ci_jobs() returns newest-first — confirm.
        most_recent_job_id = ci_jobs[0]["id"]
        score_file = repo.get_ci_artifact(most_recent_job_id, result_path)
        # The score is the last whitespace-separated token in the artifact.
        last_token = score_file.split()[-1]
        score = float(last_token)
        if not 0.0 <= score <= 100.0:
            logger.warning("Unusual score retrieved: %f.", score)
        return score
    except CIArtifactNotFound as e:
        logger.warning("CI artifact does not exist in repo %s.", repo.name_with_namespace)
        logger.debug(e)
        return None
def student_search(
    roster: List[Dict[str, Any]], query: str
) -> Optional[Dict[str, Any]]:
    """
    Obtains the student object corresponding to the search query,
    prompting the user for input if disambiguation is necessary (>1 matches)
    :param roster: the part of the config structure containing
    the list of enrolled students
    :param query: the search query, could contain part of SIS username or
    full name
    :return: the roster entry matching the query
    """
    needle = query.lower()

    def _matches(entry: Dict[str, Any]) -> bool:
        # Match against "Last, First" (comma stripped), the raw username,
        # or the name re-ordered as "First Last".
        plain_name = entry["name"].lower().replace(",", "")
        reordered = " ".join(entry["name"].lower().split(",")[::-1])
        return (
            needle in plain_name
            or needle in entry["username"]
            or needle in reordered
        )

    candidates = [entry for entry in roster if _matches(entry)]
    if not candidates:
        logger.error("No student found matching query %s", query)
        return None
    if len(candidates) == 1:
        return candidates[0]
    # Ambiguous: list the matches and ask the operator to pick one.
    for ct, cand_user in enumerate(candidates):
        print("{}: {}, {}".format(ct, cand_user["name"], cand_user["username"]))
    selected = -1
    while selected < 0 or selected >= len(candidates):
        selected = int(input("Enter the number of the correct student: "))
    return candidates[selected]
def verify_commit(auth_emails: List[str], repo: RepoBase, commit_hash: str) -> bool:
    """
    Checks whether a commit has been made by an authorized user
    :param auth_emails: the list of emails authorized to modify the repository
    :param repo: the repository object to check
    :param commit_hash: the full SHA of the commit to check
    :return: whether the committer was authorized (True if authorized, False otherwise)
    """
    committer_email = repo.get_commit_signature_email(commit_hash)
    # A missing/unsigned committer email is never considered authorized.
    return bool(committer_email) and committer_email in auth_emails
def check_repo_integrity(
    repo: RepoBase, files_to_check: Set[str], since: str = ""
) -> None:
    """
    Checks whether any "protected" files in a repository have been modified
    by an unauthorized user and logs any violations
    :param repo: the repository object to check
    :param files_to_check: the absolute paths (within the repo) of protected files
    :param since: the date after which to check, i.e., commits prior to this date are ignored
    """
    auth_emails = repo.list_authorized_emails()
    # Only the 'master' branch is audited; other branches are ignored.
    commits = repo.list_commit_hashes("master", since)
    for commit in commits:
        modified_files = files_to_check.intersection(repo.list_commit_files(commit))
        # A violation is a protected file touched by an unauthorized committer.
        if modified_files and not verify_commit(auth_emails, repo, commit):
            logger.warning("commit %s modified files: %s", commit, str(modified_files))
def print_statistics(scores: List[float]) -> None:
    """
    Display aggregate information (mean, zero/perfect counts, histogram)
    for a one-dimensional data set; does nothing on empty input.
    """
    if not scores:
        return
    print("---Assignment Statistics---")
    print("Mean: ", sum(scores) / len(scores))
    # Scores are percentages; treat anything below 0.1 as a zero and
    # anything above 99.9 as a perfect score.
    zero_count = sum(1 for score in scores if score < 0.1)
    perfect_count = sum(1 for score in scores if score > 99.9)
    print("Number of zeroes:", zero_count)
    print("Number of hundreds:", perfect_count)
    print_histogram(scores)
def print_histogram(scores: List[float]) -> None:
    """
    Print an ASCII histogram for a one-dimensional data set using ten
    equal-width buckets spanning [min(scores), max(scores)].

    Bar lengths are scaled so the fullest bucket fills the terminal width.
    Does nothing on empty input.
    """
    # shutil.get_terminal_size falls back to $COLUMNS or 80x24 when stdout
    # is not a tty (pipes, CI); os.get_terminal_size would raise OSError.
    import shutil

    if not scores:
        return
    print("ASCII Histogram:")
    num_buckets = 10
    range_min = min(scores)
    range_max = max(scores)
    # Reserve ~15 columns for the "[low, high)" label before each bar.
    max_col = shutil.get_terminal_size().columns - 15
    bucket_width = (range_max - range_min) / num_buckets
    # Buckets must start at range_min, not 0: otherwise any data set whose
    # minimum is nonzero lands almost entirely outside the first nine buckets.
    buckets = [
        (range_min + i * bucket_width, range_min + (i + 1) * bucket_width)
        for i in range(num_buckets)
    ]
    counts = {}
    # First count up each bucket
    for bucket in buckets:
        count = len([score for score in scores if bucket[0] <= score < bucket[1]])
        # If it's the last bucket we include the top (i.e. the range max)
        if bucket == buckets[-1]:
            count += scores.count(range_max)
        counts[bucket] = count
    # Then set up the scale factor to maximally utilize the terminal space
    mult_factor = max_col / max(counts.values())
    # Finally, print everything out
    for bucket in buckets:
        proportional_len = int(counts[bucket] * mult_factor)
        print(
            "[{:4}, {:5}{}: {}".format(
                bucket[0],
                bucket[1],
                (")", "]")[bucket == buckets[-1]],
                proportional_len * "=",
            )
        )
def handle_scoring(
    conf: Config,
    backend: BackendBase,
    args: argparse.Namespace,
    student: Dict[str, Any],
) -> Optional[float]:
    """
    Obtains the autograded score from a repository's CI jobs and, when
    requested, uploads it to Canvas.

    :param conf: tool configuration (semester, namespace, backend, roster, ...)
    :param backend: backend implementation used to locate the student repo
    :param args: parsed CLI arguments (name, path, files, noverify, upload)
    :param student: The part of the config structure with info
        on a student's username, ID, and section
    :return: The score obtained from the results file, or None if the repo
        or its CI jobs could not be found
    """
    hw_name = args.name
    # The interactive subcommand defines no --upload flag, so default to
    # uploading in that case.
    upload = args.upload if "upload" in args else True
    files_to_check = set(args.files)
    backend_conf = conf.backend
    username = student["username"]
    student_section = student["section"]
    full_name = backend.student_repo.build_name(
        conf.semester, student_section, hw_name, username
    )
    try:
        repo = backend.student_repo(backend_conf, conf.namespace, full_name)
        logger.info("Scoring %s...", repo.name_with_namespace)
        # Lazily resolve and cache the backend user id on the roster entry.
        if "id" not in student:
            student["id"] = backend.repo.get_user_id(username, backend_conf)
        if not args.noverify:
            # Only commits after the student gained access can be theirs.
            unlock_time = repo.get_member_add_date(student["id"])
            check_repo_integrity(repo, files_to_check, unlock_time)
        score = get_most_recent_score(repo, args.path)
        if upload:
            canvas = OptionalCanvas.get_api(conf)
            section_ids = OptionalCanvas.get_section_ids(conf, hw_name)
            # NOTE(review): "assigment" spelling assumed to match the
            # OptionalCanvas API defined elsewhere — confirm before renaming.
            assignment_ids = OptionalCanvas.get_assigment_ids(conf, hw_name)
            course_id = section_ids[student_section]
            assignment_id = assignment_ids[student_section]
            try:
                if "canvas-id" not in student:
                    raise StudentNotFound(
                        "No Canvas ID for student. Remove the student with `assigner roster remove {}`,"
                        " then run 'assigner canvas import {} {}`.".format(
                            username, course_id, student_section
                        )
                    )
                # Append a percent as provided scores are percentages and not number of pts
                canvas.put_assignment_submission(
                    course_id, assignment_id, student["canvas-id"], str(score) + "%",
                )
            except StudentNotFound as e:
                # Best-effort upload: log and continue, still return the score.
                logger.debug(e)
                logger.warning("Unable to update submission for Canvas assignment")
    except CIJobNotFound:
        logger.error("No CI jobs found for repo %s", repo.name_with_namespace)
        score = None
    except RepoError as e:
        logger.debug(e)
        logger.warning("Unable to find repo for %s with URL %s", username, full_name)
        score = None
    return score
@requires_config_and_backend
def score_assignments(
    conf: Config, backend: BackendBase, args: argparse.Namespace
) -> None:
    """Goes through each student repository and grabs the most recent CI
    artifact, which contains their autograded score
    """
    roster = get_filtered_roster(conf.roster, args.section, args.student)
    # Collect only the successfully-obtained scores, preserving roster order.
    scores = [
        result
        for result in (
            handle_scoring(conf, backend, args, entry)
            for entry in progress.iterate(roster)
        )
        if result is not None
    ]
    print("Scored {} repositories.".format(len(scores)))
    print_statistics(scores)
@requires_config_and_backend
def checkout_students(
    conf: Config, backend: BackendBase, args: argparse.Namespace
) -> None:
    """Interactively prompts for student info and grabs the most recent CI
    artifact, which contains their autograded score
    """
    roster = get_filtered_roster(conf.roster, args.section, None)
    while True:
        query = input("Enter student ID or name, or 'q' to quit: ")
        # Any prefix of "quit" ('q', 'qu', ... and an empty line) exits.
        if "quit".startswith(query):
            break
        student = student_search(roster, query)
        if not student:
            continue
        score = handle_scoring(conf, backend, args, student)
        # handle_scoring returns None on failure; formatting None with %d
        # raised TypeError before, so only log successful uploads.
        if score is not None:
            logger.info("Uploaded score of %d", score)
        else:
            logger.warning("No score uploaded for %s", student["username"])
@requires_config_and_backend
def integrity_check(
    conf: Config, backend: BackendBase, args: argparse.Namespace
) -> None:
    """Checks that none of the grading files were modified in the timeframe
    during which students could push to their repository
    """
    student = args.student
    files_to_check = set(args.files)
    # Honor the --student flag: previously the parsed value was assigned but
    # never passed, so the whole roster was always checked.
    roster = get_filtered_roster(conf.roster, args.section, student)
    for student in progress.iterate(roster):
        username = student["username"]
        student_section = student["section"]
        full_name = backend.student_repo.build_name(
            conf.semester, student_section, args.name, username
        )
        try:
            repo = backend.student_repo(conf.backend, conf.namespace, full_name)
            check_repo_integrity(repo, files_to_check)
        except RepoError as e:
            logger.debug(e)
            logger.warning(
                "Unable to find repo for %s with URL %s", username, full_name
            )
def setup_parser(parser: argparse.ArgumentParser) -> None:
    """Register the 'all', 'interactive', and 'integrity' scoring subcommands.

    :param parser: the parent parser to attach the scoring subcommands to
    """
    subparsers = parser.add_subparsers(title="Scoring commands")
    all_parser = subparsers.add_parser(
        "all",
        help="Get scores (using CI artifacts) for all students for a given assignment",
    )
    all_parser.add_argument("--student", nargs=1, help="ID of student to score")
    all_parser.add_argument(
        "--upload", action="store_true", help="Upload grades to Canvas"
    )
    all_parser.set_defaults(run=score_assignments)
    interactive_parser = subparsers.add_parser(
        "interactive",
        help="Interactively checkout individual students and upload their grades to Canvas",
    )
    interactive_parser.set_defaults(run=checkout_students)
    integrity_parser = subparsers.add_parser(
        "integrity",
        # Typo fix: "respositories" -> "repositories".
        help="Check the integrity of desired files for a set of assignment repositories",
    )
    integrity_parser.add_argument("--student", nargs=1, help="ID of student to score")
    integrity_parser.set_defaults(run=integrity_check)
    # Flags common to all subcommands
    for subcmd_parser in [all_parser, interactive_parser, integrity_parser]:
        subcmd_parser.add_argument("name", help="Name of the assignment to check")
        subcmd_parser.add_argument("--section", nargs=1, help="Section to check")
        subcmd_parser.add_argument(
            "-f",
            "--files",
            nargs="+",
            dest="files",
            default=[".gitlab-ci.yml"],
            help="Files to check for modification",
        )
    # Flags common to the scoring subcommands
    for subcmd_parser in [all_parser, interactive_parser]:
        subcmd_parser.add_argument(
            "--noverify",
            action="store_true",
            help="Don't check whether a student has overwritten the grader files",
        )
        subcmd_parser.add_argument(
            "--path",
            default="results.txt",
            help="Path within repo to grader results file",
        )
    make_help_parser(parser, subparsers, "Show help for score or one of its commands")
| |
"""Test the UniFi Protect switch platform."""
# pylint: disable=protected-access
from __future__ import annotations
from unittest.mock import AsyncMock, Mock
import pytest
from pyunifiprotect.data import Camera, Light
from pyunifiprotect.data.types import RecordingMode, VideoMode
from homeassistant.components.unifiprotect.const import DEFAULT_ATTRIBUTION
from homeassistant.components.unifiprotect.switch import (
ALL_DEVICES_SWITCHES,
CAMERA_SWITCHES,
LIGHT_SWITCHES,
ProtectSwitchEntityDescription,
)
from homeassistant.const import ATTR_ATTRIBUTION, ATTR_ENTITY_ID, STATE_OFF, Platform
from homeassistant.core import HomeAssistant
from homeassistant.helpers import entity_registry as er
from .conftest import (
MockEntityFixture,
assert_entity_counts,
enable_entity,
ids_from_device_description,
)
@pytest.fixture(name="light")
async def light_fixture(
    hass: HomeAssistant, mock_entry: MockEntityFixture, mock_light: Light
):
    """Fixture for a single light for testing the switch platform."""
    # disable pydantic validation so mocking can happen
    Light.__config__.validate_assignment = False
    light_obj = mock_light.copy(deep=True)
    light_obj._api = mock_entry.api
    light_obj.name = "Test Light"
    # Known baseline: both switch entities should start as STATE_OFF.
    light_obj.is_ssh_enabled = False
    light_obj.light_device_settings.is_indicator_enabled = False
    mock_entry.api.bootstrap.reset_objects()
    mock_entry.api.bootstrap.lights = {
        light_obj.id: light_obj,
    }
    await hass.config_entries.async_setup(mock_entry.entry.entry_id)
    await hass.async_block_till_done()
    # Expect 2 switch entities for a light, 1 enabled by default.
    assert_entity_counts(hass, Platform.SWITCH, 2, 1)
    yield light_obj
    # Restore pydantic validation for subsequent tests.
    Light.__config__.validate_assignment = True
@pytest.fixture(name="camera")
async def camera_fixture(
    hass: HomeAssistant, mock_entry: MockEntityFixture, mock_camera: Camera
):
    """Fixture for a single camera for testing the switch platform."""
    # disable pydantic validation so mocking can happen
    Camera.__config__.validate_assignment = False
    camera_obj = mock_camera.copy(deep=True)
    camera_obj._api = mock_entry.api
    camera_obj.channels[0]._api = mock_entry.api
    camera_obj.channels[1]._api = mock_entry.api
    camera_obj.channels[2]._api = mock_entry.api
    camera_obj.name = "Test Camera"
    camera_obj.recording_settings.mode = RecordingMode.DETECTIONS
    # Enable every optional feature flag so all switch entities are created.
    camera_obj.feature_flags.has_led_status = True
    camera_obj.feature_flags.has_hdr = True
    camera_obj.feature_flags.video_modes = [VideoMode.DEFAULT, VideoMode.HIGH_FPS]
    camera_obj.feature_flags.has_privacy_mask = True
    camera_obj.feature_flags.has_speaker = True
    camera_obj.feature_flags.has_smart_detect = True
    # Known baseline: every switch entity should start as STATE_OFF.
    camera_obj.is_ssh_enabled = False
    camera_obj.led_settings.is_enabled = False
    camera_obj.hdr_mode = False
    camera_obj.video_mode = VideoMode.DEFAULT
    camera_obj.remove_privacy_zone()
    camera_obj.speaker_settings.are_system_sounds_enabled = False
    camera_obj.osd_settings.is_name_enabled = False
    camera_obj.osd_settings.is_date_enabled = False
    camera_obj.osd_settings.is_logo_enabled = False
    camera_obj.osd_settings.is_debug_enabled = False
    camera_obj.smart_detect_settings.object_types = []
    mock_entry.api.bootstrap.reset_objects()
    mock_entry.api.bootstrap.cameras = {
        camera_obj.id: camera_obj,
    }
    await hass.config_entries.async_setup(mock_entry.entry.entry_id)
    await hass.async_block_till_done()
    # All features on: 12 switch entities, 11 enabled by default.
    assert_entity_counts(hass, Platform.SWITCH, 12, 11)
    yield camera_obj
    # Restore pydantic validation for subsequent tests.
    Camera.__config__.validate_assignment = True
@pytest.fixture(name="camera_none")
async def camera_none_fixture(
    hass: HomeAssistant, mock_entry: MockEntityFixture, mock_camera: Camera
):
    """Fixture for a single camera with no optional feature flags enabled."""
    # disable pydantic validation so mocking can happen
    Camera.__config__.validate_assignment = False
    camera_obj = mock_camera.copy(deep=True)
    camera_obj._api = mock_entry.api
    camera_obj.channels[0]._api = mock_entry.api
    camera_obj.channels[1]._api = mock_entry.api
    camera_obj.channels[2]._api = mock_entry.api
    camera_obj.name = "Test Camera"
    camera_obj.recording_settings.mode = RecordingMode.DETECTIONS
    # Disable every optional feature flag so only the always-present
    # switch entities are created.
    camera_obj.feature_flags.has_led_status = False
    camera_obj.feature_flags.has_hdr = False
    camera_obj.feature_flags.video_modes = [VideoMode.DEFAULT]
    camera_obj.feature_flags.has_privacy_mask = False
    camera_obj.feature_flags.has_speaker = False
    camera_obj.feature_flags.has_smart_detect = False
    camera_obj.is_ssh_enabled = False
    camera_obj.osd_settings.is_name_enabled = False
    camera_obj.osd_settings.is_date_enabled = False
    camera_obj.osd_settings.is_logo_enabled = False
    camera_obj.osd_settings.is_debug_enabled = False
    mock_entry.api.bootstrap.reset_objects()
    mock_entry.api.bootstrap.cameras = {
        camera_obj.id: camera_obj,
    }
    await hass.config_entries.async_setup(mock_entry.entry.entry_id)
    await hass.async_block_till_done()
    # All features off: 5 switch entities, 4 enabled by default.
    assert_entity_counts(hass, Platform.SWITCH, 5, 4)
    yield camera_obj
    # Restore pydantic validation for subsequent tests.
    Camera.__config__.validate_assignment = True
@pytest.fixture(name="camera_privacy")
async def camera_privacy_fixture(
    hass: HomeAssistant, mock_entry: MockEntityFixture, mock_camera: Camera
):
    """Fixture for a single camera with privacy mode already active."""
    # disable pydantic validation so mocking can happen
    Camera.__config__.validate_assignment = False
    camera_obj = mock_camera.copy(deep=True)
    camera_obj._api = mock_entry.api
    camera_obj.channels[0]._api = mock_entry.api
    camera_obj.channels[1]._api = mock_entry.api
    camera_obj.channels[2]._api = mock_entry.api
    camera_obj.name = "Test Camera"
    # Privacy "on" state: recording disabled and a privacy zone present.
    camera_obj.recording_settings.mode = RecordingMode.NEVER
    camera_obj.feature_flags.has_led_status = False
    camera_obj.feature_flags.has_hdr = False
    camera_obj.feature_flags.video_modes = [VideoMode.DEFAULT]
    camera_obj.feature_flags.has_privacy_mask = True
    camera_obj.feature_flags.has_speaker = False
    camera_obj.feature_flags.has_smart_detect = False
    camera_obj.add_privacy_zone()
    camera_obj.is_ssh_enabled = False
    camera_obj.osd_settings.is_name_enabled = False
    camera_obj.osd_settings.is_date_enabled = False
    camera_obj.osd_settings.is_logo_enabled = False
    camera_obj.osd_settings.is_debug_enabled = False
    mock_entry.api.bootstrap.reset_objects()
    mock_entry.api.bootstrap.cameras = {
        camera_obj.id: camera_obj,
    }
    await hass.config_entries.async_setup(mock_entry.entry.entry_id)
    await hass.async_block_till_done()
    # Privacy mask adds one switch: 6 entities, 5 enabled by default.
    assert_entity_counts(hass, Platform.SWITCH, 6, 5)
    yield camera_obj
    # Restore pydantic validation for subsequent tests.
    Camera.__config__.validate_assignment = True
async def test_switch_setup_light(
    hass: HomeAssistant,
    mock_entry: MockEntityFixture,
    light: Light,
):
    """Test switch entity setup for light devices."""
    entity_registry = er.async_get(hass)

    # Enabled-by-default switch (LIGHT_SWITCHES[0]): registered, off,
    # and attributed correctly.
    description = LIGHT_SWITCHES[0]
    unique_id, entity_id = ids_from_device_description(
        Platform.SWITCH, light, description
    )
    entity = entity_registry.async_get(entity_id)
    assert entity
    assert entity.unique_id == unique_id
    state = hass.states.get(entity_id)
    assert state
    assert state.state == STATE_OFF
    assert state.attributes[ATTR_ATTRIBUTION] == DEFAULT_ATTRIBUTION

    # The all-devices switch (ALL_DEVICES_SWITCHES[0], exercised as the SSH
    # switch below) is registered but disabled by default; enable it and
    # verify it reports a state.
    description = ALL_DEVICES_SWITCHES[0]
    unique_id = f"{light.id}_{description.key}"
    entity_id = f"switch.test_light_{description.name.lower().replace(' ', '_')}"
    entity = entity_registry.async_get(entity_id)
    assert entity
    assert entity.disabled is True
    assert entity.unique_id == unique_id
    await enable_entity(hass, mock_entry.entry.entry_id, entity_id)
    state = hass.states.get(entity_id)
    assert state
    assert state.state == STATE_OFF
    assert state.attributes[ATTR_ATTRIBUTION] == DEFAULT_ATTRIBUTION
async def test_switch_setup_camera_all(
    hass: HomeAssistant,
    mock_entry: MockEntityFixture,
    camera: Camera,
):
    """Test switch entity setup for camera devices (all enabled feature flags)."""
    entity_registry = er.async_get(hass)

    # Every camera switch should be registered and start off.
    for description in CAMERA_SWITCHES:
        unique_id, entity_id = ids_from_device_description(
            Platform.SWITCH, camera, description
        )
        entity = entity_registry.async_get(entity_id)
        assert entity
        assert entity.unique_id == unique_id
        state = hass.states.get(entity_id)
        assert state
        assert state.state == STATE_OFF
        assert state.attributes[ATTR_ATTRIBUTION] == DEFAULT_ATTRIBUTION

    # The all-devices switch is registered but disabled by default;
    # enable it and verify it reports a state.
    description = ALL_DEVICES_SWITCHES[0]
    description_entity_name = (
        description.name.lower().replace(":", "").replace(" ", "_")
    )
    unique_id = f"{camera.id}_{description.key}"
    entity_id = f"switch.test_camera_{description_entity_name}"
    entity = entity_registry.async_get(entity_id)
    assert entity
    assert entity.disabled is True
    assert entity.unique_id == unique_id
    await enable_entity(hass, mock_entry.entry.entry_id, entity_id)
    state = hass.states.get(entity_id)
    assert state
    assert state.state == STATE_OFF
    assert state.attributes[ATTR_ATTRIBUTION] == DEFAULT_ATTRIBUTION
async def test_switch_setup_camera_none(
    hass: HomeAssistant,
    mock_entry: MockEntityFixture,
    camera_none: Camera,
):
    """Test switch entity setup for camera devices (no enabled feature flags)."""
    entity_registry = er.async_get(hass)

    # Only switches without a required feature field should exist here.
    for description in CAMERA_SWITCHES:
        if description.ufp_required_field is not None:
            continue
        unique_id, entity_id = ids_from_device_description(
            Platform.SWITCH, camera_none, description
        )
        entity = entity_registry.async_get(entity_id)
        assert entity
        assert entity.unique_id == unique_id
        state = hass.states.get(entity_id)
        assert state
        assert state.state == STATE_OFF
        assert state.attributes[ATTR_ATTRIBUTION] == DEFAULT_ATTRIBUTION

    # The all-devices switch is registered but disabled by default;
    # enable it and verify it reports a state.
    description = ALL_DEVICES_SWITCHES[0]
    description_entity_name = (
        description.name.lower().replace(":", "").replace(" ", "_")
    )
    unique_id = f"{camera_none.id}_{description.key}"
    entity_id = f"switch.test_camera_{description_entity_name}"
    entity = entity_registry.async_get(entity_id)
    assert entity
    assert entity.disabled is True
    assert entity.unique_id == unique_id
    await enable_entity(hass, mock_entry.entry.entry_id, entity_id)
    state = hass.states.get(entity_id)
    assert state
    assert state.state == STATE_OFF
    assert state.attributes[ATTR_ATTRIBUTION] == DEFAULT_ATTRIBUTION
async def test_switch_light_status(hass: HomeAssistant, light: Light):
    """Tests status light switch for lights."""
    description = LIGHT_SWITCHES[0]

    # Replace the pydantic field/method so the async setter can be mocked.
    light.__fields__["set_status_light"] = Mock()
    light.set_status_light = AsyncMock()
    _, entity_id = ids_from_device_description(Platform.SWITCH, light, description)

    # turn_on / turn_off should call the device setter with True / False.
    await hass.services.async_call(
        "switch", "turn_on", {ATTR_ENTITY_ID: entity_id}, blocking=True
    )
    light.set_status_light.assert_called_once_with(True)
    await hass.services.async_call(
        "switch", "turn_off", {ATTR_ENTITY_ID: entity_id}, blocking=True
    )
    light.set_status_light.assert_called_with(False)
async def test_switch_camera_ssh(
    hass: HomeAssistant, camera: Camera, mock_entry: MockEntityFixture
):
    """Tests SSH switch for cameras."""
    description = ALL_DEVICES_SWITCHES[0]

    # Replace the pydantic field/method so the async setter can be mocked.
    camera.__fields__["set_ssh"] = Mock()
    camera.set_ssh = AsyncMock()
    _, entity_id = ids_from_device_description(Platform.SWITCH, camera, description)
    # The SSH switch is disabled by default, so enable it first.
    await enable_entity(hass, mock_entry.entry.entry_id, entity_id)

    # turn_on / turn_off should call the device setter with True / False.
    await hass.services.async_call(
        "switch", "turn_on", {ATTR_ENTITY_ID: entity_id}, blocking=True
    )
    camera.set_ssh.assert_called_once_with(True)
    await hass.services.async_call(
        "switch", "turn_off", {ATTR_ENTITY_ID: entity_id}, blocking=True
    )
    camera.set_ssh.assert_called_with(False)
@pytest.mark.parametrize("description", CAMERA_SWITCHES)
async def test_switch_camera_simple(
    hass: HomeAssistant, camera: Camera, description: ProtectSwitchEntityDescription
):
    """Tests all simple switches for cameras."""
    # High FPS and Privacy Mode have non-boolean setter arguments and are
    # covered by dedicated tests below.
    if description.name in ("High FPS", "Privacy Mode"):
        return

    assert description.ufp_set_method is not None

    # Replace the pydantic field/method so the async setter can be mocked.
    camera.__fields__[description.ufp_set_method] = Mock()
    setattr(camera, description.ufp_set_method, AsyncMock())
    set_method = getattr(camera, description.ufp_set_method)
    _, entity_id = ids_from_device_description(Platform.SWITCH, camera, description)

    # turn_on / turn_off should call the device setter with True / False.
    await hass.services.async_call(
        "switch", "turn_on", {ATTR_ENTITY_ID: entity_id}, blocking=True
    )
    set_method.assert_called_once_with(True)
    await hass.services.async_call(
        "switch", "turn_off", {ATTR_ENTITY_ID: entity_id}, blocking=True
    )
    set_method.assert_called_with(False)
async def test_switch_camera_highfps(hass: HomeAssistant, camera: Camera):
    """Tests High FPS switch for cameras."""
    description = CAMERA_SWITCHES[2]

    # Replace the pydantic field/method so the async setter can be mocked.
    camera.__fields__["set_video_mode"] = Mock()
    camera.set_video_mode = AsyncMock()
    _, entity_id = ids_from_device_description(Platform.SWITCH, camera, description)

    # The High FPS switch maps on/off to video modes, not booleans.
    await hass.services.async_call(
        "switch", "turn_on", {ATTR_ENTITY_ID: entity_id}, blocking=True
    )
    camera.set_video_mode.assert_called_once_with(VideoMode.HIGH_FPS)
    await hass.services.async_call(
        "switch", "turn_off", {ATTR_ENTITY_ID: entity_id}, blocking=True
    )
    camera.set_video_mode.assert_called_with(VideoMode.DEFAULT)
async def test_switch_camera_privacy(hass: HomeAssistant, camera: Camera):
    """Tests Privacy Mode switch for cameras."""
    description = CAMERA_SWITCHES[3]

    # Replace the pydantic field/method so the async setter can be mocked.
    camera.__fields__["set_privacy"] = Mock()
    camera.set_privacy = AsyncMock()
    _, entity_id = ids_from_device_description(Platform.SWITCH, camera, description)

    # Turning privacy on mutes the mic (0) and disables recording (NEVER).
    await hass.services.async_call(
        "switch", "turn_on", {ATTR_ENTITY_ID: entity_id}, blocking=True
    )
    camera.set_privacy.assert_called_once_with(True, 0, RecordingMode.NEVER)
    # Turning it off restores the camera's current mic volume and mode.
    await hass.services.async_call(
        "switch", "turn_off", {ATTR_ENTITY_ID: entity_id}, blocking=True
    )
    camera.set_privacy.assert_called_with(
        False, camera.mic_volume, camera.recording_settings.mode
    )
async def test_switch_camera_privacy_already_on(
    hass: HomeAssistant, camera_privacy: Camera
):
    """Tests Privacy Mode switch for cameras with privacy mode defaulted on."""
    description = CAMERA_SWITCHES[3]

    # Replace the pydantic field/method so the async setter can be mocked.
    camera_privacy.__fields__["set_privacy"] = Mock()
    camera_privacy.set_privacy = AsyncMock()
    _, entity_id = ids_from_device_description(
        Platform.SWITCH, camera_privacy, description
    )

    # With privacy already active there are no saved pre-privacy settings,
    # so turning it off falls back to 100% mic volume and ALWAYS recording.
    await hass.services.async_call(
        "switch", "turn_off", {ATTR_ENTITY_ID: entity_id}, blocking=True
    )
    camera_privacy.set_privacy.assert_called_once_with(False, 100, RecordingMode.ALWAYS)
| |
#!/usr/bin/env python3
from proxy import proxy
from select import select
import importlib
import json
import os
import pprint
import re
import sys
import telnetlib
import threading
import traceback
telnetlib.GMCP = b'\xc9'
class Session(object):
    """One MUD session: bridges a telnet connection to the game server and a
    local client proxy, routing lines through a pluggable "world" module that
    implements triggers, aliases, and GMCP handling.
    """

    def __init__(self, world_module, port, arg):
        # MUD servers here speak latin-1; the local client speaks UTF-8.
        self.mud_encoding = 'iso-8859-1'
        self.client_encoding = 'utf-8'
        self.world_module = world_module
        self.arg = arg
        self.world = world_module.getClass()(self, self.arg)
        # Carries a partial client line between handle_from_pipe() calls.
        self.pipeBuffer = b''
        try:
            self.socketToPipeR, self.pipeToSocketW, self.stopFlag, runProxy = proxy('::1', port)
            self.pipeToSocketW = os.fdopen(self.pipeToSocketW, 'wb')
            self.proxyThread = threading.Thread(target=runProxy)
            self.proxyThread.start()
            host_port = self.world.getHostPort()
            self.log("Connecting")
            self.telnet = self.connect(*host_port)
            self.log("Connected")
        except:
            # Tear down the proxy thread and world, then propagate.
            self.log("Shutting down")
            self.stopFlag.set()
            self.world.quit()
            raise

    def join(self):
        # Wait for the proxy thread to finish.
        # Fixed: previously referenced self.thr, which was never assigned
        # and raised AttributeError whenever join() was called.
        self.proxyThread.join()

    def log(self, *args, **kwargs):
        """Write a delimited diagnostic message to the local client."""
        if len(args) == 1 and type(args[0]) == str:
            line = args[0]
        else:
            line = pprint.pformat(args)
        self.pipeToSocketW.write("---------\n".encode(self.client_encoding))
        self.pipeToSocketW.write(line.encode(self.client_encoding))
        self.pipeToSocketW.write(b"\n")
        self.pipeToSocketW.flush()

    def strip_ansi(self, line):
        """Remove ANSI CSI escape sequences (colors, cursor moves) from a line."""
        return re.sub(r'(\x9B|\x1B\[)[0-?]*[ -\/]*[@-~]', '', line)

    def gmcpOut(self, msg):
        """Send a GMCP subnegotiation message to the MUD."""
        self.telnet.sock.sendall(telnetlib.IAC + telnetlib.SB + telnetlib.GMCP + msg.encode(self.mud_encoding) + telnetlib.IAC + telnetlib.SE)

    def iac(self, sock, cmd, option):
        """Telnet option-negotiation callback: accept GMCP and TTYPE, refuse the rest."""
        if cmd == telnetlib.WILL:
            if option == telnetlib.GMCP:
                self.log("Enabling GMCP")
                sock.sendall(telnetlib.IAC + telnetlib.DO + option)
                self.gmcpOut('Core.Hello { "client": "Cizra", "version": "1" }')
                supportables = ['char 1', 'char.base 1', 'char.maxstats 1', 'char.status 1', 'char.statusvars 1', 'char.vitals 1', 'char.worth 1', 'comm 1', 'comm.tick 1', 'group 1', 'room 1', 'room.info 1']
                # GMCP payloads are JSON: swap Python quoting for JSON quoting.
                self.gmcpOut('Core.Supports.Set ' + str(supportables).replace("'", '"'))
                self.gmcpOut('request room')
                self.gmcpOut('request char')
            elif option == telnetlib.TTYPE:
                self.log("Sending terminal type 'Cizra'")
                sock.sendall(telnetlib.IAC + telnetlib.DO + option +
                        telnetlib.IAC + telnetlib.SB + telnetlib.TTYPE + telnetlib.BINARY + b'Cizra' + telnetlib.IAC + telnetlib.SE)
            else:
                sock.sendall(telnetlib.IAC + telnetlib.DONT + option)
        elif cmd == telnetlib.SE:
            # End of a subnegotiation: pull the buffered SB payload.
            data = self.telnet.read_sb_data()
            if data and data[0] == ord(telnetlib.GMCP):
                try:
                    self.handleGmcp(data[1:].decode(self.mud_encoding))
                except Exception as e:
                    traceback.print_exc()

    def handleGmcp(self, data):
        # this.that {JSON blob}
        # TODO: move into clients
        space_idx = data.find(' ')
        whole_key = data[:space_idx]
        value_json = data[space_idx + 1:]
        # Store the value under nested dicts keyed by the dotted path.
        nesting = whole_key.split('.')
        current = self.world.gmcp
        for nest in nesting[:-1]:
            if nest not in current:
                current[nest] = {}
            current = current[nest]
        lastkey = nesting[-1]
        try:
            val = json.loads(value_json, strict=False)
        except json.decoder.JSONDecodeError:
            # Some servers send bare strings; wrap so the tree stays uniform.
            val = {"string": value_json}
        if lastkey not in current:
            current[lastkey] = {}
        current[lastkey] = val
        self.world.handleGmcp(whole_key, val)

    def connect(self, host, port):
        """Open the telnet connection with our option-negotiation callback installed."""
        t = telnetlib.Telnet()
        t.set_option_negotiation_callback(self.iac)
        # t.set_debuglevel(1)
        t.open(host, int(port))
        return t

    def send(self, line):
        """Send one command line to the MUD, echoing it locally."""
        print("> ", line)
        self.telnet.write((line + '\n').encode(self.mud_encoding))

    def handle_from_telnet(self):
        """Read pending MUD output, run triggers on each line, forward to the client."""
        try:
            data = self.telnet.read_very_eager()
        except:
            self.log("EOF on telnet")
            self.stopFlag.set()
            self.world.quit()
            raise
        try:
            data = data.decode(self.mud_encoding)
        except UnicodeError as e:
            print("Unicode error:", e)
            print("Data was:", data)
            data = ''
        if not data:
            # Nothing printable: drain any pending subnegotiation data.
            _ = self.telnet.read_sb_data()
        prn = []
        for line in data.split('\n'):
            if line:
                replacement = None
                try:
                    replacement = self.world.trigger(line.strip())
                except Exception as e:
                    traceback.print_exc()
                if replacement is not None:
                    line = replacement
            prn.append(line)
        # Fixed: client-bound text is encoded with client_encoding, matching
        # log()/show(); previously this used the MUD encoding and garbled
        # non-ASCII output for UTF-8 clients.
        self.pipeToSocketW.write('\n'.join(prn).encode(self.client_encoding))
        self.pipeToSocketW.flush()

    def show(self, line):
        """Write raw text straight to the local client."""
        self.pipeToSocketW.write(line.encode(self.client_encoding))
        self.pipeToSocketW.flush()

    def handle_from_pipe(self):
        """Read client input from the proxy pipe and dispatch complete lines."""
        try:
            # Accumulate on the instance so a line split across two reads
            # survives. Fixed: the buffer used to be function-local, and the
            # bytes-vs-str comparison `lines[-1] != ''` was always true, so
            # partial lines were silently dropped.
            self.pipeBuffer += os.read(self.socketToPipeR, 4096)
            lines = self.pipeBuffer.split(b'\n')
            # split() leaves b'' last when input ended on '\n', otherwise the
            # trailing partial line; either way it is the carry-over.
            self.pipeBuffer = lines[-1]
            for raw in lines[:-1]:
                line = raw.decode(self.client_encoding)
                # endswith() also copes with empty lines (no IndexError).
                if line.endswith('\r'):
                    line = line[:-1]
                self.handle_output_line(line)
        except EOFError:
            self.log("EOF in pipe")
            self.stopFlag.set()
            self.world.quit()
            raise

    def handle_output_line(self, data):
        """Handle one complete line typed by the client."""
        pprint.pprint(data)
        if data == '#reload' and self.world:
            # Hot-reload the world module, preserving its state and GMCP tree.
            self.log('Reloading world')
            try:
                state = self.world.state
                gmcp = self.world.gmcp
                self.world.quit()
                self.world_module = importlib.reload(self.world_module)
                self.world = self.world_module.getClass()(self, self.arg)
                self.world.state = state
                self.world.gmcp = gmcp
            except Exception:
                traceback.print_exc()
                return
        else:
            handled = False
            try:
                handled = self.world.alias(data)
            except Exception as e:
                traceback.print_exc()
            else:
                # Lines not consumed by an alias go to the MUD verbatim.
                if not handled:
                    self.send(data)

    def run(self):
        """Main loop: multiplex the telnet socket and the client pipe until error/EOF."""
        try:
            while True:
                fds, _, _ = select([self.telnet.get_socket(), self.socketToPipeR], [], [])
                for fd in fds:
                    if fd == self.telnet.get_socket():
                        self.handle_from_telnet()
                    elif fd == self.socketToPipeR:
                        self.handle_from_pipe()
        except Exception as e:
            self.log("Exception in run():", e)
        finally:
            self.log("Closing")
            self.telnet.close()
def main():
    """Entry point: load the world module named on the command line and run a session."""
    if len(sys.argv) < 3 or len(sys.argv) > 4:
        print("Usage: {} worldmodule (without .py) port [arg]".format(sys.argv[0]))
        # sys.exit instead of the site-injected exit(): always available and
        # explicit about terminating via SystemExit.
        sys.exit(1)
    world_module = importlib.import_module(sys.argv[1])
    port = int(sys.argv[2])
    arg = sys.argv[3] if len(sys.argv) == 4 else None
    ses = Session(world_module, port, arg)
    ses.run()
# Standard entry-point guard. The previous `assert(__name__ == '__main__')`
# is stripped under `python -O`, which would have run main() even when this
# file was imported as a module.
if __name__ == '__main__':
    main()
| |
from __future__ import division
from __future__ import print_function
import numpy as np
import scipy
import sys
from itertools import product, combinations
from bitstring import BitArray
from mmd.slater import common_index, get_excitation
from mmd.utils.davidson import davidson
from scipy.special import comb
from scipy.linalg import sqrtm, lu
class PostSCF(object):
"""Class for post-scf routines"""
    def __init__(self,mol):
        # Post-SCF correlation methods only make sense on top of a converged
        # SCF reference, so bail out immediately otherwise.
        self.mol = mol
        if not self.mol.is_converged:
            sys.exit("SCF not converged, skipping Post-SCF")
        # Transform the AO integrals to the MO basis once, up front; every
        # method below (MP2, CISD, FCI, ...) consumes the MO-basis integrals.
        self.ao2mo()
    def ao2mo(self):
        """Routine to convert AO integrals to MO integrals"""
        # Four successive quarter-transformations with the MO coefficients C
        # take the two-electron integrals from the AO to the MO basis.
        self.mol.single_bar = np.einsum('mp,mnlz->pnlz',
            self.mol.C,self.mol.TwoE)
        temp = np.einsum('nq,pnlz->pqlz',
            self.mol.C,self.mol.single_bar)
        self.mol.single_bar = np.einsum('lr,pqlz->pqrz',
            self.mol.C,temp)
        temp = np.einsum('zs,pqrz->pqrs',
            self.mol.C,self.mol.single_bar)
        self.mol.single_bar = temp
        # TODO: Make this tx more elegant?
        # tile spin to make spin orbitals from spatial (twice dimension)
        self.mol.norb = self.mol.nbasis * 2 # spin orbital
        self.mol.double_bar = np.zeros([2*idx for idx in self.mol.single_bar.shape])
        for p in range(self.mol.double_bar.shape[0]):
            for q in range(self.mol.double_bar.shape[1]):
                for r in range(self.mol.double_bar.shape[2]):
                    for s in range(self.mol.double_bar.shape[3]):
                        # Antisymmetrized spin-orbital integral: Coulomb minus
                        # exchange, each gated by matching spin parities.
                        value1 = self.mol.single_bar[p//2,r//2,q//2,s//2].real * (p%2==r%2) * (q%2==s%2)
                        value2 = self.mol.single_bar[p//2,s//2,q//2,r//2].real * (p%2==s%2) * (q%2==r%2)
                        self.mol.double_bar[p,q,r,s] = value1 - value2
        # create Hp, the spin basis one electron operator
        spin = np.eye(2)
        self.mol.Hp = np.kron(np.einsum('uj,vi,uv', self.mol.C, self.mol.C, self.mol.Core).real,spin)
        # create fs, the spin basis fock matrix eigenvalues
        self.mol.fs = np.kron(np.diag(self.mol.MO),spin)
    def MP2(self,spin_orbital=False):
        """Routine to compute MP2 energy from RHF reference

        :param spin_orbital: if True, use the spin-orbital formula over the
            antisymmetrized integrals; otherwise the spatial-orbital formula.
            Both paths store the total energy on self.mol.emp2.
        """
        if spin_orbital:
            # Use spin orbitals from RHF reference
            EMP2 = 0.0
            occupied = range(self.mol.nelec)
            virtual = range(self.mol.nelec,self.mol.norb)
            for i,j,a,b in product(occupied,occupied,virtual,virtual):
                denom = self.mol.fs[i,i] + self.mol.fs[j,j] \
                      - self.mol.fs[a,a] - self.mol.fs[b,b]
                numer = self.mol.double_bar[i,j,a,b]**2
                EMP2 += numer/denom
            # The 1/4 factor compensates the unrestricted i,j and a,b sums.
            self.mol.emp2 = 0.25*EMP2 + self.mol.energy
        else:
            # Use spatial orbitals from RHF reference
            EMP2 = 0.0
            occupied = range(self.mol.nocc)
            virtual = range(self.mol.nocc,self.mol.nbasis)
            for i,j,a,b in product(occupied,occupied,virtual,virtual):
                denom = self.mol.MO[i] + self.mol.MO[j] \
                      - self.mol.MO[a] - self.mol.MO[b]
                # Coulomb-minus-exchange combination of MO integrals.
                numer = self.mol.single_bar[i,a,j,b] \
                      * (2.0*self.mol.single_bar[i,a,j,b]
                      - self.mol.single_bar[i,b,j,a])
                EMP2 += numer/denom
            self.mol.emp2 = EMP2 + self.mol.energy
        print('E(MP2) = ', self.mol.emp2.real)
@staticmethod
def tuple2bitstring(bit_tuple):
''' From tuple of occupied orbitals, return bitstring representation '''
string = ['0']*(max(bit_tuple) + 1)
for i in bit_tuple:
string[i] = '1'
string = ''.join(string[::-1])
return BitArray(bin=string)
    def hamiltonian_matrix_element(self,det1,det2,Nint):
        """ return general Hamiltonian matrix element <det1|H|det2> """
        # Slater-Condon rules: the element depends only on the excitation
        # degree (number of spin orbitals by which the determinants differ).
        exc, degree, phase = get_excitation(det1,det2,Nint)
        if degree > 2:
            # A two-body Hamiltonian cannot couple determinants differing in
            # more than two spin orbitals.
            return 0
        elif degree == 2:
            # sign * <hole1,hole2||particle1,particle2>
            return phase * self.mol.double_bar[exc[1,0], exc[2,0], exc[1,1], exc[2,1]]
        elif degree == 1:
            # One-electron term plus the two-electron sum over the orbitals
            # common to both determinants.
            m = exc[1,0]
            p = exc[1,1]
            common = common_index(det1,det2,Nint)
            tmp = self.mol.Hp[m,p]
            for n in common:
                tmp += self.mol.double_bar[m, n, p, n]
            return phase * tmp
        elif degree == 0:
            # Diagonal element: one-electron terms plus half the
            # antisymmetrized interaction over all occupied pairs.
            # kind of lazy to use common_index...
            common = common_index(det1,det2,Nint)
            tmp = 0.0
            for m in common:
                tmp += self.mol.Hp[m, m]
                for n in common:
                    tmp += 0.5*self.mol.double_bar[m,n,m,n]
            return phase * tmp
def build_full_hamiltonian(self,det_list):
''' Given a list of determinants, construct the full Hamiltonian matrix '''
Nint = int(np.floor(self.mol.norb/64) + 1)
H = np.zeros((len(det_list),len(det_list)))
print("Building Hamiltonian...")
for idx,det1 in enumerate(det_list):
for jdx,det2 in enumerate(det_list[:(idx+1)]):
value = self.hamiltonian_matrix_element(det1,det2,Nint)
H[idx,jdx] = value
H[jdx,idx] = value
return H
def residues(self,determinant):
''' Returns list of residues, which is all possible ways to remove two
electrons from a given determinant with number of orbitals nOrb
'''
nOrb = self.mol.norb
residue_list = []
nonzero = bin(determinant).count('1')
for i in range(nOrb):
mask1 = (1 << i)
for j in range(i):
mask2 = (1 << j)
mask = mask1 ^ mask2
if bin(determinant & ~mask).count('1') == (nonzero - 2):
residue_list.append(determinant & ~mask)
return residue_list
def add_particles(self,residue_list):
''' Returns list of determinants, which is all possible ways to add two
electrons from a given residue_list with number of orbitals nOrb
'''
nOrb = self.mol.norb
determinants = []
for residue in residue_list:
determinant = residue
for i in range(nOrb):
mask1 = (1 << i)
if not bool(determinant & mask1):
one_particle = determinant | mask1
for j in range(i):
mask2 = (1 << j)
if not bool(one_particle & mask2):
two_particle = one_particle | mask2
determinants.append(two_particle)
#return [format(det,'#0'+str(n_orbitals+2)+'b') for det in list(set(determinants))]
return list(set(determinants))
def single_and_double_determinants(self,determinant):
return [np.array([i]) for i in self.add_particles(self.residues(determinant))]
    def CISD(self):
        ''' Do CISD from RHF reference.

        Builds the determinant basis of all single and double excitations out
        of the lowest-orbital reference, assembles the Hamiltonian, and stores
        the ground-state energy in self.mol.ecisd.
        '''
        nEle = self.mol.nelec
        reference_determinant = int(2**nEle - 1) # reference determinant, lowest nEle orbitals filled
        det_list = self.single_and_double_determinants(reference_determinant)
        num_dets = len(det_list)
        # Guard against an intractably large CI matrix (dense O(N^2) build).
        if num_dets > 5000:
            print("Number determinants: ", num_dets)
            sys.exit("CI too expensive. Quitting.")
        H = self.build_full_hamiltonian(det_list)
        print("Diagonalizing Hamiltonian...")
        #E,C = scipy.linalg.eigh(H)
        # Davidson iterative solver for the lowest 3 eigenpairs.
        E,C = davidson(H,3)
        # Electronic ground state plus nuclear repulsion.
        self.mol.ecisd = E[0] + self.mol.nuc_energy
        print("\nConfiguration Interaction Singles and Doubles")
        print("------------------------------")
        print("# Determinants: ",len(det_list))
        print("SCF energy: %12.8f" % self.mol.energy.real)
        print("CISD corr: %12.8f" % (self.mol.ecisd - self.mol.energy.real))
        print("CISD energy: %12.8f" % self.mol.ecisd)
    def FCI(self):
        """Routine to compute FCI energy from RHF reference.

        Enumerates every nEle-electron determinant over nOrb orbitals,
        diagonalizes the full Hamiltonian, and stores the ground-state energy
        in self.mol.efci.
        """
        nEle = self.mol.nelec
        nOrb = self.mol.norb
        det_list = []
        # Guard against a combinatorially large basis before building anything.
        if comb(nOrb,nEle) > 5000:
            print("Number determinants: ",comb(nOrb,nEle))
            sys.exit("FCI too expensive. Quitting.")
        # FIXME: limited to 64 orbitals at the moment
        for occlist in combinations(range(nOrb), nEle):
            # tuple of occupied orbitals -> bitstring; .uint is the integer
            # value of that bitstring (presumably a bitstring-library object
            # - TODO confirm against tuple2bitstring).
            string = PostSCF.tuple2bitstring(occlist)
            det = np.array([string.uint])
            det_list.append(det)
        H = self.build_full_hamiltonian(det_list)
        print("Diagonalizing Hamiltonian...")
        #E,C = scipy.linalg.eigh(H)
        # Davidson iterative solver for the lowest 3 eigenpairs.
        E,C = davidson(H,3)
        self.mol.efci = E[0] + self.mol.nuc_energy
        print("\nFull Configuration Interaction")
        print("------------------------------")
        print("# Determinants: ",len(det_list))
        print("SCF energy: %12.8f" % self.mol.energy.real)
        print("FCI corr: %12.8f" % (self.mol.efci - self.mol.energy.real))
        print("FCI energy: %12.8f" % self.mol.efci)
    def CIS(self):
        """ Routine to compute CIS from RHF reference.

        Builds the CIS "A" matrix in the spin-orbital basis, diagonalizes it,
        and stores excitation energies (eV) in self.mol.cis_omega and
        oscillator strengths in self.mol.cis_oscil.
        """
        nOcc = self.mol.nelec
        nVir = self.mol.norb - self.mol.nelec
        nOV = nOcc * nVir
        occ = slice(nOcc)
        vir = slice(nOcc,self.mol.norb)
        if nOV > 5000:
            print("Number determinants: ",nOV)
            sys.exit("CIS too expensive. Quitting.")
        # A_{ia,jb} = (e_a - e_i) delta_ij delta_ab + <aj||ib>
        A = np.einsum('ab,ij->iajb',np.diag(np.diag(self.mol.fs)[vir]),np.diag(np.ones(nOcc))) # + e_a
        A -= np.einsum('ij,ab->iajb',np.diag(np.diag(self.mol.fs)[occ]),np.diag(np.ones(nVir))) # - e_i
        A += np.einsum('ajib->iajb',self.mol.double_bar[vir,occ,occ,vir]) # + <aj||ib>
        A = A.reshape(nOV,nOV)
        # Alternative determinant-based construction kept for reference:
        #if construction == 'bitstring':
        # det_list = []
        # # FIXME: limited to 64 orbitals at the moment
        # occ = range(nEle)
        # vir = range(nEle,nOrb)
        # occlist_string = product(combinations(occ,nEle-1),combinations(vir,1)) # all single excitations
        # # FIXME: this will not work for Python < 3.5
        # occlist_string = [(*a,*b) for a,b in occlist_string] # unpack tuples to list of tuples of occupied orbitals
        # assert len(occlist_string) == nOV
        # for occlist in occlist_string:
        # string = PostSCF.tuple2bitstring(occlist)
        # det = np.array([string.uint])
        # det_list.append(det)
        # A = self.build_full_hamiltonian(det_list)
        # # subtract reference to get true "A" matrix
        # A += np.eye(len(A))*(- self.mol.energy.real + self.mol.nuc_energy)
        print("Diagonalizing Hamiltonian...")
        transition_energies, transition_densities = scipy.linalg.eigh(A)
        # MO tx dipole integrals; Kronecker product with a 2x2 identity
        # expands the spatial-MO dipoles into the spin-orbital basis.
        spin = np.eye(2)
        mo_basis_dipoles = np.kron(np.einsum('uj,vi,...uv', \
            self.mol.C, self.mol.C, \
            self.mol.M).real,spin)
        oscillator_strengths = np.zeros_like(transition_energies)
        for state in range(len(transition_energies)):
            transition_density = transition_densities[:,state]
            # Contract the occ-vir dipole block with the transition density.
            transition_dipoles = np.einsum('ia,pia->p', \
                transition_density.reshape(nOcc,nVir), \
                mo_basis_dipoles[:,occ,vir])
            sum_sq_td = np.einsum('p,p',transition_dipoles,transition_dipoles)
            # f = (2/3) * omega * |mu|^2 (length gauge).
            oscillator_strengths[state] = (2/3)*transition_energies[state]*sum_sq_td
        # represent as energy differences / excitation energies
        transition_energies *= 27.211399 # to eV
        self.mol.cis_omega = transition_energies
        self.mol.cis_oscil = oscillator_strengths
        print("\nConfiguration Interaction Singles (CIS)")
        print("------------------------------")
        print("# Determinants: ",len(A))
        print("nOcc * nVirt: ",nOV)
        for state in range(min(len(A),30)):
            print("CIS state %2s (eV): %12.4f (f=%6.4f)" % (state+1,self.mol.cis_omega[state],self.mol.cis_oscil[state]))
    def TDHF(self,alg='hermitian'):
        """ Routine to compute TDHF from RHF reference
        alg: 'hermitian' (does the Hermitian reduced variant, sqrt(A-B).(A+B).sqrt(A-B))
             'reduced' (does the non-Hermitian reduced variant, (A-B).(A+B))
             'full' (does the non-Hermitian [[A,B],[-B.T,-A.T]]')

        Stores excitation energies (eV) in self.mol.tdhf_omega.
        """
        nOcc = self.mol.nelec
        nVir = self.mol.norb - self.mol.nelec
        nOV = nOcc * nVir
        occ = slice(nOcc)
        vir = slice(nOcc,self.mol.norb)
        # form full A and B matrices
        A = np.einsum('ab,ij->iajb',np.diag(np.diag(self.mol.fs)[vir]),np.diag(np.ones(nOcc))) # + e_a
        A -= np.einsum('ij,ab->iajb',np.diag(np.diag(self.mol.fs)[occ]),np.diag(np.ones(nVir))) # - e_i
        A += np.einsum('ajib->iajb',self.mol.double_bar[vir,occ,occ,vir]) # + <aj||ib>
        B = np.einsum('abij->iajb',self.mol.double_bar[vir,vir,occ,occ]) # + <ab||ij>
        A = A.reshape(nOV,nOV)
        B = B.reshape(nOV,nOV)
        # doing Hermitian variant
        if alg == 'hermitian':
            # Eigenvalues of sqrt(A-B).(A+B).sqrt(A-B) are omega^2.
            sqrt_term = sqrtm(A-B)
            H = np.dot(sqrt_term,np.dot(A+B,sqrt_term))
            transition_energies,transition_densities = scipy.linalg.eigh(H)
            transition_energies = np.sqrt(transition_energies)
        elif alg == 'reduced':
            # Non-Hermitian product; eigenvalues are omega^2, may come out
            # unsorted/complex, hence the sort and .real below.
            H = np.dot(A-B,A+B)
            transition_energies,transition_densities = np.linalg.eig(H)
            transition_energies = np.sqrt(transition_energies)
            idx = transition_energies.argsort()
            transition_energies = transition_energies[idx].real
        elif alg == 'full':
            # Full 2nOV x 2nOV response matrix; spectrum is +/- omega pairs.
            H = np.block([[A,B],[-B.T,-A.T]])
            transition_energies,transition_densities = np.linalg.eig(H)
            idx = transition_energies.argsort()
            transition_energies = transition_energies[idx].real
            # take positive eigenvalues
            transition_energies = transition_energies[nOV:]
        transition_energies *= 27.211399 # to eV
        self.mol.tdhf_omega = transition_energies
        print("\nTime-dependent Hartree-Fock (TDHF)")
        print("------------------------------")
        print("Algorithm: ",alg)
        print("Matrix shape: ",len(H))
        print("2 * nOcc * nVirt: ",2*nOV)
        for state in range(min(len(A),10)):
            print("TDHF state %2s (eV): %12.4f" % (state+1,self.mol.tdhf_omega[state]))
| |
"""
This module makes it possible to instantiate a new Troposphere Template object
from an existing CloudFormation Template.
Usage:
from troposphere.template_generator import TemplateGenerator
import json
with open("myCloudFormationTemplate.json") as f:
json_template = json.load(f)
template = TemplateGenerator(json_template)
template.to_json()
"""
import inspect
import pkgutil
import importlib
import os
from collections import Sequence, Mapping
from troposphere import (
Template, Ref,
Output, Parameter, # AWSDeclarations
AWSObject, # covers resources
AWSHelperFn, GenericHelperFn, # covers ref, fn::, etc
Tags, autoscaling, cloudformation)
from troposphere.policies import UpdatePolicy, CreationPolicy
class TemplateGenerator(Template):
    """Builds a troposphere Template from a parsed CloudFormation template dict.

    NOTE(review): this class uses Python 2-only idioms throughout
    (dict.iteritems(), basestring, ex.message, dict.keys()[0] indexing);
    it will not run unmodified on Python 3.
    """
    DEPRECATED_MODULES = ['troposphere.dynamodb2']
    # Class-level caches shared by all instances, lazily filled by the
    # inspect_* properties below.
    _inspect_members = set()
    _inspect_resources = {}
    _inspect_functions = {}
    def __init__(self, cf_template):
        """
        Instantiates a new Troposphere Template based on an existing
        Cloudformation Template.

        :param dict cf_template: a CloudFormation template already parsed
            into a mapping (e.g. via json.load).
        """
        super(TemplateGenerator, self).__init__()
        self._reference_map = {}
        # Copy the simple top-level attributes verbatim.
        if 'AWSTemplateFormatVersion' in cf_template:
            self.add_version(cf_template['AWSTemplateFormatVersion'])
        if 'Transform' in cf_template:
            self.add_transform(cf_template['Transform'])
        if 'Description' in cf_template:
            self.add_description(cf_template['Description'])
        if 'Metadata' in cf_template:
            self.add_metadata(cf_template['Metadata'])
        # Convert each section into the corresponding troposphere objects.
        for k, v in cf_template.get('Parameters', {}).iteritems():
            self.add_parameter(self._create_instance(Parameter, v, k))
        for k, v in cf_template.get('Mappings', {}).iteritems():
            self.add_mapping(k, self._convert_definition(v))
        for k, v in cf_template.get('Conditions', {}).iteritems():
            self.add_condition(k, self._convert_definition(v, k))
        for k, v in cf_template.get('Resources', {}).iteritems():
            self.add_resource(self._convert_definition(
                v, k,
                self._get_resource_type_cls(v)
            ))
        for k, v in cf_template.get('Outputs', {}).iteritems():
            self.add_output(self._create_instance(Output, v, k))
    @property
    def inspect_members(self):
        """
        Returns the list of all troposphere members we are able to
        construct
        """
        if not self._inspect_members:
            # Populate the class-level cache on first access.
            TemplateGenerator._inspect_members = \
                self._import_all_troposphere_modules()
        return self._inspect_members
    @property
    def inspect_resources(self):
        """ Returns a map of `ResourceType: ResourceClass` """
        if not self._inspect_resources:
            d = {}
            for m in self.inspect_members:
                # Only concrete resources carry a resource_type attribute.
                if issubclass(m, (AWSObject, cloudformation.AWSCustomObject)) \
                        and hasattr(m, 'resource_type'):
                    d[m.resource_type] = m
            TemplateGenerator._inspect_resources = d
        return self._inspect_resources
    @property
    def inspect_functions(self):
        """ Returns a map of `FunctionName: FunctionClass` """
        if not self._inspect_functions:
            d = {}
            for m in self.inspect_members:
                if issubclass(m, AWSHelperFn):
                    d[m.__name__] = m
            TemplateGenerator._inspect_functions = d
        return self._inspect_functions
    def _get_resource_type_cls(self, resource):
        """Attempts to return troposphere class that represents Type of
        provided resource. Attempts to find the troposphere class who's
        `resource_type` field is the same as the provided resources `Type`
        field.
        :param resource: Resource to find troposphere class for
        :return: None: If provided resource does not have a `Type` field
                       If no class found for provided resource
                 type: Type of provided resource
        """
        # If provided resource does not have `Type` field
        if 'Type' not in resource:
            return None
        # Attempt to find troposphere resource with:
        # `resource_type` == resource['Type']
        try:
            return self.inspect_resources[resource['Type']]
        except KeyError:
            # If no resource with `resource_type` == resource['Type'] found
            return None
    def _convert_definition(self, definition, ref=None, cls=None):
        """
        Converts any object to its troposphere equivalent, if applicable.
        This function will recurse into lists and mappings to create
        additional objects as necessary.
        :param {*} definition: Object to convert
        :param str ref: Name of key in parent dict that the provided definition
                        is from, can be None
        :param type cls: Troposphere class which represents provided definition
        """
        if isinstance(definition, Mapping):
            if 'Type' in definition:  # this is an AWS Resource
                expected_type = None
                if cls is not None:
                    expected_type = cls
                else:
                    # if the user uses the custom way to name custom resources,
                    # we'll dynamically create a new subclass for this use and
                    # pass that instead of the typical CustomObject resource
                    try:
                        expected_type = self._generate_custom_type(
                            definition['Type'])
                    except TypeError:
                        # If definition['Type'] turns out not to be a custom
                        # type (aka doesn't start with "Custom::")
                        # Make sure expected_type is nothing (as
                        # it always should be)
                        assert not expected_type
                if expected_type:
                    args = self._normalize_properties(definition)
                    return self._create_instance(expected_type, args, ref)
            if len(definition) == 1:  # This might be a function?
                # NOTE(review): dict.keys()[0] / .values()[0] only work on
                # Python 2 where these return lists.
                function_type = self._get_function_type(
                    definition.keys()[0])
                if function_type:
                    return self._create_instance(
                        function_type, definition.values()[0])
            # nothing special here - return as dict
            d = {}
            for k, v in definition.iteritems():
                d[k] = self._convert_definition(v)
            return d
        elif (isinstance(definition, Sequence) and
              not isinstance(definition, basestring)):
            return [self._convert_definition(v) for v in definition]
        # anything else is returned as-is
        return definition
    def _create_instance(self, cls, args, ref=None):
        """
        Returns an instance of `cls` with `args` passed as arguments.
        Recursively inspects `args` to create nested objects and functions as
        necessary.
        `cls` will only be considered only if it's an object we track
        (i.e.: troposphere objects).
        If `cls` has a `props` attribute, nested properties will be
        instanciated as troposphere Property objects as necessary.
        If `cls` is a list and contains a single troposphere type, the
        returned value will be a list of instances of that type.
        """
        if isinstance(cls, Sequence):
            if len(cls) == 1:
                # a list of 1 type means we must provide a list of such objects
                if (isinstance(args, basestring) or
                        not isinstance(args, Sequence)):
                    args = [args]
                return [self._create_instance(cls[0], v) for v in args]
        if isinstance(cls, Sequence) or cls not in self.inspect_members:
            # this object doesn't map to any known object. could be a string
            # or int, or a Ref... or a list of types such as
            # [basestring, FindInMap, Ref] or maybe a
            # validator such as `integer` or `port_range`
            return self._convert_definition(args)
        elif issubclass(cls, AWSHelperFn):
            # special handling for functions, we want to handle it before
            # entering the other conditions.
            try:
                if issubclass(cls, Tags):
                    # Tags are a list of {'Key': ..., 'Value': ...} mappings;
                    # flatten them into a single dict for the Tags ctor.
                    arg_dict = {}
                    for d in args:
                        arg_dict[d['Key']] = d['Value']
                    return cls(arg_dict)
                if (isinstance(args, Sequence) and
                        not isinstance(args, basestring)):
                    return cls(*self._convert_definition(args))
                if issubclass(cls, autoscaling.Metadata):
                    return self._generate_autoscaling_metadata(cls, args)
                args = self._convert_definition(args)
                if isinstance(args, Ref) and issubclass(cls, Ref):
                    # watch out for double-refs...
                    # this can happen if an object's .props has 'Ref'
                    # as the expected type (which is wrong and should be
                    # changed to basestring!)
                    return args
                return cls(args)
            except TypeError as ex:
                # NOTE(review): ex.message is Python 2 only.
                if '__init__() takes exactly' not in ex.message:
                    raise
                # special AWSHelperFn typically take lowercased parameters,
                # but templates use uppercase. for this reason we cannot
                # map to most of them, so we fallback with a generic one.
                # this might not work for all types if they do extra
                # processing in their init routine
                return GenericHelperFn(args)
        elif isinstance(args, Mapping):
            # we try to build as many troposphere objects as we can by
            # inspecting its type validation metadata
            kwargs = {}
            kwargs.update(args)
            for prop_name in getattr(cls, 'props', []):
                if prop_name not in kwargs:
                    continue  # the user did not specify this value; skip it
                expected_type = cls.props[prop_name][0]
                if (isinstance(expected_type, Sequence) or
                        expected_type in self.inspect_members):
                    kwargs[prop_name] = self._create_instance(
                        expected_type, kwargs[prop_name], prop_name)
                else:
                    kwargs[prop_name] = self._convert_definition(
                        kwargs[prop_name], prop_name)
            args = self._convert_definition(kwargs)
            if isinstance(args, Ref):
                # use the returned ref instead of creating a new object
                return args
            assert isinstance(args, Mapping)
            return cls(title=ref, **args)
        return cls(self._convert_definition(args))
    def _normalize_properties(self, definition):
        """
        Inspects the definition and returns a copy of it that is updated
        with any special property such as Condition, UpdatePolicy and the
        like.
        """
        args = definition.get('Properties', {}).copy()
        if 'Condition' in definition:
            args.update({'Condition': definition['Condition']})
        if 'UpdatePolicy' in definition:
            # there's only 1 kind of UpdatePolicy; use it
            args.update({'UpdatePolicy': self._create_instance(
                UpdatePolicy, definition['UpdatePolicy'])})
        if 'CreationPolicy' in definition:
            # there's only 1 kind of CreationPolicy; use it
            args.update({'CreationPolicy': self._create_instance(
                CreationPolicy, definition['CreationPolicy'])})
        if 'DeletionPolicy' in definition:
            # DeletionPolicy is very basic; pass the raw value through
            args.update(
                {'DeletionPolicy': self._convert_definition(
                    definition['DeletionPolicy'])})
        if 'Metadata' in definition:
            # there are various kind of metadata; pass it as-is
            args.update(
                {'Metadata': self._convert_definition(
                    definition['Metadata'])})
        if 'DependsOn' in definition:
            args.update(
                {'DependsOn': self._convert_definition(
                    definition['DependsOn'])})
        return args
    def _generate_custom_type(self, resource_type):
        """
        Dynamically allocates a new CustomResource class definition using the
        specified Custom::SomeCustomName resource type. This special resource
        type is equivalent to the AWS::CloudFormation::CustomResource.

        :raises TypeError: when resource_type does not start with "Custom::".
        """
        if not resource_type.startswith("Custom::"):
            raise TypeError("Custom types must start with Custom::")
        custom_type = type(
            str(resource_type.replace("::", "")),
            (self.inspect_resources['AWS::CloudFormation::CustomResource'],),
            {'resource_type': resource_type})
        # Register the new class in the shared caches so later lookups hit it.
        self.inspect_members.add(custom_type)
        self.inspect_resources[resource_type] = custom_type
        return custom_type
    def _generate_autoscaling_metadata(self, cls, args):
        """ Provides special handling for the autoscaling.Metadata object """
        assert isinstance(args, Mapping)
        init_config = self._create_instance(
            cloudformation.InitConfig,
            args['AWS::CloudFormation::Init']['config'])
        init = self._create_instance(
            cloudformation.Init, {'config': init_config})
        auth = None
        if 'AWS::CloudFormation::Authentication' in args:
            # Build one AuthenticationBlock per named block.
            auth_blocks = {}
            for k in args['AWS::CloudFormation::Authentication']:
                auth_blocks[k] = self._create_instance(
                    cloudformation.AuthenticationBlock,
                    args['AWS::CloudFormation::Authentication'][k],
                    k)
            auth = self._create_instance(
                cloudformation.Authentication, auth_blocks)
        return cls(init, auth)
    def _get_function_type(self, function_name):
        """
        Returns the function object that matches the provided name.
        Only Fn:: and Ref functions are supported here so that other
        functions specific to troposphere are skipped.
        """
        if (function_name.startswith("Fn::") and
                function_name[4:] in self.inspect_functions):
            return self.inspect_functions[function_name[4:]]
        return (self.inspect_functions['Ref'] if function_name == "Ref"
                else None)
    def _import_all_troposphere_modules(self):
        """ Imports all troposphere modules and returns them """
        dirname = os.path.join(os.path.dirname(__file__))
        # Walk the troposphere package, skipping deprecated modules.
        module_names = [
            pkg_name
            for importer, pkg_name, is_pkg in
            pkgutil.walk_packages([dirname], prefix="troposphere.")
            if not is_pkg and pkg_name not in self.DEPRECATED_MODULES]
        module_names.append('troposphere')
        modules = []
        for name in module_names:
            modules.append(importlib.import_module(name))
        def members_predicate(m):
            # Keep only pure-Python classes.
            return inspect.isclass(m) and not inspect.isbuiltin(m)
        members = []
        for module in modules:
            members.extend((m[1] for m in inspect.getmembers(
                module, members_predicate)))
        return set(members)
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""a collection of model-related helper classes and functions"""
import json
import logging
import re
from datetime import datetime, timedelta
from json.decoder import JSONDecodeError
from typing import Any, Dict, List, Optional, Set, Union
import humanize
import pandas as pd
import pytz
import sqlalchemy as sa
import yaml
from flask import escape, g, Markup
from flask_appbuilder.models.decorators import renders
from flask_appbuilder.models.mixins import AuditMixin
from flask_appbuilder.security.sqla.models import User
from sqlalchemy import and_, or_, UniqueConstraint
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.orm import Mapper, Session
from sqlalchemy.orm.exc import MultipleResultsFound
from superset.utils.core import QueryStatus
logger = logging.getLogger(__name__)
def json_to_dict(json_str: str) -> Dict[Any, Any]:
    """Parse a JSON string, tolerating a trailing comma before ``}`` or ``]``.

    Falsy input (empty string or None) yields an empty dict.
    """
    if not json_str:
        return {}
    # Strip trailing commas (followed by whitespace) before closing braces
    # and brackets, then parse normally.
    cleaned = re.sub(",[ \t\r\n]+}", "}", json_str)
    cleaned = re.sub(",[ \t\r\n]+\\]", "]", cleaned)
    return json.loads(cleaned)
class ImportMixin:
    """Adds declarative import/export of model objects (dict <-> ORM row)."""
    # Name of the attribute holding the SQLAlchemy back reference to the
    # parent object (or None when the model has no parent).
    export_parent: Optional[str] = None
    # Names of attributes with the SQLAlchemy forward references to children.
    export_children: List[str] = []
    # Names of the attributes that are available for import and export.
    export_fields: List[str] = []
    # Supplied by the SQLAlchemy declarative machinery on mapped subclasses.
    __mapper__: Mapper
    @classmethod
    def _parent_foreign_key_mappings(cls) -> Dict[str, str]:
        """Get a mapping of foreign name to the local name of foreign keys"""
        parent_rel = cls.__mapper__.relationships.get(cls.export_parent)
        if parent_rel:
            return {l.name: r.name for (l, r) in parent_rel.local_remote_pairs}
        return {}
    @classmethod
    def _unique_constrains(cls) -> List[Set[str]]:
        """Get all (single column and multi column) unique constraints"""
        unique = [
            {c.name for c in u.columns}
            for u in cls.__table_args__  # type: ignore
            if isinstance(u, UniqueConstraint)
        ]
        # Single-column unique flags count as one-element constraints.
        unique.extend(
            {c.name} for c in cls.__table__.columns if c.unique  # type: ignore
        )
        return unique
    @classmethod
    def parent_foreign_key_mappings(cls) -> Dict[str, str]:
        """Get a mapping of foreign name to the local name of foreign keys"""
        # NOTE(review): duplicates _parent_foreign_key_mappings above;
        # presumably kept as the public variant.
        parent_rel = cls.__mapper__.relationships.get(cls.export_parent)
        if parent_rel:
            return {l.name: r.name for (l, r) in parent_rel.local_remote_pairs}
        return {}
    @classmethod
    def export_schema(
        cls, recursive: bool = True, include_parent_ref: bool = False
    ) -> Dict[str, Any]:
        """Export schema as a dictionary"""
        parent_excludes = set()
        if not include_parent_ref:
            # Hide the FK columns pointing at the parent unless asked for.
            parent_ref = cls.__mapper__.relationships.get(cls.export_parent)
            if parent_ref:
                parent_excludes = {column.name for column in parent_ref.local_columns}
        def formatter(column: sa.Column) -> str:
            # Render "TYPE Default (value)" when a default exists.
            return (
                "{0} Default ({1})".format(str(column.type), column.default.arg)
                if column.default
                else str(column.type)
            )
        schema: Dict[str, Any] = {
            column.name: formatter(column)
            for column in cls.__table__.columns  # type: ignore
            if (column.name in cls.export_fields and column.name not in parent_excludes)
        }
        if recursive:
            for column in cls.export_children:
                child_class = cls.__mapper__.relationships[column].argument.class_
                schema[column] = [
                    child_class.export_schema(
                        recursive=recursive, include_parent_ref=include_parent_ref
                    )
                ]
        return schema
    @classmethod
    def import_from_dict(
        # pylint: disable=too-many-arguments,too-many-branches,too-many-locals
        cls,
        session: Session,
        dict_rep: Dict[Any, Any],
        parent: Optional[Any] = None,
        recursive: bool = True,
        sync: Optional[List[str]] = None,
    ) -> Any:
        """Import obj from a dictionary.

        Creates the object if it does not exist (matched via parent FKs and
        unique constraints), updates it otherwise, and recurses into
        export_children. Children listed in *sync* that were not part of the
        import are deleted.
        """
        if sync is None:
            sync = []
        parent_refs = cls.parent_foreign_key_mappings()
        export_fields = set(cls.export_fields) | set(parent_refs.keys())
        new_children = {c: dict_rep[c] for c in cls.export_children if c in dict_rep}
        unique_constrains = cls._unique_constrains()
        filters = []  # Using these filters to check if obj already exists
        # Remove fields that should not get imported
        for k in list(dict_rep):
            if k not in export_fields:
                del dict_rep[k]
        if not parent:
            if cls.export_parent:
                for prnt in parent_refs.keys():
                    if prnt not in dict_rep:
                        raise RuntimeError(
                            "{0}: Missing field {1}".format(cls.__name__, prnt)
                        )
        else:
            # Set foreign keys to parent obj
            for k, v in parent_refs.items():
                dict_rep[k] = getattr(parent, v)
        # Add filter for parent obj
        filters.extend([getattr(cls, k) == dict_rep.get(k) for k in parent_refs.keys()])
        # Add filter for unique constraints
        ucs = [
            and_(
                *[
                    getattr(cls, k) == dict_rep.get(k)
                    for k in cs
                    if dict_rep.get(k) is not None
                ]
            )
            for cs in unique_constrains
        ]
        filters.append(or_(*ucs))
        # Check if object already exists in DB, break if more than one is found
        try:
            obj_query = session.query(cls).filter(and_(*filters))
            obj = obj_query.one_or_none()
        except MultipleResultsFound as ex:
            logger.error(
                "Error importing %s \n %s \n %s",
                cls.__name__,
                str(obj_query),
                yaml.safe_dump(dict_rep),
            )
            raise ex
        if not obj:
            is_new_obj = True
            # Create new DB object
            obj = cls(**dict_rep)  # type: ignore
            logger.info("Importing new %s %s", obj.__tablename__, str(obj))
            if cls.export_parent and parent:
                setattr(obj, cls.export_parent, parent)
            session.add(obj)
        else:
            is_new_obj = False
            logger.info("Updating %s %s", obj.__tablename__, str(obj))
            # Update columns
            for k, v in dict_rep.items():
                setattr(obj, k, v)
        # Recursively create children
        if recursive:
            for child in cls.export_children:
                child_class = cls.__mapper__.relationships[child].argument.class_
                added = []
                for c_obj in new_children.get(child, []):
                    added.append(
                        child_class.import_from_dict(
                            session=session, dict_rep=c_obj, parent=obj, sync=sync
                        )
                    )
                # If children should get synced, delete the ones that did not
                # get updated.
                if child in sync and not is_new_obj:
                    back_refs = child_class.parent_foreign_key_mappings()
                    delete_filters = [
                        getattr(child_class, k) == getattr(obj, back_refs.get(k))
                        for k in back_refs.keys()
                    ]
                    to_delete = set(
                        session.query(child_class).filter(and_(*delete_filters))
                    ).difference(set(added))
                    for o in to_delete:
                        logger.info("Deleting %s %s", child, str(obj))
                        session.delete(o)
        return obj
    def export_to_dict(
        self,
        recursive: bool = True,
        include_parent_ref: bool = False,
        include_defaults: bool = False,
    ) -> Dict[Any, Any]:
        """Export obj to dictionary"""
        cls = self.__class__
        parent_excludes = set()
        if recursive and not include_parent_ref:
            parent_ref = cls.__mapper__.relationships.get(cls.export_parent)
            if parent_ref:
                parent_excludes = {c.name for c in parent_ref.local_columns}
        # Include a column only when exportable, not a parent FK, and either
        # defaults are requested or the value differs from the default.
        dict_rep = {
            c.name: getattr(self, c.name)
            for c in cls.__table__.columns  # type: ignore
            if (
                c.name in self.export_fields
                and c.name not in parent_excludes
                and (
                    include_defaults
                    or (
                        getattr(self, c.name) is not None
                        and (not c.default or getattr(self, c.name) != c.default.arg)
                    )
                )
            )
        }
        if recursive:
            for cld in self.export_children:
                # sorting to make lists of children stable
                dict_rep[cld] = sorted(
                    [
                        child.export_to_dict(
                            recursive=recursive,
                            include_parent_ref=include_parent_ref,
                            include_defaults=include_defaults,
                        )
                        for child in getattr(self, cld)
                    ],
                    key=lambda k: sorted(str(k.items())),
                )
        return dict_rep
    def override(self, obj: Any) -> None:
        """Overrides the plain fields of the dashboard."""
        for field in obj.__class__.export_fields:
            setattr(self, field, getattr(obj, field))
    def copy(self) -> Any:
        """Creates a copy of the dashboard without relationships."""
        new_obj = self.__class__()
        new_obj.override(self)
        return new_obj
    def alter_params(self, **kwargs: Any) -> None:
        """Merge *kwargs* into the JSON-encoded ``params`` attribute."""
        params = self.params_dict
        params.update(kwargs)
        self.params = json.dumps(params)
    def remove_params(self, param_to_remove: str) -> None:
        """Delete one key (if present) from the JSON-encoded ``params``."""
        params = self.params_dict
        params.pop(param_to_remove, None)
        self.params = json.dumps(params)
    def reset_ownership(self) -> None:
        """ object will belong to the user the current user """
        # make sure the object doesn't have relations to a user
        # it will be filled by appbuilder on save
        self.created_by = None
        self.changed_by = None
        # flask global context might not exist (in cli or tests for example)
        self.owners = []
        if g and hasattr(g, "user"):
            self.owners = [g.user]
    @property
    def params_dict(self) -> Dict[Any, Any]:
        """``params`` parsed into a dict (empty when unset)."""
        return json_to_dict(self.params)
    @property
    def template_params_dict(self) -> Dict[Any, Any]:
        """``template_params`` parsed into a dict (empty when unset)."""
        return json_to_dict(self.template_params)  # type: ignore
def _user_link(user: User) -> Union[Markup, str]:
    """Render an HTML anchor to the user's profile page, or "" when no user."""
    if not user:
        return ""
    profile_url = "/superset/profile/{}/".format(user.username)
    anchor = '<a href="{}">{}</a>'.format(profile_url, escape(user) or "")
    return Markup(anchor)
class AuditMixinNullable(AuditMixin):
    """Altering the AuditMixin to use nullable fields
    Allows creating objects programmatically outside of CRUD
    """
    # NOTE(review): datetime.now is naive (server-local time); changed_on_utc
    # below relies on astimezone() interpreting it as local — confirm.
    created_on = sa.Column(sa.DateTime, default=datetime.now, nullable=True)
    changed_on = sa.Column(
        sa.DateTime, default=datetime.now, onupdate=datetime.now, nullable=True
    )
    @declared_attr
    def created_by_fk(self) -> sa.Column:
        # FK to the creating user; filled from the session by default.
        return sa.Column(
            sa.Integer,
            sa.ForeignKey("ab_user.id"),
            default=self.get_user_id,
            nullable=True,
        )
    @declared_attr
    def changed_by_fk(self) -> sa.Column:
        # FK to the last-modifying user; refreshed on every update.
        return sa.Column(
            sa.Integer,
            sa.ForeignKey("ab_user.id"),
            default=self.get_user_id,
            onupdate=self.get_user_id,
            nullable=True,
        )
    @property
    def changed_by_name(self) -> str:
        """HTML-escaped display name of the last editor, or ""."""
        if self.changed_by:
            return escape("{}".format(self.changed_by))
        return ""
    @renders("created_by")
    def creator(self) -> Union[Markup, str]:
        """Profile link for the creating user."""
        return _user_link(self.created_by)
    @property
    def changed_by_(self) -> Union[Markup, str]:
        """Profile link for the last editor."""
        return _user_link(self.changed_by)
    @renders("changed_on")
    def changed_on_(self) -> Markup:
        return Markup(f'<span class="no-wrap">{self.changed_on}</span>')
    @renders("changed_on")
    def changed_on_delta_humanized(self) -> str:
        return self.changed_on_humanized
    @renders("changed_on")
    def changed_on_utc(self) -> str:
        # Convert naive datetime to UTC
        return self.changed_on.astimezone(pytz.utc).strftime("%Y-%m-%dT%H:%M:%S.%f%z")
    @property
    def changed_on_humanized(self) -> str:
        """Relative time string, e.g. "3 hours ago"."""
        return humanize.naturaltime(datetime.now() - self.changed_on)
    @renders("changed_on")
    def modified(self) -> Markup:
        return Markup(f'<span class="no-wrap">{self.changed_on_humanized}</span>')
class QueryResult:  # pylint: disable=too-few-public-methods
    """Object returned by the query interface.

    Plain value container: the result DataFrame plus execution metadata.
    """
    def __init__(  # pylint: disable=too-many-arguments
        self,
        df: pd.DataFrame,
        query: str,
        duration: timedelta,
        status: str = QueryStatus.SUCCESS,
        error_message: Optional[str] = None,
        errors: Optional[List[Dict[str, Any]]] = None,
    ) -> None:
        self.df = df                          # result rows
        self.query = query                    # SQL text that was executed
        self.duration = duration              # wall-clock execution time
        self.status = status                  # QueryStatus value
        self.error_message = error_message    # human-readable error, if any
        self.errors = errors or []            # structured error payloads
class ExtraJSONMixin:
    """Mixin to add an `extra` column (JSON) and utility methods"""
    # Raw JSON text; parsed on demand via the ``extra`` property.
    extra_json = sa.Column(sa.Text, default="{}")
    @property
    def extra(self) -> Dict[str, Any]:
        """``extra_json`` parsed into a dict; empty dict on bad/missing JSON."""
        try:
            return json.loads(self.extra_json)
        except (TypeError, JSONDecodeError) as exc:
            # Tolerate corrupt/unset JSON rather than failing the caller.
            logger.error(
                "Unable to load an extra json: %r. Leaving empty.", exc, exc_info=True
            )
            return {}
    def set_extra_json(self, extras: Dict[str, Any]) -> None:
        """Replace the whole extra payload."""
        self.extra_json = json.dumps(extras)
    def set_extra_json_key(self, key: str, value: Any) -> None:
        """Set a single key, preserving the rest of the payload."""
        extra = self.extra
        extra[key] = value
        self.extra_json = json.dumps(extra)
| |
# pbmqtt.py Implement MQTT on Pyboard using an ESP8266
# The ESP8266 runs mqtt.py on startup.
# Boards are electrically connected as per the README.
# asyncio version
# On fatal error performs hardware reset on ESP8266.
# Author: Peter Hinch.
# Copyright Peter Hinch 2017-2021 Released under the MIT license.
# SynCom throughput 118 char/s measured 8th Aug 2017 sending a publication
# while application running. ESP8266 running at 160MHz. (Chars are 7 bits).
import uasyncio as asyncio
from utime import localtime, gmtime, time
from syncom import SynCom
from status_values import * # Numeric status values shared with user code.
__version__ = (0, 1, 0)
# Default configuration values; user-supplied dicts passed to MQTTlink
# override these entries.
defaults = {
    'user_start' : (lambda *_ : None, ()),  # (coro, args) run at startup
    'fast' : True,
    'mqtt_user' : '',
    'mqtt_pw' : '',
    'ssl' : False,
    'ssl_params' : repr({}),
    'use_default_net' : True,
    'port' : 0,
    'keepalive' : 60,
    'ping_interval' : 0,
    'clean_session' : True,
    'debug' : False,  # ESP8266 verbose
    'verbose' : False,  # Host
    'timeout' : 10,
    'max_repubs' : 4,
    'response_time' : 10,
    'wifi_handler' : (lambda *_ : None, ()),  # (coro, args) on WiFi change
    'crash_handler' : (lambda *_ : None, ()),  # (coro, args) on ESP crash
    'status_handler' : None,  # defaults to default_status_handler below
    'timeserver' : 'pool.ntp.org',
    }
def buildinit(d):
    """Serialise the config dict *d* into the 'init' command string that is
    sent to the ESP8266 (field order must match the ESP-side parser)."""
    fields = (
        'init', d['ssid'], d['password'], d['broker'], d['mqtt_user'],
        d['mqtt_pw'], d['ssl_params'], int(d['use_default_net']), d['port'],
        int(d['ssl']), int(d['fast']), d['keepalive'], int(d['debug']),
        int(d['clean_session']), d['max_repubs'], d['response_time'],
        d['ping_interval'], d['timeserver'])
    return argformat(*fields)
# Status groupings used when deciding whether to reboot the ESP8266.
# WIFI_DOWN is bad during initialisation only.
_BAD_STATUS = (BROKER_FAIL, WIFI_DOWN, UNKNOWN)
_DIRE_STATUS = (BROKER_FAIL, UNKNOWN)  # Always fatal
# Format an arbitrary list of positional args as a status_values.SEP separated string
def argformat(*a):
    """Join the positional args into one SEP-separated string."""
    # One '{}' placeholder per argument; list-multiplication replaces the
    # needless comprehension over range(len(a)).
    return SEP.join(['{}'] * len(a)).format(*a)
def printtime():
    """Print the current local time as 'HH:MM:SS ' without a newline."""
    # Snapshot localtime() once: the original called it three times, so the
    # printed H/M/S could straddle a second (or minute/hour) rollover.
    t = localtime()
    print('{:02d}:{:02d}:{:02d} '.format(t[3], t[4], t[5]), end='')
def qos_check(qos):
    """Raise ValueError unless *qos* is the integer 0 or 1."""
    if not isinstance(qos, int) or qos not in (0, 1):
        raise ValueError('Only qos 0 and 1 are supported.')
async def heartbeat(led):
    """Toggle `led` every 500 ms as a visual 'scheduler alive' indicator."""
    while True:
        await asyncio.sleep_ms(500)
        state = led()
        led(not state)
# Replace to handle status changes. In the case of fatal status values the
# ESP8266 will be rebooted on return. You may want to pause for remedial
# action before the reboot. Information statuses can be ignored with rapid
# return. Cases which may require attention:
# SPECNET return 1 to try specified network or 0 to reboot ESP. Code below
# tries specified LAN (if default LAN fails) on first run only to limit
# flash wear.
# BROKER_FAIL Pause for server fix? Return (and so reboot) after delay?
async def default_status_handler(mqtt_link, status):
    """Default status callback.

    Return values only matter to the caller for SPECNET: 1 = try the
    user-specified network, 0 = reboot the ESP8266 (see pb_status.py).
    """
    await asyncio.sleep_ms(0)
    if status == SPECNET:
        if mqtt_link.first_run:
            mqtt_link.first_run = False
            return 1  # By default try specified network on 1st run only
    # NOTE(review): this coroutine is never awaited, so the intended 30 s
    # "pause before reboot" is currently a no-op. Simply awaiting it here
    # would stall EVERY status callback by 30 s; the sleep most likely
    # belongs inside the SPECNET branch. Confirm intent before changing.
    asyncio.sleep(30)  # Pause before reboot.
    return 0  # Return values are for user handlers: see pb_status.py
# Pub topics and messages restricted to 7 bits, 0 and 127 disallowed.
def validate(s, item):
    """Raise ValueError if the UTF-8 encoding of `s` contains a NUL byte or
    any byte >= 127 (the link carries 7-bit characters only)."""
    encoded = s.encode('UTF8')
    for byte in encoded:
        if byte == 0 or byte >= 127:
            raise ValueError('Illegal character in {} in {}'.format(encoded, item))
class MQTTlink:
    """Host-side interface to an ESP8266 running mqtt.py, linked via SynCom.

    Provides an asyncio publish/subscribe API plus status handling, and
    supervises the ESP8266: after any fatal error it is hardware-reset and
    re-initialised transparently.
    """
    # Class-wide last will parameters: set via .will() before instantiation.
    lw_topic = None
    lw_msg = None
    lw_retain = False
    lw_qos = 0
    @classmethod
    def will(cls, topic, msg, retain=False, qos=0):
        """Register a last will which the broker publishes if the link dies."""
        cls.lw_topic = topic
        cls.lw_msg = msg
        cls.lw_retain = retain
        cls.lw_qos = qos
    status_msgs = ('connected to broker', 'awaiting broker',
        'awaiting default network', 'awaiting specified network',
        'publish OK', 'running', 'unknown', 'Will registered', 'Fail to connect to broker',
        'WiFi up', 'WiFi down')
    def __init__(self, *args, **kwargs):
        # Fix: work on a copy. The original aliased the module-level
        # `defaults` dict and updated it in place, so per-instance overrides
        # leaked into any later instantiation.
        d = dict(defaults)  # d is the config dict, seeded with default values
        for arg in args:
            d.update(arg)  # Hardware and network configs
        d.update(kwargs)
        # d is now populated.
        self.user_start = d['user_start']
        shan = d['status_handler']
        self.s_han = (default_status_handler, ()) if shan is None else shan  # coro
        self.crash_han = d['crash_handler']
        self.wifi_han = d['wifi_handler']
        self.init_str = buildinit(d)
        self.keepalive = d['keepalive']
        self._evtrun = asyncio.Event()
        self.verbose = d['verbose']
        # Watchdog timeout for ESP8266 (ms).
        wdog = d['timeout'] * 1000
        # SynCom string mode
        self.channel = SynCom(False, d['sckin'], d['sckout'], d['srx'], d['stx'],
                              d['reset'], wdog, True, self.verbose)
        if 'led' in d:
            asyncio.create_task(heartbeat(d['led']))
        # Start the SynCom instance. This will run self.start(). If an error
        # occurs self.quit() is called which returns to Syncom's start() method.
        # This waits on ._die before forcing a failure on the ESP8266 and
        # re-running self.start() to perform a clean restart.
        asyncio.create_task(self.channel.start(self.start, self._die))
        self.subs = {}  # (callback, qos, args) indexed by topic
        self.publock = asyncio.Lock()
        self.puback = asyncio.Event()
        self.evttim = asyncio.Event()
        self.evtwifi = asyncio.Event()
        # Only specify network on first run
        self.first_run = True
        self._time = 0  # NTP time. Invalid.
        # ESP8266 returns seconds from 2000 because I believed the docs.
        # Maintainers change the epoch on a whim.
        # Calculate offsets in CPython using datetime.date:
        # (date(2000, 1, 1) - date(1970, 1, 1)).days * 24*60*60
        epoch = {2000 : 0, 1970 : 946684800}  # Offset to add.
        self._epoch_fix = epoch[gmtime(0)[0]]  # Find offset on current platform
    # API
    async def publish(self, topic, msg, retain=False, qos=0):
        """Publish msg to topic. Blocks (under a lock) until the ESP8266
        signals completion via PUBOK: immediate for qos 0, otherwise when
        PUBACK arrives from the broker."""
        qos_check(qos)
        validate(topic, 'topic')  # Raise ValueError if invalid.
        validate(msg, 'message')
        await self.ready()
        async with self.publock:
            self.channel.send(argformat(PUBLISH, topic, msg, int(retain), qos))
            # Wait for ESP8266 to complete. Quick if qos==0. May be very slow
            # in an outage
            await self.puback.wait()
    async def subscribe(self, topic, qos, callback, *args):
        """Subscribe to topic; callback(topic, msg, retained, *args) runs on
        each received publication."""
        qos_check(qos)
        # Save subscription to resubscribe after outage
        self.subs[topic] = (callback, qos, args)
        # Subscribe
        await self.ready()
        self.channel.send(argformat(SUBSCRIBE, topic, qos))
    # Command handled directly by mqtt.py on ESP8266 e.g. MEM
    async def command(self, *argsend):
        """Send a raw command handled directly by mqtt.py on the ESP8266."""
        await self.ready()
        self.channel.send(argformat(*argsend))
    def running(self):
        """Return True once the ESP8266 has reported RUNNING status."""
        return self._evtrun.is_set()
    async def ready(self):
        """Pause until the link is running and WiFi is up."""
        await self._evtrun.wait()
        await self.evtwifi.wait()
    def wifi(self):
        """Return True if WiFi is currently up."""
        return self.evtwifi.is_set()
    # Attempt to retrieve NTP time in secs since 2000 or device epoch
    async def get_time(self, pause=120, y2k=False):
        """Repeatedly request NTP time until a valid value arrives.

        Returns seconds since the platform epoch, or since 2000 if y2k.
        """
        delta = 0 if y2k else self._epoch_fix
        self.evttim.clear()  # Defensive
        self._time = 0  # Invalidate
        while True:
            await self.ready()
            self.channel.send(TIME)
            try:
                await asyncio.wait_for(self.evttim.wait(), pause // 2)
            except asyncio.TimeoutError:
                pass
            if self._time:
                return self._time + delta  # Fix epoch
            await asyncio.sleep(pause)
    # API END
    def _do_time(self, action):  # TIME received from ESP8266
        try:
            self._time = int(action[0])
        except ValueError:  # Gibberish.
            self._time = 0  # May be 0 if ESP has signalled failure
        # Pulse the event so a waiting get_time() call proceeds.
        self.evttim.set()
        self.evttim.clear()
    async def _die(self):  # ESP has crashed. Run user callback if provided.
        cb, args = self.crash_han
        cb(self, *args)
        await asyncio.sleep_ms(0)
    # Convenience method allows this error code:
    # return self.quit(message)
    # This method is called from self.start. Note that return is to SynCom's
    # .start method which will launch ._die
    def quit(self, *args):
        """Optionally print a diagnostic, then clear the run flag so local
        tasks terminate and SynCom restarts the ESP8266."""
        # Fix: truth test replaces "args is not ()", which compared identity
        # and only worked because CPython interns the empty tuple (and warns
        # about it on 3.8+).
        if args:
            self.verbose and print(*args)
        self._evtrun.clear()
    def get_cmd(self, istr):
        """Split a received line into (command, [args])."""
        ilst = istr.split(SEP)
        return ilst[0], ilst[1:]
    def do_status(self, action, last_status):
        """Process a STATUS message; returns the status as an int (UNKNOWN
        on gibberish, which makes the caller reboot the ESP8266)."""
        try:
            iact = int(action[0])
        except ValueError:  # Gibberish from ESP8266: caller reboots
            return UNKNOWN
        if iact == PUBOK:
            # Occurs after all publications. If qos==0 immediately, otherwise
            # when PUBACK received from broker. May take a long time in outage.
            # If a crash occurs, puback is cleared by start()
            self.puback.set()
            self.puback.clear()
        elif iact == RUNNING:
            self._evtrun.set()
        if iact == WIFI_UP:
            self.evtwifi.set()
        elif iact == WIFI_DOWN:
            self.evtwifi.clear()
        # Detect WiFi status changes. Ignore initialisation and repeats
        if last_status != -1 and iact != last_status and iact in (WIFI_UP, WIFI_DOWN):
            cb, args = self.wifi_han
            cb(self.evtwifi.is_set(), self, *args)
        if self.verbose:
            if iact != UNKNOWN:
                if iact != last_status:  # Ignore repeats
                    printtime()
                    print('Status: ', self.status_msgs[iact])
            else:
                printtime()
                print('Unknown status: {} {}'.format(action[1], action[2]))
        return iact
    # start() is run each time the channel acquires sync i.e. on startup and also
    # after an ESP8266 crash and reset.
    # Behaviour after fatal error with ESP8266:
    # It clears ._evtrun to cause local tasks to terminate, then returns.
    # A return causes the local channel instance to launch .die then issues
    # a hardware reset to the ESP8266 (See SynCom.start() and .run() methods).
    # After acquiring sync, start() gets rescheduled.
    async def start(self, channel):
        """Initialise the ESP8266 (will, init string, resubscriptions), run
        the user start function, then process incoming messages forever."""
        self.verbose and print('Starting...')
        self.puback.set()  # If a crash occurred while a pub was pending
        self.puback.clear()  # let it terminate and release the lock
        self.evttim.set()  # Likewise for get_time: let it return fail status.
        self.evttim.clear()
        await asyncio.sleep_ms(0)
        self._evtrun.clear()  # Set by .do_status
        s_task, s_args = self.s_han
        if self.lw_topic is not None:
            channel.send(argformat(WILL, self.lw_topic, self.lw_msg, self.lw_retain, self.lw_qos))
            res = await channel.await_obj()
            if res is None:  # SynCom timeout
                await s_task(self, ESP_FAIL, *s_args)
                return self.quit('ESP8266 fail 1. Resetting.')
            command, action = self.get_cmd(res)
            if command == STATUS:
                iact = self.do_status(action, -1)
                await s_task(self, iact, *s_args)
                if iact in _BAD_STATUS:
                    return self.quit('Bad status: {}. Resetting.'.format(iact))
            else:
                self.verbose and print('Expected will OK got: ', command, action)
        channel.send(self.init_str)
        while not self._evtrun.is_set():  # Until RUNNING status received
            res = await channel.await_obj()
            if res is None:
                await s_task(self, ESP_FAIL, *s_args)
                return self.quit('ESP8266 fail 2. Resetting.')
            command, action = self.get_cmd(res)
            if command == STATUS:
                iact = self.do_status(action, -1)
                result = await s_task(self, iact, *s_args)
                if iact == SPECNET:
                    if result == 1:
                        channel.send('1')  # Any string will do
                    else:
                        return self.quit()
                if iact in _BAD_STATUS:
                    return self.quit('Bad status. Resetting.')
            else:
                self.verbose and print('Init got: ', command, action)
        # On power up this will do nothing because user awaits .ready
        # before subscribing, so self.subs will be empty.
        for topic in self.subs:
            qos = self.subs[topic][1]
            self.channel.send(argformat(SUBSCRIBE, topic, qos))
        self.verbose and print('About to run user program.')
        if self.user_start[0] is not None:
            self.user_start[0](self, *self.user_start[1])  # User start function
        cb, args = self.wifi_han
        cb(True, self, *args)
        # Initialisation is complete. Process messages from ESP8266.
        iact = -1  # Invalidate last status for change detection
        while True:  # print incoming messages, handle subscriptions
            chan_state = channel.any()
            if chan_state is None:  # SynCom Timeout
                self._evtrun.clear()
            elif chan_state > 0:
                res = await channel.await_obj()
                command, action = self.get_cmd(res)
                if command == SUBSCRIPTION:
                    if action[0] in self.subs:  # topic found
                        cb, qos, args = self.subs[action[0]]
                        action += args
                        # Callback gets topic, message, retained, plus any user args
                        cb(*action)  # Run the callback
                elif command == STATUS:  # 1st arg of status is an integer
                    iact = self.do_status(action, iact)  # Update pub q and wifi status
                    await s_task(self, iact, *s_args)
                    if iact in _DIRE_STATUS:
                        return self.quit('Fatal status. Resetting.')
                elif command == TIME:
                    self._do_time(action)
                elif command == MEM:  # Wouldn't ask for this if we didn't want a printout
                    print('ESP8266 RAM free: {} allocated: {}'.format(action[0], action[1]))
                else:
                    await s_task(self, UNKNOWN, *s_args)
                    return self.quit('Got unhandled command, resetting ESP8266:', command, action)  # ESP8266 has failed
            await asyncio.sleep_ms(20)
            if not self._evtrun.is_set():  # self.quit() has been called.
                await s_task(self, NO_NET, *s_args)
                return self.quit('Not running, resetting ESP8266')
| |
# Copyright (c) 2017 Trail of Bits, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import idaapi
import idautils
import idc
import itertools
import struct
import inspect
import ida_ua
import ida_bytes
_DEBUG_FILE = None
_DEBUG_PREFIX = ""
_INFO = idaapi.get_inf_structure()
# Map of the external functions names which does not return to a tuple containing information
# like the number of agruments and calling convention of the function.
_NORETURN_EXTERNAL_FUNC = {}
FUNC_LSDA_ENTRIES = collections.defaultdict()
IS_ARM = "ARM" in _INFO.procName
# True if we are running on an ELF file.
IS_ELF = (idaapi.f_ELF == _INFO.filetype) or \
(idc.GetLongPrm(idc.INF_FILETYPE) == idc.FT_ELF)
# True if this is a Windows PE file.
IS_PE = idaapi.f_PE == _INFO.filetype
if IS_ARM:
from arm_util import *
else:
from x86_util import *
def INIT_DEBUG_FILE(file):
  """Set the file object that DEBUG() lines are written to."""
  global _DEBUG_FILE
  _DEBUG_FILE = file
def DEBUG_PUSH():
  """Increase the DEBUG() indentation level by one step (two spaces).

  Keeps the step symmetric with DEBUG_POP, which strips exactly two
  characters.
  """
  global _DEBUG_PREFIX
  _DEBUG_PREFIX += "  "
def DEBUG_POP():
  """Decrease the DEBUG() indentation level by one step (two characters)."""
  global _DEBUG_PREFIX
  _DEBUG_PREFIX = _DEBUG_PREFIX[:-2]
def DEBUG(s):
  """Write one indented line to the debug log, if a log file is configured."""
  global _DEBUG_FILE
  if _DEBUG_FILE:
    _DEBUG_FILE.write("{}{}\n".format(_DEBUG_PREFIX, str(s)))
# Python 2.7's xrange doesn't work with `long`s.
def xrange(begin, end=None, step=1):
if end:
return iter(itertools.count(begin, step).next, end)
else:
return iter(itertools.count().next, begin)
_NOT_INST_EAS = set()
# sign extension to the given bits
def sign_extend(x, b):
  """Sign-extend the low `b` bits of `x` and return the signed value."""
  x &= (1 << b) - 1
  if x & (1 << (b - 1)):
    x -= 1 << b
  return x
# Returns `True` if `ea` belongs to some code segment.
#
# TODO(pag): This functon is extra aggressive, in that it doesn't strictly
# trust the `idc.is_code`. I have observed cases where data in
# `.bss` is treated as code and I am not sure why. Perhaps adding
# a reference to the data did this.
#
# I think it has something to do with ELF thunks, e.g. entries in
# the `.plt` section. When I made this function stricter,
# `mcsema-lift` would report issues where it needed to add tail-calls
# to externals.
def is_code(ea):
  """Returns `True` if `ea` lies in a segment whose type is SEG_CODE."""
  if is_invalid_ea(ea):
    return False
  seg_type = idc.get_segm_attr(idc.get_segm_start(ea), idc.SEGATTR_TYPE)
  return seg_type == idc.SEG_CODE
# A stricter form of `is_code`, where we also check whether IDA thinks something
# is code. IDA is able to identify some things like embedded exception tables
# in the code section as not truly being code.
def is_code_by_flags(ea):
  """Returns `True` if `ea` is in a code segment and IDA's flags also mark it
  as code (IDA can identify e.g. embedded exception tables as non-code)."""
  return is_code(ea) and idc.is_code(idc.get_full_flags(ea))
def is_read_only_segment(ea):
  """Returns `True` if the segment holding `ea` is readable but not writable."""
  perms = idc.get_segm_attr(ea, idc.SEGATTR_PERM)
  masked = perms & (idaapi.SEGPERM_WRITE | idaapi.SEGPERM_READ)
  return idaapi.SEGPERM_READ == masked
def is_tls_segment(ea):
  """Returns `True` if `ea` is inside a thread-local storage segment."""
  try:
    return idc.get_segm_name(ea) in (".tbss", ".tdata", ".tls")
  except:
    return False
# Returns `True` if `ea` looks like a thread-local thing.
def is_tls(ea):
  """Returns `True` if `ea` looks like a thread-local thing."""
  if is_invalid_ea(ea):
    return False
  if is_tls_segment(ea):
    return True
  # Something references `ea`, and that something is commented as being a
  # `TLS-reference`. This comes up if you have an thread-local extern variable
  # declared/used in a binary, and defined in a shared lib. There will be an
  # offset variable.
  for source_ea in _drefs_to(ea):
    comment = idc.GetCommentEx(source_ea, 0)
    if isinstance(comment, str) and "TLS-reference" in comment:
      return True
  return False
# Mark an address as containing code.
def try_mark_as_code(ea):
  """Try to convert `ea` into an instruction; returns `True` on success."""
  if is_code(ea) and not is_code_by_flags(ea):
    idc.MakeCode(ea)
    idaapi.auto_wait()  # Let IDA's auto-analysis settle before proceeding.
    return True
  return False
def mark_as_not_code(ea):
  """Record that `ea` must never be treated as an instruction
  (consulted by decode_instruction)."""
  global _NOT_INST_EAS
  _NOT_INST_EAS.add(ea)
def read_bytes_slowly(start, end):
  """Read [start, end) one byte at a time, substituting NUL for addresses
  that have no value, and return them as a byte string."""
  chunks = []
  for ea in xrange(start, end):
    if idc.has_value(idc.get_full_flags(ea)):
      chunks.append(chr(idc.get_wide_byte(ea)))
    else:
      chunks.append("\x00")
  return "".join(chunks)
def read_byte(ea):
  """Return the byte at `ea` as an integer."""
  return ord(read_bytes_slowly(ea, ea + 1))
def read_word(ea):
  """Return the little-endian 16-bit unsigned integer at `ea`.

  Fix: the format string was "<L" (requires 4 bytes) while only 2 bytes are
  read, so struct.unpack always raised struct.error; "<H" matches the size.
  """
  bytestr = read_bytes_slowly(ea, ea + 2)
  word = struct.unpack("<H", bytestr)[0]
  return word
def read_dword(ea):
  """Return the little-endian 32-bit unsigned integer at `ea`."""
  return struct.unpack("<L", read_bytes_slowly(ea, ea + 4))[0]
def read_qword(ea):
  """Return the little-endian 64-bit unsigned integer at `ea`."""
  return struct.unpack("<Q", read_bytes_slowly(ea, ea + 8))[0]
def read_leb128(ea, signed):
  """ Read LEB128 encoded data

  Returns (value, address after the last byte) on success.
  """
  val = 0
  shift = 0
  while True:
    byte = idc.get_wide_byte(ea)
    val |= (byte & 0x7F) << shift
    shift += 7
    ea += 1
    if (byte & 0x80) == 0:
      break  # High bit clear: final byte of the encoding.
    if shift > 64:
      DEBUG("Bad leb128 encoding at {0:x}".format(ea - shift/7))
      # NOTE(review): this error path returns a bare BADADDR while the
      # success path returns a (value, ea) tuple; callers that unpack the
      # result will raise here. Confirm intended contract before changing.
      return idc.BADADDR
  if signed and (byte & 0x40):
    val -= (1<<shift)  # Bit 6 of the final byte is the sign bit.
  return val, ea
def read_pointer(ea):
  """Read an address-sized (4- or 8-byte) unsigned integer at `ea`."""
  return read_qword(ea) if _INFO.is_64bit() else read_dword(ea)
def instruction_personality(arg):
  """Classify `arg` (an instruction, or the address of one) into one of the
  PERSONALITY_* categories supplied by the per-arch utility module."""
  global PERSONALITIES
  if isinstance(arg, (int, long)):  # Python 2: addresses may be `long`.
    arg, _ = decode_instruction(arg)
  try:
    p = PERSONALITIES[arg.itype]
  except AttributeError:  # Decoding failed: `arg` is None.
    p = PERSONALITY_NORMAL
  return fixup_personality(arg, p)
def is_conditional_jump(arg):
  """True for conditional branch instructions."""
  return instruction_personality(arg) == PERSONALITY_CONDITIONAL_BRANCH
def is_unconditional_jump(arg):
  """True for direct or indirect unconditional jumps."""
  return instruction_personality(arg) in (PERSONALITY_DIRECT_JUMP, PERSONALITY_INDIRECT_JUMP)
def is_direct_jump(arg):
  """True for jumps whose target is encoded in the instruction."""
  return instruction_personality(arg) == PERSONALITY_DIRECT_JUMP
def is_indirect_jump(arg):
  """True for jumps through a register or memory operand."""
  return instruction_personality(arg) == PERSONALITY_INDIRECT_JUMP
def is_function_call(arg):
  """True for direct or indirect call instructions."""
  return instruction_personality(arg) in (PERSONALITY_DIRECT_CALL, PERSONALITY_INDIRECT_CALL)
def is_indirect_function_call(arg):
  """True for calls through a register or memory operand."""
  return instruction_personality(arg) == PERSONALITY_INDIRECT_CALL
def is_direct_function_call(arg):
  """True for calls whose target is encoded in the instruction."""
  return instruction_personality(arg) == PERSONALITY_DIRECT_CALL
def is_return(arg):
  """True for function return instructions."""
  return instruction_personality(arg) == PERSONALITY_RETURN
def is_control_flow(arg):
  """True for any instruction that is not a plain fall-through instruction."""
  return instruction_personality(arg) != PERSONALITY_NORMAL
def instruction_ends_block(arg):
  """True if the instruction terminates a basic block."""
  return instruction_personality(arg) in (PERSONALITY_CONDITIONAL_BRANCH,
                                          PERSONALITY_DIRECT_JUMP,
                                          PERSONALITY_INDIRECT_JUMP,
                                          PERSONALITY_RETURN,
                                          PERSONALITY_TERMINATOR,
                                          PERSONALITY_SYSTEM_RETURN)
def is_invalid_ea(ea):
  """Returns `True` if `ea` is not valid, i.e. it doesn't point into any
  valid segment."""
  if (idc.BADADDR == ea) or \
     (idc.get_segm_name(ea) == "LOAD"):
    return True
  try:
    # Probing the segment attributes raises if `ea` is in no segment.
    idc.get_segm_attr(idc.get_segm_start(ea), idc.SEGATTR_TYPE)
    return False  # If we get here, then it must be a valid ea!
  except:
    return True
_BAD_INSTRUCTION = (None, "")
def decode_instruction(ea):
  """Read the bytes of an x86/amd64 instruction. This handles things like
  combining the bytes of an instruction with its prefix. IDA Pro sometimes
  treats these as separate.

  Returns (insn_t, bytes) or _BAD_INSTRUCTION on failure.
  """
  global _NOT_INST_EAS, _BAD_INSTRUCTION, PREFIX_ITYPES
  if ea in _NOT_INST_EAS:
    return _BAD_INSTRUCTION
  decoded_inst = ida_ua.insn_t()
  inslen = ida_ua.decode_insn(decoded_inst, ea)
  if inslen <= 0:
    _NOT_INST_EAS.add(ea)  # Cache the failure so we never retry this ea.
    return _BAD_INSTRUCTION
  assert decoded_inst.ea == ea
  end_ea = ea + decoded_inst.size
  decoded_bytes = read_bytes_slowly(ea, end_ea)
  # We've got an instruction with a prefix, but the prefix is treated as
  # independent.
  if 1 == decoded_inst.size and decoded_inst.itype in PREFIX_ITYPES:
    decoded_inst, extra_bytes = decode_instruction(end_ea)
    decoded_bytes += extra_bytes
  return decoded_inst, decoded_bytes
_NOT_EXTERNAL_SEGMENTS = set([idc.BADADDR])
_EXTERNAL_SEGMENTS = set()
def segment_contains_external_function_pointers(seg_ea):
  """Returns `True` if a segment contains pointers to external functions."""
  try:
    return idc.get_segm_name(seg_ea).lower() in (".idata", ".plt.got")
  except:
    return False
def is_external_segment_by_flags(ea):
  """Returns `True` if IDA believes that `ea` belongs to an external segment."""
  try:
    seg_ea = idc.get_segm_start(ea)
    seg_type = idc.get_segm_attr(seg_ea, idc.SEGATTR_TYPE)
    if seg_type == idc.SEG_XTRN:
      _EXTERNAL_SEGMENTS.add(seg_ea)  # Cache for is_external_segment.
      return True
    else:
      return False
  except:
    return False
def is_external_segment(ea):
  """Returns `True` if the segment containing `ea` looks to be solely containing
  external references.

  Results are cached in _EXTERNAL_SEGMENTS / _NOT_EXTERNAL_SEGMENTS.
  """
  global _NOT_EXTERNAL_SEGMENTS
  seg_ea = idc.get_segm_start(ea)
  if seg_ea in _NOT_EXTERNAL_SEGMENTS:
    return False
  if seg_ea in _EXTERNAL_SEGMENTS:
    return True
  if is_external_segment_by_flags(ea):
    _EXTERNAL_SEGMENTS.add(seg_ea)
    return True
  # Fix: removed the unused `ext_types` local, which was never read.
  seg_name = idc.get_segm_name(seg_ea).lower()
  if IS_ELF:
    if ".got" in seg_name or ".plt" in seg_name:
      _EXTERNAL_SEGMENTS.add(seg_ea)
      return True
  elif IS_PE:
    if ".idata" == seg_name:  # Import table.
      _EXTERNAL_SEGMENTS.add(seg_ea)
      return True
  _NOT_EXTERNAL_SEGMENTS.add(seg_ea)
  return False
def is_constructor_segment(ea):
  """Returns `True` if the segment containing `ea` belongs to global constructor section"""
  seg_name = idc.get_segm_name(idc.get_segm_start(ea)).lower()
  return seg_name in (".init_array", ".ctor")
def is_destructor_segment(ea):
  """Returns `True` if the segment containing `ea` belongs to global destructor section"""
  seg_name = idc.get_segm_name(idc.get_segm_start(ea)).lower()
  return seg_name in (".fini_array", ".dtor")
def get_destructor_segment():
  """Returns the start address of the global destructor section, or None
  if the binary has no such section."""
  for seg_ea in idautils.Segments():
    if idc.get_segm_name(seg_ea).lower() in (".fini_array", ".dtor"):
      return seg_ea
def is_internal_code(ea):
  """Returns `True` if `ea` is code belonging to this binary (not an
  external/import segment)."""
  if is_invalid_ea(ea):
    return False
  if is_external_segment(ea):
    return False
  if is_code(ea):
    return True
  # find stray 0x90 (NOP) bytes in .text that IDA
  # thinks are data items.
  flags = idc.get_full_flags(ea)
  if idaapi.is_align(flags):
    if not try_mark_as_code(ea):
      return False
    return True
  return False
def is_block_or_instruction_head(ea):
  """Returns `True` if `ea` looks like it's the beginning of an actual
  instruction."""
  if not is_internal_code(ea):
    return False
  return idc.get_item_head(ea) == ea
def get_address_size_in_bits():
  """Returns the available address size (64 or 32)."""
  return 64 if _INFO.is_64bit() else 32
def get_address_size_in_bytes():
  """Return the address size in bytes (4 or 8).

  Fix: floor division keeps the result an `int` under Python 3 as well;
  plain `/` would yield a float there. `//` behaves identically on Python 2.
  """
  return get_address_size_in_bits() // 8
_FORCED_NAMES = {}
# Tries to set the name of a symbol. IDA can be pretty dumb when symbol names
# conflict with register names, so we have the backup path of splatting things
# into a dictionary.
def set_symbol_name(ea, name):
  """Set the name of the symbol at `ea`, also recording it in _FORCED_NAMES
  because IDA may reject names that conflict with register names."""
  global _FORCED_NAMES
  flags = idaapi.SN_PUBLIC | idaapi.SN_NOCHECK | idaapi.SN_NON_AUTO | idaapi.SN_NOWARN
  _FORCED_NAMES[ea] = name
  idc.set_name(ea, name, flags)
# Tries to get the name of a symbol.
def get_symbol_name(from_ea, ea=None, allow_dummy=False):
  """Get the name of the symbol at `ea` (default: `from_ea`).

  Forced names (set_symbol_name) win; IDA auto-generated ("dummy") names
  are suppressed unless allow_dummy. Returns "" when no name is found.
  """
  if ea is None:
    ea = from_ea
  global _FORCED_NAMES
  if ea in _FORCED_NAMES:
    return _FORCED_NAMES[ea]
  flags = idc.get_full_flags(ea)
  if not allow_dummy and idaapi.has_dummy_name(flags):
    return ""
  name = ""
  try:
    name = name or idc.get_name(ea, 0) #calc_gtn_flags(from_ea, ea))
  except:
    pass
  try:
    name = name or idc.get_func_name(ea)
  except:
    pass
  return name
def get_function_bounds(ea):
  """Get the bounds of the function containing `ea`. We want to discover jump
  table targets that are missed by IDA, and it's possible that they aren't
  marked as being part of the current function, and perhaps are after the
  assumed range of the current function. Ideally they will fall before the
  beginning of the next function, though.
  We need to be pretty careful with the case that one function tail-calls
  another. IDA will sometimes treat the end of the tail-called function
  (e.g. a thunk) as if it is the end of the caller. For this reason, we start
  with loose bounds using the prev/next functions, then try to narrow with
  the bounds of the function containing `ea`.
  TODO(pag): Handle discontinuous regions (e.g. because of function chunks).
             It may be worth to return an object here that can we queried
             for membership using the `__in__` method.

  Returns (min_ea, max_ea); degenerates to (ea, ea) for non-code addresses.
  """
  seg_start, seg_end = idc.get_segm_start(ea), idc.get_segm_end(ea)
  min_ea = seg_start
  max_ea = seg_end
  if is_invalid_ea(min_ea) or not is_code(ea):
    return ea, ea
  # Get an upper bound using the next function.
  next_func_ea = idc.get_next_func(ea)
  if not is_invalid_ea(next_func_ea):
    max_ea = min(next_func_ea, max_ea)
  # Get a lower bound using the previous function.
  prev_func_ea = idc.get_prev_func(ea)
  if not is_invalid_ea(prev_func_ea):
    min_ea = max(min_ea, prev_func_ea)
    prev_func = idaapi.get_func(prev_func_ea)
    if prev_func and prev_func.end_ea < ea:
      min_ea = max(min_ea, prev_func.end_ea)
  # Try to tighten the bounds using the function containing `ea`.
  func = idaapi.get_func(ea)
  if func:
    min_ea = max(min_ea, func.start_ea)
    max_ea = min(max_ea, func.end_ea)
  return min_ea, max_ea
def noreturn_external_function(fname, args, realconv, ret, sign):
  """ Update the map of external functions which does not return; The basic
      block terminates on seeing a call to the functions
  """
  global _NORETURN_EXTERNAL_FUNC
  if fname:
    _NORETURN_EXTERNAL_FUNC[fname] = (args, realconv, ret, sign)
def is_noreturn_external_function(ea):
  """Returns `True` if ea refers to an external function which does not return.
  """
  target_ea = get_reference_target(ea)
  return get_symbol_name(target_ea) in _NORETURN_EXTERNAL_FUNC
def is_noreturn_function(ea):
  """Returns `True` if the function at `ea` is a no-return function."""
  flags = idc.get_func_attr(ea, idc.FUNCATTR_FLAGS)
  # Exclude functions with LSDA entries and cxa_throw: they "return" via
  # exception unwinding even though IDA marks them FUNC_NORET.
  return 0 < flags and \
         (flags & idaapi.FUNC_NORET) and \
         ea not in FUNC_LSDA_ENTRIES.keys() and \
         "cxa_throw" not in get_symbol_name(ea)
_CREFS_FROM = collections.defaultdict(set)
_DREFS_FROM = collections.defaultdict(set)
_CREFS_TO = collections.defaultdict(set)
_DREFS_TO = collections.defaultdict(set)
def make_xref(from_ea, to_ea, data_type, xref_size):
  """Force the data at `from_ea` to reference the data at `to_ea`.

  `data_type` must be idc.FF_QWORD or idc.FF_DWORD. Returns `True` if a
  reference was recorded; all created refs are also mirrored into the
  _CREFS_*/_DREFS_* caches.
  """
  if not idc.get_full_flags(to_ea) or is_invalid_ea(to_ea):
    DEBUG("  Not making reference (A) from {:x} to {:x}".format(from_ea, to_ea))
    return False
  make_head(from_ea)
  if is_code(from_ea):
    _CREFS_FROM[from_ea].add(to_ea)
    _CREFS_TO[to_ea].add(from_ea)
  else:
    _DREFS_FROM[from_ea].add(to_ea)
    _DREFS_TO[to_ea].add(from_ea)
  # If we can't make a head, then it probably means that we're at the
  # end of the binary, e.g. the last thing in the `.extern` segment.
  # or in the middle of structure. Return False in such case
  #
  # NOTE(artem): Commenting out since this breaks recovery of C++ applications
  # with IDA7. The failure occurs when processign references in .init_array
  # when the below code is enabled, those references are not treated as
  # references because make_head fails.
  #
  #if not make_head(from_ea + xref_size):
  #  return False
  ida_bytes.del_items(from_ea, idc.DELIT_EXPAND, xref_size)
  if data_type == idc.FF_QWORD:
    data_size = 8
  elif data_type == idc.FF_DWORD:
    data_size = 4
  else:
    raise ValueError("Invalid data type")
  idc.create_data(from_ea, data_type, data_size, idaapi.BADADDR)
  if not is_code_by_flags(from_ea):
    idc.add_dref(from_ea, to_ea, idc.XREF_USER|idc.dr_O)
  else:  # Refs from code are not added to IDA's database here.
    DEBUG("  Not making reference (B) from {:x} to {:x}".format(from_ea, to_ea))
  return True
_IGNORE_DREF = (lambda x: [idc.BADADDR])
_IGNORE_CREF = (lambda x, y: [idc.BADADDR])
def _stop_looking_for_xrefs(ea):
  """This is a heuristic to decide whether or not we should stop looking for
  cross-references. It is relevant to IDA structs, where IDA will treat structs
  and everything in them as one single 'thing', and so all xrefs embedded within
  a struct will actually be associated with the first EA of the struct. So
  if we're in a struct or something like it, and the item size is bigger than
  the address size, then we will assume it's actually in a struct."""
  if is_external_segment(ea) or is_code(ea):
    return False
  return idc.get_item_size(ea) > get_address_size_in_bytes()
def _xref_generator(ea, get_first, get_next):
  """Yield each xref of `ea` produced by the supplied first/next accessor
  pair, stopping at the first invalid address."""
  ref = get_first(ea)
  while not is_invalid_ea(ref):
    yield ref
    ref = get_next(ea, ref)
def drefs_from(ea, only_one=False, check_fixup=True):
  """Generate the data-reference targets of `ea`.

  Sources, in order: the fixup target (if `check_fixup` and it's non-code),
  IDA's dref list, then the manual _DREFS_FROM cache. If `only_one`, stop
  after the first yielded target.
  """
  seen = False
  has_one = only_one
  fixup_ea = idc.BADADDR
  if check_fixup:
    fixup_ea = idc.get_fixup_target_off(ea)
    if not is_invalid_ea(fixup_ea) and not is_code(fixup_ea):
      seen = only_one
      has_one = True
      yield fixup_ea
    if has_one and _stop_looking_for_xrefs(ea):
      return
  for target_ea in _xref_generator(ea, idaapi.get_first_dref_from, idaapi.get_next_dref_from):
    if target_ea != fixup_ea and not is_invalid_ea(target_ea):
      seen = only_one
      yield target_ea
      if seen:
        return
  if not seen and ea in _DREFS_FROM:
    for target_ea in _DREFS_FROM[ea]:
      yield target_ea
      seen = only_one
      if seen:
        return
def crefs_from(ea, only_one=False, check_fixup=True):
  """Generate the code-reference targets of `ea` (which must itself be code).

  Sources, in order: the fixup target (if `check_fixup` and it is code),
  IDA's cref list, then the manual _CREFS_FROM cache. If `only_one`, stop
  after the first yielded target.
  """
  flags = idc.get_full_flags(ea)
  if not idc.is_code(flags):
    return
  fixup_ea = idc.BADADDR
  seen = False
  has_one = only_one
  if check_fixup:
    fixup_ea = idc.get_fixup_target_off(ea)
    if not is_invalid_ea(fixup_ea) and is_code(fixup_ea):
      seen = only_one
      has_one = True
      yield fixup_ea
    if has_one and _stop_looking_for_xrefs(ea):
      return
  for target_ea in _xref_generator(ea, idaapi.get_first_cref_from, idaapi.get_next_cref_from):
    if target_ea != fixup_ea and not is_invalid_ea(target_ea):
      seen = only_one
      yield target_ea
      if seen:
        return
  if not seen and ea in _CREFS_FROM:
    for target_ea in _CREFS_FROM[ea]:
      seen = only_one
      yield target_ea
      if seen:
        return
def xrefs_from(ea, only_one=False):
  """Generate all reference targets of `ea`: the fixup target first, then
  data refs, then code refs (fixups are handled here, so the sub-generators
  are told to skip them). If `only_one`, stop after the first target."""
  fixup_ea = idc.get_fixup_target_off(ea)
  seen = False
  has_one = only_one
  if not is_invalid_ea(fixup_ea):
    seen = only_one
    has_one = True
    yield fixup_ea
  if has_one and _stop_looking_for_xrefs(ea):
    return
  for target_ea in drefs_from(ea, only_one, check_fixup=False):
    if target_ea != fixup_ea:
      seen = only_one
      yield target_ea
      if seen:
        return
  for target_ea in crefs_from(ea, only_one, check_fixup=False):
    if target_ea != fixup_ea:
      seen = only_one
      yield target_ea
      if seen:
        return
def _drefs_to(ea):
  """Generate the addresses of all data references to `ea`."""
  return _xref_generator(ea, idaapi.get_first_dref_to, idaapi.get_next_dref_to)
def _crefs_to(ea):
  """Generate the addresses of all code references to `ea`."""
  return _xref_generator(ea, idaapi.get_first_cref_to, idaapi.get_next_cref_to)
def _xrefs_to(ea):
  """Generate all reference sources to `ea`: data refs first, then code refs."""
  for source_ea in itertools.chain(_drefs_to(ea), _crefs_to(ea)):
    yield source_ea
def _reference_checker(ea, dref_finder=_IGNORE_DREF, cref_finder=_IGNORE_CREF):
  """Looks for references to/from `ea`, and does some sanity checks on what
  IDA returns."""
  for finder in (dref_finder, cref_finder):
    for _ in finder(ea):
      return True
  return False
def remove_all_refs(ea):
  """Remove all references to something."""
  # NOTE(review): this assert makes the function fail unconditionally; it
  # appears to have been deliberately disabled, and everything below it is
  # unreachable. Confirm before re-enabling.
  assert False
  dref_eas = list(drefs_from(ea))
  cref_eas = list(crefs_from(ea))
  for ref_ea in dref_eas:
    idaapi.del_dref(ea, ref_ea)
  for ref_ea in cref_eas:
    idaapi.del_cref(ea, ref_ea, False)
    idaapi.del_cref(ea, ref_ea, True)
def is_thunk(ea):
  """Returns true if some address is a known to IDA to be a thunk.

  Fix: replaced the magic number 0x00000080L with the equal-valued named
  constant idaapi.FUNC_THUNK, consistent with the idaapi.FUNC_NORET usage
  in is_noreturn_function (and dropping the Python-2-only `L` literal).
  """
  flags = idc.get_func_attr(ea, idc.FUNCATTR_FLAGS)
  return (idc.BADADDR != flags) and 0 < flags and 0 != (flags & idaapi.FUNC_THUNK)
def is_referenced(ea):
  """Returns `True` if the data at `ea` is referenced by something else."""
  return _reference_checker(ea, _drefs_to, _crefs_to)
def is_referenced_by(ea, by_ea):
  """Returns `True` if `by_ea` is one of the reference sources of `ea`."""
  if any(ref_ea == by_ea for ref_ea in _drefs_to(ea)):
    return True
  return any(ref_ea == by_ea for ref_ea in _crefs_to(ea))
def is_runtime_external_data_reference(ea):
  """This can happen in ELF binaries, where you'll have somehting like
  `stdout@@GLIBC_2.2.5` in the `.bss` section, where at runtime the
  linker will fill in the slot with a pointer to the real `stdout`.
  IDA discovers this type of reference, but it has no real way to
  cross-reference it to anything, because the target address will
  only exist at runtime."""
  comment = idc.GetCommentEx(ea, 0)
  return bool(comment and "Copy of shared data" in comment)
def is_external_vtable_reference(ea):
  """ It checks the references of external vtable in the .bss section, where
      it is referred as the `Copy of shared data`. There is no way to resolve
      the cross references for these vtable as the target address will only
      appear during runtime.
      It is introduced to avoid lazy initilization of runtime typeinfo variables
      which gets referred by the user-defined exception types.
  """
  if not is_runtime_external_data_reference(ea):
    return False
  comment = idc.GetCommentEx(ea, 0)
  if comment and "Alternative name is '`vtable" in comment:
    return True
  # Fix: was a bare `return` (None). Both are falsy, so callers using a
  # truth test are unaffected, but the function now consistently returns
  # a bool like its sibling predicates.
  return False
def is_reference(ea):
  """Returns `True` if the `ea` references something else."""
  if is_invalid_ea(ea):
    return False
  for target in xrefs_from(ea):
    return True  # A single xref suffices.
  return is_runtime_external_data_reference(ea)
def is_data_reference(ea):
  """Returns `True` if `ea` holds a data reference to something else."""
  if is_invalid_ea(ea):
    return False
  for target_ea in drefs_from(ea):
    return True  # A single dref suffices.
  return is_runtime_external_data_reference(ea)
def has_flow_to_code(ea):
  """Returns `True` if there are any control flows to the instruction at
  `ea`."""
  return _reference_checker(ea, cref_finder=idautils.CodeRefsTo)
def get_reference_target(ea):
  """Return the first reference target of `ea`, `ea` itself for runtime
  external data references, or idc.BADADDR if there is none."""
  for ref_ea in xrefs_from(ea, True):
    return ref_ea
  # This is kind of funny, but it works with how we understand external
  # variable references from the CFG production and LLVM side. Really,
  # we need a unique location for every reference (internal and external).
  # For external references, the location itself is not super important, it's
  # used for identification in the LLVM side of things.
  #
  # When creating cross-references, we need that ability to identify the
  # "target" of the cross-reference, and again, that can be anything so long
  # as other parts of the code agree on the target.
  if is_runtime_external_data_reference(ea):
    return ea
  return idc.BADADDR
def is_head(ea):
  """Returns `True` if IDA considers `ea` the start of an item."""
  return idc.is_head(idc.get_full_flags(ea))
# Force the location `ea` to become the head of a defined item.
def make_head(ea):
    """Ensure `ea` is an item head, defining a single byte there if needed.

    Returns True when `ea` already is, or successfully becomes, a head.
    """
    if is_head(ea):
        return True
    idc.create_data(ea, idc.FF_BYTE, 1, idc.BADADDR)
    # Let IDA's auto-analysis settle before re-checking the flags.
    idaapi.auto_wait()
    return is_head(ea)
| |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import pkg_resources
import google.auth # type: ignore
import google.api_core
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.api_core import operations_v1
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.vision_v1p3beta1.types import product_search_service
from google.longrunning import operations_pb2 # type: ignore
from google.protobuf import empty_pb2 # type: ignore
# Resolve the installed library version for the default user-agent string.
try:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution("google-cloud-vision",).version,
    )
except pkg_resources.DistributionNotFound:
    # Not installed as a distribution (e.g. running from a source tree);
    # fall back to a ClientInfo without a gapic_version.
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class ProductSearchTransport(abc.ABC):
    """Abstract transport class for ProductSearch."""
    # OAuth scopes requested when credentials are resolved from the
    # environment or a credentials file.
    AUTH_SCOPES = (
        "https://www.googleapis.com/auth/cloud-platform",
        "https://www.googleapis.com/auth/cloud-vision",
    )
    DEFAULT_HOST: str = "vision.googleapis.com"
    def __init__(
        self,
        *,
        host: str = DEFAULT_HOST,
        credentials: ga_credentials.Credentials = None,
        credentials_file: Optional[str] = None,
        scopes: Optional[Sequence[str]] = None,
        quota_project_id: Optional[str] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
        always_use_jwt_access: Optional[bool] = False,
        **kwargs,
    ) -> None:
        """Instantiate the transport.
        Args:
            host (Optional[str]):
                The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): A list of scopes.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.
        """
        # Save the hostname. Default to port 443 (HTTPS) if none is specified.
        if ":" not in host:
            host += ":443"
        self._host = host
        scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}
        # Save the scopes.
        self._scopes = scopes
        # If no credentials are provided, then determine the appropriate
        # defaults.
        if credentials and credentials_file:
            raise core_exceptions.DuplicateCredentialArgs(
                "'credentials_file' and 'credentials' are mutually exclusive"
            )
        if credentials_file is not None:
            credentials, _ = google.auth.load_credentials_from_file(
                credentials_file, **scopes_kwargs, quota_project_id=quota_project_id
            )
        elif credentials is None:
            credentials, _ = google.auth.default(
                **scopes_kwargs, quota_project_id=quota_project_id
            )
        # If the credentials are service account credentials, then always try to use self signed JWT.
        # (hasattr guards against older google-auth versions lacking the method.)
        if (
            always_use_jwt_access
            and isinstance(credentials, service_account.Credentials)
            and hasattr(service_account.Credentials, "with_always_use_jwt_access")
        ):
            credentials = credentials.with_always_use_jwt_access(True)
        # Save the credentials.
        self._credentials = credentials
    def _prep_wrapped_messages(self, client_info):
        """Pre-wrap each RPC handler with its default retry/timeout policy.

        Methods with an empty `if_exception_type()` predicate never retry;
        idempotent reads retry on DeadlineExceeded / ServiceUnavailable.
        """
        # Precompute the wrapped methods.
        self._wrapped_methods = {
            self.create_product_set: gapic_v1.method.wrap_method(
                self.create_product_set,
                default_retry=retries.Retry(
                    initial=0.1,
                    maximum=60.0,
                    multiplier=1.3,
                    predicate=retries.if_exception_type(),
                    deadline=600.0,
                ),
                default_timeout=600.0,
                client_info=client_info,
            ),
            self.list_product_sets: gapic_v1.method.wrap_method(
                self.list_product_sets,
                default_retry=retries.Retry(
                    initial=0.1,
                    maximum=60.0,
                    multiplier=1.3,
                    predicate=retries.if_exception_type(
                        core_exceptions.DeadlineExceeded,
                        core_exceptions.ServiceUnavailable,
                    ),
                    deadline=600.0,
                ),
                default_timeout=600.0,
                client_info=client_info,
            ),
            self.get_product_set: gapic_v1.method.wrap_method(
                self.get_product_set,
                default_retry=retries.Retry(
                    initial=0.1,
                    maximum=60.0,
                    multiplier=1.3,
                    predicate=retries.if_exception_type(
                        core_exceptions.DeadlineExceeded,
                        core_exceptions.ServiceUnavailable,
                    ),
                    deadline=600.0,
                ),
                default_timeout=600.0,
                client_info=client_info,
            ),
            self.update_product_set: gapic_v1.method.wrap_method(
                self.update_product_set,
                default_retry=retries.Retry(
                    initial=0.1,
                    maximum=60.0,
                    multiplier=1.3,
                    predicate=retries.if_exception_type(),
                    deadline=600.0,
                ),
                default_timeout=600.0,
                client_info=client_info,
            ),
            self.delete_product_set: gapic_v1.method.wrap_method(
                self.delete_product_set,
                default_retry=retries.Retry(
                    initial=0.1,
                    maximum=60.0,
                    multiplier=1.3,
                    predicate=retries.if_exception_type(
                        core_exceptions.DeadlineExceeded,
                        core_exceptions.ServiceUnavailable,
                    ),
                    deadline=600.0,
                ),
                default_timeout=600.0,
                client_info=client_info,
            ),
            self.create_product: gapic_v1.method.wrap_method(
                self.create_product,
                default_retry=retries.Retry(
                    initial=0.1,
                    maximum=60.0,
                    multiplier=1.3,
                    predicate=retries.if_exception_type(),
                    deadline=600.0,
                ),
                default_timeout=600.0,
                client_info=client_info,
            ),
            self.list_products: gapic_v1.method.wrap_method(
                self.list_products,
                default_retry=retries.Retry(
                    initial=0.1,
                    maximum=60.0,
                    multiplier=1.3,
                    predicate=retries.if_exception_type(
                        core_exceptions.DeadlineExceeded,
                        core_exceptions.ServiceUnavailable,
                    ),
                    deadline=600.0,
                ),
                default_timeout=600.0,
                client_info=client_info,
            ),
            self.get_product: gapic_v1.method.wrap_method(
                self.get_product,
                default_retry=retries.Retry(
                    initial=0.1,
                    maximum=60.0,
                    multiplier=1.3,
                    predicate=retries.if_exception_type(
                        core_exceptions.DeadlineExceeded,
                        core_exceptions.ServiceUnavailable,
                    ),
                    deadline=600.0,
                ),
                default_timeout=600.0,
                client_info=client_info,
            ),
            self.update_product: gapic_v1.method.wrap_method(
                self.update_product,
                default_retry=retries.Retry(
                    initial=0.1,
                    maximum=60.0,
                    multiplier=1.3,
                    predicate=retries.if_exception_type(),
                    deadline=600.0,
                ),
                default_timeout=600.0,
                client_info=client_info,
            ),
            self.delete_product: gapic_v1.method.wrap_method(
                self.delete_product,
                default_retry=retries.Retry(
                    initial=0.1,
                    maximum=60.0,
                    multiplier=1.3,
                    predicate=retries.if_exception_type(
                        core_exceptions.DeadlineExceeded,
                        core_exceptions.ServiceUnavailable,
                    ),
                    deadline=600.0,
                ),
                default_timeout=600.0,
                client_info=client_info,
            ),
            self.create_reference_image: gapic_v1.method.wrap_method(
                self.create_reference_image,
                default_retry=retries.Retry(
                    initial=0.1,
                    maximum=60.0,
                    multiplier=1.3,
                    predicate=retries.if_exception_type(),
                    deadline=600.0,
                ),
                default_timeout=600.0,
                client_info=client_info,
            ),
            self.delete_reference_image: gapic_v1.method.wrap_method(
                self.delete_reference_image,
                default_retry=retries.Retry(
                    initial=0.1,
                    maximum=60.0,
                    multiplier=1.3,
                    predicate=retries.if_exception_type(
                        core_exceptions.DeadlineExceeded,
                        core_exceptions.ServiceUnavailable,
                    ),
                    deadline=600.0,
                ),
                default_timeout=600.0,
                client_info=client_info,
            ),
            self.list_reference_images: gapic_v1.method.wrap_method(
                self.list_reference_images,
                default_retry=retries.Retry(
                    initial=0.1,
                    maximum=60.0,
                    multiplier=1.3,
                    predicate=retries.if_exception_type(
                        core_exceptions.DeadlineExceeded,
                        core_exceptions.ServiceUnavailable,
                    ),
                    deadline=600.0,
                ),
                default_timeout=600.0,
                client_info=client_info,
            ),
            self.get_reference_image: gapic_v1.method.wrap_method(
                self.get_reference_image,
                default_retry=retries.Retry(
                    initial=0.1,
                    maximum=60.0,
                    multiplier=1.3,
                    predicate=retries.if_exception_type(
                        core_exceptions.DeadlineExceeded,
                        core_exceptions.ServiceUnavailable,
                    ),
                    deadline=600.0,
                ),
                default_timeout=600.0,
                client_info=client_info,
            ),
            self.add_product_to_product_set: gapic_v1.method.wrap_method(
                self.add_product_to_product_set,
                default_retry=retries.Retry(
                    initial=0.1,
                    maximum=60.0,
                    multiplier=1.3,
                    predicate=retries.if_exception_type(),
                    deadline=600.0,
                ),
                default_timeout=600.0,
                client_info=client_info,
            ),
            self.remove_product_from_product_set: gapic_v1.method.wrap_method(
                self.remove_product_from_product_set,
                default_retry=retries.Retry(
                    initial=0.1,
                    maximum=60.0,
                    multiplier=1.3,
                    predicate=retries.if_exception_type(),
                    deadline=600.0,
                ),
                default_timeout=600.0,
                client_info=client_info,
            ),
            self.list_products_in_product_set: gapic_v1.method.wrap_method(
                self.list_products_in_product_set,
                default_retry=retries.Retry(
                    initial=0.1,
                    maximum=60.0,
                    multiplier=1.3,
                    predicate=retries.if_exception_type(
                        core_exceptions.DeadlineExceeded,
                        core_exceptions.ServiceUnavailable,
                    ),
                    deadline=600.0,
                ),
                default_timeout=600.0,
                client_info=client_info,
            ),
            self.import_product_sets: gapic_v1.method.wrap_method(
                self.import_product_sets,
                default_retry=retries.Retry(
                    initial=0.1,
                    maximum=60.0,
                    multiplier=1.3,
                    predicate=retries.if_exception_type(),
                    deadline=600.0,
                ),
                default_timeout=600.0,
                client_info=client_info,
            ),
        }
    def close(self):
        """Closes resources associated with the transport.
        .. warning::
             Only call this method if the transport is NOT shared
             with other clients - this may cause errors in other clients!
        """
        raise NotImplementedError()
    @property
    def operations_client(self):
        """Return the client designed to process long-running operations."""
        raise NotImplementedError()
    # Each property below is the abstract handle for one ProductSearch RPC.
    # Concrete transports (gRPC / gRPC-asyncio) override them with callables;
    # the Union return types cover both sync and async implementations.
    @property
    def create_product_set(
        self,
    ) -> Callable[
        [product_search_service.CreateProductSetRequest],
        Union[
            product_search_service.ProductSet,
            Awaitable[product_search_service.ProductSet],
        ],
    ]:
        raise NotImplementedError()
    @property
    def list_product_sets(
        self,
    ) -> Callable[
        [product_search_service.ListProductSetsRequest],
        Union[
            product_search_service.ListProductSetsResponse,
            Awaitable[product_search_service.ListProductSetsResponse],
        ],
    ]:
        raise NotImplementedError()
    @property
    def get_product_set(
        self,
    ) -> Callable[
        [product_search_service.GetProductSetRequest],
        Union[
            product_search_service.ProductSet,
            Awaitable[product_search_service.ProductSet],
        ],
    ]:
        raise NotImplementedError()
    @property
    def update_product_set(
        self,
    ) -> Callable[
        [product_search_service.UpdateProductSetRequest],
        Union[
            product_search_service.ProductSet,
            Awaitable[product_search_service.ProductSet],
        ],
    ]:
        raise NotImplementedError()
    @property
    def delete_product_set(
        self,
    ) -> Callable[
        [product_search_service.DeleteProductSetRequest],
        Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]],
    ]:
        raise NotImplementedError()
    @property
    def create_product(
        self,
    ) -> Callable[
        [product_search_service.CreateProductRequest],
        Union[
            product_search_service.Product, Awaitable[product_search_service.Product]
        ],
    ]:
        raise NotImplementedError()
    @property
    def list_products(
        self,
    ) -> Callable[
        [product_search_service.ListProductsRequest],
        Union[
            product_search_service.ListProductsResponse,
            Awaitable[product_search_service.ListProductsResponse],
        ],
    ]:
        raise NotImplementedError()
    @property
    def get_product(
        self,
    ) -> Callable[
        [product_search_service.GetProductRequest],
        Union[
            product_search_service.Product, Awaitable[product_search_service.Product]
        ],
    ]:
        raise NotImplementedError()
    @property
    def update_product(
        self,
    ) -> Callable[
        [product_search_service.UpdateProductRequest],
        Union[
            product_search_service.Product, Awaitable[product_search_service.Product]
        ],
    ]:
        raise NotImplementedError()
    @property
    def delete_product(
        self,
    ) -> Callable[
        [product_search_service.DeleteProductRequest],
        Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]],
    ]:
        raise NotImplementedError()
    @property
    def create_reference_image(
        self,
    ) -> Callable[
        [product_search_service.CreateReferenceImageRequest],
        Union[
            product_search_service.ReferenceImage,
            Awaitable[product_search_service.ReferenceImage],
        ],
    ]:
        raise NotImplementedError()
    @property
    def delete_reference_image(
        self,
    ) -> Callable[
        [product_search_service.DeleteReferenceImageRequest],
        Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]],
    ]:
        raise NotImplementedError()
    @property
    def list_reference_images(
        self,
    ) -> Callable[
        [product_search_service.ListReferenceImagesRequest],
        Union[
            product_search_service.ListReferenceImagesResponse,
            Awaitable[product_search_service.ListReferenceImagesResponse],
        ],
    ]:
        raise NotImplementedError()
    @property
    def get_reference_image(
        self,
    ) -> Callable[
        [product_search_service.GetReferenceImageRequest],
        Union[
            product_search_service.ReferenceImage,
            Awaitable[product_search_service.ReferenceImage],
        ],
    ]:
        raise NotImplementedError()
    @property
    def add_product_to_product_set(
        self,
    ) -> Callable[
        [product_search_service.AddProductToProductSetRequest],
        Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]],
    ]:
        raise NotImplementedError()
    @property
    def remove_product_from_product_set(
        self,
    ) -> Callable[
        [product_search_service.RemoveProductFromProductSetRequest],
        Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]],
    ]:
        raise NotImplementedError()
    @property
    def list_products_in_product_set(
        self,
    ) -> Callable[
        [product_search_service.ListProductsInProductSetRequest],
        Union[
            product_search_service.ListProductsInProductSetResponse,
            Awaitable[product_search_service.ListProductsInProductSetResponse],
        ],
    ]:
        raise NotImplementedError()
    @property
    def import_product_sets(
        self,
    ) -> Callable[
        [product_search_service.ImportProductSetsRequest],
        Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
    ]:
        raise NotImplementedError()
# Explicit public surface of this module.
__all__ = ("ProductSearchTransport",)
| |
import re, math
from django.contrib.gis.db import models
from gass.bering.utils import *
class Station(models.Model):
    '''
    Ablatometer station, idealized i.e. designation B01 is re-used each year.
    '''
    # Short unique site designation (e.g. "b01"); clean() lower-cases it.
    site = models.CharField(max_length=255, unique=True)
    operational = models.BooleanField(help_text="Indicates that the station data should be reported")
    upload_path = models.TextField(help_text="File system path to the file or directory where data are uploaded")
    single_file = models.BooleanField(help_text="Indicates that data uploads are aggregate in a single file, specified by the record's upload_path", default=True)
    utc_offset = models.IntegerField(help_text="The UTC offset, in hours, positive or negative")
    init_height_cm = models.FloatField(verbose_name='initial sensor height in cm', help_text="The initial height of the instrument box, in centimeters")
    def __unicode__(self):
        # Legacy Python 2 display hook; mirrors __str__ below.
        return str(self.site)
    def __str__(self):
        return str(self.site)
    def clean(self, *args, **kwargs):
        '''
        Validates and/or cleans input before saving to the database.
        '''
        # Force site names to be lowercase
        self.site = str(self.site).lower()
class SiteVisit(models.Model):
    '''
    An in situ field visit, particularly notable where ablatometer height
    was adjusted manually.
    '''
    site = models.ForeignKey(Station, to_field='site')
    datetime = models.DateTimeField(help_text="Date and time of site visit, in UTC")
    ablato_adjusted = models.BooleanField(verbose_name='ablatometer was adjusted', default=True)
    ablato_height_cm = models.FloatField(verbose_name='ablatometer height after adjustment in cm', blank=True, null=True)
    notes = models.TextField()
    class Meta:
        get_latest_by = 'datetime'
    def __unicode__(self):
        # Bug fix: `self.site` is a Station instance, which has no .upper();
        # use the raw foreign-key value (`site_id`, the site name string),
        # matching Campaign.__unicode__.
        # Robustness: ablato_height_cm is nullable and '%d' % None raises
        # TypeError, so render a placeholder when the height is unset.
        if self.ablato_height_cm is not None:
            height = '%d' % self.ablato_height_cm
        else:
            height = 'n/a'
        return '%s: %s (%s)' % (self.site_id.upper(), height,
                                self.datetime.strftime('%Y-%m-%d %H:%M:%S'))
class Campaign(models.Model):
    '''
    Ablation measurement campaign: one ablatometer deployment at a given
    site for a single season.
    '''
    site = models.ForeignKey(Station, to_field='site')
    season = models.IntegerField(help_text="The year the ablatometer was deployed")
    deployment = models.DateField(help_text="Date of the deployment")
    recovery = models.DateField(help_text="Date of recovery")
    region = models.CharField(max_length=255, help_text="General description of the deployed location e.g. Tashalich Arm")
    has_uplink = models.BooleanField(help_text="Indicates that the instrument was equipped with a satellite uplink")
    # Fix: `null=True` has no effect on ManyToManyField (Django ignores it
    # and emits warning fields.W340), so it is dropped; `blank=True` alone
    # keeps the relation optional in forms.
    site_visits = models.ManyToManyField(SiteVisit, blank=True)
    class Meta:
        get_latest_by = 'deployment'
        unique_together = ('site', 'season')
    def __unicode__(self):
        # `site_id` holds the raw to_field value (the site name string).
        return '%s (%d)' % (self.site_id.upper(), self.season)
class Ablation(models.Model):
    '''
    Ablation measurement.

    A single telemetered observation from an ablatometer station: GPS fix,
    acoustic range to the ice surface, irradiance/reflectance, wind,
    temperature and battery voltage, plus validity flags maintained by
    clean() / check_flags().
    '''
    objects = models.GeoManager()
    # NOTE(review): "Indiates" is a typo in the help_text; it is a runtime
    # string (shown in admin/forms) so it is left untouched here.
    valid = models.BooleanField(editable=False, help_text="Indiates whether the observation record is valid (this flag set by instrument only)")
    site = models.ForeignKey(Station, to_field='site')
    sats = models.IntegerField(verbose_name='satellites', help_text='Number of satellites')
    hdop = models.FloatField(help_text='Horizontal dilution of precision (HDOP)', null=True)
    time = models.TimeField(help_text="This is a naive time, not time-zone aware")
    date = models.DateField()
    datetime = models.DateTimeField(help_text='Date and time of measurement from GPS')
    lat = models.FloatField(help_text='Latitude (Deg, Dec. Min. N)')
    lng = models.FloatField(help_text='Longitude (Deg, Dec. Min. W)')
    gps_valid = models.BooleanField(help_text="Indicates whether the GPS measurements are valid", default=True)
    elev = models.FloatField(verbose_name='altitude (m)')
    rng_cm = models.FloatField(verbose_name='acoustic range (cm)')
    rng_cm_valid = models.BooleanField(help_text="Indicates whether the range measurement is valid", default=True)
    above = models.IntegerField(verbose_name='irradiance')
    below = models.IntegerField(verbose_name='reflectance')
    wind_spd = models.FloatField(verbose_name='wind speed (m/s)')
    temp_C = models.FloatField(verbose_name='temperature (C)')
    volts = models.FloatField(verbose_name='battery voltage (V)')
    point = models.PointField(srid=4326)
    class Meta:
        get_latest_by = 'datetime'
    def __unicode__(self):
        return '[%s] %s at %s' % (str(self.site_id),
            str(self.date), str(self.time))
    @classmethod
    def get_field_names(self, string=None):
        '''
        Returns a list of field names that match an optional string that can be
        parsed as a regular expression.
        '''
        # NOTE(review): classmethod whose first parameter is named `self`
        # (it actually receives the class); `self._meta` still works because
        # Django exposes _meta on the model class itself.
        names = self._meta.get_all_field_names()
        if string:
            return [name for name in names if re.compile(string).match(name) != None]
        else:
            return [name for name in names]
    @classmethod
    def get_base_field_names(self):
        # The canonical subset of fields exported/reported for a record.
        return ('site_id', 'sats', 'hdop', 'datetime', 'lat', 'lng', 'elev',
            'rng_cm', 'above', 'below', 'wind_spd', 'temp_C', 'volts')
    def get_previous_record(self, *args, **kwargs):
        '''
        Returns the nearest record earlier in time at the same site within
        a +/- 1 hour window, or None when no such record exists.
        '''
        # `datetime` (module) and UTC presumably come from the star import
        # of gass.bering.utils -- TODO confirm.
        td = datetime.timedelta(hours=1)
        foretime = self.datetime + td # An hour later
        backtime = self.datetime - td # An hour earlier
        # Get a window of observations around this sorted datetime descending
        window = Ablation.objects.filter(site__exact=self.site,
            datetime__range=(backtime, foretime)).order_by('-datetime')
        # Find the first adjacent record earlier in time
        if len(window) > 1:
            for each in window:
                if each.datetime.replace(tzinfo=UTC()) < self.datetime:
                    return each
        elif len(window) == 1:
            # Only if the record is previous in time...
            if window[0].datetime.replace(tzinfo=UTC()) < self.datetime:
                return window[0]
            else: return None
        # No adjacent measurements in time
        elif len(window) == 0: return None
        # NOTE(review): this branch *returns* the ValueError class instead of
        # raising it (and is unreachable since len() >= 0) -- confirm intent.
        else: return ValueError
    def clean(self, *args, **kwargs):
        '''
        Accepts a tzinfo keyword argument where tzinfo is an instance of
        datetime.tzinfo that can be passed to the replace() method.
        '''
        # Instrument encodes validity as the character 'A' in raw uploads.
        if isinstance(self.valid, str):
            if self.valid == 'A': self.valid = True
            else: self.valid = False
        if isinstance(self.lng, str):
            # For now, force the negation of longitude values (raw data don't distinguish)
            self.lng = -float(Lng(self.lng).value)
        if isinstance(self.lat, str):
            self.lat = Lat(self.lat).value
        if isinstance(self.date, str):
            self.date = Date(self.date).value
        if isinstance(self.time, str):
            # Django does not support timezone-aware times, only datetimes
            self.time = Time(self.time).value
        # NOTE(review): raises KeyError when no 'tzinfo' kwarg is supplied --
        # confirm all callers pass one.
        self.datetime = datetime.datetime.combine(self.date,
            self.time).replace(tzinfo=kwargs['tzinfo'])
        # For now, force the negation of longitude values
        # NOTE(review): when lng arrived as a string it was already negated
        # above, so this second negation makes the stored point's longitude
        # positive again -- looks suspicious; verify intended hemisphere.
        self.point = 'POINT(%s %s)' % (-float(self.lng), self.lat)
        self.rng_cm = float(self.rng_cm)
        self.check_flags()
    def check_flags(self, *args, **kwargs):
        '''
        A validation procedure setting gps_valid and rng_cm_valid flags.
        '''
        last = self.get_previous_record()
        # TEST: Sufficient satellite constellation?
        if self.sats < 3:
            self.gps_valid = False
        # No test for hdop; do that in database queries where concerned
        # TEST: Obviously bogus acoustic measurements (greater than 600 cm)?
        if self.rng_cm > 600.0:
            self.rng_cm_valid = False
        try:
            if last is None: return None
        except UnboundLocalError:
            return None
        # NOTE(review): .seconds ignores the days component of a timedelta;
        # .total_seconds() may be intended -- confirm.
        datetime_diff = abs((self.datetime - last.datetime.replace(tzinfo=UTC())).seconds)
        rng_cm_diff = self.rng_cm - last.rng_cm
        # TEST: Independent measurements?
        if datetime_diff < 1600:
            # Expected that measurements no more frequent than every 20 minutes
            # NOTE(review): 1600 s is ~26.7 min, not 20 min -- confirm threshold.
            self.gps_valid = False
        # TEST: Likely bogus acoustic measurements?
        if datetime_diff < (60*60*3) and rng_cm_diff > 5.0 and last.rng_cm < 600.0:
            # Closely-separated measurements in time (less than 3 hours)
            # with more than 5 cm melt are likely invalid
            self.rng_cm_valid = False
        # TEST: Likely bogus acoustic measurements?
        if not last.rng_cm_valid and rng_cm_diff > 0.0:
            # The last range measurement was invalid and this one is greater
            self.rng_cm_valid = False
    def geographic_distance(self, obj):
        '''
        Calculates the distance, in meters, between the position of this
        measurement and another.
        Accepts:
            obj {Ablation} Another Ablation model instance
        Returns:
            {Float} The net migration, in meters, between the two observations
        '''
        lat_m_per_degree = 111412.0
        lng_m_per_degree = 55800.0
        # 111,412 m/degree of latitude at 60 degrees north latitude
        # (from National Geospatial Intelligence Agency)
        # 55,800 m/degree of longitude at 60 degrees north latitude
        # (from National Geospatial Intelligence Agency)
        # http://msi.nga.mil/MSISiteContent/StaticFiles/Calculators/degree.html
        lat_diff_m = abs(lat_m_per_degree*(self.lat - obj.lat))
        lng_diff_m = abs(lng_m_per_degree*(self.lng - obj.lng))
        # Simple distance estimate in meters between last and last observation
        distance_m = math.sqrt((lat_diff_m*lat_diff_m) + (lng_diff_m*lng_diff_m))
        return distance_m
class B1Ablation(models.Model):
    '''Ablation measurement at GASS B01; nearly identical to B2Ablation
    (which additionally records elevation).'''
    satellites = models.IntegerField('Number of Satellites')
    hdop = models.FloatField('Dilution of Precision', null=True)
    time = models.TimeField('Time')
    date = models.DateField('Date')
    # datetime = models.DateTimeField('Date and Time', unique=True)
    # We tried making datetime unique before but that led to
    # database errors that could not be resolved when there
    # was an attempt to insert a duplicate record
    datetime = models.DateTimeField('Date and Time', primary_key=True)
    # By using datetime as the primary key, we ensure that:
    #   a) Duplicate records in the raw data are entered as one
    #      record with the most recent meteorological and
    #      ablation data
    #   b) When updating the database, we can pull records from
    #      a consolidated file of all existing records without
    #      fear of re-inserting records already in the database
    lat = models.DecimalField('Latitude (Deg, Dec. Min. N)', max_digits=8, decimal_places=5)
    lng = models.DecimalField('Longitude (Deg, Dec. Min. W)', max_digits=9, decimal_places=5)
    gps_ok = models.BooleanField('Position is Valid')
    acoustic_range_cm = models.DecimalField('Acoustic Range (cm)', max_digits=5, decimal_places=2)
    optical_range_cm = models.DecimalField('Optical Range (cm)', max_digits=10, decimal_places=5)
    # sensor_height = models.DecimalField('Sensor Height at Installation', max_digits=5, decimal_places=2)
    ablation_ok = models.BooleanField('Ablation is Valid')
    # Fix: the two verbose names below carried a stray closing parenthesis
    # ('Irradiance)' / 'Reflectance)').
    top_light = models.IntegerField('Irradiance')
    bottom_light = models.IntegerField('Reflectance')
    wind_m_s = models.DecimalField('Wind Speed (m/s)', max_digits=5, decimal_places=2)
    # temp_C is allowed to be null because negative temperature measurements
    # currently can't be handled (hardware issue)
    temp_C = models.DecimalField('Temperature (C)', max_digits=4, decimal_places=1, null=True)
    voltage = models.FloatField('Battery Voltage (V)')
    def __unicode__(self):
        return str(self.date) + ', ' + str(self.time)
class B2Ablation(models.Model):
    '''Ablation measurement at GASS B02; almost identical to B1Ablation model (has elevation).'''
    satellites = models.IntegerField('Number of Satellites')
    hdop = models.FloatField('Dilution of Precision', null=True)
    time = models.TimeField('Time')
    date = models.DateField('Date')
    # datetime is the primary key so duplicate raw records collapse into one
    # (see B1Ablation for the full rationale).
    datetime = models.DateTimeField('Date and Time', primary_key=True)
    lat = models.DecimalField('Latitude (Deg, Dec. Min. N)', max_digits=8, decimal_places=5)
    lng = models.DecimalField('Longitude (Deg, Dec. Min. W)', max_digits=9, decimal_places=5)
    elev = models.DecimalField('Elevation', max_digits=4, decimal_places=1, blank=True, null=True)
    gps_ok = models.BooleanField('Position is Valid')
    acoustic_range_cm = models.DecimalField('Acoustic Range (cm)', max_digits=5, decimal_places=2)
    optical_range_cm = models.DecimalField('Optical Range (cm)', max_digits=10, decimal_places=5)
    # sensor_height = models.DecimalField('Sensor Height at Installation', max_digits=5, decimal_places=2)
    ablation_ok = models.BooleanField('Ablation is Valid')
    # Fix: stray closing parenthesis removed from the two verbose names below.
    top_light = models.IntegerField('Irradiance')
    bottom_light = models.IntegerField('Reflectance')
    wind_m_s = models.DecimalField('Wind Speed (m/s)', max_digits=5, decimal_places=2)
    # temp_C may be null: negative temperatures can't currently be measured
    # (hardware issue).
    temp_C = models.DecimalField('Temperature (C)', max_digits=4, decimal_places=1, null=True)
    voltage = models.FloatField('Battery Voltage (V)')
    def __unicode__(self):
        return str(self.date) + ', ' + str(self.time)
class B4Ablation(models.Model):
    '''Ablation measurement at GASS B04; identical to B1Ablation model.'''
    satellites = models.IntegerField('Number of Satellites')
    hdop = models.FloatField('Dilution of Precision', null=True)
    time = models.TimeField('Time')
    date = models.DateField('Date')
    # datetime is the primary key so duplicate raw records collapse into one
    # (see B1Ablation for the full rationale).
    datetime = models.DateTimeField('Date and Time', primary_key=True)
    lat = models.DecimalField('Latitude (Deg, Dec. Min. N)', max_digits=8, decimal_places=5)
    lng = models.DecimalField('Longitude (Deg, Dec. Min. W)', max_digits=9, decimal_places=5)
    gps_ok = models.BooleanField('Position is Valid')
    acoustic_range_cm = models.DecimalField('Acoustic Range (cm)', max_digits=5, decimal_places=2)
    optical_range_cm = models.DecimalField('Optical Range (cm)', max_digits=10, decimal_places=5)
    # sensor_height = models.DecimalField('Sensor Height at Installation', max_digits=5, decimal_places=2)
    ablation_ok = models.BooleanField('Ablation is Valid')
    # Fix: stray closing parenthesis removed from the two verbose names below.
    top_light = models.IntegerField('Irradiance')
    bottom_light = models.IntegerField('Reflectance')
    wind_m_s = models.DecimalField('Wind Speed (m/s)', max_digits=5, decimal_places=2)
    # temp_C may be null: negative temperatures can't currently be measured
    # (hardware issue).
    temp_C = models.DecimalField('Temperature (C)', max_digits=4, decimal_places=1, null=True)
    voltage = models.FloatField('Battery Voltage (V)')
    def __unicode__(self):
        return str(self.date) + ', ' + str(self.time)
class B6Ablation(models.Model):
    '''Ablation measurement at GASS B06; identical to B1Ablation model.'''
    satellites = models.IntegerField('Number of Satellites')
    hdop = models.FloatField('Dilution of Precision', null=True)
    time = models.TimeField('Time')
    date = models.DateField('Date')
    # datetime is the primary key so duplicate raw records collapse into one
    # (see B1Ablation for the full rationale).
    datetime = models.DateTimeField('Date and Time', primary_key=True)
    lat = models.DecimalField('Latitude (Deg, Dec. Min. N)', max_digits=8, decimal_places=5)
    lng = models.DecimalField('Longitude (Deg, Dec. Min. W)', max_digits=9, decimal_places=5)
    gps_ok = models.BooleanField('Position is Valid')
    acoustic_range_cm = models.DecimalField('Acoustic Range (cm)', max_digits=5, decimal_places=2)
    optical_range_cm = models.DecimalField('Optical Range (cm)', max_digits=10, decimal_places=5)
    # sensor_height = models.DecimalField('Sensor Height at Installation', max_digits=5, decimal_places=2)
    ablation_ok = models.BooleanField('Ablation is Valid')
    # Fix: stray closing parenthesis removed from the two verbose names below.
    top_light = models.IntegerField('Irradiance')
    bottom_light = models.IntegerField('Reflectance')
    wind_m_s = models.DecimalField('Wind Speed (m/s)', max_digits=5, decimal_places=2)
    # temp_C may be null: negative temperatures can't currently be measured
    # (hardware issue).
    temp_C = models.DecimalField('Temperature (C)', max_digits=4, decimal_places=1, null=True)
    voltage = models.FloatField('Battery Voltage (V)')
    def __unicode__(self):
        return str(self.date) + ', ' + str(self.time)
class T1Ablation(models.Model):
    '''Ablation measurement at GASS T01; identical to B1Ablation model.'''
    satellites = models.IntegerField('Number of Satellites')
    hdop = models.FloatField('Dilution of Precision', null=True)
    time = models.TimeField('Time')
    date = models.DateField('Date')
    # datetime is the primary key so duplicate raw records collapse into one
    # (see B1Ablation for the full rationale).
    datetime = models.DateTimeField('Date and Time', primary_key=True)
    lat = models.DecimalField('Latitude (Deg, Dec. Min. N)', max_digits=8, decimal_places=5)
    lng = models.DecimalField('Longitude (Deg, Dec. Min. W)', max_digits=9, decimal_places=5)
    gps_ok = models.BooleanField('Position is Valid')
    acoustic_range_cm = models.DecimalField('Acoustic Range (cm)', max_digits=5, decimal_places=2)
    optical_range_cm = models.DecimalField('Optical Range (cm)', max_digits=10, decimal_places=5)
    # sensor_height = models.DecimalField('Sensor Height at Installation', max_digits=5, decimal_places=2)
    ablation_ok = models.BooleanField('Ablation is Valid')
    # Fix: stray closing parenthesis removed from the two verbose names below.
    top_light = models.IntegerField('Irradiance')
    bottom_light = models.IntegerField('Reflectance')
    wind_m_s = models.DecimalField('Wind Speed (m/s)', max_digits=5, decimal_places=2)
    # temp_C may be null: negative temperatures can't currently be measured
    # (hardware issue).
    temp_C = models.DecimalField('Temperature (C)', max_digits=4, decimal_places=1, null=True)
    voltage = models.FloatField('Battery Voltage (V)')
    def __unicode__(self):
        return str(self.date) + ', ' + str(self.time)
| |
import unittest
from conans.test.utils.tools import TestClient, TestServer
from conans.paths import PACKAGES_FOLDER, CONANINFO, EXPORT_FOLDER, CONAN_MANIFEST
import os
from conans.model.manifest import FileTreeManifest
import shutil
from conans import COMPLEX_SEARCH_CAPABILITY
# Fixture conaninfo bodies used to fake packages in the test store.
# These are runtime data consumed by the search tests -- do not edit contents.
# Windows / Visual Studio package with full_requires.
conan_vars1 = '''
[settings]
arch=x64
os=Windows
compiler=Visual Studio
compiler.version=8.1
[options]
use_Qt=True
[full_requires]
Hello2/0.1@lasote/stable:11111
OpenSSL/2.10@lasote/testing:2222
HelloInfo1/0.45@fenix/testing:33333
'''
# Platform-independent gcc package (no `os` setting).
conan_vars1b = '''
[settings]
arch=x86
compiler=gcc
compiler.version=4.3
compiler.libcxx=libstdc++
[options]
use_Qt=True
'''
# Linux gcc package including a recipe_hash section.
conan_vars1c = '''
[settings]
os=Linux
arch=x86
compiler=gcc
compiler.version=4.5
compiler.libcxx=libstdc++11
[options]
use_Qt=False
[full_requires]
Hello2/0.1@lasote/stable:11111
OpenSSL/2.10@lasote/testing:2222
HelloInfo1/0.45@fenix/testing:33333
[recipe_hash]
d41d8cd98f00b204e9800998ecf8427e
''' # The recipe_hash corresponds to the faked conanmanifests in export
# Ubuntu package; note the non-standard `version` setting.
conan_vars2 = '''
[options]
use_OpenGL=True
[settings]
arch=x64
os=Ubuntu
version=15.04
'''
# Darwin package with options only.
conan_vars3 = '''
[options]
HAVE_TESTS=True
USE_CONFIG=False
[settings]
os=Darwin
'''
# Windows gcc package with full_requires.
conan_vars4 = """[settings]
os=Windows
arch=x86_64
compiler=gcc
[options]
language=1
[full_requires]
Hello2/0.1@lasote/stable:11111
OpenSSL/2.10@lasote/testing:2222
HelloInfo1/0.45@fenix/testing:33333
"""
class SearchTest(unittest.TestCase):
    """Tests for the "conan search" command.

    Covers recipe pattern searches and package searches with ``-q`` queries
    (simple, complex boolean, and invalid ones), both against the local
    cache and against two remotes: "local" (no server-side search support,
    queries evaluated client-side) and "search_able" (advertises
    COMPLEX_SEARCH_CAPABILITY, queries evaluated server-side).
    """

    def setUp(self):
        self.servers = {"local": TestServer(server_capabilities=[]),
                        "search_able": TestServer(server_capabilities=[COMPLEX_SEARCH_CAPABILITY])}
        self.client = TestClient(servers=self.servers)

        # No conans created yet: empty cache search.
        self.client.run("search")
        output = self.client.user_io.out
        self.assertIn('There are no packages', output)

        # Conans with and without packages created: populate the cache by
        # writing conaninfo files straight into the storage layout.
        self.root_folder1 = 'Hello/1.4.10/fenix/testing'
        root_folder2 = 'helloTest/1.4.10/fenix/stable'
        root_folder3 = 'Bye/0.14/fenix/testing'
        root_folder4 = 'NodeInfo/1.0.2/fenix/stable'
        root_folder5 = 'MissFile/1.0.2/fenix/stable'  # package folder without conaninfo
        root_folder11 = 'Hello/1.4.11/fenix/testing'
        root_folder12 = 'Hello/1.4.12/fenix/testing'
        self.client.save(
            {"Empty/1.10/fake/test/reg/fake.txt": "//",
             "%s/%s/WindowsPackageSHA/%s" % (self.root_folder1, PACKAGES_FOLDER, CONANINFO): conan_vars1,
             "%s/%s/WindowsPackageSHA/%s" % (root_folder11, PACKAGES_FOLDER, CONANINFO): conan_vars1,
             "%s/%s/WindowsPackageSHA/%s" % (root_folder12, PACKAGES_FOLDER, CONANINFO): conan_vars1,
             "%s/%s/PlatformIndependantSHA/%s" % (self.root_folder1, PACKAGES_FOLDER, CONANINFO): conan_vars1b,
             "%s/%s/LinuxPackageSHA/%s" % (self.root_folder1, PACKAGES_FOLDER, CONANINFO): conan_vars1c,
             "%s/%s/a44f541cd44w57/%s" % (root_folder2, PACKAGES_FOLDER, CONANINFO): conan_vars2,
             "%s/%s/e4f7vdwcv4w55d/%s" % (root_folder3, PACKAGES_FOLDER, CONANINFO): conan_vars3,
             "%s/%s/e4f7vdwcv4w55d/%s" % (root_folder4, PACKAGES_FOLDER, CONANINFO): conan_vars4,
             "%s/%s/e4f7vdwcv4w55d/%s" % (root_folder5, PACKAGES_FOLDER, "hello.txt"): "Hello"},
            self.client.paths.store)

        # Fake some manifests to be able to calculate recipe hash.
        fake_manifest = FileTreeManifest(1212, {})
        self.client.save(
            {os.path.join(self.root_folder1, EXPORT_FOLDER, CONAN_MANIFEST): str(fake_manifest),
             os.path.join(root_folder2, EXPORT_FOLDER, CONAN_MANIFEST): str(fake_manifest),
             os.path.join(root_folder3, EXPORT_FOLDER, CONAN_MANIFEST): str(fake_manifest),
             os.path.join(root_folder4, EXPORT_FOLDER, CONAN_MANIFEST): str(fake_manifest)},
            self.client.paths.store)

    def recipe_search_test(self):
        """Case-insensitive vs --case-sensitive recipe pattern search."""
        self.client.run("search Hello*")
        self.assertEqual("Existing package recipes:\n\n"
                         "Hello/1.4.10@fenix/testing\n"
                         "Hello/1.4.11@fenix/testing\n"
                         "Hello/1.4.12@fenix/testing\n"
                         "helloTest/1.4.10@fenix/stable\n", self.client.user_io.out)
        self.client.run("search Hello* --case-sensitive")
        self.assertEqual("Existing package recipes:\n\n"
                         "Hello/1.4.10@fenix/testing\n"
                         "Hello/1.4.11@fenix/testing\n"
                         "Hello/1.4.12@fenix/testing\n",
                         self.client.user_io.out)
        self.client.run("search *fenix* --case-sensitive")
        self.assertEqual("Existing package recipes:\n\n"
                         "Bye/0.14@fenix/testing\n"
                         "Hello/1.4.10@fenix/testing\n"
                         "Hello/1.4.11@fenix/testing\n"
                         "Hello/1.4.12@fenix/testing\n"
                         "MissFile/1.0.2@fenix/stable\n"
                         "NodeInfo/1.0.2@fenix/stable\n"
                         "helloTest/1.4.10@fenix/stable\n", self.client.user_io.out)

    def recipe_pattern_search_test(self):
        """Wildcard patterns over recipe references (same data as above)."""
        self.client.run("search Hello*")
        self.assertEqual("Existing package recipes:\n\n"
                         "Hello/1.4.10@fenix/testing\n"
                         "Hello/1.4.11@fenix/testing\n"
                         "Hello/1.4.12@fenix/testing\n"
                         "helloTest/1.4.10@fenix/stable\n", self.client.user_io.out)
        self.client.run("search Hello* --case-sensitive")
        self.assertEqual("Existing package recipes:\n\n"
                         "Hello/1.4.10@fenix/testing\n"
                         "Hello/1.4.11@fenix/testing\n"
                         "Hello/1.4.12@fenix/testing\n", self.client.user_io.out)
        self.client.run("search *fenix* --case-sensitive")
        self.assertEqual("Existing package recipes:\n\n"
                         "Bye/0.14@fenix/testing\n"
                         "Hello/1.4.10@fenix/testing\n"
                         "Hello/1.4.11@fenix/testing\n"
                         "Hello/1.4.12@fenix/testing\n"
                         "MissFile/1.0.2@fenix/stable\n"
                         "NodeInfo/1.0.2@fenix/stable\n"
                         "helloTest/1.4.10@fenix/stable\n", self.client.user_io.out)

    def package_search_with_invalid_reference_test(self):
        """-q requires a full, valid recipe reference, not a pattern."""
        self.client.run("search Hello -q 'a=1'", ignore_error=True)
        self.assertIn("-q parameter only allowed with a valid recipe", str(self.client.user_io.out))

    def package_search_with_empty_query_test(self):
        """An absent query lists every package of the recipe."""
        self.client.run("search Hello/1.4.10/fenix/testing")
        self.assertIn("WindowsPackageSHA", self.client.user_io.out)
        self.assertIn("PlatformIndependantSHA", self.client.user_io.out)
        self.assertIn("LinuxPackageSHA", self.client.user_io.out)

    def package_search_nonescaped_characters_test(self):
        """Values with '+' (libstdc++/libstdc++11) survive query parsing."""
        self.client.run('search Hello/1.4.10@fenix/testing -q "compiler=gcc AND compiler.libcxx=libstdc++11"')
        self.assertIn("LinuxPackageSHA", self.client.user_io.out)
        self.assertNotIn("PlatformIndependantSHA", self.client.user_io.out)
        self.assertNotIn("WindowsPackageSHA", self.client.user_io.out)
        self.client.run('search Hello/1.4.10@fenix/testing -q "compiler=gcc AND compiler.libcxx=libstdc++"')
        self.assertNotIn("LinuxPackageSHA", self.client.user_io.out)
        self.assertIn("PlatformIndependantSHA", self.client.user_io.out)
        self.assertNotIn("WindowsPackageSHA", self.client.user_io.out)

        # Now search with a remote: replace the (still empty) server store
        # with a copy of the client cache, then wipe the local recipes.
        os.rmdir(self.servers["local"].paths.store)
        shutil.copytree(self.client.paths.store, self.servers["local"].paths.store)
        self.client.run("remove Hello* -f")
        self.client.run('search Hello/1.4.10@fenix/testing -q "compiler=gcc AND compiler.libcxx=libstdc++11" -r local')
        self.assertIn("LinuxPackageSHA", self.client.user_io.out)
        self.assertNotIn("PlatformIndependantSHA", self.client.user_io.out)
        self.assertNotIn("WindowsPackageSHA", self.client.user_io.out)
        self.client.run('search Hello/1.4.10@fenix/testing -q "compiler=gcc AND compiler.libcxx=libstdc++" -r local')
        self.assertNotIn("LinuxPackageSHA", self.client.user_io.out)
        self.assertIn("PlatformIndependantSHA", self.client.user_io.out)
        self.assertNotIn("WindowsPackageSHA", self.client.user_io.out)

    def _assert_pkg_q(self, query, packages_found, remote):
        """Run a package search with *query* (optionally on *remote*) and
        check that exactly the SHAs in *packages_found* appear in the output."""
        command = 'search Hello/1.4.10@fenix/testing -q \'%s\'' % query
        if remote:
            command += " -r %s" % remote
        self.client.run(command)
        for pack_name in ["LinuxPackageSHA", "PlatformIndependantSHA", "WindowsPackageSHA"]:
            self.assertEqual(pack_name in self.client.user_io.out,
                             pack_name in packages_found, "%s fail" % pack_name)

    def package_search_complex_queries_test(self):
        """Boolean AND/OR queries with parentheses, locally and on both remotes."""
        def test_cases(remote=None):
            if remote:  # Simulate upload to remote
                os.rmdir(self.servers[remote].paths.store)
                shutil.copytree(self.client.paths.store, self.servers[remote].paths.store)
            q = ''
            self._assert_pkg_q(q, ["LinuxPackageSHA", "PlatformIndependantSHA",
                                   "WindowsPackageSHA"], remote)
            q = 'compiler="gcc"'
            self._assert_pkg_q(q, ["LinuxPackageSHA", "PlatformIndependantSHA"], remote)
            q = 'compiler='  # No packages found with empty value
            self._assert_pkg_q(q, [], remote)
            q = 'compiler="gcc" OR compiler.libcxx=libstdc++11'
            # Should find Visual because of the OR, visual doesn't care about libcxx
            self._assert_pkg_q(q, ["LinuxPackageSHA", "PlatformIndependantSHA",
                                   "WindowsPackageSHA"], remote)
            q = '(compiler="gcc" AND compiler.libcxx=libstdc++11) OR compiler.version=4.5'
            self._assert_pkg_q(q, ["LinuxPackageSHA"], remote)
            q = '(compiler="gcc" AND compiler.libcxx=libstdc++11) OR '\
                '(compiler.version=4.5 OR compiler.version=8.1)'
            self._assert_pkg_q(q, ["LinuxPackageSHA", "WindowsPackageSHA"], remote)
            q = '(compiler="gcc" AND compiler.libcxx=libstdc++) OR '\
                '(compiler.version=4.5 OR compiler.version=8.1)'
            self._assert_pkg_q(q, ["LinuxPackageSHA", "PlatformIndependantSHA",
                                   "WindowsPackageSHA"], remote)
            q = '(compiler="gcc" AND compiler.libcxx=libstdc++) OR '\
                '(compiler.version=4.3 OR compiler.version=8.1)'
            self._assert_pkg_q(q, ["PlatformIndependantSHA", "WindowsPackageSHA"], remote)
            q = '(os="Linux" OR os=Windows)'
            self._assert_pkg_q(q, ["PlatformIndependantSHA", "LinuxPackageSHA",
                                   "WindowsPackageSHA"], remote)
            q = '(os="Linux" OR os=Windows) AND use_Qt=True'
            self._assert_pkg_q(q, ["PlatformIndependantSHA", "WindowsPackageSHA"], remote)
            q = '(os="Linux" OR os=Windows) AND use_Qt=True AND nonexistant_option=3'
            self._assert_pkg_q(q, ["PlatformIndependantSHA", "WindowsPackageSHA"], remote)
            q = '(os="Linux" OR os=Windows) AND use_Qt=True OR nonexistant_option=3'
            self._assert_pkg_q(q, ["PlatformIndependantSHA",
                                   "WindowsPackageSHA", "LinuxPackageSHA"], remote)

        # test in local
        test_cases()
        # test in remote
        test_cases(remote="local")
        # test in remote with search capabilities
        test_cases(remote="search_able")

    def package_search_with_invalid_query_test(self):
        """Malformed queries must be rejected with a descriptive error."""
        self.client.run("search Hello/1.4.10/fenix/testing -q 'invalid'", ignore_error=True)
        self.assertIn("Invalid package query: invalid", self.client.user_io.out)
        self.client.run("search Hello/1.4.10/fenix/testing -q 'os= 3'", ignore_error=True)
        self.assertIn("Invalid package query: os= 3", self.client.user_io.out)
        self.client.run("search Hello/1.4.10/fenix/testing -q 'os=3 FAKE '", ignore_error=True)
        self.assertIn("Invalid package query: os=3 FAKE ", self.client.user_io.out)
        self.client.run("search Hello/1.4.10/fenix/testing -q 'os=3 os.compiler=4'", ignore_error=True)
        self.assertIn("Invalid package query: os=3 os.compiler=4", self.client.user_io.out)
        self.client.run("search Hello/1.4.10/fenix/testing -q 'not os=3 AND os.compiler=4'", ignore_error=True)
        self.assertIn("Invalid package query: not os=3 AND os.compiler=4. 'not' operator is not allowed", self.client.user_io.out)
        self.client.run("search Hello/1.4.10/fenix/testing -q 'os=3 AND !os.compiler=4'", ignore_error=True)
        self.assertIn("Invalid package query: os=3 AND !os.compiler=4. '!' character is not allowed", self.client.user_io.out)

    def package_search_properties_filter_test(self):
        """Filtering packages by settings/options values."""
        # All packages without filter
        self.client.run("search Hello/1.4.10/fenix/testing -q ''")
        self.assertIn("WindowsPackageSHA", self.client.user_io.out)
        self.assertIn("PlatformIndependantSHA", self.client.user_io.out)
        self.assertIn("LinuxPackageSHA", self.client.user_io.out)
        # PlatformIndependant declares no "os", so it matches any os query.
        self.client.run('search Hello/1.4.10/fenix/testing -q os=Windows')
        self.assertIn("WindowsPackageSHA", self.client.user_io.out)
        self.assertIn("PlatformIndependantSHA", self.client.user_io.out)
        self.assertNotIn("LinuxPackageSHA", self.client.user_io.out)
        self.client.run('search Hello/1.4.10/fenix/testing -q "os=Windows AND compiler.version=4.5"')
        self.assertIn("There are no packages for reference 'Hello/1.4.10@fenix/testing' matching the query 'os=Windows AND compiler.version=4.5'", self.client.user_io.out)
        self.client.run('search Hello/1.4.10/fenix/testing -q "os=Linux AND compiler.version=4.5"')
        self.assertNotIn("WindowsPackageSHA", self.client.user_io.out)
        self.assertNotIn("PlatformIndependantSHA", self.client.user_io.out)
        self.assertIn("LinuxPackageSHA", self.client.user_io.out)
        self.client.run('search Hello/1.4.10/fenix/testing -q "compiler.version=1.0"')
        self.assertIn("There are no packages for reference 'Hello/1.4.10@fenix/testing' matching the query 'compiler.version=1.0'", self.client.user_io.out)
        self.client.run('search Hello/1.4.10/fenix/testing -q "compiler=gcc AND compiler.version=4.5"')
        self.assertNotIn("WindowsPackageSHA", self.client.user_io.out)
        self.assertNotIn("PlatformIndependantSHA", self.client.user_io.out)
        self.assertIn("LinuxPackageSHA", self.client.user_io.out)
        self.client.run('search Hello/1.4.10/fenix/testing -q "arch=x86"')
        # One package will be outdated from recipe and another don't.
        # NOTE(review): the expected block below must match conan's output
        # formatting byte for byte -- verify its indentation against real output.
        self.assertEqual("""Existing packages for recipe Hello/1.4.10@fenix/testing:
Package_ID: LinuxPackageSHA
[options]
use_Qt: False
[settings]
arch: x86
compiler: gcc
compiler.libcxx: libstdc++11
compiler.version: 4.5
os: Linux
[requires]
Hello2/0.1@lasote/stable:11111
HelloInfo1/0.45@fenix/testing:33333
OpenSSL/2.10@lasote/testing:2222
outdated from recipe: False
Package_ID: PlatformIndependantSHA
[options]
use_Qt: True
[settings]
arch: x86
compiler: gcc
compiler.libcxx: libstdc++
compiler.version: 4.3
outdated from recipe: True
""", self.client.user_io.out)

        self.client.run('search helloTest/1.4.10@fenix/stable -q use_OpenGL=False')
        self.assertIn("There are no packages for reference 'helloTest/1.4.10@fenix/stable' "
                      "matching the query 'use_OpenGL=False'", self.client.user_io.out)
        self.client.run('search helloTest/1.4.10@fenix/stable -q use_OpenGL=True')
        self.assertIn("Existing packages for recipe helloTest/1.4.10@fenix/stable", self.client.user_io.out)
        self.client.run('search helloTest/1.4.10@fenix/stable -q "use_OpenGL=True AND arch=x64"')
        self.assertIn("Existing packages for recipe helloTest/1.4.10@fenix/stable", self.client.user_io.out)
        self.client.run('search helloTest/1.4.10@fenix/stable -q "use_OpenGL=True AND arch=x86"')
        self.assertIn("There are no packages for reference 'helloTest/1.4.10@fenix/stable' "
                      "matching the query 'use_OpenGL=True AND arch=x86'", self.client.user_io.out)

    def search_with_no_local_test(self):
        """A brand-new client (empty cache, no remotes) reports no packages.

        Bug fix: this assertion previously inspected ``self.client`` whose
        setUp output already contained "There are no packages", so the test
        passed vacuously; it must check the freshly created ``client``.
        """
        client = TestClient()
        client.run("search nonexist/1.0@lasote/stable")
        self.assertIn("There are no packages", client.user_io.out)
| |
from __future__ import absolute_import, unicode_literals
import collections
import pickle
import re
import sys
from unittest import TestCase, main, SkipTest
from copy import copy, deepcopy
from typing import Any
from typing import TypeVar, AnyStr
from typing import T, KT, VT # Not in __all__.
from typing import Union, Optional
from typing import Tuple, List, MutableMapping
from typing import Callable
from typing import Generic, ClassVar
from typing import cast
from typing import Type
from typing import NewType
from typing import NamedTuple
from typing import IO, TextIO, BinaryIO
from typing import Pattern, Match
import abc
import typing
try:
import collections.abc as collections_abc
except ImportError:
import collections as collections_abc # Fallback for PY3.2.
class BaseTestCase(TestCase):
    """TestCase extended with subclass-relationship assertions and a helper
    that resets typing's internal caches."""

    def assertIsSubclass(self, cls, class_or_tuple, msg=None):
        """Fail unless ``issubclass(cls, class_or_tuple)`` holds."""
        if issubclass(cls, class_or_tuple):
            return
        detail = '' if msg is None else ' : %s' % msg
        raise self.failureException(
            '%r is not a subclass of %r%s' % (cls, class_or_tuple, detail))

    def assertNotIsSubclass(self, cls, class_or_tuple, msg=None):
        """Fail if ``issubclass(cls, class_or_tuple)`` holds."""
        if not issubclass(cls, class_or_tuple):
            return
        detail = '' if msg is None else ' : %s' % msg
        raise self.failureException(
            '%r is a subclass of %r%s' % (cls, class_or_tuple, detail))

    def clear_caches(self):
        """Invoke every cache-clearing callback registered by typing."""
        for cleanup in typing._cleanups:
            cleanup()
class Employee(object):
    """Root of the toy class hierarchy used throughout these tests."""
    pass
class Manager(Employee):
    """Direct Employee subclass."""
    pass
class Founder(Employee):
    """Another direct Employee subclass, a sibling of Manager."""
    pass
class ManagingFounder(Manager, Founder):
    """Diamond-shaped subclass inheriting from both Manager and Founder."""
    pass
class AnyTests(BaseTestCase):
    """Behaviour of the special form typing.Any in this typing version:
    it rejects isinstance/issubclass, subclassing, instantiation and
    subscription.  NOTE(review): these semantics are version-dependent
    (newer typing releases relax some of them)."""

    def test_any_instance_type_error(self):
        # Any may not be used in isinstance checks.
        with self.assertRaises(TypeError):
            isinstance(42, Any)

    def test_any_subclass_type_error(self):
        # Any may not appear on either side of issubclass.
        with self.assertRaises(TypeError):
            issubclass(Employee, Any)
        with self.assertRaises(TypeError):
            issubclass(Any, Employee)

    def test_repr(self):
        self.assertEqual(repr(Any), 'typing.Any')

    def test_errors(self):
        with self.assertRaises(TypeError):
            issubclass(42, Any)
        with self.assertRaises(TypeError):
            Any[int]  # Any is not a generic type.

    def test_cannot_subclass(self):
        with self.assertRaises(TypeError):
            class A(Any):
                pass
        with self.assertRaises(TypeError):
            class A(type(Any)):
                pass

    def test_cannot_instantiate(self):
        with self.assertRaises(TypeError):
            Any()
        with self.assertRaises(TypeError):
            type(Any)()

    def test_cannot_subscript(self):
        with self.assertRaises(TypeError):
            Any[int]

    def test_any_is_subclass(self):
        # These expressions must simply not fail.
        typing.Match[Any]
        typing.Pattern[Any]
        typing.IO[Any]
class TypeVarTests(BaseTestCase):
    """TypeVar construction, identity, repr, variance markers and the
    errors raised on misuse (runtime checks, subclassing, instantiation)."""

    def test_basic_plain(self):
        T = TypeVar('T')
        # T equals itself.
        self.assertEqual(T, T)
        # T is an instance of TypeVar
        self.assertIsInstance(T, TypeVar)

    def test_typevar_instance_type_error(self):
        T = TypeVar('T')
        with self.assertRaises(TypeError):
            isinstance(42, T)

    def test_typevar_subclass_type_error(self):
        T = TypeVar('T')
        with self.assertRaises(TypeError):
            issubclass(int, T)
        with self.assertRaises(TypeError):
            issubclass(T, int)

    def test_constrained_error(self):
        # A constrained TypeVar needs at least two constraints.
        with self.assertRaises(TypeError):
            X = TypeVar('X', int)
            X

    def test_union_unique(self):
        X = TypeVar('X')
        Y = TypeVar('Y')
        self.assertNotEqual(X, Y)
        self.assertEqual(Union[X], X)
        self.assertNotEqual(Union[X], Union[X, Y])
        self.assertEqual(Union[X, X], X)
        self.assertNotEqual(Union[X, int], Union[X])
        self.assertNotEqual(Union[X, int], Union[int])
        self.assertEqual(Union[X, int].__args__, (X, int))
        self.assertEqual(Union[X, int].__parameters__, (X,))
        self.assertIs(Union[X, int].__origin__, Union)

    def test_union_constrained(self):
        A = TypeVar('A', str, bytes)
        self.assertNotEqual(Union[A, str], Union[A])

    def test_repr(self):
        # reprs use a variance prefix: ~ invariant, + covariant, - contravariant.
        self.assertEqual(repr(T), '~T')
        self.assertEqual(repr(KT), '~KT')
        self.assertEqual(repr(VT), '~VT')
        self.assertEqual(repr(AnyStr), '~AnyStr')
        T_co = TypeVar('T_co', covariant=True)
        self.assertEqual(repr(T_co), '+T_co')
        T_contra = TypeVar('T_contra', contravariant=True)
        self.assertEqual(repr(T_contra), '-T_contra')

    def test_no_redefinition(self):
        # Two TypeVars with the same name are still distinct objects.
        self.assertNotEqual(TypeVar('T'), TypeVar('T'))
        self.assertNotEqual(TypeVar('T', int, str), TypeVar('T', int, str))

    def test_cannot_subclass_vars(self):
        with self.assertRaises(TypeError):
            class V(TypeVar('T')):
                pass

    def test_cannot_subclass_var_itself(self):
        with self.assertRaises(TypeError):
            class V(TypeVar):
                pass

    def test_cannot_instantiate_vars(self):
        with self.assertRaises(TypeError):
            TypeVar('A')()

    def test_bound_errors(self):
        # bound must be a type, and is mutually exclusive with constraints.
        with self.assertRaises(TypeError):
            TypeVar('X', bound=42)
        with self.assertRaises(TypeError):
            TypeVar('X', str, float, bound=Employee)
class UnionTests(BaseTestCase):
    """Union normalization rules (deduplication, Any/object absorption,
    subclass collapsing, flattening), repr, and misuse errors."""

    def test_basics(self):
        u = Union[int, float]
        self.assertNotEqual(u, Union)

    def test_subclass_error(self):
        with self.assertRaises(TypeError):
            issubclass(int, Union)
        with self.assertRaises(TypeError):
            issubclass(Union, int)
        with self.assertRaises(TypeError):
            issubclass(int, Union[int, str])
        with self.assertRaises(TypeError):
            issubclass(Union[int, str], int)

    def test_union_any(self):
        # Union[Any] collapses to Any, but unions containing Any plus
        # other members are kept distinct from plain Any.
        u = Union[Any]
        self.assertEqual(u, Any)
        u1 = Union[int, Any]
        u2 = Union[Any, int]
        u3 = Union[Any, object]
        self.assertEqual(u1, u2)
        self.assertNotEqual(u1, Any)
        self.assertNotEqual(u2, Any)
        self.assertNotEqual(u3, Any)

    def test_union_object(self):
        # object absorbs every other member.
        u = Union[object]
        self.assertEqual(u, object)
        u = Union[int, object]
        self.assertEqual(u, object)
        u = Union[object, int]
        self.assertEqual(u, object)

    def test_unordered(self):
        u1 = Union[int, float]
        u2 = Union[float, int]
        self.assertEqual(u1, u2)

    def test_single_class_disappears(self):
        t = Union[Employee]
        self.assertIs(t, Employee)

    def test_base_class_disappears(self):
        # A member that is a subclass of another member is dropped.
        u = Union[Employee, Manager, int]
        self.assertEqual(u, Union[int, Employee])
        u = Union[Manager, int, Employee]
        self.assertEqual(u, Union[int, Employee])
        u = Union[Employee, Manager]
        self.assertIs(u, Employee)

    def test_union_union(self):
        # Nested unions are flattened.
        u = Union[int, float]
        v = Union[u, Employee]
        self.assertEqual(v, Union[int, float, Employee])

    def test_repr(self):
        self.assertEqual(repr(Union), 'typing.Union')
        u = Union[Employee, int]
        self.assertEqual(repr(u), 'typing.Union[%s.Employee, int]' % __name__)
        u = Union[int, Employee]
        self.assertEqual(repr(u), 'typing.Union[int, %s.Employee]' % __name__)

    def test_cannot_subclass(self):
        with self.assertRaises(TypeError):
            class C(Union):
                pass
        with self.assertRaises(TypeError):
            class C(type(Union)):
                pass
        with self.assertRaises(TypeError):
            class C(Union[int, str]):
                pass

    def test_cannot_instantiate(self):
        with self.assertRaises(TypeError):
            Union()
        u = Union[int, float]
        with self.assertRaises(TypeError):
            u()
        with self.assertRaises(TypeError):
            type(u)()

    def test_union_generalization(self):
        self.assertFalse(Union[str, typing.Iterable[int]] == str)
        self.assertFalse(Union[str, typing.Iterable[int]] == typing.Iterable[int])
        self.assertTrue(Union[str, typing.Iterable] == typing.Iterable)

    def test_optional(self):
        # Optional[X] is sugar for Union[X, None].
        o = Optional[int]
        u = Union[int, None]
        self.assertEqual(o, u)

    def test_empty(self):
        with self.assertRaises(TypeError):
            Union[()]

    def test_union_instance_type_error(self):
        with self.assertRaises(TypeError):
            isinstance(42, Union[int, str])

    def test_union_str_pattern(self):
        # Shouldn't crash; see http://bugs.python.org/issue25390
        A = Union[str, Pattern]
        A

    def test_etree(self):
        # See https://github.com/python/typing/issues/229
        # (Only relevant for Python 2.)
        try:
            from xml.etree.cElementTree import Element
        except ImportError:
            raise SkipTest("cElementTree not found")
        Union[Element, str]  # Shouldn't crash

        def Elem(*args):
            return Element(*args)
        Union[Elem, str]  # Nor should this
class TupleTests(BaseTestCase):
    """typing.Tuple: subclass checks against the bare form, equality of
    parameterized forms, repr, and runtime-check errors."""

    def test_basics(self):
        # issubclass against a *parameterized* Tuple is forbidden...
        with self.assertRaises(TypeError):
            issubclass(Tuple, Tuple[int, str])
        with self.assertRaises(TypeError):
            issubclass(tuple, Tuple[int, str])

        # ...but the bare Tuple behaves like tuple for issubclass.
        class TP(tuple): pass
        self.assertTrue(issubclass(tuple, Tuple))
        self.assertTrue(issubclass(TP, Tuple))

    def test_equality(self):
        self.assertEqual(Tuple[int], Tuple[int])
        self.assertEqual(Tuple[int, ...], Tuple[int, ...])
        self.assertNotEqual(Tuple[int], Tuple[int, int])
        self.assertNotEqual(Tuple[int], Tuple[int, ...])

    def test_tuple_subclass(self):
        class MyTuple(tuple):
            pass
        self.assertTrue(issubclass(MyTuple, Tuple))

    def test_tuple_instance_type_error(self):
        with self.assertRaises(TypeError):
            isinstance((0, 0), Tuple[int, int])
        isinstance((0, 0), Tuple)

    def test_repr(self):
        self.assertEqual(repr(Tuple), 'typing.Tuple')
        self.assertEqual(repr(Tuple[()]), 'typing.Tuple[()]')
        self.assertEqual(repr(Tuple[int, float]), 'typing.Tuple[int, float]')
        self.assertEqual(repr(Tuple[int, ...]), 'typing.Tuple[int, ...]')

    def test_errors(self):
        with self.assertRaises(TypeError):
            issubclass(42, Tuple)
        with self.assertRaises(TypeError):
            issubclass(42, Tuple[int])
class CallableTests(BaseTestCase):
    """typing.Callable: equality/hashing of parameterized forms, repr,
    isinstance against the bare form, and errors on malformed arguments."""

    def test_self_subclass(self):
        # issubclass works only against the bare Callable, never a
        # parameterized one.
        with self.assertRaises(TypeError):
            self.assertTrue(issubclass(type(lambda x: x), Callable[[int], int]))
        self.assertTrue(issubclass(type(lambda x: x), Callable))

    def test_eq_hash(self):
        self.assertEqual(Callable[[int], int], Callable[[int], int])
        self.assertEqual(len({Callable[[int], int], Callable[[int], int]}), 1)
        self.assertNotEqual(Callable[[int], int], Callable[[int], str])
        self.assertNotEqual(Callable[[int], int], Callable[[str], int])
        self.assertNotEqual(Callable[[int], int], Callable[[int, int], int])
        self.assertNotEqual(Callable[[int], int], Callable[[], int])
        self.assertNotEqual(Callable[[int], int], Callable)

    def test_cannot_instantiate(self):
        with self.assertRaises(TypeError):
            Callable()
        with self.assertRaises(TypeError):
            type(Callable)()
        c = Callable[[int], str]
        with self.assertRaises(TypeError):
            c()
        with self.assertRaises(TypeError):
            type(c)()

    def test_callable_wrong_forms(self):
        # The argument list must be a list of types (or Ellipsis).
        with self.assertRaises(TypeError):
            Callable[(), int]
        with self.assertRaises(TypeError):
            Callable[[()], int]
        with self.assertRaises(TypeError):
            Callable[[int, 1], 2]

    def test_callable_instance_works(self):
        def f():
            pass
        self.assertIsInstance(f, Callable)
        self.assertNotIsInstance(None, Callable)

    def test_callable_instance_type_error(self):
        # isinstance against a parameterized Callable is forbidden.
        def f():
            pass
        with self.assertRaises(TypeError):
            self.assertIsInstance(f, Callable[[], None])
        with self.assertRaises(TypeError):
            self.assertIsInstance(f, Callable[[], Any])
        with self.assertRaises(TypeError):
            self.assertNotIsInstance(None, Callable[[], None])
        with self.assertRaises(TypeError):
            self.assertNotIsInstance(None, Callable[[], Any])

    def test_repr(self):
        ct0 = Callable[[], bool]
        self.assertEqual(repr(ct0), 'typing.Callable[[], bool]')
        ct2 = Callable[[str, float], int]
        self.assertEqual(repr(ct2), 'typing.Callable[[str, float], int]')
        ctv = Callable[..., str]
        self.assertEqual(repr(ctv), 'typing.Callable[..., str]')

    def test_ellipsis_in_generic(self):
        # Shouldn't crash; see https://github.com/python/typing/issues/259
        typing.List[Callable[..., str]]
# TypeVars for the SimpleMapping generic below: XK is constrained to text
# types, XV is unconstrained.  The `unicode` builtin marks this as the
# Python 2 variant of the typing test suite.
XK = TypeVar('XK', unicode, bytes)
XV = TypeVar('XV')
class SimpleMapping(Generic[XK, XV]):
    """Minimal generic mapping-like interface; methods are stubs that the
    subscription tests in GenericTests exercise for their signatures only."""

    def __getitem__(self, key):
        pass

    def __setitem__(self, key, value):
        pass

    def get(self, key, default=None):
        pass
class MySimpleMapping(SimpleMapping[XK, XV]):
    """Concrete SimpleMapping backed by a plain dict (``self.store``)."""

    def __init__(self):
        self.store = {}

    def __getitem__(self, key):
        return self.store[key]

    def __setitem__(self, key, value):
        self.store[key] = value

    def get(self, key, default=None):
        """Return ``self.store[key]``, or *default* when the key is absent."""
        return self.store.get(key, default)
class ProtocolTests(BaseTestCase):
    """Structural Supports* protocols: issubclass works (structurally),
    while isinstance raises for non-runtime protocols in this version."""

    def test_supports_int(self):
        self.assertIsSubclass(int, typing.SupportsInt)
        self.assertNotIsSubclass(str, typing.SupportsInt)

    def test_supports_float(self):
        self.assertIsSubclass(float, typing.SupportsFloat)
        self.assertNotIsSubclass(str, typing.SupportsFloat)

    def test_supports_complex(self):
        # Note: complex itself doesn't have __complex__.
        class C(object):
            def __complex__(self):
                return 0j
        self.assertIsSubclass(C, typing.SupportsComplex)
        self.assertNotIsSubclass(str, typing.SupportsComplex)

    def test_supports_abs(self):
        self.assertIsSubclass(float, typing.SupportsAbs)
        self.assertIsSubclass(int, typing.SupportsAbs)
        self.assertNotIsSubclass(str, typing.SupportsAbs)

    def test_reversible(self):
        self.assertIsSubclass(list, typing.Reversible)
        self.assertNotIsSubclass(int, typing.Reversible)

    def test_protocol_instance_type_error(self):
        with self.assertRaises(TypeError):
            isinstance(0, typing.SupportsAbs)
        # Explicit (nominal) inheritance from a protocol still supports
        # ordinary isinstance checks.
        class C1(typing.SupportsInt):
            def __int__(self):
                return 42
        class C2(C1):
            pass
        c = C2()
        self.assertIsInstance(c, C1)
class GenericTests(BaseTestCase):
def test_basics(self):
X = SimpleMapping[str, Any]
self.assertEqual(X.__parameters__, ())
with self.assertRaises(TypeError):
X[unicode]
with self.assertRaises(TypeError):
X[unicode, unicode]
Y = SimpleMapping[XK, unicode]
self.assertEqual(Y.__parameters__, (XK,))
Y[unicode]
with self.assertRaises(TypeError):
Y[unicode, unicode]
def test_generic_errors(self):
T = TypeVar('T')
with self.assertRaises(TypeError):
Generic[T]()
with self.assertRaises(TypeError):
isinstance([], List[int])
with self.assertRaises(TypeError):
issubclass(list, List[int])
def test_init(self):
T = TypeVar('T')
S = TypeVar('S')
with self.assertRaises(TypeError):
Generic[T, T]
with self.assertRaises(TypeError):
Generic[T, S, T]
def test_repr(self):
self.assertEqual(repr(SimpleMapping),
__name__ + '.' + 'SimpleMapping')
self.assertEqual(repr(MySimpleMapping),
__name__ + '.' + 'MySimpleMapping')
def test_chain_repr(self):
T = TypeVar('T')
S = TypeVar('S')
class C(Generic[T]):
pass
X = C[Tuple[S, T]]
self.assertEqual(X, C[Tuple[S, T]])
self.assertNotEqual(X, C[Tuple[T, S]])
Y = X[T, int]
self.assertEqual(Y, X[T, int])
self.assertNotEqual(Y, X[S, int])
self.assertNotEqual(Y, X[T, str])
Z = Y[str]
self.assertEqual(Z, Y[str])
self.assertNotEqual(Z, Y[int])
self.assertNotEqual(Z, Y[T])
self.assertTrue(str(Z).endswith(
'.C[typing.Tuple[str, int]]'))
def test_new_repr(self):
T = TypeVar('T')
U = TypeVar('U', covariant=True)
S = TypeVar('S')
self.assertEqual(repr(List), 'typing.List')
self.assertEqual(repr(List[T]), 'typing.List[~T]')
self.assertEqual(repr(List[U]), 'typing.List[+U]')
self.assertEqual(repr(List[S][T][int]), 'typing.List[int]')
self.assertEqual(repr(List[int]), 'typing.List[int]')
def test_new_repr_complex(self):
T = TypeVar('T')
TS = TypeVar('TS')
self.assertEqual(repr(typing.Mapping[T, TS][TS, T]), 'typing.Mapping[~TS, ~T]')
self.assertEqual(repr(List[Tuple[T, TS]][int, T]),
'typing.List[typing.Tuple[int, ~T]]')
self.assertEqual(repr(List[Tuple[T, T]][List[int]]),
'typing.List[typing.Tuple[typing.List[int], typing.List[int]]]')
def test_new_repr_bare(self):
T = TypeVar('T')
self.assertEqual(repr(Generic[T]), 'typing.Generic[~T]')
self.assertEqual(repr(typing._Protocol[T]), 'typing.Protocol[~T]')
class C(typing.Dict[Any, Any]): pass
# this line should just work
repr(C.__mro__)
def test_dict(self):
T = TypeVar('T')
class B(Generic[T]):
pass
b = B()
b.foo = 42
self.assertEqual(b.__dict__, {'foo': 42})
class C(B[int]):
pass
c = C()
c.bar = 'abc'
self.assertEqual(c.__dict__, {'bar': 'abc'})
def test_false_subclasses(self):
class MyMapping(MutableMapping[str, str]): pass
self.assertNotIsInstance({}, MyMapping)
self.assertNotIsSubclass(dict, MyMapping)
def test_abc_bases(self):
class MM(MutableMapping[str, str]):
def __getitem__(self, k):
return None
def __setitem__(self, k, v):
pass
def __delitem__(self, k):
pass
def __iter__(self):
return iter(())
def __len__(self):
return 0
# this should just work
MM().update()
self.assertIsInstance(MM(), collections_abc.MutableMapping)
self.assertIsInstance(MM(), MutableMapping)
self.assertNotIsInstance(MM(), List)
self.assertNotIsInstance({}, MM)
def test_multiple_bases(self):
class MM1(MutableMapping[str, str], collections_abc.MutableMapping):
pass
with self.assertRaises(TypeError):
# consistent MRO not possible
class MM2(collections_abc.MutableMapping, MutableMapping[str, str]):
pass
def test_orig_bases(self):
T = TypeVar('T')
class C(typing.Dict[str, T]): pass
self.assertEqual(C.__orig_bases__, (typing.Dict[str, T],))
def test_naive_runtime_checks(self):
def naive_dict_check(obj, tp):
# Check if a dictionary conforms to Dict type
if len(tp.__parameters__) > 0:
raise NotImplementedError
if tp.__args__:
KT, VT = tp.__args__
return all(isinstance(k, KT) and isinstance(v, VT)
for k, v in obj.items())
self.assertTrue(naive_dict_check({'x': 1}, typing.Dict[typing.Text, int]))
self.assertFalse(naive_dict_check({1: 'x'}, typing.Dict[typing.Text, int]))
with self.assertRaises(NotImplementedError):
naive_dict_check({1: 'x'}, typing.Dict[typing.Text, T])
def naive_generic_check(obj, tp):
# Check if an instance conforms to the generic class
if not hasattr(obj, '__orig_class__'):
raise NotImplementedError
return obj.__orig_class__ == tp
class Node(Generic[T]): pass
self.assertTrue(naive_generic_check(Node[int](), Node[int]))
self.assertFalse(naive_generic_check(Node[str](), Node[int]))
self.assertFalse(naive_generic_check(Node[str](), List))
with self.assertRaises(NotImplementedError):
naive_generic_check([1,2,3], Node[int])
def naive_list_base_check(obj, tp):
# Check if list conforms to a List subclass
return all(isinstance(x, tp.__orig_bases__[0].__args__[0])
for x in obj)
class C(List[int]): pass
self.assertTrue(naive_list_base_check([1, 2, 3], C))
self.assertFalse(naive_list_base_check(['a', 'b'], C))
def test_multi_subscr_base(self):
T = TypeVar('T')
U = TypeVar('U')
V = TypeVar('V')
class C(List[T][U][V]): pass
class D(C, List[T][U][V]): pass
self.assertEqual(C.__parameters__, (V,))
self.assertEqual(D.__parameters__, (V,))
self.assertEqual(C[int].__parameters__, ())
self.assertEqual(D[int].__parameters__, ())
self.assertEqual(C[int].__args__, (int,))
self.assertEqual(D[int].__args__, (int,))
self.assertEqual(C.__bases__, (List,))
self.assertEqual(D.__bases__, (C, List))
self.assertEqual(C.__orig_bases__, (List[T][U][V],))
self.assertEqual(D.__orig_bases__, (C, List[T][U][V]))
def test_extended_generic_rules_eq(self):
T = TypeVar('T')
U = TypeVar('U')
self.assertEqual(Tuple[T, T][int], Tuple[int, int])
self.assertEqual(typing.Iterable[Tuple[T, T]][T], typing.Iterable[Tuple[T, T]])
with self.assertRaises(TypeError):
Tuple[T, int][()]
with self.assertRaises(TypeError):
Tuple[T, U][T, ...]
self.assertEqual(Union[T, int][int], int)
self.assertEqual(Union[T, U][int, Union[int, str]], Union[int, str])
class Base(object): pass
class Derived(Base): pass
self.assertEqual(Union[T, Base][Derived], Base)
with self.assertRaises(TypeError):
Union[T, int][1]
self.assertEqual(Callable[[T], T][KT], Callable[[KT], KT])
self.assertEqual(Callable[..., List[T]][int], Callable[..., List[int]])
with self.assertRaises(TypeError):
Callable[[T], U][..., int]
with self.assertRaises(TypeError):
Callable[[T], U][[], int]
def test_extended_generic_rules_repr(self):
T = TypeVar('T')
self.assertEqual(repr(Union[Tuple, Callable]).replace('typing.', ''),
'Union[Tuple, Callable]')
self.assertEqual(repr(Union[Tuple, Tuple[int]]).replace('typing.', ''),
'Tuple')
self.assertEqual(repr(Callable[..., Optional[T]][int]).replace('typing.', ''),
'Callable[..., Union[int, NoneType]]')
self.assertEqual(repr(Callable[[], List[T]][int]).replace('typing.', ''),
'Callable[[], List[int]]')
def test_generic_forvard_ref(self):
LLT = List[List['CC']]
class CC: pass
self.assertEqual(typing._eval_type(LLT, globals(), locals()), List[List[CC]])
T = TypeVar('T')
AT = Tuple[T, ...]
self.assertIs(typing._eval_type(AT, globals(), locals()), AT)
CT = Callable[..., List[T]]
self.assertIs(typing._eval_type(CT, globals(), locals()), CT)
def test_extended_generic_rules_subclassing(self):
    """User classes may subclass parameterized Tuple and Callable."""
    class T1(Tuple[T, KT]): pass
    class T2(Tuple[T, ...]): pass
    class C1(Callable[[T], T]): pass
    class C2(Callable[..., int]):
        def __call__(self):
            return None
    self.assertEqual(T1.__parameters__, (T, KT))
    self.assertEqual(T1[int, str].__args__, (int, str))
    self.assertEqual(T1[int, T].__origin__, T1)
    # Tuple[T, ...] contributes a single type parameter.
    self.assertEqual(T2.__parameters__, (T,))
    with self.assertRaises(TypeError):
        T1[int]  # wrong arity
    with self.assertRaises(TypeError):
        T2[int, str]
    self.assertEqual(repr(C1[int]).split('.')[-1], 'C1[int]')
    self.assertEqual(C2.__parameters__, ())
    # Runtime behaviour: the subclasses really are callables/tuples.
    self.assertIsInstance(C2(), collections_abc.Callable)
    self.assertIsSubclass(C2, collections_abc.Callable)
    self.assertIsSubclass(C1, collections_abc.Callable)
    self.assertIsInstance(T1(), tuple)
    self.assertIsSubclass(T2, tuple)
    self.assertIsSubclass(Tuple[int, ...], typing.Sequence)
    self.assertIsSubclass(Tuple[int, ...], typing.Iterable)
def test_fail_with_bare_union(self):
    """Bare special forms cannot be used as type arguments."""
    with self.assertRaises(TypeError):
        List[Union]
    with self.assertRaises(TypeError):
        Tuple[Optional]
    with self.assertRaises(TypeError):
        ClassVar[ClassVar]
    with self.assertRaises(TypeError):
        List[ClassVar[int]]  # ClassVar is not valid nested inside another type
def test_fail_with_bare_generic(self):
    """Generic, Generic[T] and _Protocol cannot be used as type arguments."""
    T = TypeVar('T')
    with self.assertRaises(TypeError):
        List[Generic]
    with self.assertRaises(TypeError):
        Tuple[Generic[T]]
    with self.assertRaises(TypeError):
        List[typing._Protocol]
def test_type_erasure_special(self):
    """Instances of parameterized subclasses erase to the plain class.

    __class__ is the unparameterized class, while __orig_class__ keeps
    the parameterized alias the instance was created from.
    """
    T = TypeVar('T')
    # this is the only test that checks type caching
    self.clear_caches()
    class MyTup(Tuple[T, T]): pass
    self.assertIs(MyTup[int]().__class__, MyTup)
    self.assertIs(MyTup[int]().__orig_class__, MyTup[int])
    class MyCall(Callable[..., T]):
        def __call__(self): return None
    self.assertIs(MyCall[T]().__class__, MyCall)
    self.assertIs(MyCall[T]().__orig_class__, MyCall[T])
    class MyDict(typing.Dict[T, T]): pass
    self.assertIs(MyDict[int]().__class__, MyDict)
    self.assertIs(MyDict[int]().__orig_class__, MyDict[int])
    class MyDef(typing.DefaultDict[str, T]): pass
    self.assertIs(MyDef[int]().__class__, MyDef)
    self.assertIs(MyDef[int]().__orig_class__, MyDef[int])
def test_all_repr_eq_any(self):
    """Every public typing object has a non-empty repr, equals itself, and
    (for single-parameter generics) accepts Any as a type argument."""
    objs = (getattr(typing, el) for el in typing.__all__)
    for obj in objs:
        self.assertNotEqual(repr(obj), '')
        self.assertEqual(obj, obj)
        if getattr(obj, '__parameters__', None) and len(obj.__parameters__) == 1:
            self.assertEqual(obj[Any].__args__, (Any,))
        if isinstance(obj, type):
            # Every class in the MRO must also repr/compare cleanly.
            for base in obj.__mro__:
                self.assertNotEqual(repr(base), '')
                self.assertEqual(base, base)
def test_pickle(self):
    """Instances of generic subclasses and bare special forms pickle."""
    global C  # pickle wants to reference the class by name
    T = TypeVar('T')
    class B(Generic[T]):
        pass
    class C(B[int]):
        pass
    c = C()
    c.foo = 42
    c.bar = 'abc'
    # Round-trip the instance under every supported pickle protocol.
    for proto in range(pickle.HIGHEST_PROTOCOL + 1):
        z = pickle.dumps(c, proto)
        x = pickle.loads(z)
        self.assertEqual(x.foo, 42)
        self.assertEqual(x.bar, 'abc')
        self.assertEqual(x.__dict__, {'foo': 42, 'bar': 'abc'})
    # Bare special forms round-trip to equal objects as well.
    simples = [Any, Union, Tuple, Callable, ClassVar, List, typing.Iterable]
    for s in simples:
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            z = pickle.dumps(s, proto)
            x = pickle.loads(z)
            self.assertEqual(s, x)
def test_copy_and_deepcopy(self):
    """copy() and deepcopy() of typing objects compare equal to the original."""
    T = TypeVar('T')
    class Node(Generic[T]): pass
    # A representative mix: special forms, parameterized generics, forward refs.
    things = [Any, Union[T, int], Tuple[T, int], Callable[..., T], Callable[[int], int],
              Tuple[Any, Any], Node[T], Node[int], Node[Any], typing.Iterable[T],
              typing.Iterable[Any], typing.Iterable[int], typing.Dict[int, str],
              typing.Dict[T, Any], ClassVar[int], ClassVar[List[T]], Tuple['T', 'T'],
              Union['T', int], List['T'], typing.Mapping['T', int]]
    for t in things:
        self.assertEqual(t, deepcopy(t))
        self.assertEqual(t, copy(t))
def test_parameterized_slots(self):
    """__slots__ declared on a generic class survives parameterization."""
    T = TypeVar('T')
    class C(Generic[T]):
        __slots__ = ('potato',)
    c = C()
    c_int = C[int]()
    self.assertEqual(C.__slots__, C[str].__slots__)
    c.potato = 0
    c_int.potato = 0
    # Attributes outside __slots__ stay forbidden on both forms.
    with self.assertRaises(AttributeError):
        c.tomato = 0
    with self.assertRaises(AttributeError):
        c_int.tomato = 0
    # Forward-referencing the class preserves identity and slots.
    self.assertEqual(typing._eval_type(C['C'], globals(), locals()), C[C])
    self.assertEqual(typing._eval_type(C['C'], globals(), locals()).__slots__,
                     C.__slots__)
    self.assertEqual(copy(C[int]), deepcopy(C[int]))
def test_parameterized_slots_dict(self):
    """__slots__ given as a dict also survives parameterization."""
    T = TypeVar('T')
    class D(Generic[T]):
        __slots__ = {'banana': 42}
    d = D()
    d_int = D[int]()
    self.assertEqual(D.__slots__, D[str].__slots__)
    d.banana = 'yes'
    d_int.banana = 'yes'
    with self.assertRaises(AttributeError):
        d.foobar = 'no'
    with self.assertRaises(AttributeError):
        d_int.foobar = 'no'
def test_errors(self):
    """A parameterized alias is not a valid Generic[...] parameter."""
    with self.assertRaises(TypeError):
        B = SimpleMapping[XK, Any]
        class C(Generic[B]):
            pass
def test_repr_2(self):
    """__module__, __qualname__ and repr() of generic classes and aliases."""
    PY32 = sys.version_info[:2] < (3, 3)  # __qualname__ exists only on 3.3+
    class C(Generic[T]):
        pass
    self.assertEqual(C.__module__, __name__)
    if not PY32:
        self.assertEqual(C.__qualname__,
                         'GenericTests.test_repr_2.<locals>.C')
    self.assertEqual(repr(C).split('.')[-1], 'C')
    X = C[int]
    # The alias keeps the class's module/qualname but shows the argument.
    self.assertEqual(X.__module__, __name__)
    if not PY32:
        self.assertTrue(X.__qualname__.endswith('.<locals>.C'))
    self.assertEqual(repr(X).split('.')[-1], 'C[int]')
    class Y(C[int]):
        pass
    self.assertEqual(Y.__module__, __name__)
    if not PY32:
        self.assertEqual(Y.__qualname__,
                         'GenericTests.test_repr_2.<locals>.Y')
    self.assertEqual(repr(Y).split('.')[-1], 'Y')
def test_eq_1(self):
    """Generic and its parameterizations compare by their type arguments."""
    self.assertEqual(Generic, Generic)
    self.assertEqual(Generic[T], Generic[T])
    self.assertNotEqual(Generic[KT], Generic[VT])
def test_eq_2(self):
    """Distinct generic classes are unequal even with identical parameters."""
    class A(Generic[T]):
        pass
    class B(Generic[T]):
        pass
    self.assertEqual(A, A)
    self.assertNotEqual(A, B)
    self.assertEqual(A[T], A[T])
    self.assertNotEqual(A[T], B[T])
def test_multiple_inheritance(self):
    """An explicit Generic[...] base fixes the order of __parameters__."""
    class A(Generic[T, VT]):
        pass
    class B(Generic[KT, T]):
        pass
    class C(A[T, VT], Generic[VT, T, KT], B[KT, T]):
        pass
    # Order follows the explicit Generic[VT, T, KT] base, not A's or B's.
    self.assertEqual(C.__parameters__, (VT, T, KT))
def test_nested(self):
    """Subclassing a concrete parameterization keeps normal class behaviour."""
    G = Generic
    class Visitor(G[T]):
        a = None  # payload set via set(); class-level default
        def set(self, a):
            self.a = a
        def get(self):
            return self.a
        def visit(self):
            return self.a
    V = Visitor[typing.List[int]]
    class IntListVisitor(V):
        def append(self, x):
            self.a.append(x)
    a = IntListVisitor()
    a.set([])
    a.append(1)
    a.append(42)
    self.assertEqual(a.get(), [1, 42])
def test_type_erasure(self):
    """Node(x), Node[T](x) and Node[Any](x) all build plain Node instances."""
    T = TypeVar('T')
    class Node(Generic[T]):
        def __init__(self, label,
                     left = None,
                     right = None):
            self.label = label  # type: T
            self.left = left  # type: Optional[Node[T]]
            self.right = right  # type: Optional[Node[T]]
    def foo(x):
        a = Node(x)
        b = Node[T](x)
        c = Node[Any](x)
        # Type parameters are erased at runtime: same concrete class.
        self.assertIs(type(a), Node)
        self.assertIs(type(b), Node)
        self.assertIs(type(c), Node)
        self.assertEqual(a.label, x)
        self.assertEqual(b.label, x)
        self.assertEqual(c.label, x)
    foo(42)
def test_implicit_any(self):
    """Subclassing a generic without arguments erases its parameters."""
    T = TypeVar('T')
    class C(Generic[T]):
        pass
    class D(C):
        pass
    self.assertEqual(D.__parameters__, ())
    # D has no parameters left, so it is not subscriptable at all.
    with self.assertRaises(Exception):
        D[int]
    with self.assertRaises(Exception):
        D[Any]
    with self.assertRaises(Exception):
        D[T]
class ClassVarTests(BaseTestCase):
    """Tests for typing.ClassVar."""

    def test_basics(self):
        """ClassVar rejects non-types, multiple arguments, and re-subscription."""
        with self.assertRaises(TypeError):
            ClassVar[1]
        with self.assertRaises(TypeError):
            ClassVar[int, str]
        with self.assertRaises(TypeError):
            ClassVar[int][str]

    def test_repr(self):
        self.assertEqual(repr(ClassVar), 'typing.ClassVar')
        cv = ClassVar[int]
        self.assertEqual(repr(cv), 'typing.ClassVar[int]')
        cv = ClassVar[Employee]
        self.assertEqual(repr(cv), 'typing.ClassVar[%s.Employee]' % __name__)

    def test_cannot_subclass(self):
        """Neither ClassVar's type nor a subscripted form can be subclassed."""
        with self.assertRaises(TypeError):
            class C(type(ClassVar)):
                pass
        with self.assertRaises(TypeError):
            class C(type(ClassVar[int])):
                pass

    def test_cannot_init(self):
        with self.assertRaises(TypeError):
            ClassVar()
        with self.assertRaises(TypeError):
            type(ClassVar)()
        with self.assertRaises(TypeError):
            type(ClassVar[Optional[int]])()

    def test_no_isinstance(self):
        """ClassVar refuses isinstance()/issubclass() checks."""
        with self.assertRaises(TypeError):
            isinstance(1, ClassVar[int])
        with self.assertRaises(TypeError):
            issubclass(int, ClassVar)
class CastTests(BaseTestCase):
    """Tests for typing.cast."""

    def test_basics(self):
        # cast() is a no-op at runtime: the value comes back unchanged.
        self.assertEqual(cast(int, 42), 42)
        self.assertEqual(cast(float, 42), 42)
        self.assertIs(type(cast(float, 42)), int)  # no conversion happens
        self.assertEqual(cast(Any, 42), 42)
        self.assertEqual(cast(list, 42), 42)
        self.assertEqual(cast(Union[str, float], 42), 42)
        self.assertEqual(cast(AnyStr, 42), 42)
        self.assertEqual(cast(None, 42), 42)

    def test_errors(self):
        # Bogus calls are not expected to fail.
        cast(42, 42)
        cast('hello', 42)
class ForwardRefTests(BaseTestCase):
    """Tests for typing._ForwardRef."""

    def test_forwardref_instance_type_error(self):
        """Forward references refuse isinstance() checks."""
        fr = typing._ForwardRef('int')
        with self.assertRaises(TypeError):
            isinstance(42, fr)

    def test_syntax_error(self):
        # A forward reference string must be a valid Python expression.
        with self.assertRaises(SyntaxError):
            Generic['/T']
class OverloadTests(BaseTestCase):
    """Tests for typing.overload."""

    def test_overload_exists(self):
        from typing import overload

    def test_overload_fails(self):
        from typing import overload
        # Calling an @overload stub directly raises RuntimeError.
        with self.assertRaises(RuntimeError):
            @overload
            def blah():
                pass
            blah()

    def test_overload_succeeds(self):
        from typing import overload
        @overload
        def blah():
            pass
        # The later plain definition shadows the overload stub, so this works.
        def blah():
            pass
        blah()
class CollectionsAbcTests(BaseTestCase):
    """isinstance()/issubclass() behaviour of the typing aliases for
    collections.abc, plus their no-instantiation/subclassing rules."""

    def test_hashable(self):
        self.assertIsInstance(42, typing.Hashable)
        self.assertNotIsInstance([], typing.Hashable)

    def test_iterable(self):
        self.assertIsInstance([], typing.Iterable)
        # Due to ABC caching, the second time takes a separate code
        # path and could fail.  So call this a few times.
        self.assertIsInstance([], typing.Iterable)
        self.assertIsInstance([], typing.Iterable)
        self.assertNotIsInstance(42, typing.Iterable)
        # Just in case, also test issubclass() a few times.
        self.assertIsSubclass(list, typing.Iterable)
        self.assertIsSubclass(list, typing.Iterable)

    def test_iterator(self):
        it = iter([])
        self.assertIsInstance(it, typing.Iterator)
        self.assertNotIsInstance(42, typing.Iterator)

    def test_sized(self):
        self.assertIsInstance([], typing.Sized)
        self.assertNotIsInstance(42, typing.Sized)

    def test_container(self):
        self.assertIsInstance([], typing.Container)
        self.assertNotIsInstance(42, typing.Container)

    def test_abstractset(self):
        self.assertIsInstance(set(), typing.AbstractSet)
        self.assertNotIsInstance(42, typing.AbstractSet)

    def test_mutableset(self):
        self.assertIsInstance(set(), typing.MutableSet)
        self.assertNotIsInstance(frozenset(), typing.MutableSet)

    def test_mapping(self):
        self.assertIsInstance({}, typing.Mapping)
        self.assertNotIsInstance(42, typing.Mapping)

    def test_mutablemapping(self):
        self.assertIsInstance({}, typing.MutableMapping)
        self.assertNotIsInstance(42, typing.MutableMapping)

    def test_sequence(self):
        self.assertIsInstance([], typing.Sequence)
        self.assertNotIsInstance(42, typing.Sequence)

    def test_mutablesequence(self):
        self.assertIsInstance([], typing.MutableSequence)
        self.assertNotIsInstance((), typing.MutableSequence)

    def test_bytestring(self):
        self.assertIsInstance(b'', typing.ByteString)
        self.assertIsInstance(bytearray(b''), typing.ByteString)

    def test_list(self):
        self.assertIsSubclass(list, typing.List)

    def test_set(self):
        self.assertIsSubclass(set, typing.Set)
        self.assertNotIsSubclass(frozenset, typing.Set)

    def test_frozenset(self):
        self.assertIsSubclass(frozenset, typing.FrozenSet)
        self.assertNotIsSubclass(set, typing.FrozenSet)

    def test_dict(self):
        self.assertIsSubclass(dict, typing.Dict)

    def test_no_list_instantiation(self):
        # The typing aliases themselves cannot be instantiated.
        with self.assertRaises(TypeError):
            typing.List()
        with self.assertRaises(TypeError):
            typing.List[T]()
        with self.assertRaises(TypeError):
            typing.List[int]()

    def test_list_subclass(self):
        # User subclasses of the aliases, however, can be instantiated.
        class MyList(typing.List[int]):
            pass
        a = MyList()
        self.assertIsInstance(a, MyList)
        self.assertIsInstance(a, typing.Sequence)
        self.assertIsSubclass(MyList, list)
        self.assertNotIsSubclass(list, MyList)

    def test_no_dict_instantiation(self):
        with self.assertRaises(TypeError):
            typing.Dict()
        with self.assertRaises(TypeError):
            typing.Dict[KT, VT]()
        with self.assertRaises(TypeError):
            typing.Dict[str, int]()

    def test_dict_subclass(self):
        class MyDict(typing.Dict[str, int]):
            pass
        d = MyDict()
        self.assertIsInstance(d, MyDict)
        self.assertIsInstance(d, typing.MutableMapping)
        self.assertIsSubclass(MyDict, dict)
        self.assertNotIsSubclass(dict, MyDict)

    def test_no_defaultdict_instantiation(self):
        with self.assertRaises(TypeError):
            typing.DefaultDict()
        with self.assertRaises(TypeError):
            typing.DefaultDict[KT, VT]()
        with self.assertRaises(TypeError):
            typing.DefaultDict[str, int]()

    def test_defaultdict_subclass(self):
        class MyDefDict(typing.DefaultDict[str, int]):
            pass
        dd = MyDefDict()
        self.assertIsInstance(dd, MyDefDict)
        self.assertIsSubclass(MyDefDict, collections.defaultdict)
        self.assertNotIsSubclass(collections.defaultdict, MyDefDict)

    def test_no_set_instantiation(self):
        with self.assertRaises(TypeError):
            typing.Set()
        with self.assertRaises(TypeError):
            typing.Set[T]()
        with self.assertRaises(TypeError):
            typing.Set[int]()

    def test_set_subclass_instantiation(self):
        class MySet(typing.Set[int]):
            pass
        d = MySet()
        self.assertIsInstance(d, MySet)

    def test_no_frozenset_instantiation(self):
        with self.assertRaises(TypeError):
            typing.FrozenSet()
        with self.assertRaises(TypeError):
            typing.FrozenSet[T]()
        with self.assertRaises(TypeError):
            typing.FrozenSet[int]()

    def test_frozenset_subclass_instantiation(self):
        class MyFrozenSet(typing.FrozenSet[int]):
            pass
        d = MyFrozenSet()
        self.assertIsInstance(d, MyFrozenSet)

    def test_no_tuple_instantiation(self):
        with self.assertRaises(TypeError):
            Tuple()
        with self.assertRaises(TypeError):
            Tuple[T]()
        with self.assertRaises(TypeError):
            Tuple[int]()

    def test_generator(self):
        def foo():
            yield 42
        g = foo()
        self.assertIsSubclass(type(g), typing.Generator)

    def test_no_generator_instantiation(self):
        with self.assertRaises(TypeError):
            typing.Generator()
        with self.assertRaises(TypeError):
            typing.Generator[T, T, T]()
        with self.assertRaises(TypeError):
            typing.Generator[int, int, int]()

    def test_subclassing(self):
        """Abstract/concrete subclasses of typing ABCs behave like ABC subclasses."""
        class MMA(typing.MutableMapping):
            pass
        with self.assertRaises(TypeError):  # It's abstract
            MMA()
        class MMC(MMA):
            def __getitem__(self, k):
                return None
            def __setitem__(self, k, v):
                pass
            def __delitem__(self, k):
                pass
            def __iter__(self):
                return iter(())
            def __len__(self):
                return 0
        self.assertEqual(len(MMC()), 0)
        assert callable(MMC.update)  # mixin methods are inherited
        self.assertIsInstance(MMC(), typing.Mapping)
        class MMB(typing.MutableMapping[KT, VT]):
            def __getitem__(self, k):
                return None
            def __setitem__(self, k, v):
                pass
            def __delitem__(self, k):
                pass
            def __iter__(self):
                return iter(())
            def __len__(self):
                return 0
        self.assertEqual(len(MMB()), 0)
        self.assertEqual(len(MMB[str, str]()), 0)
        self.assertEqual(len(MMB[KT, VT]()), 0)
        self.assertNotIsSubclass(dict, MMA)
        self.assertNotIsSubclass(dict, MMB)
        self.assertIsSubclass(MMA, typing.Mapping)
        self.assertIsSubclass(MMB, typing.Mapping)
        self.assertIsSubclass(MMC, typing.Mapping)
        # Checks against both the typing aliases and the concrete ABCs.
        self.assertIsInstance(MMB[KT, VT](), typing.Mapping)
        self.assertIsInstance(MMB[KT, VT](), collections.Mapping)
        self.assertIsSubclass(MMA, collections.Mapping)
        self.assertIsSubclass(MMB, collections.Mapping)
        self.assertIsSubclass(MMC, collections.Mapping)
        self.assertIsSubclass(MMB[str, str], typing.Mapping)
        self.assertIsSubclass(MMC, MMA)
        class I(typing.Iterable): pass
        self.assertNotIsSubclass(list, I)
        class G(typing.Generator[int, int, int]): pass
        def g(): yield 0
        self.assertIsSubclass(G, typing.Generator)
        self.assertIsSubclass(G, typing.Iterable)
        if hasattr(collections, 'Generator'):
            # 3.5 stdlib has collections.abc.Generator
            self.assertIsSubclass(G, collections.Generator)
        self.assertIsSubclass(G, collections.Iterable)
        self.assertNotIsSubclass(type(g), G)

    def test_subclassing_subclasshook(self):
        """A user __subclasshook__ is honoured by typing ABC subclasses."""
        class Base(typing.Iterable):
            @classmethod
            def __subclasshook__(cls, other):
                if other.__name__ == 'Foo':
                    return True
                else:
                    return False
        class C(Base): pass
        class Foo: pass
        class Bar: pass
        self.assertIsSubclass(Foo, Base)
        self.assertIsSubclass(Foo, C)
        self.assertNotIsSubclass(Bar, C)

    def test_subclassing_register(self):
        """ABC .register() works through typing aliases and stays per-class."""
        class A(typing.Container): pass
        class B(A): pass
        class C: pass
        A.register(C)
        self.assertIsSubclass(C, A)
        self.assertNotIsSubclass(C, B)
        class D: pass
        B.register(D)
        self.assertIsSubclass(D, A)
        self.assertIsSubclass(D, B)
        class M(): pass
        collections.MutableMapping.register(M)
        self.assertIsSubclass(M, typing.Mapping)

    def test_collections_as_base(self):
        """Classes built on the concrete ABCs satisfy the typing aliases."""
        class M(collections.Mapping): pass
        self.assertIsSubclass(M, typing.Mapping)
        self.assertIsSubclass(M, typing.Iterable)
        class S(collections.MutableSequence): pass
        self.assertIsSubclass(S, typing.MutableSequence)
        self.assertIsSubclass(S, typing.Iterable)
        class I(collections.Iterable): pass
        self.assertIsSubclass(I, typing.Iterable)
        class A(collections.Mapping): pass
        class B: pass
        A.register(B)
        self.assertIsSubclass(B, typing.Mapping)
class TypeTests(BaseTestCase):
    """Tests for typing.Type[...] used in Python 2 type comments."""

    def test_type_basic(self):
        class User(object): pass
        class BasicUser(User): pass
        class ProUser(User): pass
        def new_user(user_class):
            # type: (Type[User]) -> User
            return user_class()
        joe = new_user(BasicUser)

    def test_type_typevar(self):
        class User(object): pass
        class BasicUser(User): pass
        class ProUser(User): pass
        global U  # module-level so the type comment's 'U' is resolvable
        U = TypeVar('U', bound=User)
        def new_user(user_class):
            # type: (Type[U]) -> U
            return user_class()
        joe = new_user(BasicUser)

    def test_type_optional(self):
        A = Optional[Type[BaseException]]
        def foo(a):
            # type: (A) -> Optional[BaseException]
            if a is None:
                return None
            else:
                return a()
        assert isinstance(foo(KeyboardInterrupt), KeyboardInterrupt)
        assert foo(None) is None
class NewTypeTests(BaseTestCase):
    """Tests for typing.NewType."""

    def test_basic(self):
        UserId = NewType('UserId', int)
        UserName = NewType('UserName', str)
        # NewType is an identity function at runtime.
        self.assertIsInstance(UserId(5), int)
        self.assertIsInstance(UserName('Joe'), type('Joe'))
        self.assertEqual(UserId(5) + 1, 6)

    def test_errors(self):
        UserId = NewType('UserId', int)
        UserName = NewType('UserName', str)
        # NewTypes are plain callables: unusable with issubclass() or as bases.
        with self.assertRaises(TypeError):
            issubclass(UserId, int)
        with self.assertRaises(TypeError):
            class D(UserName):
                pass
class NamedTupleTests(BaseTestCase):
    """Tests for typing.NamedTuple."""

    def test_basics(self):
        Emp = NamedTuple('Emp', [('name', str), ('id', int)])
        self.assertIsSubclass(Emp, tuple)
        joe = Emp('Joe', 42)
        jim = Emp(name='Jim', id=1)
        self.assertIsInstance(joe, Emp)
        self.assertIsInstance(joe, tuple)
        self.assertEqual(joe.name, 'Joe')
        self.assertEqual(joe.id, 42)
        self.assertEqual(jim.name, 'Jim')
        self.assertEqual(jim.id, 1)
        self.assertEqual(Emp.__name__, 'Emp')
        self.assertEqual(Emp._fields, ('name', 'id'))
        # _field_types records the declared field types.
        self.assertEqual(Emp._field_types, dict(name=str, id=int))

    def test_pickle(self):
        global Emp  # pickle wants to reference the class by name
        Emp = NamedTuple('Emp', [('name', str), ('id', int)])
        jane = Emp('jane', 37)
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            z = pickle.dumps(jane, proto)
            jane2 = pickle.loads(z)
            self.assertEqual(jane2, jane)
class IOTests(BaseTestCase):
    """Tests for the typing.io pseudo-submodule."""

    def test_io_submodule(self):
        from typing.io import IO, TextIO, BinaryIO, __all__, __name__
        # typing.io simply re-exports the IO classes from typing.
        self.assertIs(IO, typing.IO)
        self.assertIs(TextIO, typing.TextIO)
        self.assertIs(BinaryIO, typing.BinaryIO)
        self.assertEqual(set(__all__), set(['IO', 'TextIO', 'BinaryIO']))
        self.assertEqual(__name__, 'typing.io')
class RETests(BaseTestCase):
    """Tests for the Pattern/Match type aliases."""
    # Much of this is really testing _TypeAlias.

    def test_basics(self):
        pat = re.compile('[a-z]+', re.I)
        self.assertIsSubclass(pat.__class__, Pattern)
        self.assertIsSubclass(type(pat), Pattern)
        self.assertIsInstance(pat, Pattern)
        mat = pat.search('12345abcde.....')
        self.assertIsSubclass(mat.__class__, Match)
        self.assertIsSubclass(type(mat), Match)
        self.assertIsInstance(mat, Match)
        # these should just work
        p = Pattern[Union[str, bytes]]
        m = Match[Union[bytes, str]]

    def test_errors(self):
        with self.assertRaises(TypeError):
            # Doesn't fit AnyStr.
            Pattern[int]
        with self.assertRaises(TypeError):
            # Can't change type vars?
            Match[T]
        m = Match[Union[str, bytes]]
        with self.assertRaises(TypeError):
            # Too complicated?
            m[str]
        with self.assertRaises(TypeError):
            # We don't support isinstance().
            isinstance(42, Pattern[str])

    def test_repr(self):
        self.assertEqual(repr(Pattern), 'Pattern[~AnyStr]')
        self.assertEqual(repr(Pattern[unicode]), 'Pattern[unicode]')
        self.assertEqual(repr(Pattern[str]), 'Pattern[str]')
        self.assertEqual(repr(Match), 'Match[~AnyStr]')
        self.assertEqual(repr(Match[unicode]), 'Match[unicode]')
        self.assertEqual(repr(Match[str]), 'Match[str]')

    def test_re_submodule(self):
        from typing.re import Match, Pattern, __all__, __name__
        self.assertIs(Match, typing.Match)
        self.assertIs(Pattern, typing.Pattern)
        self.assertEqual(set(__all__), set(['Match', 'Pattern']))
        self.assertEqual(__name__, 'typing.re')

    def test_cannot_subclass(self):
        with self.assertRaises(TypeError) as ex:
            class A(typing.Match):
                pass
        self.assertEqual(str(ex.exception),
                         "Cannot subclass typing._TypeAlias")
class AllTests(BaseTestCase):
    """Tests for __all__."""

    def test_all(self):
        from typing import __all__ as a
        # Just spot-check the first and last of every category.
        self.assertIn('AbstractSet', a)
        self.assertIn('ValuesView', a)
        self.assertIn('cast', a)
        self.assertIn('overload', a)
        # Check that io and re are not exported.
        self.assertNotIn('io', a)
        self.assertNotIn('re', a)
        # Spot-check that stdlib modules aren't exported.
        self.assertNotIn('os', a)
        self.assertNotIn('sys', a)
        # Check that Text is defined.
        self.assertIn('Text', a)

    def test_respect_no_type_check(self):
        # @no_type_check must mark the class and its nested members.
        @typing.no_type_check
        class NoTpCheck(object):
            class Inn(object):
                def __init__(self, x): pass
                # type: (this is not actualy a type) -> None
        self.assertTrue(NoTpCheck.__no_type_check__)
        self.assertTrue(NoTpCheck.Inn.__init__.__no_type_check__)

    def test_get_type_hints_dummy(self):
        # Type comments are invisible to get_type_hints(); it returns None here.
        def foo(x):
            # type: (int) -> int
            return x + 1
        self.assertIsNone(typing.get_type_hints(foo))
# Run the test suite when executed as a script.
# NOTE(review): main() is presumably unittest's entry point imported/defined
# earlier in this file — confirm against the file head.
if __name__ == '__main__':
    main()
| |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from textwrap import dedent
from pants.backend.codegen.targets.java_protobuf_library import JavaProtobufLibrary
from pants.backend.codegen.targets.java_thrift_library import JavaThriftLibrary
from pants.backend.codegen.targets.python_thrift_library import PythonThriftLibrary
from pants.backend.core.from_target import FromTarget
from pants.backend.core.targets.resources import Resources
from pants.backend.core.tasks.what_changed import WhatChanged
from pants.backend.core.wrapped_globs import Globs, RGlobs
from pants.backend.jvm.targets.jar_dependency import JarDependency
from pants.backend.jvm.targets.jar_library import JarLibrary
from pants.backend.jvm.targets.java_library import JavaLibrary
from pants.backend.jvm.targets.scala_jar_dependency import ScalaJarDependency
from pants.backend.jvm.targets.unpacked_jars import UnpackedJars
from pants.backend.python.targets.python_library import PythonLibrary
from pants.build_graph.build_file_aliases import BuildFileAliases
from pants.goal.workspace import Workspace
from pants_test.tasks.task_test_base import ConsoleTaskTestBase
class BaseWhatChangedTest(ConsoleTaskTestBase):
    """Shared fixture for the WhatChanged console-task tests."""

    @property
    def alias_groups(self):
        """BUILD-file symbols made available to the test build files."""
        return BuildFileAliases(
            targets={
                'java_library': JavaLibrary,
                'python_library': PythonLibrary,
                'jar_library': JarLibrary,
                'unpacked_jars': UnpackedJars,
                'resources': Resources,
                'java_thrift_library': JavaThriftLibrary,
                'java_protobuf_library': JavaProtobufLibrary,
                'python_thrift_library': PythonThriftLibrary,
            },
            context_aware_object_factories={
                'globs': Globs.factory,
                'rglobs': RGlobs.factory,
                'from_target': FromTarget,
            },
            objects={
                'jar': JarDependency,
                'scala_jar': ScalaJarDependency,
            }
        )

    @classmethod
    def task_type(cls):
        return WhatChanged

    def assert_console_output(self, *output, **kwargs):
        # Guarantee the options WhatChanged always reads have (empty) defaults,
        # while letting individual tests override them.
        options = {'spec_excludes': [], 'exclude_target_regexp': []}
        if 'options' in kwargs:
            options.update(kwargs['options'])
        kwargs['options'] = options
        super(BaseWhatChangedTest, self).assert_console_output(*output, **kwargs)

    def workspace(self, files=None, parent=None, diffspec=None, diff_files=None):
        """Return a Workspace stub that reports the given touched/diffed files."""
        class MockWorkspace(Workspace):
            # `_` is MockWorkspace's own instance; the enclosing test instance
            # is reached through the closure for the assertions.
            def touched_files(_, p):
                self.assertEqual(parent or 'HEAD', p)
                return files or []
            def changes_in(_, ds):
                self.assertEqual(diffspec, ds)
                return diff_files or []
        return MockWorkspace()
class WhatChangedTestBasic(BaseWhatChangedTest):
    """WhatChanged behaviour with an empty build graph."""

    def test_nochanges(self):
        # No touched files -> no output.
        self.assert_console_output(workspace=self.workspace())

    def test_parent(self):
        # 'changes_since' is forwarded to Workspace.touched_files as the parent rev.
        self.assert_console_output(options={'changes_since': '42'},
                                   workspace=self.workspace(parent='42'))

    def test_files(self):
        # With 'files' set, the raw changed paths are printed instead of targets.
        self.assert_console_output(
            'a/b/c',
            'd',
            'e/f',
            options={'files': True},
            workspace=self.workspace(files=['a/b/c', 'd', 'e/f'])
        )
class WhatChangedTest(BaseWhatChangedTest):
    """End-to-end WhatChanged tests over a small synthetic repository."""

    def setUp(self):
        """Create BUILD files covering every target type under test."""
        super(WhatChangedTest, self).setUp()
        self.add_to_build_file('root/src/py/a', dedent("""
        python_library(
          name='alpha',
          sources=['b/c', 'd'],
          resources=['test.resources']
        )
        jar_library(
          name='beta',
          jars=[
            jar(org='gamma', name='ray', rev='1.137.bruce_banner')
          ]
        )
        """))
        self.add_to_build_file('root/src/py/1', dedent("""
        python_library(
          name='numeric',
          sources=['2']
        )
        """))
        # a <- b <- c dependency chain for the include_dependees tests.
        self.add_to_build_file('root/src/py/dependency_tree/a', dedent("""
        python_library(
          name='a',
          sources=['a.py'],
        )
        """))
        self.add_to_build_file('root/src/py/dependency_tree/b', dedent("""
        python_library(
          name='b',
          sources=['b.py'],
          dependencies=['root/src/py/dependency_tree/a']
        )
        """))
        self.add_to_build_file('root/src/py/dependency_tree/c', dedent("""
        python_library(
          name='c',
          sources=['c.py'],
          dependencies=['root/src/py/dependency_tree/b']
        )
        """))
        # Two targets deliberately owning the same source file.
        self.add_to_build_file('root/src/thrift', dedent("""
        java_thrift_library(
          name='thrift',
          sources=['a.thrift']
        )
        python_thrift_library(
          name='py-thrift',
          sources=['a.thrift']
        )
        """))
        self.add_to_build_file('root/src/resources/a', dedent("""
        resources(
          name='a_resources',
          sources=['a.resources']
        )
        """))
        self.add_to_build_file('root/src/java/a', dedent("""
        java_library(
          name='a_java',
          sources=rglobs("*.java"),
        )
        """))
        self.add_to_build_file('root/3rdparty/BUILD.twitter', dedent("""
        jar_library(
          name='dummy',
          jars=[
            jar(org='foo', name='ray', rev='1.45')
          ])
        """))
        self.add_to_build_file('root/3rdparty/BUILD', dedent("""
        jar_library(
          name='dummy1',
          jars=[
            jar(org='foo1', name='ray', rev='1.45')
          ])
        """))
        # This is a directory that might confuse case insensitive file systems (on macs for example).
        # It should not be treated as a BUILD file.
        self.create_dir('root/scripts/a/build')
        self.add_to_build_file('root/scripts/BUILD', dedent("""
        java_library(
          name='scripts',
          sources=['a/build/scripts.java'],
        )
        """))
        self.add_to_build_file('BUILD.config', dedent("""
        resources(
          name='pants-config',
          sources = globs('pants.ini*')
        )
        """))

    def test_spec_excludes(self):
        """Targets under an excluded spec path are omitted from the output."""
        self.assert_console_output(
            'root/src/py/a:alpha',
            options={'spec_excludes': 'root/src/py/1'},
            workspace=self.workspace(files=['root/src/py/a/b/c', 'root/src/py/a/d'])
        )

    def test_owned(self):
        """Each changed source maps to the target that owns it."""
        self.assert_console_output(
            'root/src/py/a:alpha',
            'root/src/py/1:numeric',
            workspace=self.workspace(files=['root/src/py/a/b/c', 'root/src/py/a/d', 'root/src/py/1/2'])
        )

    def test_multiply_owned(self):
        """A file owned by several targets reports all of them."""
        self.assert_console_output(
            'root/src/thrift:thrift',
            'root/src/thrift:py-thrift',
            workspace=self.workspace(files=['root/src/thrift/a.thrift'])
        )

    def test_build(self):
        """Touching a BUILD file reports every target it defines."""
        self.assert_console_output(
            'root/src/py/a:alpha',
            'root/src/py/a:beta',
            workspace=self.workspace(files=['root/src/py/a/BUILD'])
        )

    def test_resource_changed(self):
        self.assert_console_output(
            'root/src/py/a:alpha',
            workspace=self.workspace(files=['root/src/py/a/test.resources'])
        )

    def test_resource_changed_for_java_lib(self):
        self.assert_console_output(
            'root/src/resources/a:a_resources',
            workspace=self.workspace(files=['root/src/resources/a/a.resources'])
        )

    def test_build_sibling(self):
        """BUILD.<suffix> sibling files are recognized as BUILD files."""
        self.assert_console_output(
            'root/3rdparty:dummy',
            workspace=self.workspace(files=['root/3rdparty/BUILD.twitter'])
        )

    def test_resource_type_error(self):
        """A malformed resources= value surfaces as an exception."""
        self.add_to_build_file('root/src/resources/a1', dedent("""
        java_library(
          name='a1',
          sources=['a1.test'],
          resources=[1]
        )
        """))
        self.assert_console_raises(
            Exception,
            workspace=self.workspace(files=['root/src/resources/a1/a1.test'])
        )

    def test_build_directory(self):
        # This should ensure that a directory named the same as build files does not cause an exception.
        self.assert_console_output(
            'root/scripts:scripts',
            workspace=self.workspace(files=['root/scripts/a/build', 'root/scripts/a/build/scripts.java'])
        )

    def test_fast(self):
        """The 'fast' option yields the same owners for changed files."""
        self.assert_console_output(
            'root/src/py/a:alpha',
            'root/src/py/1:numeric',
            options={'fast': True},
            workspace=self.workspace(
                files=['root/src/py/a/b/c', 'root/src/py/a/d', 'root/src/py/1/2'],
            ),
        )

    def test_diffspec(self):
        """With 'diffspec', Workspace.changes_in supplies the changed files."""
        self.assert_console_output(
            'root/src/py/a:alpha',
            'root/src/py/1:numeric',
            options={'diffspec': '42'},
            workspace=self.workspace(
                diffspec='42',
                diff_files=['root/src/py/a/b/c', 'root/src/py/a/d', 'root/src/py/1/2'],
            ),
        )

    def test_diffspec_removed_files(self):
        """A file matched only by a glob (rglobs) still maps to its target."""
        self.assert_console_output(
            'root/src/java/a:a_java',
            options={'diffspec': '42'},
            workspace=self.workspace(
                diffspec='42',
                diff_files=['root/src/java/a/b/c/Foo.java'],
            ),
        )

    def test_include_dependees(self):
        """'include_dependees' expands the output to direct/transitive dependees."""
        self.assert_console_output(
            'root/src/py/dependency_tree/a:a',
            workspace=self.workspace(files=['root/src/py/dependency_tree/a/a.py'])
        )
        self.assert_console_output(
            'root/src/py/dependency_tree/a:a',
            'root/src/py/dependency_tree/b:b',
            options={'include_dependees': 'direct'},
            workspace=self.workspace(files=['root/src/py/dependency_tree/a/a.py'])
        )
        self.assert_console_output(
            'root/src/py/dependency_tree/a:a',
            'root/src/py/dependency_tree/b:b',
            'root/src/py/dependency_tree/c:c',
            options={'include_dependees': 'transitive'},
            workspace=self.workspace(files=['root/src/py/dependency_tree/a/a.py'])
        )

    def test_exclude(self):
        """'exclude_target_regexp' filters matching targets from the output."""
        self.assert_console_output(
            'root/src/py/dependency_tree/a:a',
            'root/src/py/dependency_tree/b:b',
            'root/src/py/dependency_tree/c:c',
            options={'include_dependees': 'transitive'},
            workspace=self.workspace(files=['root/src/py/dependency_tree/a/a.py'])
        )
        self.assert_console_output(
            'root/src/py/dependency_tree/a:a',
            'root/src/py/dependency_tree/c:c',
            options={'include_dependees': 'transitive', 'exclude_target_regexp': [':b']},
            workspace=self.workspace(files=['root/src/py/dependency_tree/a/a.py'])
        )

    def test_deferred_sources(self):
        """Targets whose sources come from_target(...) are still reported."""
        self.add_to_build_file('root/proto', dedent("""
        java_protobuf_library(name='unpacked_jars',
          sources=from_target(':external-source'),
        )
        unpacked_jars(name='external-source',
          libraries=[':external-source-jars'],
          include_patterns=[
            'com/squareup/testing/**/*.proto',
          ],
        )
        jar_library(name='external-source-jars',
          jars=[
            jar(org='com.squareup.testing.protolib', name='protolib-external-test', rev='0.0.2'),
          ],
        )
        """))
        self.assert_console_output(
            'root/proto:unpacked_jars',
            'root/proto:external-source',
            'root/proto:external-source-jars',
            workspace=self.workspace(files=['root/proto/BUILD'])
        )

    def test_globs_in_resources(self):
        self.add_to_build_file('root/resources', dedent("""
        resources(
          name='resources',
          sources=globs('*')
        )
        """))
        self.assert_console_output(
            'root/resources:resources',
            workspace=self.workspace(files=['root/resources/foo/bar/baz.yml'])
        )

    def test_root_config(self):
        """A BUILD.<suffix> file at the repo root is handled too."""
        self.assert_console_output(
            ':pants-config',
            workspace=self.workspace(files=['pants.ini'])
        )
| |
#!/usr/bin/python
# Copyright (C) 2011, Kerensa McElroy.
# kerensa@unsw.edu.au
# This file is part of the sequence simulator GemSIM.
# It is used to calculate a platform- and run- specific
# error model for generating realistic sequencing reads.
# Alternatively, users may employ one of the precomputed
# error models distributed as part of the GemSIM package.
# GemSIM is free software; it may be redistributed and
# modified under the terms of the GNU General Public
# License as published by the Free Software Foundation,
# either version 3 of the License, or (at your option)
# any later version.
# GemSIM is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY, without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more
# details.
# You should have recieved a copy of the GNU General Public
# License along with GemSIM. If not, see
# http://www.gnu.org/licenses/.
import sys
import getopt
import cPickle
import gzip
import logging
import logging.handlers
import numpy as np
# Make a global logging object.
errlog=logging.getLogger("ErrLog")
# Set logging level, and write everything to a file
errlog.setLevel(logging.DEBUG)
LOG_FILENAME='./err.log'
# 'w' mode truncates any log left over from a previous run.
h=logging.FileHandler(LOG_FILENAME,'w')
f=logging.Formatter("%(levelname)s %(asctime)s %(funcName)s %(lineno)d %(message)s")
h.setFormatter(f)
errlog.addHandler(h)
def rComp(sequence):
    """Reverse complements a sequence, preserving case."""
    d={'A':'T','T':'A','C':'G','G':'C','a':'t','t':'a','c':'g','g':'c','N':'N','n':'n'}
    # Complement every base, then reverse the whole string.
    # (Raises KeyError for characters outside ATGCN/atgcn, as before.)
    return ''.join(d[base] for base in sequence)[::-1]
def getRef(refFile):
    """Parse a (multi-)FASTA reference into {header: sequence}.

    Headers are truncated to 50 characters ('>' dropped); duplicate headers
    get a numeric suffix appended.  Sequences are upper-cased and IUPAC
    ambiguity codes are collapsed to 'N'.  Exits the program if the file
    cannot be opened.
    """
    refDict={}
    hdList=[]  # headers stored so far, used for duplicate detection
    ref=''
    num=0
    try:
        f=open(refFile)
    except IOError:
        errlog.error('Cannot find reference file ' +refFile+'. Please check pathname.')
        sys.exit('Cannot find reference file '+refFile+'. Please check pathname.')
    i=f.readline()
    head=i[1:51].rstrip()  # first header: drop '>', keep at most 50 chars
    i=f.readline().rstrip()
    while i:
        if i[0]!='>':
            ref+=i.rstrip()
            i=f.readline()
        else:
            # Next record starts: finalize and store the current one.
            if head in hdList:
                num+=1
                head=head+str(num)
            ref=ref.upper()
            # Collapse IUPAC ambiguity codes to 'N'.
            for l in 'RYMKSWHBVD':
                ref=ref.replace(l,'N')
            refDict[head]=ref
            hdList.append(head)
            head=i[1:51].rstrip()
            i=f.readline()
            ref=''
    # Store the last record (the loop only stores on seeing the next '>').
    ref=ref.upper()
    for l in 'RYMKSWHBVD':
        ref=ref.replace(l,'N')
    refDict[head]=ref
    errlog.debug('Reference file successfully parsed.')
    return refDict
def parseFasta(file):
    """Return the concatenated sequence string from a FASTA file.

    Header lines (starting with '>') are skipped.  Upper-case IUPAC
    ambiguity codes are collapsed to 'N' so downstream code only sees
    A/T/G/C/N (lower-case bases are left untouched, as before).

    Fix: the original opened the file and never closed it (handle leak);
    `with` now guarantees closure.
    """
    ref=''
    with open(file) as f:
        for line in f:
            if line[0]!='>':
                ref+=line.rstrip()
    # NOTE(review): getRef() uses 'RYMKSWHBVD' (no 'L'); the extra 'L' here
    # is kept byte-for-byte to preserve existing behavior — confirm intent.
    for l in 'RYLMKSWHBVD':
        ref=ref.replace(l,'N')
    return ref
def flip(refSlice,seq,qual,cigar):
    """Reverse complements a read.

    Returns (refSlice, seq, qual, cigar) with both sequences
    reverse-complemented, the quality string reversed, and the
    [len, op, len, op, ...] cigar list reversed pairwise.
    """
    comp={'A':'T','C':'G','G':'C','T':'A','N':'N','a':'t','t':'a','g':'c','c':'g','n':'n'}
    revSeq=''.join(comp[b] for b in seq)[::-1]
    revRef=''.join(comp[b] for b in refSlice)[::-1]
    # Swap each (length, op) pair, then reverse the order of the pairs.
    revCig=[]
    for idx in range(0,len(cigar),2):
        revCig.append(cigar[idx+1])
        revCig.append(cigar[idx])
    revCig.reverse()
    return revRef,revSeq,qual[::-1],revCig
def parseMD(md):
    """Separates a cigar field into a list of integers and character strings.

    Runs of digits become ints, runs of non-digits stay strings, in input
    order, e.g. '10M2I' -> [10, 'M', 2, 'I'] and 'M5' -> ['M', 5].

    Fix: the original shadowed the builtin ``str`` with a local accumulator
    and needed a second conversion pass; this single pass keeps behavior
    identical (including the IndexError on an empty input).
    """
    tokens=[]
    current=md[0]
    wasDigit=md[0].isdigit()
    for ch in md[1:]:
        if ch.isdigit()==wasDigit:
            # Same run (all digits or all non-digits): keep accumulating.
            current+=ch
        else:
            # Run boundary: flush the finished token and start the next one.
            tokens.append(current)
            current=ch
            wasDigit=not wasDigit
    tokens.append(current)
    # Convert digit runs to ints; leave operator runs as strings.
    return [int(t) if t.isdigit() else t for t in tokens]
def updateM(ref,pos,seq,qual,cig,circ,mxNum,maxIndel,dir,readLen,excl):
    """Updates model with mutations, insertions, deletions in read.

    Walks one aligned SAM read (seq/qual, parsed cigar list `cig`, 1-based
    `pos`) against reference string `ref`, incrementing the global count
    tensor matrix[mxNum] and the global insD/delD/gQualL/bQualL/iQualL
    stores.  `dir` is 'f' (forward) or 'r' (reverse strand); `excl` lists
    reference positions (as strings) whose mismatches are counted as known
    SNPs, not sequencing errors.  `circ` marks a circular reference.
    """
    # NOTE(review): `swap` appears unused here — flip() does the complementing.
    swap={'A':'T','T':'A','C':'G','G':'C','a':'t','t':'a','c':'g','g':'c','N':'N','n':'n'}
    inds={'A':0,'T':1,'G':2,'C':3,'N':4,'a':0, 't':1, 'g':2, 'c':3, 'n':4}
    RposL=pos-1 #tracks leftmost pos of align against ref
    RposR=pos-1 #tracks rightmost pos of align against ref
    Rpos=0 #position within aligned reference slice
    Spos=0 #sequence position within read
    bPos=pos-1 #individual reference base position. 1 index
    refSlice=''
    # cigH: cigar with hard clips stripped, so soft clips can be found at
    # the (possibly new) first/last positions.
    cigH=cig
    if cig[1]=='H':
        cigH=cigH[2:]
    if cig[-1]=='H':
        cigH=cigH[:-2]
    if cigH[1]=='S':
        RposL-=cigH[0]  # leading soft clip extends the slice leftwards
    for i in range(0,len(cig),2): #slice alignment out of ref, excluding masked sections.
        if cig[i+1]=='M':
            RposR+=cig[i]
        elif cig[i+1]=='=':
            RposR+=cig[i]
        elif cig[i+1]=='X':
            RposR+=cig[i]
        elif cig[i+1]=='D':
            RposR+=cig[i]
        elif cig[i+1]=='N':
            refSlice+=ref[RposL:RposR] #cut before masked section.
            RposR+=cig[i]
            RposL=RposR
    if cigH[-1]=='S':
        RposR+=cigH[-2]  # trailing soft clip extends the slice rightwards
    refSlice+=ref[RposL:RposR]
    refLen=len(ref)
    if dir=='f':
        if RposR<refLen:
            refSlice+=ref[RposR] #+1 to allow consideration of base AFTER last read base.
        else:
            if circ:
                refSlice+=ref[0] #+1 for reads ending at last reference base (circular).
            else:
                refSlice+='N' #+1 for reads ending at last reference base (linear)
    elif dir=='r':
        # For reverse reads, prepend the base BEFORE the alignment, then
        # flip everything so the walk below always runs 5'->3'.
        if pos-2>0:
            refSlice=ref[pos-2]+refSlice
        else:
            if circ:
                refSlice=ref[-1]+refSlice
            else:
                refSlice='N'+refSlice
        refSlice,seq,qual,cig=flip(refSlice,seq,qual,cig)
        bPos=refLen-bPos-len(refSlice) #so when we increment bpos it does in the right direction
    seq=seq[:readLen] #make sure fits in matrix
    seq=seq.upper()
    qual=qual[:readLen]
    # Matrix index convention: d0=read position, d1=reference base,
    # d2..d4=three preceding read bases, d5=next reference base,
    # d6=observed base (0-4) or 5 for the per-cell total.
    d0=0
    d1=inds['N']
    d2=inds['N']
    d3=inds['N']
    d4=inds['N']
    d5=inds[refSlice[0]]
    d6=5 #index for totals
    if cig[1]!='H':
        matrix[mxNum][d0][d1][d2][d3][d4][d5][d6]+=1
    for i in range(0,len(cig),2):
        if cig[i+1]=='H':
            # Pad hard-clipped bases with N so read positions stay aligned.
            seq=seq[:Spos]+'N'*cig[i]+seq[Spos:]
            Spos+=cig[i]
        elif cig[i+1]=='M' or cig[i+1]=='S' or cig[i+1]=='X' or cig[i+1]=='=':
            matches=cig[i]
            count=0
            while count<matches:
                Spos+=1
                Rpos+=1
                bPos+=1
                count+=1
                refBase=refSlice[Rpos-1]
                mut=seq[Spos-1]
                after=refSlice[Rpos]
                qualIndex=ord(qual[Spos-1])-33  # Phred+33 decoding
                if Spos>=4:
                    seq4=seq[Spos-4:Spos]
                else:
                    seq4='NNNN'+seq[:Spos]
                    seq4=seq4[-4:]
                d0=Spos
                d1=inds[refBase]
                d2=inds[seq4[2]]
                d3=inds[seq4[1]]
                d4=inds[seq4[0]]
                d5=inds[after]
                if mut!=refBase and refBase!='N':
                    # Mismatch: decide whether it is a known SNP (excluded).
                    snp=False
                    if dir=='f':
                        if str(bPos) in excl:
                            snp=True
                    else:
                        if (str(refLen-bPos)) in excl:
                            snp=True
                    if mut in 'ATCGatgcNn' and snp==False:
                        d6=inds[mut]
                        matrix[mxNum][d0][d1][d2][d3][d4][d5][d6]+=1
                        if qualIndex in bQualL[Spos-1]:
                            bQualL[Spos-1][qualIndex]+=1
                        else:
                            bQualL[Spos-1][qualIndex]=1
                    else:
                        # Known SNP (or odd character): counted as a good base.
                        if qualIndex in gQualL[Spos-1]:
                            gQualL[Spos-1][qualIndex]+=1
                        else:
                            gQualL[Spos-1][qualIndex]=1
                else:
                    if qualIndex in gQualL[Spos-1]:
                        gQualL[Spos-1][qualIndex]+=1
                    else:
                        gQualL[Spos-1][qualIndex]=1
                matrix[mxNum][d0][d1][d2][d3][d4][d5][5]+=1
        elif cig[i+1]=='I':
            # Insertions longer than maxIndel are skipped (not modelled).
            if cig[i]<=maxIndel:
                insert=seq[Spos:Spos+cig[i]]
                iQuals=qual[Spos:Spos+cig[i]]
                inDel=False
                if inDel==False:
                    key=str(d0)+'.'+str(d1)+'.'+str(d2)+'.'+str(d3)+'.'+str(d4)+'.'+str(d5)
                    if key in insD[mxNum]:
                        if insert in insD[mxNum][key]:
                            insD[mxNum][key][insert]+=1
                        else:
                            insD[mxNum][key][insert]=1
                    else:
                        insD[mxNum][key]={insert:1}
                    for q in iQuals:
                        qualIndex=ord(q)-33
                        if qualIndex in iQualL[Spos]:
                            iQualL[Spos][qualIndex]+=1
                        else:
                            iQualL[Spos][qualIndex]=1
            Spos+=cig[i]
        elif cig[i+1]=='D':
            if cig[i]<=maxIndel:
                inDel=False
                if inDel==False:
                    delete=cig[i]-1 #because of 0 index
                    key=str(d0)+'.'+str(d1)+'.'+str(d2)+'.'+str(d3)+'.'+str(d4)+'.'+str(d5)
                    if key in delD[mxNum]:
                        delD[mxNum][key][delete]+=1
                    else:
                        delD[mxNum][key]=[0]*maxIndel
                        delD[mxNum][key][delete]+=1
            Rpos+=cig[i]
            bPos+=cig[i]
        elif cig[i+1]=='N':
            bPos+=cig[i]
def kMers(reference,readLen,mxNum,maxIndel,minK):
    """Smooth model cells whose 5-mer context is too rare in the reference.

    For every 5-mer seen fewer than minK times (counting both strands of
    all chromosomes), back off to progressively shorter contexts (4-mer,
    3-mer, 2-mer) and overwrite the matching matrix[mxNum] cells — and the
    insD/delD entries with the same context key — with counts aggregated
    over the marginalised axes.  Mutates the module-level matrix/insD/delD.
    """
    nucs= ['A','T','C','G']
    inds={'A':0,'T':1,'G':2,'C':3,'N':4,'a':0,'t':1,'g':2,'c':3,'n':4}
    kmers=[]
    # NOTE(review): `reduce` is never used below and shadows the builtin.
    reduce={}
    chrs=reference.values()
    ref=''
    # Concatenate chromosomes with '.' separators so counts never span two.
    for ch in chrs:
        ref+=ch+'.'
    # Enumerate all 4^5 = 1024 possible A/T/C/G 5-mers.
    for i in nucs:
        w1=i
        for j in nucs:
            w2=w1+j
            for k in nucs:
                w3=w2+k
                for l in nucs:
                    w4=w3+l
                    for m in nucs:
                        w5=w4+m
                        kmers.append(w5)
    for k in kmers:
        c=ref.count(k)
        cc=ref.count(rComp(k))
        if (c+cc)<minK:
            # k = [d4 d3 d2 d1 d5]: four context bases then the next base.
            d1=inds[k[3]]
            d2=inds[k[2]]
            d3=inds[k[1]]
            d4=inds[k[0]]
            d5=inds[k[4]]
            new=k[1:]
            ikeys=insD[mxNum].keys()
            dkeys=delD[mxNum].keys()
            c=ref.count(new)
            cc=ref.count(rComp(new))
            if (c+cc)<minK:
                # 4-mer also rare: try the 3-mer context.
                new=k[2:]
                c=ref.count(new)
                cc=ref.count(rComp(new))
                if (c+cc)<minK:
                    # 3-mer also rare: try the 2-mer context.
                    new=k[2:-1]
                    c=ref.count(new)
                    cc=ref.count(rComp(new))
                    if (c+cc)<minK:
                        # Even the 2-mer is rare: marginalise over axes 1-4
                        # (keep only the reference base d1).
                        for i in range(readLen+1):
                            tot=np.apply_over_axes(np.sum, matrix[mxNum][i], [1,2,3,4])[d1][0][0][0][0][5]
                            matrix[mxNum][i][d1][d2][d3][d4][d5][5]=tot
                            inserts={}
                            key=str(i)+'.'+str(d1)+'.'+str(d2)+'.'+str(d3)+'.'+str(d4)+'.'+str(d5)
                            for ik in ikeys:
                                if k[:-4]==''.join(ik.split('.')[:2]):
                                    ins=insD[mxNum][ik]
                                    ks=ins.keys()
                                    for s in ks:
                                        if s in inserts:
                                            inserts[s]+=ins[s]
                                        else:
                                            inserts[s]=ins[s]
                            if inserts!={}:
                                insD[mxNum][key]=inserts
                            dels=[0]*maxIndel
                            for dk in dkeys:
                                if k[:-4]==''.join(dk.split('.')[:2]):
                                    dele=delD[mxNum][dk]
                                    for e,d in enumerate(dele):
                                        dels[e]+=d
                            if dels!=[0]*maxIndel:
                                delD[mxNum][key]=dels
                            for n in range(5):
                                val=np.apply_over_axes(np.sum, matrix[mxNum][i], [1,2,3,4])[d1][0][0][0][0][n]
                                matrix[mxNum][i][d1][d2][d3][d4][d5][n]=val
                    else:
                        # 2-mer frequent enough: marginalise over axes 2-4.
                        for i in range(readLen+1):
                            tot=np.apply_over_axes(np.sum, matrix[mxNum][i], [2,3,4])[d1][d2][0][0][0][5]
                            matrix[mxNum][i][d1][d2][d3][d4][d5][5]=tot
                            inserts={}
                            key=str(i)+'.'+str(d1)+'.'+str(d2)+'.'+str(d3)+'.'+str(d4)+'.'+str(d5)
                            for ik in ikeys:
                                if k[:-3]==''.join(ik.split('.')[:3]):
                                    ins=insD[mxNum][ik]
                                    ks=ins.keys()
                                    for s in ks:
                                        if s in inserts:
                                            inserts[s]+=ins[s]
                                        else:
                                            inserts[s]=ins[s]
                            if inserts!={}:
                                insD[mxNum][key]=inserts
                            dels=[0]*maxIndel
                            for dk in dkeys:
                                if k[:-3]==''.join(dk.split('.')[:3]):
                                    dele=delD[mxNum][dk]
                                    for e,d in enumerate(dele):
                                        dels[e]+=d
                            if dels!=[0]*maxIndel:
                                delD[mxNum][key]=dels
                            for n in range(5):
                                val=np.apply_over_axes(np.sum, matrix[mxNum][i], [2,3,4])[d1][d2][0][0][0][n]
                                matrix[mxNum][i][d1][d2][d3][d4][d5][n]=val
                else:
                    # 3-mer frequent enough: marginalise over axes 2-3,
                    # keeping the following-base axis d5.
                    for i in range(readLen+1):
                        tot=np.apply_over_axes(np.sum, matrix[mxNum][i], [2,3])[d1][d2][0][0][d5][5]
                        matrix[mxNum][i][d1][d2][d3][d4][d5][5]=tot
                        inserts={}
                        key=str(i)+'.'+str(d1)+'.'+str(d2)+'.'+str(d3)+'.'+str(d4)+'.'+str(d5)
                        for ik in ikeys:
                            if k[:-3]==''.join(ik.split('.')[:3]) and k[-1]==ik[-1]:
                                ins=insD[mxNum][ik]
                                ks=ins.keys()
                                for s in ks:
                                    if s in inserts:
                                        inserts[s]+=ins[s]
                                    else:
                                        inserts[s]=ins[s]
                        if inserts!={}:
                            insD[mxNum][key]=inserts
                        dels=[0]*maxIndel
                        for dk in dkeys:
                            if k[:-3]==''.join(dk.split('.')[:3]) and k[-1]==dk[-1]:
                                dele=delD[mxNum][dk]
                                for e,d in enumerate(dele):
                                    dels[e]+=d
                        if dels!=[0]*maxIndel:
                            delD[mxNum][key]=dels
                        for n in range(5):
                            val=np.apply_over_axes(np.sum, matrix[mxNum][i], [2,3])[d1][d2][0][0][d5][n]
                            matrix[mxNum][i][d1][d2][d3][d4][d5][n]=val
            else:
                # 4-mer frequent enough: marginalise over axis 3 only.
                for i in range(readLen+1):
                    tot=np.apply_over_axes(np.sum, matrix[mxNum][i], [3])[d1][d2][d3][0][d5][5]
                    matrix[mxNum][i][d1][d2][d3][d4][d5][5]=tot
                    inserts={}
                    key=str(i)+'.'+str(d1)+'.'+str(d2)+'.'+str(d3)+'.'+str(d4)+'.'+str(d5)
                    for ik in ikeys:
                        if k[:-2]==''.join(ik.split('.')[:4]) and k[-1]==ik[-1]:
                            ins=insD[mxNum][ik]
                            ks=ins.keys()
                            for s in ks:
                                if s in inserts:
                                    inserts[s]+=ins[s]
                                else:
                                    inserts[s]=ins[s]
                    if inserts!={}:
                        insD[mxNum][key]=inserts
                    dels=[0]*maxIndel
                    for dk in dkeys:
                        if k[:-2]==''.join(dk.split('.')[:4]) and k[-1]==dk[-1]:
                            dele=delD[mxNum][dk]
                            for e,d in enumerate(dele):
                                dels[e]+=d
                    if dels!=[0]*maxIndel:
                        delD[mxNum][key]=dels
                    for n in range(5):
                        val=np.apply_over_axes(np.sum, matrix[mxNum][i], [3])[d1][d2][d3][0][d5][n]
                        matrix[mxNum][i][d1][d2][d3][d4][d5][n]=val
def lowCov(readLen,mxNum):
    """Fill low-coverage cells of matrix[mxNum] with average-rate estimates.

    Any cell whose total count (index 5) is below 20 has its five observed
    counts replaced by int(overall_error_rate * total).
    """
    # Genome-wide totals: grand total of column 5 vs. everything else.
    total=np.apply_over_axes(np.sum,matrix[mxNum],[0,1,2,3,4,5])[0][0][0][0][0][0][5]
    errors=np.sum(matrix[mxNum])-total
    rate=float(errors)/float(total)
    for p in range(readLen+1):
        for a in range(5):
            for b in range(5):
                for c in range(5):
                    for d in range(5):
                        for e in range(5):
                            # View onto the length-6 innermost vector.
                            cell=matrix[mxNum][p][a][b][c][d][e]
                            if cell[5]<20:
                                for o in range(5):
                                    cell[o]=int(rate*cell[5])
def mkMxSingle(readLen,ref,samFile,name,skip,circular,maxIndel,excl,minK):
    """Creates matrices of positional errors, insertions, deletions and bases in a sam file.

    Single-end variant: slot 0 of the global matrix/insD/delD is used.
    The resulting model is cPickle'd to '<name>_s.gzip' (Python 2 pickles).
    """
    try:
        f=open(samFile)
    except:
        errlog.error('Cannot find samFile '+samFile+'. Please check pathname.')
        sys.exit('Cannot find samFile '+samFile+'. Please check pathname.')
    inds={'A':0,'T':1,'G':2,'C':3,'N':4,'a':0,'t':1,'g':2,'c':3,'n':4}
    # Model stores are module-level globals shared with updateM/kMers/lowCov.
    global matrix, insD, delD, gQualL, bQualL, iQualL
    matrix=[np.zeros([readLen+1,5,5,5,5,5,6],dtype='int32'),None,None]
    insD=[{},None,None]
    delD=[{},None,None]
    gQualL=[] #tracks qualities for good bases
    for i in range(readLen):
        gQualL.append({})
    bQualL=[] #tracks qualities for bad (error) bases
    for i in range(readLen):
        bQualL.append({})
    iQualL=[]
    for i in range(readLen+1):
        iQualL.append({}) #tracks average qualities for insertions
    readCount=0
    lineCount=0
    rdLenD={}  # histogram of read lengths
    line=f.readline()
    # Skip SAM header lines.
    while line[0]=='@':
        line=f.readline()
    while line:
        lineCount+=1
        if skip==0 or lineCount%skip==0: #take ith read
            parts=line.split('\t')
            flag=int(parts[1])
            if (flag & 0x04)==0: #make sure read is aligned
                #parse sam format
                pos=int(parts[3])
                seq=parts[9]
                qual=parts[10]
                cigar=parts[5]
                cigList=parseMD(cigar)
                chr=parts[2][:50]  # matches getRef's 50-char header truncation
                #update read length dictionary
                seqLen=len(seq)
                if seqLen in rdLenD:
                    rdLenD[seqLen]+=1
                else:
                    rdLenD[seqLen]=1
                if flag & 0x10:
                    #reverse complement
                    updateM(ref[chr],pos,seq,qual,cigList,circular,0,maxIndel,'r',readLen,excl)
                else:
                    updateM(ref[chr],pos,seq,qual,cigList,circular,0,maxIndel,'f',readLen,excl)
                if readCount%5000==0:
                    errlog.info('...parsed '+str(readCount)+' reads.')
                readCount+=1
        line=f.readline()
    errlog.info('starting Kmers')
    if minK!=0:
        kMers(ref,readLen,0,maxIndel,minK)
    errlog.info('finished kmers')
    lowCov(readLen,0)
    errlog.debug('Finished parsing reads, writing matrices to files.')
    #write error models to files
    modelName=name+'_s.gzip'
    g=gzip.open(modelName,'wb')
    cPickle.dump(readLen,g)
    cPickle.dump(matrix[0], g)
    cPickle.dump(insD[0], g)
    cPickle.dump(delD[0], g)
    cPickle.dump(gQualL,g)
    cPickle.dump(bQualL,g)
    cPickle.dump(iQualL,g)
    cPickle.dump(readCount,g)
    cPickle.dump(rdLenD,g)
    g.close()
    errlog.info(str(lineCount)+' unpaired reads in total.')
    errlog.info('Parsed '+str(readCount)+'reads in total.')
    errlog.debug('Error models written to files.')
def mkMxPaired(readLen,ref,samFile,name,skip,circular,maxIndel,excl,minK):
    """Creates matrices of positional errors, insertions, deletions and bases in a sam file.

    Paired-end variant: slot 1 holds first-in-pair counts, slot 2 second-in-
    pair; insert sizes go to intD.  Output is cPickle'd to '<name>_p.gzip'.
    (Python 2 code: uses dict.has_key and integer '/' division.)
    """
    try:
        f=open(samFile)
    except:
        errlog.error('Cannot find samFile '+samFile+'. Please check pathname.')
        sys.exit('Cannot find samFile '+samFile+'. Please check pathname.')
    inds={'A':0,'T':1,'G':2,'C':3,'N':4,'a':0,'t':1,'g':2,'c':3,'n':4}
    global matrix,insD,delD,gQualL,bQualL,iQualL
    matrix=[None,np.zeros([readLen+1,5,5,5,5,5,6],dtype='int32'),np.zeros([readLen+1,5,5,5,5,5,6],dtype='int32')]
    insD=[None,{},{}]
    delD=[None,{},{}]
    intD={}  # insert-size histogram
    rds=[0,0]  # read counts: [first-in-pair, second-in-pair]
    mates=[0,0]  # counts of reads whose mate is unmapped (flag 0x08)
    gQualL=[] #tracks qualities for good bases
    for i in range(readLen):
        gQualL.append({})
    bQualL=[] #tracks qualities for bad (error) bases
    for i in range(readLen):
        bQualL.append({})
    iQualL=[] #tracks average qualities for insertions
    for i in range(readLen+1):
        iQualL.append({})
    readCount=0
    rdLenD={}
    lineCount=0
    line=f.readline()
    lenD={}  # per-chromosome reference lengths
    for i in ref.keys():
        lenD[i]=len(ref[i])
    # Skip SAM header lines.
    while line[0]=='@':
        line=f.readline()
    while line:
        lineCount+=1
        if skip==0 or lineCount%skip==0: #remove headers
            parts=line.split('\t')
            flag=int(parts[1])
            if (flag & 0x04)==0: #make sure read is aligned
                #parse sam format
                pos=int(parts[3])
                posMate=int(parts[7])
                seq=parts[9]
                qual=parts[10]
                cigar=parts[5]
                chr=parts[2][:50]
                reflen=lenD[chr]
                cigList=parseMD(cigar)
                #update read length dictionary
                if (readCount)%5000==0:
                    errlog.info('...parsed '+str(readCount)+' reads.')
                seqLen=len(seq)
                if seqLen in rdLenD:
                    rdLenD[seqLen]+=1
                else:
                    rdLenD[seqLen]=1
                insert=(int(parts[8]))
                # Pairs spanning the origin of a circular reference report
                # huge negative/positive TLENs; wrap them around.
                if insert < -reflen/2:
                    insert=reflen-pos+posMate+len(seq)
                elif insert > reflen/2:
                    insert=reflen-posMate+pos+len(seq)
                if (insert > 0):
                    if intD.has_key(insert):
                        intD[insert]+=1
                    else:
                        intD[insert]=1
                if (flag & 0x40): #reads unpaired or first in pair
                    if flag & 0x10:
                        updateM(ref[chr],pos,seq,qual,cigList,circular,1,maxIndel,'r',readLen,excl)
                    else:
                        updateM(ref[chr],pos,seq,qual,cigList,circular,1,maxIndel,'f',readLen,excl)
                    readCount+=1
                    rds[0]+=1
                    if (flag & 0x08):
                        #track alignment of mates
                        mates[0]+=1
                if (flag & 0x80): #matrices for 2nd read in pair
                    if flag & 0x10:
                        updateM(ref[chr],pos,seq,qual,cigList,circular,2,maxIndel,'r',readLen,excl)
                    else:
                        updateM(ref[chr], pos, seq,qual,cigList,circular,2,maxIndel,'f',readLen,excl)
                    readCount+=1
                    rds[1]+=1
                    if (flag & 0x08):
                        #track alignment of mates
                        mates[1]+=1
        line=f.readline()
    # NOTE(review): `mx` is computed but never used afterwards.
    mx=np.add.reduce(matrix[1],axis=0)
    if minK!=0:
        kMers(ref, readLen,1,maxIndel,minK)
        kMers(ref, readLen,2,maxIndel,minK)
    lowCov(readLen,1)
    lowCov(readLen,2)
    errlog.debug('Finished parsing reads, writing matrices to files.')
    #write error models to files
    modelName=name+'_p.gzip'
    g=gzip.open(modelName,'wb')
    cPickle.dump(readLen,g)
    cPickle.dump(matrix[1],g)
    cPickle.dump(matrix[2],g)
    cPickle.dump(insD[1],g)
    cPickle.dump(insD[2],g)
    cPickle.dump(delD[1],g)
    cPickle.dump(delD[2],g)
    cPickle.dump(intD,g)
    cPickle.dump(gQualL,g)
    cPickle.dump(bQualL,g)
    cPickle.dump(iQualL,g)
    cPickle.dump(mates,g)
    cPickle.dump(rds,g)
    cPickle.dump(rdLenD,g)
    g.close()
    errlog.info(str(lineCount)+' paired reads in total.')
    errlog.info('Parsed '+str(readCount)+' reads to create model.')
    # NOTE(review): raises ZeroDivisionError if no first/second reads seen.
    errlog.info(str(float(mates[0])/float(rds[0]))+'% first reads in pair with bad mates.')
    errlog.info(str(float(mates[1])/float(rds[1]))+'% second reads in pair with bad mates.')
def usage():
    """Print GemErr.py command-line help to stdout (Python 2 print statements)."""
    print '\n\n########################################################################'
    print '# GemSIM - Generic Error Model based SIMulator of N.G. sequencing data #'
    print '########################################################################\n'
    print '\nGemErr.py:\n'
    print 'Takes a sam file and catalogues all the mismatches, insertions, and deletions'
    print 'to create an error model for a particular sequencing run. Known true SNP'
    print 'positions may be excluded.'
    print '\nOptions:'
    print '      -h prints these instructions.'
    print '      -r read length. Set to LONGEST read in dataset.'
    print '      -f reference genome in fasta format'
    print '      -s input file in sam format.'
    print '      -n desired output filename prefix.'
    print '      -c specifies reference genome is circular. Otherwise assumed linear.'
    print '      -i use only every ith read for model (optional, must be odd).'
    print '      -m maximum indel size (optional, default=4).'
    print '      -p use only if your data contains paired end reads.'
    print '      -k minimum k-mer frequency in reference. (Default=0)'
    print '      -e comma separated list of reference positions to exclude e.g. 293, 342\n\n'
def main(argv):
    """Parse command-line options, load the reference genome, and build a
    single- or paired-end error model from the given SAM file."""
    # Required options keep '' so their absence can be detected below.
    readLen=fasta=samfile=name=''
    excl=''
    skip=0
    minK=0
    maxIndel=4
    paired=False
    circular=False
    try:
        opts,args=getopt.getopt(argv,"hr:f:s:n:ci:m:e:pk:")
    except getopt.GetoptError:
        usage()
        sys.exit(2)
    for opt,arg in opts:
        if opt=='-h':
            usage()
            sys.exit()
        elif opt=='-r':
            readLen=int(arg)
        elif opt=='-f':
            fasta=arg
        elif opt=='-s':
            samfile=arg
        elif opt=='-n':
            name=str(arg)
        elif opt=='-c':
            circular=True
        elif opt=='-i':
            skip=int(arg)
        elif opt=='-m':
            maxIndel=int(arg)
        elif opt=='-p':
            paired=True
        elif opt=='-e':
            excl=arg.split(',')
        elif opt=='-k':
            minK=int(arg)
    # -r, -f, -s and -n are all mandatory.
    if '' in (readLen,fasta,samfile,name):
        usage()
        sys.exit(2)
    reference=getRef(fasta)
    # The read-sampling interval -i must be odd when given.
    if skip!=0 and skip%2==0:
        usage()
        sys.exit(2)
    if circular:
        errlog.info('Treating reference genome as circular.')
    else:
        errlog.info('Treating reference genome as linear.')
    if paired:
        errlog.info('Treating reads as paired.')
        mkMxPaired(readLen,reference,samfile,name,skip,circular,maxIndel,excl,minK)
    else:
        errlog.info('Treating reads as unpaired.')
        mkMxSingle(readLen,reference,samfile,name,skip,circular,maxIndel,excl,minK)
# Script entry point: forward the CLI arguments (minus program name) to main().
if __name__=="__main__":
    main(sys.argv[1:])
| |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core import client_options as client_options_lib
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
# Fall back to a plain ``object`` sentinel when the installed google-api-core
# does not expose gapic_v1.method._MethodDefault.
try:
    OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError:  # pragma: NO COVER
    OptionalRetry = Union[retries.Retry, object]  # type: ignore
from google.cloud.compute_v1.services.public_advertised_prefixes import pagers
from google.cloud.compute_v1.types import compute
from .transports.base import PublicAdvertisedPrefixesTransport, DEFAULT_CLIENT_INFO
from .transports.rest import PublicAdvertisedPrefixesRestTransport
class PublicAdvertisedPrefixesClientMeta(type):
    """Metaclass for the PublicAdvertisedPrefixes client.
    This provides class-level methods for building and retrieving
    support objects (e.g. transport) without polluting the client instance
    objects.
    """

    # REST is currently the only registered transport for this client.
    _transport_registry = (
        OrderedDict()
    )  # type: Dict[str, Type[PublicAdvertisedPrefixesTransport]]
    _transport_registry["rest"] = PublicAdvertisedPrefixesRestTransport
    def get_transport_class(
        cls, label: Optional[str] = None,
    ) -> Type[PublicAdvertisedPrefixesTransport]:
        """Returns an appropriate transport class.
        Args:
            label: The name of the desired transport. If none is
                provided, then the first transport in the registry is used.
        Returns:
            The transport class to use.
        """
        # If a specific transport is requested, return that one.
        if label:
            return cls._transport_registry[label]
        # No transport is requested; return the default (that is, the first one
        # in the dictionary).
        return next(iter(cls._transport_registry.values()))
class PublicAdvertisedPrefixesClient(metaclass=PublicAdvertisedPrefixesClientMeta):
"""The PublicAdvertisedPrefixes API."""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Converts api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "compute.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
PublicAdvertisedPrefixesClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(info)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
PublicAdvertisedPrefixesClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
    @property
    def transport(self) -> PublicAdvertisedPrefixesTransport:
        """Returns the transport used by the client instance.

        Returns:
            PublicAdvertisedPrefixesTransport: The transport used by the client
                instance.
        """
        # _transport is assigned in __init__ (either the caller-supplied
        # instance or one constructed from client_options).
        return self._transport
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Returns a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Returns a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Returns a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Returns a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Returns a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
return m.groupdict() if m else {}
@classmethod
def get_mtls_endpoint_and_cert_source(
cls, client_options: Optional[client_options_lib.ClientOptions] = None
):
"""Return the API endpoint and client cert source for mutual TLS.
The client cert source is determined in the following order:
(1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
client cert source is None.
(2) if `client_options.client_cert_source` is provided, use the provided one; if the
default client cert source exists, use the default one; otherwise the client cert
source is None.
The API endpoint is determined in the following order:
(1) if `client_options.api_endpoint` if provided, use the provided one.
(2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the
default mTLS endpoint; if the environment variabel is "never", use the default API
endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
use the default API endpoint.
More details can be found at https://google.aip.dev/auth/4114.
Args:
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. Only the `api_endpoint` and `client_cert_source` properties may be used
in this method.
Returns:
Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
client cert source to use.
Raises:
google.auth.exceptions.MutualTLSChannelError: If any errors happen.
"""
if client_options is None:
client_options = client_options_lib.ClientOptions()
use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_client_cert not in ("true", "false"):
raise ValueError(
"Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
)
if use_mtls_endpoint not in ("auto", "never", "always"):
raise MutualTLSChannelError(
"Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
)
# Figure out the client cert source to use.
client_cert_source = None
if use_client_cert == "true":
if client_options.client_cert_source:
client_cert_source = client_options.client_cert_source
elif mtls.has_default_client_cert_source():
client_cert_source = mtls.default_client_cert_source()
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
elif use_mtls_endpoint == "always" or (
use_mtls_endpoint == "auto" and client_cert_source
):
api_endpoint = cls.DEFAULT_MTLS_ENDPOINT
else:
api_endpoint = cls.DEFAULT_ENDPOINT
return api_endpoint, client_cert_source
    def __init__(
        self,
        *,
        credentials: Optional[ga_credentials.Credentials] = None,
        transport: Union[str, PublicAdvertisedPrefixesTransport, None] = None,
        client_options: Optional[client_options_lib.ClientOptions] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
    ) -> None:
        """Instantiates the public advertised prefixes client.
        Args:
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            transport (Union[str, PublicAdvertisedPrefixesTransport]): The
                transport to use. If set to None, a transport is chosen
                automatically.
            client_options (google.api_core.client_options.ClientOptions): Custom options for the
                client. It won't take effect if a ``transport`` instance is provided.
                (1) The ``api_endpoint`` property can be used to override the
                default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
                environment variable can also be used to override the endpoint:
                "always" (always use the default mTLS endpoint), "never" (always
                use the default regular endpoint) and "auto" (auto switch to the
                default mTLS endpoint if client certificate is present, this is
                the default value). However, the ``api_endpoint`` property takes
                precedence if provided.
                (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
                is "true", then the ``client_cert_source`` property can be used
                to provide client certificate for mutual TLS transport. If
                not provided, the default SSL client certificate will be used if
                present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
                set, no client certificate will be used.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
        Raises:
            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
                creation failed for any reason.
        """
        # Accept a plain dict in place of a ClientOptions instance.
        if isinstance(client_options, dict):
            client_options = client_options_lib.from_dict(client_options)
        if client_options is None:
            client_options = client_options_lib.ClientOptions()
        # Resolve endpoint / mTLS cert source from options and env vars.
        api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source(
            client_options
        )
        # getattr: older client_options versions may lack the api_key field.
        api_key_value = getattr(client_options, "api_key", None)
        if api_key_value and credentials:
            raise ValueError(
                "client_options.api_key and credentials are mutually exclusive"
            )
        # Save or instantiate the transport.
        # Ordinarily, we provide the transport, but allowing a custom transport
        # instance provides an extensibility point for unusual situations.
        if isinstance(transport, PublicAdvertisedPrefixesTransport):
            # transport is a PublicAdvertisedPrefixesTransport instance.
            # A pre-built transport already carries its own credentials/scopes,
            # so supplying them here as well is rejected as ambiguous.
            if credentials or client_options.credentials_file or api_key_value:
                raise ValueError(
                    "When providing a transport instance, "
                    "provide its credentials directly."
                )
            if client_options.scopes:
                raise ValueError(
                    "When providing a transport instance, provide its scopes "
                    "directly."
                )
            self._transport = transport
        else:
            import google.auth._default  # type: ignore
            # hasattr guard: get_api_key_credentials only exists in newer
            # google-auth releases.
            if api_key_value and hasattr(
                google.auth._default, "get_api_key_credentials"
            ):
                credentials = google.auth._default.get_api_key_credentials(
                    api_key_value
                )
            Transport = type(self).get_transport_class(transport)
            self._transport = Transport(
                credentials=credentials,
                credentials_file=client_options.credentials_file,
                host=api_endpoint,
                scopes=client_options.scopes,
                client_cert_source_for_mtls=client_cert_source_func,
                quota_project_id=client_options.quota_project_id,
                client_info=client_info,
                always_use_jwt_access=True,
            )
def delete_unary(
self,
request: Union[compute.DeletePublicAdvertisedPrefixeRequest, dict] = None,
*,
project: str = None,
public_advertised_prefix: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.Operation:
r"""Deletes the specified PublicAdvertisedPrefix
Args:
request (Union[google.cloud.compute_v1.types.DeletePublicAdvertisedPrefixeRequest, dict]):
The request object. A request message for
PublicAdvertisedPrefixes.Delete. See the method
description for details.
project (str):
Project ID for this request.
This corresponds to the ``project`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
public_advertised_prefix (str):
Name of the PublicAdvertisedPrefix
resource to delete.
This corresponds to the ``public_advertised_prefix`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.compute_v1.types.Operation:
Represents an Operation resource. Google Compute Engine
has three Operation resources: \*
[Global](/compute/docs/reference/rest/v1/globalOperations)
\*
[Regional](/compute/docs/reference/rest/v1/regionOperations)
\*
[Zonal](/compute/docs/reference/rest/v1/zoneOperations)
You can use an operation resource to manage asynchronous
API requests. For more information, read Handling API
responses. Operations can be global, regional or zonal.
- For global operations, use the globalOperations
resource. - For regional operations, use the
regionOperations resource. - For zonal operations, use
the zonalOperations resource. For more information, read
Global, Regional, and Zonal Resources.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([project, public_advertised_prefix])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a compute.DeletePublicAdvertisedPrefixeRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, compute.DeletePublicAdvertisedPrefixeRequest):
request = compute.DeletePublicAdvertisedPrefixeRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if project is not None:
request.project = project
if public_advertised_prefix is not None:
request.public_advertised_prefix = public_advertised_prefix
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.delete]
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def get(
self,
request: Union[compute.GetPublicAdvertisedPrefixeRequest, dict] = None,
*,
project: str = None,
public_advertised_prefix: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.PublicAdvertisedPrefix:
r"""Returns the specified PublicAdvertisedPrefix
resource.
Args:
request (Union[google.cloud.compute_v1.types.GetPublicAdvertisedPrefixeRequest, dict]):
The request object. A request message for
PublicAdvertisedPrefixes.Get. See the method description
for details.
project (str):
Project ID for this request.
This corresponds to the ``project`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
public_advertised_prefix (str):
Name of the PublicAdvertisedPrefix
resource to return.
This corresponds to the ``public_advertised_prefix`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.compute_v1.types.PublicAdvertisedPrefix:
A public advertised prefix represents
an aggregated IP prefix or netblock
which customers bring to cloud. The IP
prefix is a single unit of route
advertisement and is announced globally
to the internet.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([project, public_advertised_prefix])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a compute.GetPublicAdvertisedPrefixeRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, compute.GetPublicAdvertisedPrefixeRequest):
request = compute.GetPublicAdvertisedPrefixeRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if project is not None:
request.project = project
if public_advertised_prefix is not None:
request.public_advertised_prefix = public_advertised_prefix
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get]
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def insert_unary(
self,
request: Union[compute.InsertPublicAdvertisedPrefixeRequest, dict] = None,
*,
project: str = None,
public_advertised_prefix_resource: compute.PublicAdvertisedPrefix = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.Operation:
r"""Creates a PublicAdvertisedPrefix in the specified
project using the parameters that are included in the
request.
Args:
request (Union[google.cloud.compute_v1.types.InsertPublicAdvertisedPrefixeRequest, dict]):
The request object. A request message for
PublicAdvertisedPrefixes.Insert. See the method
description for details.
project (str):
Project ID for this request.
This corresponds to the ``project`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
public_advertised_prefix_resource (google.cloud.compute_v1.types.PublicAdvertisedPrefix):
The body resource for this request
This corresponds to the ``public_advertised_prefix_resource`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.compute_v1.types.Operation:
Represents an Operation resource. Google Compute Engine
has three Operation resources: \*
[Global](/compute/docs/reference/rest/v1/globalOperations)
\*
[Regional](/compute/docs/reference/rest/v1/regionOperations)
\*
[Zonal](/compute/docs/reference/rest/v1/zoneOperations)
You can use an operation resource to manage asynchronous
API requests. For more information, read Handling API
responses. Operations can be global, regional or zonal.
- For global operations, use the globalOperations
resource. - For regional operations, use the
regionOperations resource. - For zonal operations, use
the zonalOperations resource. For more information, read
Global, Regional, and Zonal Resources.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([project, public_advertised_prefix_resource])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a compute.InsertPublicAdvertisedPrefixeRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, compute.InsertPublicAdvertisedPrefixeRequest):
request = compute.InsertPublicAdvertisedPrefixeRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if project is not None:
request.project = project
if public_advertised_prefix_resource is not None:
request.public_advertised_prefix_resource = (
public_advertised_prefix_resource
)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.insert]
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def list(
self,
request: Union[compute.ListPublicAdvertisedPrefixesRequest, dict] = None,
*,
project: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListPager:
r"""Lists the PublicAdvertisedPrefixes for a project.
Args:
request (Union[google.cloud.compute_v1.types.ListPublicAdvertisedPrefixesRequest, dict]):
The request object. A request message for
PublicAdvertisedPrefixes.List. See the method
description for details.
project (str):
Project ID for this request.
This corresponds to the ``project`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.compute_v1.services.public_advertised_prefixes.pagers.ListPager:
Iterating over this object will yield
results and resolve additional pages
automatically.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([project])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a compute.ListPublicAdvertisedPrefixesRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, compute.ListPublicAdvertisedPrefixesRequest):
request = compute.ListPublicAdvertisedPrefixesRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if project is not None:
request.project = project
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list]
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
def patch_unary(
self,
request: Union[compute.PatchPublicAdvertisedPrefixeRequest, dict] = None,
*,
project: str = None,
public_advertised_prefix: str = None,
public_advertised_prefix_resource: compute.PublicAdvertisedPrefix = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.Operation:
r"""Patches the specified Router resource with the data
included in the request. This method supports PATCH
semantics and uses JSON merge patch format and
processing rules.
Args:
request (Union[google.cloud.compute_v1.types.PatchPublicAdvertisedPrefixeRequest, dict]):
The request object. A request message for
PublicAdvertisedPrefixes.Patch. See the method
description for details.
project (str):
Project ID for this request.
This corresponds to the ``project`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
public_advertised_prefix (str):
Name of the PublicAdvertisedPrefix
resource to patch.
This corresponds to the ``public_advertised_prefix`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
public_advertised_prefix_resource (google.cloud.compute_v1.types.PublicAdvertisedPrefix):
The body resource for this request
This corresponds to the ``public_advertised_prefix_resource`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.compute_v1.types.Operation:
Represents an Operation resource. Google Compute Engine
has three Operation resources: \*
[Global](/compute/docs/reference/rest/v1/globalOperations)
\*
[Regional](/compute/docs/reference/rest/v1/regionOperations)
\*
[Zonal](/compute/docs/reference/rest/v1/zoneOperations)
You can use an operation resource to manage asynchronous
API requests. For more information, read Handling API
responses. Operations can be global, regional or zonal.
- For global operations, use the globalOperations
resource. - For regional operations, use the
regionOperations resource. - For zonal operations, use
the zonalOperations resource. For more information, read
Global, Regional, and Zonal Resources.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any(
[project, public_advertised_prefix, public_advertised_prefix_resource]
)
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a compute.PatchPublicAdvertisedPrefixeRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, compute.PatchPublicAdvertisedPrefixeRequest):
request = compute.PatchPublicAdvertisedPrefixeRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if project is not None:
request.project = project
if public_advertised_prefix is not None:
request.public_advertised_prefix = public_advertised_prefix
if public_advertised_prefix_resource is not None:
request.public_advertised_prefix_resource = (
public_advertised_prefix_resource
)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.patch]
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
    def __enter__(self):
        # Context-manager entry; yields the client itself so callers can
        # write ``with PublicAdvertisedPrefixesClient(...) as client:``.
        return self
    def __exit__(self, type, value, traceback):
        """Releases underlying transport's resources.
        .. warning::
            ONLY use as a context manager if the transport is NOT shared
            with other clients! Exiting the with block will CLOSE the transport
            and may cause errors in other clients!
        """
        # Exceptions (if any) are not suppressed: nothing is returned, so
        # the implicit None lets them propagate.
        self.transport.close()
try:
    # Stamp outgoing user-agent strings with the installed library version.
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution("google-cloud-compute",).version,
    )
except pkg_resources.DistributionNotFound:
    # Distribution metadata unavailable (e.g. a vendored copy); fall back
    # to a version-less ClientInfo.
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("PublicAdvertisedPrefixesClient",)
| |
from __future__ import unicode_literals
from django.contrib.auth.models import User
from django.http import HttpResponse
from django.test import TestCase
from django.utils import unittest
from rest_framework import HTTP_HEADER_ENCODING
from rest_framework import exceptions
from rest_framework import permissions
from rest_framework import renderers
from rest_framework.response import Response
from rest_framework import status
from rest_framework.authentication import (
BaseAuthentication,
TokenAuthentication,
BasicAuthentication,
SessionAuthentication,
OAuthAuthentication,
OAuth2Authentication
)
from rest_framework.authtoken.models import Token
from rest_framework.compat import patterns, url, include
from rest_framework.compat import oauth2_provider, oauth2_provider_models, oauth2_provider_scope
from rest_framework.compat import oauth, oauth_provider
from rest_framework.test import APIRequestFactory, APIClient
from rest_framework.views import APIView
import base64
import time
import datetime
# Module-level request factory shared by tests that invoke views directly.
factory = APIRequestFactory()
class MockView(APIView):
    """Minimal view used as an authentication target in these tests.

    Requires an authenticated user and returns the same fixed payload for
    GET, POST and PUT.
    """
    permission_classes = (permissions.IsAuthenticated,)

    def _canned_response(self):
        # Identical literal payload for every supported verb.
        return HttpResponse({'a': 1, 'b': 2, 'c': 3})

    def get(self, request):
        return self._canned_response()

    def post(self, request):
        return self._canned_response()

    def put(self, request):
        return self._canned_response()
# URL table exercised by the test classes below; each endpoint pins a single
# authentication scheme onto MockView.
urlpatterns = patterns('',
    (r'^session/$', MockView.as_view(authentication_classes=[SessionAuthentication])),
    (r'^basic/$', MockView.as_view(authentication_classes=[BasicAuthentication])),
    (r'^token/$', MockView.as_view(authentication_classes=[TokenAuthentication])),
    (r'^auth-token/$', 'rest_framework.authtoken.views.obtain_auth_token'),
    (r'^oauth/$', MockView.as_view(authentication_classes=[OAuthAuthentication])),
    (r'^oauth-with-scope/$', MockView.as_view(authentication_classes=[OAuthAuthentication],
        permission_classes=[permissions.TokenHasReadWriteScope]))
)
# OAuth2 endpoints are only wired up when the optional oauth2 provider
# package is importable (oauth2_provider comes from rest_framework.compat).
if oauth2_provider is not None:
    urlpatterns += patterns('',
        url(r'^oauth2/', include('provider.oauth2.urls', namespace='oauth2')),
        url(r'^oauth2-test/$', MockView.as_view(authentication_classes=[OAuth2Authentication])),
        url(r'^oauth2-with-scope-test/$', MockView.as_view(authentication_classes=[OAuth2Authentication],
            permission_classes=[permissions.TokenHasReadWriteScope])),
    )
class BasicAuthTests(TestCase):
    """Basic authentication"""
    urls = 'rest_framework.tests.test_authentication'

    def setUp(self):
        # CSRF enforcement is enabled to prove basic auth bypasses it.
        self.csrf_client = APIClient(enforce_csrf_checks=True)
        self.username = 'john'
        self.email = 'lennon@thebeatles.com'
        self.password = 'password'
        self.user = User.objects.create_user(self.username, self.email, self.password)

    def test_post_form_passing_basic_auth(self):
        """Ensure POSTing form data over basic auth with correct credentials passes and does not require CSRF"""
        credentials = ('%s:%s' % (self.username, self.password))
        base64_credentials = base64.b64encode(credentials.encode(HTTP_HEADER_ENCODING)).decode(HTTP_HEADER_ENCODING)
        auth = 'Basic %s' % base64_credentials
        response = self.csrf_client.post('/basic/', {'example': 'example'}, HTTP_AUTHORIZATION=auth)
        self.assertEqual(response.status_code, status.HTTP_200_OK)

    def test_post_json_passing_basic_auth(self):
        """Ensure POSTing json over basic auth with correct credentials passes and does not require CSRF"""
        credentials = ('%s:%s' % (self.username, self.password))
        base64_credentials = base64.b64encode(credentials.encode(HTTP_HEADER_ENCODING)).decode(HTTP_HEADER_ENCODING)
        auth = 'Basic %s' % base64_credentials
        response = self.csrf_client.post('/basic/', {'example': 'example'}, format='json', HTTP_AUTHORIZATION=auth)
        self.assertEqual(response.status_code, status.HTTP_200_OK)

    def test_post_form_failing_basic_auth(self):
        """Ensure POSTing form over basic auth without correct credentials fails"""
        response = self.csrf_client.post('/basic/', {'example': 'example'})
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)

    def test_post_json_failing_basic_auth(self):
        """Ensure POSTing json over basic auth without correct credentials fails"""
        response = self.csrf_client.post('/basic/', {'example': 'example'}, format='json')
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
        # Unauthenticated responses must advertise the basic challenge.
        self.assertEqual(response['WWW-Authenticate'], 'Basic realm="api"')
class SessionAuthTests(TestCase):
    """User session authentication"""
    urls = 'rest_framework.tests.test_authentication'

    def setUp(self):
        # Two clients: one that enforces CSRF checks and one that skips them,
        # so CSRF-pass and CSRF-fail paths can both be exercised.
        self.csrf_client = APIClient(enforce_csrf_checks=True)
        self.non_csrf_client = APIClient(enforce_csrf_checks=False)
        self.username = 'john'
        self.email = 'lennon@thebeatles.com'
        self.password = 'password'
        self.user = User.objects.create_user(self.username, self.email, self.password)

    def tearDown(self):
        # NOTE(review): only csrf_client is logged out; non_csrf_client is
        # left as-is — confirm this is intentional.
        self.csrf_client.logout()

    def test_post_form_session_auth_failing_csrf(self):
        """
        Ensure POSTing form over session authentication without CSRF token fails.
        """
        self.csrf_client.login(username=self.username, password=self.password)
        response = self.csrf_client.post('/session/', {'example': 'example'})
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)

    def test_post_form_session_auth_passing(self):
        """
        Ensure POSTing form over session authentication with logged in user and CSRF token passes.
        """
        self.non_csrf_client.login(username=self.username, password=self.password)
        response = self.non_csrf_client.post('/session/', {'example': 'example'})
        self.assertEqual(response.status_code, status.HTTP_200_OK)

    def test_put_form_session_auth_passing(self):
        """
        Ensure PUTting form over session authentication with logged in user and CSRF token passes.
        """
        self.non_csrf_client.login(username=self.username, password=self.password)
        response = self.non_csrf_client.put('/session/', {'example': 'example'})
        self.assertEqual(response.status_code, status.HTTP_200_OK)

    def test_post_form_session_auth_failing(self):
        """
        Ensure POSTing form over session authentication without logged in user fails.
        """
        response = self.csrf_client.post('/session/', {'example': 'example'})
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
class TokenAuthTests(TestCase):
    """Token authentication"""
    urls = 'rest_framework.tests.test_authentication'

    def setUp(self):
        self.csrf_client = APIClient(enforce_csrf_checks=True)
        self.username = 'john'
        self.email = 'lennon@thebeatles.com'
        self.password = 'password'
        self.user = User.objects.create_user(self.username, self.email, self.password)
        # Fixed token key so responses can be asserted against it.
        self.key = 'abcd1234'
        self.token = Token.objects.create(key=self.key, user=self.user)

    def test_post_form_passing_token_auth(self):
        """Ensure POSTing form data over token auth with correct credentials passes and does not require CSRF"""
        auth = 'Token ' + self.key
        response = self.csrf_client.post('/token/', {'example': 'example'}, HTTP_AUTHORIZATION=auth)
        self.assertEqual(response.status_code, status.HTTP_200_OK)

    def test_post_json_passing_token_auth(self):
        """Ensure POSTing json over token auth with correct credentials passes and does not require CSRF"""
        auth = "Token " + self.key
        response = self.csrf_client.post('/token/', {'example': 'example'}, format='json', HTTP_AUTHORIZATION=auth)
        self.assertEqual(response.status_code, status.HTTP_200_OK)

    def test_post_form_failing_token_auth(self):
        """Ensure POSTing form over token auth without correct credentials fails"""
        response = self.csrf_client.post('/token/', {'example': 'example'})
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)

    def test_post_json_failing_token_auth(self):
        """Ensure POSTing json over token auth without correct credentials fails"""
        response = self.csrf_client.post('/token/', {'example': 'example'}, format='json')
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)

    def test_token_has_auto_assigned_key_if_none_provided(self):
        """Ensure creating a token with no key will auto-assign a key"""
        self.token.delete()
        token = Token.objects.create(user=self.user)
        self.assertTrue(bool(token.key))

    def test_token_login_json(self):
        """Ensure token login view using JSON POST works."""
        client = APIClient(enforce_csrf_checks=True)
        response = client.post('/auth-token/',
                               {'username': self.username, 'password': self.password}, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['token'], self.key)

    def test_token_login_json_bad_creds(self):
        """Ensure token login view using JSON POST fails if bad credentials are used."""
        client = APIClient(enforce_csrf_checks=True)
        response = client.post('/auth-token/',
                               {'username': self.username, 'password': "badpass"}, format='json')
        self.assertEqual(response.status_code, 400)

    def test_token_login_json_missing_fields(self):
        """Ensure token login view using JSON POST fails if missing fields."""
        client = APIClient(enforce_csrf_checks=True)
        response = client.post('/auth-token/',
                               {'username': self.username}, format='json')
        self.assertEqual(response.status_code, 400)

    def test_token_login_form(self):
        """Ensure token login view using form POST works."""
        client = APIClient(enforce_csrf_checks=True)
        response = client.post('/auth-token/',
                               {'username': self.username, 'password': self.password})
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['token'], self.key)
class IncorrectCredentialsTests(TestCase):
    def test_incorrect_credentials(self):
        """
        If a request contains bad authentication credentials, then
        authentication should run and error, even if no permissions
        are set on the view.
        """
        class IncorrectCredentialsAuth(BaseAuthentication):
            # Authentication backend that always rejects.
            def authenticate(self, request):
                raise exceptions.AuthenticationFailed('Bad credentials')

        request = factory.get('/')
        # Empty permission_classes proves the failure comes from the
        # authentication step, not a permission check.
        view = MockView.as_view(
            authentication_classes=(IncorrectCredentialsAuth,),
            permission_classes=()
        )
        response = view(request)
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
        self.assertEqual(response.data, {'detail': 'Bad credentials'})
class OAuthTests(TestCase):
"""OAuth 1.0a authentication"""
urls = 'rest_framework.tests.test_authentication'
    def setUp(self):
        # these imports are here because oauth is optional and hiding them in try..except block or compat
        # could obscure problems if something breaks
        from oauth_provider.models import Consumer, Scope
        from oauth_provider.models import Token as OAuthToken
        from oauth_provider import consts
        self.consts = consts
        self.csrf_client = APIClient(enforce_csrf_checks=True)
        self.username = 'john'
        self.email = 'lennon@thebeatles.com'
        self.password = 'password'
        self.user = User.objects.create_user(self.username, self.email, self.password)
        self.CONSUMER_KEY = 'consumer_key'
        self.CONSUMER_SECRET = 'consumer_secret'
        self.TOKEN_KEY = "token_key"
        self.TOKEN_SECRET = "token_secret"
        # Consumer starts in ACCEPTED state; individual tests flip this to
        # exercise the rejection paths.
        self.consumer = Consumer.objects.create(key=self.CONSUMER_KEY, secret=self.CONSUMER_SECRET,
            name='example', user=self.user, status=self.consts.ACCEPTED)
        self.scope = Scope.objects.create(name="resource name", url="api/")
        # An approved ACCESS token — the happy-path credential for requests.
        self.token = OAuthToken.objects.create(user=self.user, consumer=self.consumer, scope=self.scope,
            token_type=OAuthToken.ACCESS, key=self.TOKEN_KEY, secret=self.TOKEN_SECRET, is_approved=True
        )
    def _create_authorization_header(self):
        """Build a signed OAuth 1.0a ``Authorization`` header value."""
        params = {
            'oauth_version': "1.0",
            'oauth_nonce': oauth.generate_nonce(),
            'oauth_timestamp': int(time.time()),
            'oauth_token': self.token.key,
            'oauth_consumer_key': self.consumer.key
        }
        req = oauth.Request(method="GET", url="http://example.com", parameters=params)
        # PLAINTEXT signing is sufficient here; sign_request mutates req in place.
        signature_method = oauth.SignatureMethod_PLAINTEXT()
        req.sign_request(signature_method, self.consumer, self.token)
        return req.to_header()["Authorization"]
    def _create_authorization_url_parameters(self):
        """Build signed OAuth 1.0a credentials as query/form parameters."""
        params = {
            'oauth_version': "1.0",
            'oauth_nonce': oauth.generate_nonce(),
            'oauth_timestamp': int(time.time()),
            'oauth_token': self.token.key,
            'oauth_consumer_key': self.consumer.key
        }
        req = oauth.Request(method="GET", url="http://example.com", parameters=params)
        signature_method = oauth.SignatureMethod_PLAINTEXT()
        req.sign_request(signature_method, self.consumer, self.token)
        # dict(req) yields the signed parameters, including oauth_signature.
        return dict(req)
    @unittest.skipUnless(oauth_provider, 'django-oauth-plus not installed')
    @unittest.skipUnless(oauth, 'oauth2 not installed')
    def test_post_form_passing_oauth(self):
        """Ensure POSTing form over OAuth with correct credentials passes and does not require CSRF"""
        auth = self._create_authorization_header()
        response = self.csrf_client.post('/oauth/', {'example': 'example'}, HTTP_AUTHORIZATION=auth)
        self.assertEqual(response.status_code, 200)
    @unittest.skipUnless(oauth_provider, 'django-oauth-plus not installed')
    @unittest.skipUnless(oauth, 'oauth2 not installed')
    def test_post_form_repeated_nonce_failing_oauth(self):
        """Ensure POSTing form over OAuth with repeated auth (same nonces and timestamp) credentials fails"""
        auth = self._create_authorization_header()
        response = self.csrf_client.post('/oauth/', {'example': 'example'}, HTTP_AUTHORIZATION=auth)
        self.assertEqual(response.status_code, 200)
        # Simulate a replay attack: the auth header contains an already-used
        # (nonce, timestamp) pair, so the second request must be rejected.
        response = self.csrf_client.post('/oauth/', {'example': 'example'}, HTTP_AUTHORIZATION=auth)
        self.assertIn(response.status_code, (status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN))
    @unittest.skipUnless(oauth_provider, 'django-oauth-plus not installed')
    @unittest.skipUnless(oauth, 'oauth2 not installed')
    def test_post_form_token_removed_failing_oauth(self):
        """Ensure POSTing when there is no OAuth access token in db fails"""
        # Header is built from the (still in-memory) token after deletion.
        self.token.delete()
        auth = self._create_authorization_header()
        response = self.csrf_client.post('/oauth/', {'example': 'example'}, HTTP_AUTHORIZATION=auth)
        self.assertIn(response.status_code, (status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN))
    @unittest.skipUnless(oauth_provider, 'django-oauth-plus not installed')
    @unittest.skipUnless(oauth, 'oauth2 not installed')
    def test_post_form_consumer_status_not_accepted_failing_oauth(self):
        """Ensure POSTing when consumer status is anything other than ACCEPTED fails"""
        # Every non-ACCEPTED status must be rejected, so loop over them all.
        for consumer_status in (self.consts.CANCELED, self.consts.PENDING, self.consts.REJECTED):
            self.consumer.status = consumer_status
            self.consumer.save()
            auth = self._create_authorization_header()
            response = self.csrf_client.post('/oauth/', {'example': 'example'}, HTTP_AUTHORIZATION=auth)
            self.assertIn(response.status_code, (status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN))
    @unittest.skipUnless(oauth_provider, 'django-oauth-plus not installed')
    @unittest.skipUnless(oauth, 'oauth2 not installed')
    def test_post_form_with_request_token_failing_oauth(self):
        """Ensure POSTing with unauthorized request token instead of access token fails"""
        # Downgrade the stored ACCESS token to a REQUEST token.
        self.token.token_type = self.token.REQUEST
        self.token.save()
        auth = self._create_authorization_header()
        response = self.csrf_client.post('/oauth/', {'example': 'example'}, HTTP_AUTHORIZATION=auth)
        self.assertIn(response.status_code, (status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN))
    @unittest.skipUnless(oauth_provider, 'django-oauth-plus not installed')
    @unittest.skipUnless(oauth, 'oauth2 not installed')
    def test_post_form_with_urlencoded_parameters(self):
        """Ensure POSTing with x-www-form-urlencoded auth parameters passes"""
        # NOTE(review): both body parameters and an Authorization header are
        # sent here — confirm the header isn't masking the body-params path.
        params = self._create_authorization_url_parameters()
        auth = self._create_authorization_header()
        response = self.csrf_client.post('/oauth/', params, HTTP_AUTHORIZATION=auth)
        self.assertEqual(response.status_code, 200)
    @unittest.skipUnless(oauth_provider, 'django-oauth-plus not installed')
    @unittest.skipUnless(oauth, 'oauth2 not installed')
    def test_get_form_with_url_parameters(self):
        """Ensure GETing with auth in url parameters passes"""
        params = self._create_authorization_url_parameters()
        response = self.csrf_client.get('/oauth/', params)
        self.assertEqual(response.status_code, 200)
    @unittest.skipUnless(oauth_provider, 'django-oauth-plus not installed')
    @unittest.skipUnless(oauth, 'oauth2 not installed')
    def test_post_hmac_sha1_signature_passes(self):
        """Ensure POSTing using HMAC_SHA1 signature method passes"""
        params = {
            'oauth_version': "1.0",
            'oauth_nonce': oauth.generate_nonce(),
            'oauth_timestamp': int(time.time()),
            'oauth_token': self.token.key,
            'oauth_consumer_key': self.consumer.key
        }
        # HMAC-SHA1 signatures cover method and URL, so the request is built
        # against the exact endpoint being POSTed to.
        req = oauth.Request(method="POST", url="http://testserver/oauth/", parameters=params)
        signature_method = oauth.SignatureMethod_HMAC_SHA1()
        req.sign_request(signature_method, self.consumer, self.token)
        auth = req.to_header()["Authorization"]
        response = self.csrf_client.post('/oauth/', HTTP_AUTHORIZATION=auth)
        self.assertEqual(response.status_code, 200)
@unittest.skipUnless(oauth_provider, 'django-oauth-plus not installed')
@unittest.skipUnless(oauth, 'oauth2 not installed')
def test_get_form_with_readonly_resource_passing_auth(self):
    """Ensure GETing a scoped resource with a readonly scope passes"""
    # Fix: the old docstring said "POSTing ... fails", but this test issues
    # a GET and asserts success -- a readonly scope is sufficient for reads.
    read_only_access_token = self.token
    read_only_access_token.scope.is_readonly = True
    read_only_access_token.scope.save()
    params = self._create_authorization_url_parameters()
    response = self.csrf_client.get('/oauth-with-scope/', params)
    self.assertEqual(response.status_code, 200)
@unittest.skipUnless(oauth_provider, 'django-oauth-plus not installed')
@unittest.skipUnless(oauth, 'oauth2 not installed')
def test_post_form_with_readonly_resource_failing_auth(self):
    """Ensure POSTing with a readonly resource instead of a write scope fails"""
    # Mark the token's scope read-only; a write (POST) must then be refused.
    token = self.token
    token.scope.is_readonly = True
    token.scope.save()
    body = self._create_authorization_url_parameters()
    resp = self.csrf_client.post('/oauth-with-scope/', body)
    self.assertIn(resp.status_code,
                  (status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN))
@unittest.skipUnless(oauth_provider, 'django-oauth-plus not installed')
@unittest.skipUnless(oauth, 'oauth2 not installed')
def test_post_form_with_write_resource_passing_auth(self):
    """Ensure POSTing with a write resource succeed"""
    # Explicitly grant write access, then POST with the auth header.
    token = self.token
    token.scope.is_readonly = False
    token.scope.save()
    body = self._create_authorization_url_parameters()
    header = self._create_authorization_header()
    resp = self.csrf_client.post('/oauth-with-scope/', body,
                                 HTTP_AUTHORIZATION=header)
    self.assertEqual(resp.status_code, 200)
@unittest.skipUnless(oauth_provider, 'django-oauth-plus not installed')
@unittest.skipUnless(oauth, 'oauth2 not installed')
def test_bad_consumer_key(self):
    """Ensure POSTing with an unknown consumer key is rejected with 401"""
    # Fix: the old docstring was copy-pasted from the HMAC-SHA1 "passes"
    # test; this test asserts rejection of an unregistered consumer key.
    params = {
        'oauth_version': "1.0",
        'oauth_nonce': oauth.generate_nonce(),
        'oauth_timestamp': int(time.time()),
        'oauth_token': self.token.key,
        'oauth_consumer_key': 'badconsumerkey'
    }
    req = oauth.Request(method="POST", url="http://testserver/oauth/", parameters=params)
    signature_method = oauth.SignatureMethod_HMAC_SHA1()
    # Signed with the real consumer/token, but the advertised consumer key
    # does not exist server-side, so authentication must fail.
    req.sign_request(signature_method, self.consumer, self.token)
    auth = req.to_header()["Authorization"]
    response = self.csrf_client.post('/oauth/', HTTP_AUTHORIZATION=auth)
    self.assertEqual(response.status_code, 401)
@unittest.skipUnless(oauth_provider, 'django-oauth-plus not installed')
@unittest.skipUnless(oauth, 'oauth2 not installed')
def test_bad_token_key(self):
    """Ensure POSTing with an unknown token key is rejected with 401"""
    # Fix: the old docstring was copy-pasted from the HMAC-SHA1 "passes"
    # test; this test asserts rejection of an unregistered token key.
    params = {
        'oauth_version': "1.0",
        'oauth_nonce': oauth.generate_nonce(),
        'oauth_timestamp': int(time.time()),
        'oauth_token': 'badtokenkey',
        'oauth_consumer_key': self.consumer.key
    }
    req = oauth.Request(method="POST", url="http://testserver/oauth/", parameters=params)
    signature_method = oauth.SignatureMethod_HMAC_SHA1()
    # Signed with the real consumer/token, but the advertised token key
    # does not exist server-side, so authentication must fail.
    req.sign_request(signature_method, self.consumer, self.token)
    auth = req.to_header()["Authorization"]
    response = self.csrf_client.post('/oauth/', HTTP_AUTHORIZATION=auth)
    self.assertEqual(response.status_code, 401)
class OAuth2Tests(TestCase):
    """OAuth 2.0 authentication"""
    # URLconf module the Django test client uses for these tests.
    urls = 'rest_framework.tests.test_authentication'

    def setUp(self):
        # Client with CSRF enforcement on, so the POST tests also prove that
        # OAuth2-authenticated requests do not require a CSRF token.
        self.csrf_client = APIClient(enforce_csrf_checks=True)
        self.username = 'john'
        self.email = 'lennon@thebeatles.com'
        self.password = 'password'
        self.user = User.objects.create_user(self.username, self.email, self.password)
        self.CLIENT_ID = 'client_key'
        self.CLIENT_SECRET = 'client_secret'
        self.ACCESS_TOKEN = "access_token"
        self.REFRESH_TOKEN = "refresh_token"
        # One OAuth2 client application plus a matching access/refresh
        # token pair for `self.user`.
        self.oauth2_client = oauth2_provider_models.Client.objects.create(
            client_id=self.CLIENT_ID,
            client_secret=self.CLIENT_SECRET,
            redirect_uri='',
            client_type=0,
            name='example',
            user=None,
        )
        self.access_token = oauth2_provider_models.AccessToken.objects.create(
            token=self.ACCESS_TOKEN,
            client=self.oauth2_client,
            user=self.user,
        )
        self.refresh_token = oauth2_provider_models.RefreshToken.objects.create(
            user=self.user,
            access_token=self.access_token,
            client=self.oauth2_client
        )

    def _create_authorization_header(self, token=None):
        # Bearer header for `token`, defaulting to the valid access token.
        return "Bearer {0}".format(token or self.access_token.token)

    @unittest.skipUnless(oauth2_provider, 'django-oauth2-provider not installed')
    def test_get_form_with_wrong_authorization_header_token_type_failing(self):
        """Ensure that a wrong token type lead to the correct HTTP error status code"""
        auth = "Wrong token-type-obsviously"
        # Checked both with and without a query-dict argument.
        response = self.csrf_client.get('/oauth2-test/', {}, HTTP_AUTHORIZATION=auth)
        self.assertEqual(response.status_code, 401)
        response = self.csrf_client.get('/oauth2-test/', HTTP_AUTHORIZATION=auth)
        self.assertEqual(response.status_code, 401)

    @unittest.skipUnless(oauth2_provider, 'django-oauth2-provider not installed')
    def test_get_form_with_wrong_authorization_header_token_format_failing(self):
        """Ensure that a wrong token format lead to the correct HTTP error status code"""
        # "Bearer" followed by multiple space-separated words is malformed.
        auth = "Bearer wrong token format"
        response = self.csrf_client.get('/oauth2-test/', {}, HTTP_AUTHORIZATION=auth)
        self.assertEqual(response.status_code, 401)
        response = self.csrf_client.get('/oauth2-test/', HTTP_AUTHORIZATION=auth)
        self.assertEqual(response.status_code, 401)

    @unittest.skipUnless(oauth2_provider, 'django-oauth2-provider not installed')
    def test_get_form_with_wrong_authorization_header_token_failing(self):
        """Ensure that a wrong token lead to the correct HTTP error status code"""
        # Well-formed header, but the token value is not in the database.
        auth = "Bearer wrong-token"
        response = self.csrf_client.get('/oauth2-test/', {}, HTTP_AUTHORIZATION=auth)
        self.assertEqual(response.status_code, 401)
        response = self.csrf_client.get('/oauth2-test/', HTTP_AUTHORIZATION=auth)
        self.assertEqual(response.status_code, 401)

    @unittest.skipUnless(oauth2_provider, 'django-oauth2-provider not installed')
    def test_get_form_passing_auth(self):
        """Ensure GETing form over OAuth with correct client credentials succeed"""
        auth = self._create_authorization_header()
        response = self.csrf_client.get('/oauth2-test/', HTTP_AUTHORIZATION=auth)
        self.assertEqual(response.status_code, 200)

    @unittest.skipUnless(oauth2_provider, 'django-oauth2-provider not installed')
    def test_post_form_passing_auth(self):
        """Ensure POSTing form over OAuth with correct credentials passes and does not require CSRF"""
        auth = self._create_authorization_header()
        response = self.csrf_client.post('/oauth2-test/', HTTP_AUTHORIZATION=auth)
        self.assertEqual(response.status_code, 200)

    @unittest.skipUnless(oauth2_provider, 'django-oauth2-provider not installed')
    def test_post_form_token_removed_failing_auth(self):
        """Ensure POSTing when there is no OAuth access token in db fails"""
        # The header is built from the in-memory token value, which no
        # longer exists server-side after the delete.
        self.access_token.delete()
        auth = self._create_authorization_header()
        response = self.csrf_client.post('/oauth2-test/', HTTP_AUTHORIZATION=auth)
        self.assertIn(response.status_code, (status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN))

    @unittest.skipUnless(oauth2_provider, 'django-oauth2-provider not installed')
    def test_post_form_with_refresh_token_failing_auth(self):
        """Ensure POSTing with refresh token instead of access token fails"""
        auth = self._create_authorization_header(token=self.refresh_token.token)
        response = self.csrf_client.post('/oauth2-test/', HTTP_AUTHORIZATION=auth)
        self.assertIn(response.status_code, (status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN))

    @unittest.skipUnless(oauth2_provider, 'django-oauth2-provider not installed')
    def test_post_form_with_expired_access_token_failing_auth(self):
        """Ensure POSTing with expired access token fails with an 'Invalid token' error"""
        # Back-date the expiry by 10 seconds so the token is already stale.
        self.access_token.expires = datetime.datetime.now() - datetime.timedelta(seconds=10)  # 10 seconds late
        self.access_token.save()
        auth = self._create_authorization_header()
        response = self.csrf_client.post('/oauth2-test/', HTTP_AUTHORIZATION=auth)
        self.assertIn(response.status_code, (status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN))
        # NOTE(review): comparing a str against `response.content` assumes
        # bytes/str compatibility (Python 2 era) -- confirm on Python 3.
        self.assertIn('Invalid token', response.content)

    @unittest.skipUnless(oauth2_provider, 'django-oauth2-provider not installed')
    def test_post_form_with_invalid_scope_failing_auth(self):
        """Ensure POSTing with a readonly scope instead of a write scope fails"""
        read_only_access_token = self.access_token
        read_only_access_token.scope = oauth2_provider_scope.SCOPE_NAME_DICT['read']
        read_only_access_token.save()
        auth = self._create_authorization_header(token=read_only_access_token.token)
        # Read access is allowed with a 'read' scope...
        response = self.csrf_client.get('/oauth2-with-scope-test/', HTTP_AUTHORIZATION=auth)
        self.assertEqual(response.status_code, 200)
        # ...but write access is not.
        response = self.csrf_client.post('/oauth2-with-scope-test/', HTTP_AUTHORIZATION=auth)
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)

    @unittest.skipUnless(oauth2_provider, 'django-oauth2-provider not installed')
    def test_post_form_with_valid_scope_passing_auth(self):
        """Ensure POSTing with a write scope succeed"""
        read_write_access_token = self.access_token
        read_write_access_token.scope = oauth2_provider_scope.SCOPE_NAME_DICT['write']
        read_write_access_token.save()
        auth = self._create_authorization_header(token=read_write_access_token.token)
        response = self.csrf_client.post('/oauth2-with-scope-test/', HTTP_AUTHORIZATION=auth)
        self.assertEqual(response.status_code, 200)
class FailingAuthAccessedInRenderer(TestCase):
    """Regression check: a renderer may touch `request.user` even when the
    authentication class raised `AuthenticationFailed`."""

    def setUp(self):
        class AuthAccessingRenderer(renderers.BaseRenderer):
            media_type = 'text/plain'
            format = 'txt'

            def render(self, data, media_type=None, renderer_context=None):
                req = renderer_context['request']
                return (b'authenticated' if req.user.is_authenticated()
                        else b'not authenticated')

        class FailingAuth(BaseAuthentication):
            def authenticate(self, request):
                raise exceptions.AuthenticationFailed('authentication failed')

        class ExampleView(APIView):
            authentication_classes = (FailingAuth,)
            renderer_classes = (AuthAccessingRenderer,)

            def get(self, request):
                return Response({'foo': 'bar'})

        self.view = ExampleView.as_view()

    def test_failing_auth_accessed_in_renderer(self):
        """
        When authentication fails the renderer should still be able to access
        `request.user` without raising an exception. Particularly relevant
        to HTML responses that might reasonably access `request.user`.
        """
        rendered = self.view(factory.get('/')).render().content
        self.assertEqual(rendered, b'not authenticated')
# ---------------------------------------------------------------------------
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" P1 tests for memory resource limits
"""
# Import Local Modules
from nose.plugins.attrib import attr
from marvin.cloudstackTestCase import cloudstackTestCase, unittest
from marvin.integration.lib.base import (
Account,
ServiceOffering,
VirtualMachine,
Resources,
Domain
)
from marvin.integration.lib.common import (get_domain,
get_zone,
get_template,
cleanup_resources,
wait_for_cleanup,
find_suitable_host,
get_resource_type
)
class Services:
    """Test memory resource limit services

    Plain data holder: builds the configuration dictionary consumed by the
    memory-resource-limit test cases below.
    """

    def __init__(self):
        account = {
            "email": "test@test.com",
            "firstname": "Test",
            "lastname": "User",
            # Random characters are appended for a unique username.
            "username": "resource",
            "password": "password",
        }
        service_offering = {
            "name": "Tiny Instance",
            "displaytext": "Tiny Instance",
            "cpunumber": 1,
            "cpuspeed": 100,  # in MHz
            "memory": 2048,   # in MBs
        }
        virtual_machine = {
            "displayname": "TestVM",
            "username": "root",
            "password": "password",
            "ssh_port": 22,
            "hypervisor": 'KVM',
            "privateport": 22,
            "publicport": 22,
            "protocol": 'TCP',
        }
        network = {
            "name": "Test Network",
            "displaytext": "Test Network",
            "netmask": '255.255.255.0',
        }
        self.services = {
            "account": account,
            "service_offering": service_offering,
            "virtual_machine": virtual_machine,
            "network": network,
            "project": {
                "name": "Project",
                "displaytext": "Test project",
            },
            "domain": {
                "name": "Domain",
            },
            "ostype": 'CentOS 5.3 (64-bit)',
            "sleep": 60,
            "timeout": 10,
            # Networking mode: Advanced, Basic
            "mode": 'advanced',
        }
class TestMemoryLimits(cloudstackTestCase):
    """P1: verify that an account's memory resource count (resource type 9,
    RAM) stays correct across VM stop/start, migrate, delete, and multiple
    deployments."""

    @classmethod
    def setUpClass(cls):
        # Class-wide fixtures: API client, zone/domain/template lookup and
        # one shared service offering (2048 MB per Services above).
        cls.api_client = super(TestMemoryLimits,
                               cls).getClsTestClient().getApiClient()
        cls.services = Services().services
        # Get Zone, Domain and templates
        cls.domain = get_domain(cls.api_client, cls.services)
        cls.zone = get_zone(cls.api_client, cls.services)
        cls.services["mode"] = cls.zone.networktype
        cls.template = get_template(
            cls.api_client,
            cls.zone.id,
            cls.services["ostype"]
        )
        cls.services["virtual_machine"]["zoneid"] = cls.zone.id
        cls.service_offering = ServiceOffering.create(
            cls.api_client,
            cls.services["service_offering"]
        )
        cls._cleanup = [cls.service_offering, ]
        return

    @classmethod
    def tearDownClass(cls):
        try:
            # Cleanup resources used
            cleanup_resources(cls.api_client, cls._cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return

    def setUp(self):
        # Per-test fixtures: a fresh admin account plus one running instance
        # deployed with the shared offering.
        self.apiclient = self.testClient.getApiClient()
        self.dbclient = self.testClient.getDbConnection()
        self.account = Account.create(
            self.apiclient,
            self.services["account"],
            admin=True
        )
        self.debug("Creating an instance with service offering: %s" %
                   self.service_offering.name)
        self.vm = self.createInstance(service_off=self.service_offering)
        self.cleanup = [self.account, ]
        return

    def tearDown(self):
        try:
            # Clean up, terminate the created instance, volumes and snapshots
            cleanup_resources(self.apiclient, self.cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return

    def createInstance(self, service_off, networks=None, api_client=None):
        """Creates an instance in account

        Deploys a VM in `self.account` with the given service offering,
        verifies via a list call that it is Running, and returns it.
        Fails the test on any deployment error.
        """
        self.debug("Deploying an instance in account: %s" %
                   self.account.name)
        if api_client is None:
            api_client = self.apiclient
        try:
            vm = VirtualMachine.create(
                api_client,
                self.services["virtual_machine"],
                templateid=self.template.id,
                accountid=self.account.name,
                domainid=self.account.domainid,
                networkids=networks,
                serviceofferingid=service_off.id)
            vms = VirtualMachine.list(api_client, id=vm.id, listall=True)
            self.assertIsInstance(vms,
                                  list,
                                  "List VMs should return a valid response")
            self.assertEqual(vms[0].state, "Running",
                             "Vm state should be running after deployment")
            return vm
        except Exception as e:
            self.fail("Failed to deploy an instance: %s" % e)

    @attr(tags=["advanced", "advancedns", "simulator"])
    def test_01_stop_start_instance(self):
        """Test Deploy VM with specified RAM & verify the usage"""
        # Validate the following
        # 1. Create compute offering with specified RAM & Deploy VM as root admin
        # 2. List Resource count for the root admin Memory usage
        # 3. Stop and start instance, resource count should list properly.
        account_list = Account.list(self.apiclient, id=self.account.id)
        self.assertIsInstance(account_list,
                              list,
                              "List Accounts should return a valid response"
                              )
        resource_count = account_list[0].memorytotal
        expected_resource_count = int(self.services["service_offering"]["memory"])
        self.assertEqual(resource_count, expected_resource_count,
                         "Resource count should match with the expected resource count")
        self.debug("Stopping instance: %s" % self.vm.name)
        try:
            self.vm.stop(self.apiclient)
        except Exception as e:
            self.fail("Failed to stop instance: %s" % e)
        # Stopping must NOT release the reserved memory count.
        account_list = Account.list(self.apiclient, id=self.account.id)
        self.assertIsInstance(account_list,
                              list,
                              "List Accounts should return a valid response"
                              )
        resource_count_after_stop = account_list[0].memorytotal
        self.assertEqual(resource_count, resource_count_after_stop,
                         "Resource count should be same after stopping the instance")
        self.debug("Starting instance: %s" % self.vm.name)
        try:
            self.vm.start(self.apiclient)
        except Exception as e:
            self.fail("Failed to start instance: %s" % e)
        # Starting again must leave the count unchanged as well.
        # NOTE(review): assertion message below was copy-pasted from the
        # stop case ("after stopping").
        account_list = Account.list(self.apiclient, id=self.account.id)
        self.assertIsInstance(account_list,
                              list,
                              "List Accounts should return a valid response"
                              )
        resource_count_after_start = account_list[0].memorytotal
        self.assertEqual(resource_count, resource_count_after_start,
                         "Resource count should be same after stopping the instance")
        return

    @attr(tags=["advanced", "advancedns", "simulator"])
    def test_02_migrate_instance(self):
        """Test Deploy VM with specified RAM & verify the usage"""
        # Validate the following
        # 1. Create compute offering with specified RAM & Deploy VM as root admin
        # 2. List Resource count for the root admin Memory usage
        # 3. Migrate vm, resource count should list properly.
        account_list = Account.list(self.apiclient, id=self.account.id)
        self.assertIsInstance(account_list,
                              list,
                              "List Accounts should return a valid response"
                              )
        resource_count = account_list[0].memorytotal
        expected_resource_count = int(self.services["service_offering"]["memory"])
        self.assertEqual(resource_count, expected_resource_count,
                         "Resource count should match with the expected resource count")
        host = find_suitable_host(self.apiclient, self.vm)
        self.debug("Migrating instance: %s to host: %s" % (self.vm.name, host.name))
        try:
            self.vm.migrate(self.apiclient, host.id)
        except Exception as e:
            self.fail("Failed to migrate instance: %s" % e)
        # Migration must not change the account's memory count.
        # NOTE(review): assertion message below says "after stopping".
        account_list = Account.list(self.apiclient, id=self.account.id)
        self.assertIsInstance(account_list,
                              list,
                              "List Accounts should return a valid response"
                              )
        resource_count_after_migrate = account_list[0].memorytotal
        self.assertEqual(resource_count, resource_count_after_migrate,
                         "Resource count should be same after stopping the instance")
        return

    @attr(tags=["advanced", "advancedns", "simulator"])
    def test_03_delete_instance(self):
        """Test Deploy VM with specified GB RAM & verify the usage"""
        # Validate the following
        # 1. Create compute offering with specified RAM & Deploy VM as root admin
        # 2. List Resource count for the root admin Memory usage
        # 3. Delete instance, resource count should be 0 after delete operation.
        account_list = Account.list(self.apiclient, id=self.account.id)
        self.assertIsInstance(account_list,
                              list,
                              "List Accounts should return a valid response"
                              )
        resource_count = account_list[0].memorytotal
        expected_resource_count = int(self.services["service_offering"]["memory"])
        self.assertEqual(resource_count, expected_resource_count,
                         "Resource count should match with the expected resource count")
        self.debug("Destroying instance: %s" % self.vm.name)
        try:
            self.vm.delete(self.apiclient)
        except Exception as e:
            self.fail("Failed to delete instance: %s" % e)
        # Wait for expunge interval to cleanup Memory
        wait_for_cleanup(self.apiclient, ["expunge.delay", "expunge.interval"])
        account_list = Account.list(self.apiclient, id=self.account.id)
        self.assertIsInstance(account_list,
                              list,
                              "List Accounts should return a valid response"
                              )
        resource_count_after_delete = account_list[0].memorytotal
        # resource_id=9 is the RAM resource type.
        self.assertEqual(resource_count_after_delete, 0,
                         "Resource count for %s should be 0" % get_resource_type(resource_id=9))
        return

    @attr(tags=["advanced", "advancedns", "simulator"])
    def test_04_deploy_multiple_vm(self):
        """Test Deploy multiple VM with specified RAM & verify the usage"""
        # Validate the following
        # 1. Create compute offering with specified RAM
        # 2. Deploy multiple VMs with this service offering
        # 3. List Resource count for the root admin Memory usage
        # 4. Memory usage should list properly
        account_list = Account.list(self.apiclient, id=self.account.id)
        self.assertIsInstance(account_list,
                              list,
                              "List Accounts should return a valid response"
                              )
        resource_count = account_list[0].memorytotal
        expected_resource_count = int(self.services["service_offering"]["memory"])
        self.assertEqual(resource_count, expected_resource_count,
                         "Resource count should match with the expected resource count")
        self.debug("Creating two instances with service offering: %s" %
                   self.service_offering.name)
        vm_1 = self.createInstance(service_off=self.service_offering)
        self.createInstance(service_off=self.service_offering)
        # With the setUp instance, three VMs are now reserved.
        account_list = Account.list(self.apiclient, id=self.account.id)
        self.assertIsInstance(account_list,
                              list,
                              "List Accounts should return a valid response"
                              )
        resource_count_new = account_list[0].memorytotal
        expected_resource_count = int(self.services["service_offering"]["memory"]) * 3  # Total 3 VMs
        self.assertEqual(resource_count_new, expected_resource_count,
                         "Resource count should match with the expected resource count")
        self.debug("Destroying instance: %s" % vm_1.name)
        try:
            vm_1.delete(self.apiclient)
        except Exception as e:
            self.fail("Failed to delete instance: %s" % e)
        # Deleting one VM should reduce the count by exactly one offering.
        account_list = Account.list(self.apiclient, id=self.account.id)
        self.assertIsInstance(account_list,
                              list,
                              "List Accounts should return a valid response"
                              )
        resource_count_after_delete = account_list[0].memorytotal
        expected_resource_count -= int(self.services["service_offering"]["memory"])
        self.assertEqual(resource_count_after_delete, expected_resource_count,
                         "Resource count should match with the expected resource count")
        return
class TestDomainMemoryLimitsConfiguration(cloudstackTestCase):
@classmethod
def setUpClass(cls):
    # Class-wide fixtures shared by every test in this class: API client,
    # zone/domain/template lookup, and one service offering.
    cls.api_client = super(TestDomainMemoryLimitsConfiguration,
                           cls).getClsTestClient().getApiClient()
    cls.services = Services().services
    # Get Zone, Domain and templates
    cls.domain = get_domain(cls.api_client, cls.services)
    cls.zone = get_zone(cls.api_client, cls.services)
    cls.services["mode"] = cls.zone.networktype
    cls.template = get_template(
        cls.api_client,
        cls.zone.id,
        cls.services["ostype"]
    )
    cls.services["virtual_machine"]["zoneid"] = cls.zone.id
    cls.service_offering = ServiceOffering.create(
        cls.api_client,
        cls.services["service_offering"]
    )
    cls._cleanup = [cls.service_offering, ]
    return
@classmethod
def tearDownClass(cls):
    """Release the class-level fixtures; surface any cleanup failure."""
    try:
        cleanup_resources(cls.api_client, cls._cleanup)
    except Exception as err:
        raise Exception("Warning: Exception during cleanup : %s" % err)
    return
def setUp(self):
    # Per-test fixtures: fresh API/DB clients and an empty cleanup list
    # (populated by setupAccounts / the tests themselves).
    self.apiclient = self.testClient.getApiClient()
    self.dbclient = self.testClient.getDbConnection()
    self.cleanup = []
    return
def tearDown(self):
    """Terminate everything registered in self.cleanup (instances, volumes,
    snapshots, accounts, domains)."""
    try:
        cleanup_resources(self.apiclient, self.cleanup)
    except Exception as err:
        raise Exception("Warning: Exception during cleanup : %s" % err)
    return
def createInstance(self, service_off, networks=None, api_client=None):
    """Creates an instance in account

    Deploys a VM in `self.account`, verifies it is Running via a list
    call, and returns it; fails the test on any deployment error.
    """
    self.debug("Deploying an instance in account: %s" %
               self.account.name)
    client = self.apiclient if api_client is None else api_client
    try:
        deployed = VirtualMachine.create(
            client,
            self.services["virtual_machine"],
            templateid=self.template.id,
            accountid=self.account.name,
            domainid=self.account.domainid,
            networkids=networks,
            serviceofferingid=service_off.id)
        listed = VirtualMachine.list(client, id=deployed.id, listall=True)
        self.assertIsInstance(listed,
                              list,
                              "List VMs should return a valid response")
        self.assertEqual(listed[0].state, "Running",
                         "Vm state should be running after deployment")
        return deployed
    except Exception as e:
        self.fail("Failed to deploy an instance: %s" % e)
def setupAccounts(self):
    # Build two sibling child domains under the root domain, each with its
    # own domain-admin account; register all four for teardown cleanup.
    self.debug("Creating a domain under: %s" % self.domain.name)
    self.child_domain_1 = Domain.create(self.apiclient,
                                        services=self.services["domain"],
                                        parentdomainid=self.domain.id)
    self.child_do_admin_1 = Account.create(
        self.apiclient,
        self.services["account"],
        admin=True,
        domainid=self.child_domain_1.id
    )
    # Cleanup the resources created at end of test
    # (account appended before its domain, so it is removed first).
    self.cleanup.append(self.child_do_admin_1)
    self.cleanup.append(self.child_domain_1)
    self.debug("Creating a domain under: %s" % self.domain.name)
    self.child_domain_2 = Domain.create(self.apiclient,
                                        services=self.services["domain"],
                                        parentdomainid=self.domain.id)
    self.child_do_admin_2 = Account.create(
        self.apiclient,
        self.services["account"],
        admin=True,
        domainid=self.child_domain_2.id)
    # Cleanup the resources created at end of test
    self.cleanup.append(self.child_do_admin_2)
    self.cleanup.append(self.child_domain_2)
    return
@attr(tags=["advanced", "advancedns", "simulator"])
def test_01_stop_start_instance(self):
    """Test Deploy VM with 5 GB memory & verify the usage"""
    # NOTE(review): docstring says 5 GB but the shared offering defines
    # 2048 MB (see Services); the assertions below use the offering value,
    # so the docstring wording looks stale -- confirm.
    # Validate the following
    # 1. Create compute offering with 5 GB memory in child domains of root domain & Deploy VM
    # 2. List Resource count memory usage
    # 3. Stop and Start instance, check resource count.
    # 4. Resource count should list properly.
    self.debug("Setting up account and domain hierarchy")
    self.setupAccounts()
    users = {self.child_domain_1: self.child_do_admin_1,
             self.child_domain_2: self.child_do_admin_2
             }
    # Repeat the scenario once per child-domain admin.
    for domain, admin in users.items():
        self.account = admin
        self.domain = domain
        # Act as the domain admin so usage accrues to that account.
        api_client = self.testClient.createUserApiClient(
            UserName=self.account.name,
            DomainName=self.account.domain)
        self.debug("Creating an instance with service offering: %s" %
                   self.service_offering.name)
        vm = self.createInstance(service_off=self.service_offering, api_client=api_client)
        account_list = Account.list(self.apiclient, id=self.account.id)
        self.assertIsInstance(account_list,
                              list,
                              "List Accounts should return a valid response"
                              )
        resource_count = account_list[0].memorytotal
        expected_resource_count = int(self.services["service_offering"]["memory"])
        self.assertEqual(resource_count, expected_resource_count,
                         "Initial resource count should match with the expected resource count")
        self.debug("Stopping instance: %s" % vm.name)
        try:
            vm.stop(self.apiclient)
        except Exception as e:
            self.fail("Failed to stop instance: %s" % e)
        # Stopping must NOT release the reserved memory count.
        account_list = Account.list(self.apiclient, id=self.account.id)
        self.assertIsInstance(account_list,
                              list,
                              "List Accounts should return a valid response"
                              )
        resource_count_after_stop = account_list[0].memorytotal
        self.assertEqual(resource_count, resource_count_after_stop,
                         "Resource count should be same after stopping the instance")
        self.debug("Starting instance: %s" % vm.name)
        try:
            vm.start(self.apiclient)
        except Exception as e:
            self.fail("Failed to start instance: %s" % e)
        # Starting must leave the count unchanged as well.
        account_list = Account.list(self.apiclient, id=self.account.id)
        self.assertIsInstance(account_list,
                              list,
                              "List Accounts should return a valid response"
                              )
        resource_count_after_start = account_list[0].memorytotal
        self.assertEqual(resource_count_after_stop, resource_count_after_start,
                         "Resource count should be same after starting the instance")
    return
@attr(tags=["advanced", "advancedns", "simulator"])
def test_02_migrate_instance(self):
    """Test Deploy VM with specified memory & verify the usage"""
    # Validate the following
    # 1. Create compute offering with specified memory in child domains of root domain & Deploy VM
    # 2. List Resource count
    # 3. Migrate instance to another host
    # 4. Resource count should list properly.
    self.debug("Setting up account and domain hierarchy")
    self.setupAccounts()
    users = {self.child_domain_1: self.child_do_admin_1,
             self.child_domain_2: self.child_do_admin_2
             }
    # Repeat the scenario once per child-domain admin.
    for domain, admin in users.items():
        self.account = admin
        self.domain = domain
        api_client = self.testClient.createUserApiClient(
            UserName=self.account.name,
            DomainName=self.account.domain)
        self.debug("Creating an instance with service offering: %s" %
                   self.service_offering.name)
        vm = self.createInstance(service_off=self.service_offering, api_client=api_client)
        account_list = Account.list(self.apiclient, id=self.account.id)
        self.assertIsInstance(account_list,
                              list,
                              "List Accounts should return a valid response"
                              )
        resource_count = account_list[0].memorytotal
        expected_resource_count = int(self.services["service_offering"]["memory"])
        # NOTE(review): message below is garbled ("should with the") --
        # left untouched here since it is a runtime string.
        self.assertEqual(resource_count, expected_resource_count,
                         "Initial resource count should with the expected resource count")
        host = find_suitable_host(self.apiclient, vm)
        self.debug("Migrating instance: %s to host: %s" %
                   (vm.name, host.name))
        try:
            vm.migrate(self.apiclient, host.id)
        except Exception as e:
            self.fail("Failed to migrate instance: %s" % e)
        # Migration must not change the account's memory count.
        # NOTE(review): message below says "after starting".
        account_list = Account.list(self.apiclient, id=self.account.id)
        self.assertIsInstance(account_list,
                              list,
                              "List Accounts should return a valid response"
                              )
        resource_count_after_migrate = account_list[0].memorytotal
        self.assertEqual(resource_count, resource_count_after_migrate,
                         "Resource count should be same after starting the instance")
    return
@attr(tags=["advanced", "advancedns", "simulator"])
def test_03_delete_instance(self):
    """Test Deploy VM with specified RAM & verify the usage"""
    # Validate the following
    # 1. Create compute offering with specified RAM in child domains of root domain & Deploy VM
    # 2. List Resource count for the Memory usage
    # 3. Delete instance
    # 4. Resource count should list as 0
    self.debug("Setting up account and domain hierarchy")
    self.setupAccounts()
    users = {self.child_domain_1: self.child_do_admin_1,
             self.child_domain_2: self.child_do_admin_2
             }
    # Repeat the scenario once per child-domain admin.
    for domain, admin in users.items():
        self.account = admin
        self.domain = domain
        api_client = self.testClient.createUserApiClient(
            UserName=self.account.name,
            DomainName=self.account.domain)
        self.debug("Creating an instance with service offering: %s" %
                   self.service_offering.name)
        vm = self.createInstance(service_off=self.service_offering, api_client=api_client)
        account_list = Account.list(self.apiclient, id=self.account.id)
        self.assertIsInstance(account_list,
                              list,
                              "List Accounts should return a valid response"
                              )
        resource_count = account_list[0].memorytotal
        expected_resource_count = int(self.services["service_offering"]["memory"])
        self.assertEqual(resource_count, expected_resource_count,
                         "Initial resource count should match with the expected resource count")
        self.debug("Destroying instance: %s" % vm.name)
        try:
            vm.delete(self.apiclient)
        except Exception as e:
            self.fail("Failed to delete instance: %s" % e)
        # After the only VM is removed, the memory count must drop to 0.
        # NOTE(review): unlike TestMemoryLimits.test_03, no expunge wait
        # happens here before re-listing -- confirm intent.
        account_list = Account.list(self.apiclient, id=self.account.id)
        self.assertIsInstance(account_list,
                              list,
                              "List Accounts should return a valid response"
                              )
        resource_count = account_list[0].memorytotal
        # resource_id=9 is the RAM resource type.
        self.assertEqual(resource_count, 0,
                         "Resource count for %s should be 0" % get_resource_type(resource_id=9))
    return
@attr(tags=["advanced", "advancedns", "simulator"])
@attr(configuration='max.account.memory')
def test_04_deploy_multiple_vm(self):
    """Test Deploy multiple VM with 2 GB memory & verify the usage"""
    # keep the configuration value - max.account.memory = 8192 (maximum 4 instances per account with 2 GB RAM)
    # Validate the following
    # 1. Create compute offering with 2 GB RAM
    # 2. Deploy multiple VMs with this service offering in child domains of root domain
    # 3. List Resource count for the root admin Memory usage
    # 4. Memory usage should list properly
    self.debug("Creating service offering with 2 GB RAM")
    self.service_offering = ServiceOffering.create(
        self.apiclient,
        self.services["service_offering"]
    )
    # Adding to cleanup list after execution
    self.cleanup.append(self.service_offering)

    self.debug("Setting up account and domain hierarchy")
    self.setupAccounts()
    users = {self.child_domain_1: self.child_do_admin_1,
             self.child_domain_2: self.child_do_admin_2
             }
    for domain, admin in users.items():
        self.account = admin
        self.domain = domain

        # This test only makes sense with max.account.memory == 8192
        # (exactly 4 x 2 GB instances per account); skip otherwise.
        memory_account_gc = Resources.list(self.apiclient,
                                           resourcetype=9,  # Memory
                                           account=self.account.name,
                                           domainid=self.domain.id
                                           )
        if memory_account_gc[0].max != 8192:
            self.skipTest("This test case requires configuration value max.account.memory to be 8192")

        api_client = self.testClient.createUserApiClient(
            UserName=self.account.name,
            DomainName=self.account.domain)

        self.debug("Creating an instance with service offering: %s" %
                   self.service_offering.name)
        vm_1 = self.createInstance(service_off=self.service_offering, api_client=api_client)
        vm_2 = self.createInstance(service_off=self.service_offering, api_client=api_client)
        self.createInstance(service_off=self.service_offering, api_client=api_client)
        self.createInstance(service_off=self.service_offering, api_client=api_client)

        # A fifth deployment must fail: the account limit (8192 MB) is exhausted.
        self.debug("Deploying instance - memory capacity is fully utilized")
        with self.assertRaises(Exception):
            self.createInstance(service_off=self.service_offering, api_client=api_client)

        account_list = Account.list(self.apiclient, id=self.account.id)
        self.assertIsInstance(account_list,
                              list,
                              "List Accounts should return a valid response"
                              )
        resource_count = account_list[0].memorytotal
        expected_resource_count = int(self.services["service_offering"]["memory"]) * 4  # Total 4 vms
        # BUGFIX: assertion message previously read "should with the expected";
        # restored the missing "match" to mirror the parallel check in test_03.
        self.assertEqual(resource_count, expected_resource_count,
                         "Initial resource count should match with the expected resource count")

        self.debug("Destroying instance: %s" % vm_1.name)
        try:
            vm_1.delete(self.apiclient)
        except Exception as e:
            self.fail("Failed to delete instance: %s" % e)

        account_list = Account.list(self.apiclient, id=self.account.id)
        self.assertIsInstance(account_list,
                              list,
                              "List Accounts should return a valid response"
                              )
        resource_count_after_delete = account_list[0].memorytotal
        expected_resource_count -= int(self.services["service_offering"]["memory"])
        self.assertEqual(resource_count_after_delete, expected_resource_count,
                         "Resource count should match with the expected resource count")

        # Migration moves the VM between hosts; the account-level memory
        # count must be unaffected by it.
        host = find_suitable_host(self.apiclient, vm_2)
        self.debug("Migrating instance: %s to host: %s" % (vm_2.name,
                                                           host.name))
        try:
            vm_2.migrate(self.apiclient, host.id)
        except Exception as e:
            self.fail("Failed to migrate instance: %s" % e)

        account_list = Account.list(self.apiclient, id=self.account.id)
        self.assertIsInstance(account_list,
                              list,
                              "List Accounts should return a valid response"
                              )
        resource_count_after_migrate = account_list[0].memorytotal
        self.debug(resource_count_after_migrate)
        self.assertEqual(resource_count_after_delete, resource_count_after_migrate,
                         "Resource count should be same after migrating the instance")
    return
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
# Optional per-call callback: receives (pipeline_response, deserialized_result, response_headers)
# and may transform the operation's return value.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
# NOTE: AutoRest-generated operation group — hand edits will be lost on regeneration.
class PacketCapturesOperations:
    """PacketCapturesOperations async operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.network.v2020_07_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    async def _create_initial(
        self,
        resource_group_name: str,
        network_watcher_name: str,
        packet_capture_name: str,
        parameters: "_models.PacketCapture",
        **kwargs: Any
    ) -> "_models.PacketCaptureResult":
        # Sends the initial PUT of the create LRO; ``begin_create`` drives polling.
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PacketCaptureResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-07-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self._create_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'PacketCapture')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # Only 201 (Created) is a success for the initial create request.
        if response.status_code not in [201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize('PacketCaptureResult', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    _create_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}'}  # type: ignore

    async def begin_create(
        self,
        resource_group_name: str,
        network_watcher_name: str,
        packet_capture_name: str,
        parameters: "_models.PacketCapture",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.PacketCaptureResult"]:
        """Create and start a packet capture on the specified VM.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_watcher_name: The name of the network watcher.
        :type network_watcher_name: str
        :param packet_capture_name: The name of the packet capture session.
        :type packet_capture_name: str
        :param parameters: Parameters that define the create packet capture operation.
        :type parameters: ~azure.mgmt.network.v2020_07_01.models.PacketCapture
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either PacketCaptureResult or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_07_01.models.PacketCaptureResult]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PacketCaptureResult"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # The lambda passes the raw PipelineResponse through so the poller
            # (not the initial call) performs the final deserialization.
            raw_result = await self._create_initial(
                resource_group_name=resource_group_name,
                network_watcher_name=network_watcher_name,
                packet_capture_name=packet_capture_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )

        # Drop request-scoped kwargs so they are not re-applied on every poll.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            deserialized = self._deserialize('PacketCaptureResult', pipeline_response)

            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }

        # Create polls the Azure-AsyncOperation header (unlike the other LROs
        # in this class, which poll the Location header).
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}'}  # type: ignore

    async def get(
        self,
        resource_group_name: str,
        network_watcher_name: str,
        packet_capture_name: str,
        **kwargs: Any
    ) -> "_models.PacketCaptureResult":
        """Gets a packet capture session by name.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_watcher_name: The name of the network watcher.
        :type network_watcher_name: str
        :param packet_capture_name: The name of the packet capture session.
        :type packet_capture_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: PacketCaptureResult, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2020_07_01.models.PacketCaptureResult
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PacketCaptureResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-07-01"
        accept = "application/json"

        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize('PacketCaptureResult', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}'}  # type: ignore

    async def _delete_initial(
        self,
        resource_group_name: str,
        network_watcher_name: str,
        packet_capture_name: str,
        **kwargs: Any
    ) -> None:
        # Sends the initial DELETE of the delete LRO; ``begin_delete`` drives polling.
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-07-01"
        accept = "application/json"

        # Construct URL
        url = self._delete_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 202 = delete accepted (async); 204 = already gone / no content.
        if response.status_code not in [202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})
    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}'}  # type: ignore

    async def begin_delete(
        self,
        resource_group_name: str,
        network_watcher_name: str,
        packet_capture_name: str,
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """Deletes the specified packet capture session.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_watcher_name: The name of the network watcher.
        :type network_watcher_name: str
        :param packet_capture_name: The name of the packet capture session.
        :type packet_capture_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # Pass the raw PipelineResponse through; the poller owns completion handling.
            raw_result = await self._delete_initial(
                resource_group_name=resource_group_name,
                network_watcher_name=network_watcher_name,
                packet_capture_name=packet_capture_name,
                cls=lambda x,y,z: x,
                **kwargs
            )

        # Drop request-scoped kwargs so they are not re-applied on every poll.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # Delete has no body; only invoke the user callback, else return None.
            if cls:
                return cls(pipeline_response, None, {})

        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }

        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}'}  # type: ignore

    async def _stop_initial(
        self,
        resource_group_name: str,
        network_watcher_name: str,
        packet_capture_name: str,
        **kwargs: Any
    ) -> None:
        # Sends the initial POST of the stop LRO; ``begin_stop`` drives polling.
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-07-01"
        accept = "application/json"

        # Construct URL
        url = self._stop_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.post(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})
    _stop_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}/stop'}  # type: ignore

    async def begin_stop(
        self,
        resource_group_name: str,
        network_watcher_name: str,
        packet_capture_name: str,
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """Stops a specified packet capture session.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_watcher_name: The name of the network watcher.
        :type network_watcher_name: str
        :param packet_capture_name: The name of the packet capture session.
        :type packet_capture_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # Pass the raw PipelineResponse through; the poller owns completion handling.
            raw_result = await self._stop_initial(
                resource_group_name=resource_group_name,
                network_watcher_name=network_watcher_name,
                packet_capture_name=packet_capture_name,
                cls=lambda x,y,z: x,
                **kwargs
            )

        # Drop request-scoped kwargs so they are not re-applied on every poll.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # Stop has no body; only invoke the user callback, else return None.
            if cls:
                return cls(pipeline_response, None, {})

        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }

        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_stop.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}/stop'}  # type: ignore

    async def _get_status_initial(
        self,
        resource_group_name: str,
        network_watcher_name: str,
        packet_capture_name: str,
        **kwargs: Any
    ) -> "_models.PacketCaptureQueryStatusResult":
        # Sends the initial POST of the queryStatus LRO; ``begin_get_status`` drives polling.
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PacketCaptureQueryStatusResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-07-01"
        accept = "application/json"

        # Construct URL
        url = self._get_status_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.post(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        # Generated per-status branches; both 200 and 202 carry the same payload shape.
        if response.status_code == 200:
            deserialized = self._deserialize('PacketCaptureQueryStatusResult', pipeline_response)

        if response.status_code == 202:
            deserialized = self._deserialize('PacketCaptureQueryStatusResult', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    _get_status_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}/queryStatus'}  # type: ignore

    async def begin_get_status(
        self,
        resource_group_name: str,
        network_watcher_name: str,
        packet_capture_name: str,
        **kwargs: Any
    ) -> AsyncLROPoller["_models.PacketCaptureQueryStatusResult"]:
        """Query the status of a running packet capture session.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_watcher_name: The name of the Network Watcher resource.
        :type network_watcher_name: str
        :param packet_capture_name: The name given to the packet capture session.
        :type packet_capture_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either PacketCaptureQueryStatusResult or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_07_01.models.PacketCaptureQueryStatusResult]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PacketCaptureQueryStatusResult"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # Pass the raw PipelineResponse through; deserialization happens in the poller.
            raw_result = await self._get_status_initial(
                resource_group_name=resource_group_name,
                network_watcher_name=network_watcher_name,
                packet_capture_name=packet_capture_name,
                cls=lambda x,y,z: x,
                **kwargs
            )

        # Drop request-scoped kwargs so they are not re-applied on every poll.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            deserialized = self._deserialize('PacketCaptureQueryStatusResult', pipeline_response)

            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }

        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_get_status.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}/queryStatus'}  # type: ignore

    def list(
        self,
        resource_group_name: str,
        network_watcher_name: str,
        **kwargs: Any
    ) -> AsyncIterable["_models.PacketCaptureListResult"]:
        """Lists all packet capture sessions within the specified resource group.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_watcher_name: The name of the Network Watcher resource.
        :type network_watcher_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either PacketCaptureListResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_07_01.models.PacketCaptureListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PacketCaptureListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-07-01"
        accept = "application/json"

        def prepare_request(next_link=None):
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # First page: build the URL from the operation metadata.
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # Subsequent pages: the service supplies a complete next_link URL.
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        async def extract_data(pipeline_response):
            deserialized = self._deserialize('PacketCaptureListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            # PacketCaptureListResult has no nextLink, hence the None continuation.
            return None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures'}  # type: ignore
| |
#!/usr/bin/env python
import os
import sys
import unittest
sys.path.insert(0, "..")
from py010parser import parse_file, parse_string, c_ast
def template_path(template_name):
    """Return the path of a template file inside the local ``templates`` dir."""
    base_dir = os.path.dirname(__file__)
    return os.path.join(base_dir, "templates", template_name)
class TestBasicParse(unittest.TestCase):
    """Parsing smoke tests for py010parser.

    Each test feeds a small 010-editor template to parse_string/parse_file.
    Most tests only assert that parsing completes without raising; a few
    inspect the resulting AST nodes.

    Fix: a second, identical copy of the while-loop test used to be named
    ``test_single_decl_in_do_while_loop`` and silently shadowed the real
    do-while test defined after it; the dead duplicate has been removed so
    the actual do-while test runs.
    """

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_basic_struct(self):
        res = parse_string("""
            struct NAME {
                int blah;
            } name;
        """, optimize=True, predefine_types=False)

    def test_basic_struct_with_args(self):
        res = parse_string("""
            struct NAME (int a) {
                int blah;
            } name;
        """, optimize=True, predefine_types=False)

    def test_basic_struct_with_args2(self):
        res = parse_string("""
            typedef struct (int a) {
                int blah;
            } SPECIAL_STRUCT;
        """, optimize=True, predefine_types=False)

    def test_basic_struct_with_args_calling(self):
        res = parse_string("""
            typedef struct (int a) {
                int blah;
            } SPECIAL_STRUCT;
            SPECIAL_STRUCT test(10);
            int blah() {
                return 10;
            }
        """, optimize=True, predefine_types=False)
        # The second child is the SPECIAL_STRUCT declaration with its args.
        decl = res.children()[1][1]
        self.assertTrue(isinstance(decl.type, c_ast.StructCallTypeDecl))
        decl_args = decl.type.args.children()
        self.assertEqual(decl_args[0][1].value, "10")
        self.assertEqual(len(decl_args), 1)

    def test_struct_with_args_calling_not_func_decl(self):
        res = parse_string("""
            typedef struct(int a) {
                char chars[a];
            } test_structure;
            local int size = 4;
            test_structure test(size); // this SHOULD NOT be a function declaration
        """, predefine_types=False)
        decl = res.children()[2][1]
        self.assertEqual(decl.type.__class__, c_ast.StructCallTypeDecl)
        self.assertEqual(decl.type.args.__class__, c_ast.ExprList)

    def test_struct_with_args_calling_not_func_decl2(self):
        res = parse_string("""
            typedef struct(int a) {
                char chars[a];
            } test_structure;
            local int size = 4;
            test_structure test(size); // this SHOULD NOT be a function declaration
        """, predefine_types=False)
        decl = res.children()[2][1]
        self.assertEqual(decl.type.__class__, c_ast.StructCallTypeDecl)
        self.assertEqual(decl.type.args.__class__, c_ast.ExprList)

    def test_struct_with_args_calling_not_func_decl3(self):
        res = parse_string("""
            typedef struct(int a, int b) {
                char chars1[a];
                char chars2[b];
            } test_structure;
            local int size = 4;
            test_structure test(size, 5); // this SHOULD NOT be a function declaration
        """, predefine_types=False)
        decl = res.children()[2][1]
        self.assertEqual(decl.type.__class__, c_ast.StructCallTypeDecl)
        self.assertEqual(decl.type.args.__class__, c_ast.ExprList)

    def test_sizeof_unary(self):
        res = parse_string("""
            sizeof(this);
        """)

    def test_exists_unary(self):
        res = parse_string("""
            exists(this);
        """)

    def test_parentof_unary(self):
        res = parse_string("""
            parentof(this);
        """)

    def test_function_exists_unary(self):
        res = parse_string("""
            function_exists(this);
        """)

    def test_startof_unary(self):
        res = parse_string("""
            startof(this);
        """)

    def test_bitfield_in_if(self):
        res = parse_string("""
            struct {
                if(1) {
                    int field1:16;
                    //int field1;
                }
            } blah;
        """, optimize=True, predefine_types=False)
        bitfield_decl = res.children()[0][1].type.type.children()[0][1].iftrue.children()[0][1]
        self.assertNotEqual(type(bitfield_decl), dict)

    def test_bitfield_outside_of_struct(self):
        res = parse_string("""
            uint blah1:4;
            uint blah2:8;
            uint blah3:4;
        """, optimize=True, predefine_types=True)

    def test_basic(self):
        res = parse_string("""
            struct NAME {
                int stringLength;
                char name[stringLength];
            } name;
        """, optimize=True)

    def test_if_in_struct(self):
        res = parse_string("""
            struct BLAH {
                int a:1;
                int b:2;
                int c:29;
                if(hello) {
                    b = 10;
                }
            } blah;
        """, optimize=True)

    def test_declaration_in_struct(self):
        res = parse_string("""
            int c;
            switch(c) {
                case 1:
                    c++;
                case 2:
                    int c;
            }
        """, optimize=True)

    def test_declaration_in_if(self):
        res = parse_string("""
            if(1) {
                int c;
            } else {
                int b;
            }
        """, optimize=True)

    def test_switch_in_struct(self):
        res = parse_string("""
            struct BLAH {
                int c;
                switch(c) {
                    case 1:
                        int aa;
                    case 2:
                        int bb;
                    default:
                        int cc;
                }
            } blah;
        """, optimize=True)

    def test_nested_structs(self):
        res = parse_string("""
            struct FILE {
                struct HEADER {
                    char type[4];
                    int version;
                    int numRecords;
                } header;
                struct RECORD {
                    int employeeId;
                    char name[40];
                    float salary;
                } record[ header.numRecords ];
            } file;
        """, optimize=True)

    # http://www.sweetscape.com/010editor/manual/TemplateVariables.htm
    def test_local_keyword(self):
        res = parse_string("""
            local int a;
            local int b;
        """, optimize=True)

    def test_metadata(self):
        res = parse_string("""
            local int a <hidden=true>;
        """, optimize=True)

    def test_typedef(self):
        res = parse_string("""
            typedef unsigned int UINT2;
            UINT2 blah;
        """, optimize=True)

    def test_value_types(self):
        res = parse_string("""
            time_t var1;
            OLETIME var2;
            FILETIME var3;
            DOSTIME var4;
            DOSDATE var5;
            HFLOAT var6;
            hfloat var7;
            DOUBLE var8;
            double var9;
            FLOAT var10;
            float var11;
            __uint64 var12;
            QWORD var13;
            UINT64 var14;
            UQUAD var15;
            uquad var16;
            uint64 var17;
            __int64 var18;
            INT64 var19;
            QUAD var20;
            quad var21;
            int64 var22;
            DWORD var23;
            ULONG var24;
            UINT32 var25;
            UINT var26;
            ulong var27;
            uint32 var28;
            uint var29;
            LONG var30;
            INT32 var31;
            INT var32;
            long var33;
            int32 var34;
            int var35;
            WORD var36;
            UINT16 var37;
            USHORT var38;
            uint16 var39;
            ushort var40;
            INT16 var41;
            SHORT var42;
            int16 var43;
            short var44;
            UBYTE var45;
            UCHAR var46;
            ubyte var47;
            uchar var48;
            BYTE var49;
            CHAR var50;
            byte var51;
            char var52;
            string var53;
            wstring var54;
            wchar_t var55;
        """, optimize=True)

    def test_block_item_at_root(self):
        # had to get rid of the default int ret val on functions
        # from pycparser
        res = parse_string("""
            int a = 10;
            void some_function(int num) {
                some_function();
            }
            a++;
            some_function();
        """, optimize=True)

    def test_pass_by_reference(self):
        res = parse_string("""
            void some_function(int &num, int &num2) {
            }
            void some_function(int &num2) {
            }
        """, optimize=True)

    def test_enum_types(self):
        # note that there have been problems using a built-in
        # type (int/float/etc) vs the typedefd ones, TYPEID vs
        res = parse_string("""
            enum <ulong> COLORS {
                WHITE = 1
            } var1;
            enum <int> COLORS {
                WHITE = 1
            } var1;
            enum IFD_dirtype {
                IFD_TYPE_EXIF = 1,
                IFD_TYPE_GEOTAG,
                IFD_TYPE_CASIO_QV_R62,
            };
            enum {
                TEST,
                TEST2
            } blah;
        """, optimize=True)

    def test_struct_bitfield_with_metadata(self):
        res = parse_string("""
            typedef struct tgCifDirEntry {
                uint16 storage_method : 2;
                uint16 data_type : 3;
                uint16 id_code : 11 <format=hex>;
            } CifDirEntry <read=ReadCifDirEntry>;
        """, optimize=True)

    def test_untypedefd_enum_as_typeid(self):
        res = parse_string("""
            enum <ulong> BLAH {
                BLAH1, BLAH2, BLAH3
            };
            local BLAH x;
        """, optimize=True)

    def test_initializer_in_struct(self):
        res = parse_string("""
            local int b = 11;
            typedef struct BLAH {
                local int a = 10;
                int a:10;
            } blah;
        """, optimize=True)

    def test_nested_bitfield_in_struct(self):
        res = parse_string("""
            typedef struct BLAH {
                int a;
                switch(a) {
                    case 10:
                        int b:10;
                    default:
                        int c:10;
                }
            } blah;
        """, optimize=True)

    def test_single_decl_in_for_loop(self):
        res = parse_string("""
            for(j = 0; j < 10; j++)
                ushort blah;
        """, optimize=True)

    def test_single_decl_in_while_loop(self):
        res = parse_string("""
            while(1)
                ushort blah;
        """, optimize=True)

    def test_single_decl_in_do_while_loop(self):
        res = parse_string("""
            do
                ushort blah;
            while(1);
        """, optimize=True)

    def test_single_decl_in_if(self):
        res = parse_string("""
            if(1)
                ushort blah;
        """, optimize=True)

    def test_single_decl_in_if_else(self):
        res = parse_string("""
            if(1)
                ushort blah;
            else
                ushort blah;

            if(1) {
                ushort blah;
            } else
                ushort blah;

            if(1)
                ushort blah;
            else {
                ushort blah;
            }

            if(1) {
                ushort blah;
            } else {
                ushort blah;
            }
        """, optimize=True)

    def test_implicit_struct_typedef(self):
        res = parse_string("""
            struct Blah { int a; } blah;
            Blah b;
        """, optimize=True)

    # I think we'll make a break from 010 syntax here...
    # it's too ridiculous to me to allow types that have
    # not yet been defined
    def test_runtime_declared_type(self):
        res = parse_string("""
            void ReadAscString1(StrAscii1 &s) {
                ;
            }
        """, optimize=True, predefine_types=False)

    def test_metadata_with_string_value(self):
        res = parse_string("""
            int a <comment="this is a comment", key=val>;
            int a <comment="this is a comment">;
        """, optimize=True)

    def test_large_template(self):
        res = parse_file(template_path("JPGTemplate.bt"))

    def test_png_template(self):
        res = parse_file(template_path("PNGTemplate.bt"))

    def test_preprocessor_with_string(self):
        res = parse_string("""
            //this shouldn't cause any problems
            int a;
        """, optimize=True)

    def test_metadata_with_space1(self):
        res = parse_string("""
            int a < key1 = value1 >;
        """, optimize=True)

    def test_metadata_with_space2(self):
        res = parse_string("""
            int a < key1 = value1 , key2 = value2 >;
        """, optimize=True)

    def test_two_part_struct_decl(self):
        res = parse_string("""
            struct StructTest;

            StructTest testing;
        """, optimize=True)
# Allow running this test module directly (python <file>) as well as via a
# test runner that imports it.
if __name__ == "__main__":
    unittest.main()
| |
'''
userManager for Docklet
provide a class for managing users and usergroups in Docklet
Warning: in some early versions, "token" stood for an instance of class model.User;
now it stands for a string that can be parsed to recover that instance.
In all functions decorated with "@administration_required" or "@administration_or_self_required", "token" is the instance.
Original author: Liu Peidong
'''
from utils.model import db, User, UserGroup, Notification, UserUsage, LoginMsg, LoginFailMsg
from functools import wraps
import os, subprocess, math
import hashlib
import pam
from base64 import b64encode
from utils import env
from master.settings import settings
import smtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.header import Header
from datetime import datetime, timedelta
import json
from utils.log import logger
from utils.lvmtool import *
# Shared PAM authenticator instance used by userManager.auth_pam().
PAM = pam.pam()
# Root of Docklet's on-disk state; global/sys/* config files live under it.
fspath = env.getenv('FS_PREFIX')
# DATA_QUOTA == "True" enables per-user NFS data quotas, applied by running
# the DATA_QUOTA_CMD template command.
data_quota = env.getenv('DATA_QUOTA')
data_quota_cmd = env.getenv('DATA_QUOTA_CMD')
# The site-specific external-login plugin is only imported when enabled.
if (env.getenv('EXTERNAL_LOGIN').lower() == 'true'):
    from plugin import external_receive
def administration_required(func):
    """Decorator: restrict a call to users in the 'admin' or 'root' group.

    The wrapped function must receive a ``cur_user`` keyword argument (a
    model.User instance); otherwise an error dict is returned instead of
    calling through.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        if 'cur_user' not in kwargs:
            return {"success": 'false', "reason": "Cannot get cur_user"}
        caller = kwargs['cur_user']
        if caller.user_group in ('admin', 'root'):
            return func(*args, **kwargs)
        return {"success": 'false', "reason": 'Unauthorized Action'}
    return wrapper
def administration_or_self_required(func):
    """Decorator: allow admins/root, or a user operating on their own account.

    Requires both ``cur_user`` and ``user`` keyword arguments; access is
    granted when cur_user is in the admin/root group or when the two
    usernames match.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        if 'cur_user' not in kwargs or 'user' not in kwargs:
            return {"success": 'false', "reason": "Cannot get cur_user or user"}
        caller = kwargs['cur_user']
        target = kwargs['user']
        allowed = (caller.user_group in ('admin', 'root')
                   or caller.username == target.username)
        if allowed:
            return func(*args, **kwargs)
        return {"success": 'false', "reason": 'Unauthorized Action'}
    return wrapper
def token_required(func):
    """Decorator: require that a ``cur_user`` keyword argument is present.

    Performs no authorization beyond the argument's presence; the token has
    already been resolved to a User instance by the caller.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        if 'cur_user' in kwargs:
            return func(*args, **kwargs)
        return {"success": 'false', "reason": "Cannot get cur_user"}
    return wrapper
def send_activated_email(to_address, username):
    """Email *to_address* an HTML notice that account *username* is activated.

    Silently does nothing when EMAIL_FROM_ADDRESS is not configured.
    NOTE(review): ``s.connect()`` is called with no host, i.e. it assumes a
    local SMTP server on localhost:25 — confirm deployment assumption.
    """
    email_from_address = settings.get('EMAIL_FROM_ADDRESS')
    # Quoted-empty placeholders ('' / "" with literal quotes) mean "unset".
    if (email_from_address in ['\'\'', '\"\"', '']):
        return
    #text = 'Dear '+ username + ':\n' + ' Your account in docklet has been activated'
    text = '<html><h4>Dear '+ username + ':</h4>'
    text += '''<p> Your account in <a href='%s'>%s</a> has been activated</p>
    <p> Enjoy your personal workspace in the cloud !</p>
    <br>
    <p> Note: DO NOT reply to this email!</p>
    <br><br>
    <p> <a href='http://docklet.unias.org'>Docklet Team</a>, SEI, PKU</p>
    ''' % (env.getenv("PORTAL_URL"), env.getenv("PORTAL_URL"))
    text += '<p>'+ str(datetime.now()) + '</p>'
    text += '</html>'
    subject = 'Docklet account activated'
    msg = MIMEMultipart()
    textmsg = MIMEText(text,'html','utf-8')
    msg['Subject'] = Header(subject, 'utf-8')
    msg['From'] = email_from_address
    msg['To'] = to_address
    msg.attach(textmsg)
    s = smtplib.SMTP()
    s.connect()
    s.sendmail(email_from_address, to_address, msg.as_string())
    s.close()
def send_remind_activating_email(username):
    """Notify the configured administrators that *username* requests activation.

    Silently returns when EMAIL_FROM_ADDRESS or ADMIN_EMAIL_ADDRESS is not
    configured. ADMIN_EMAIL_ADDRESS may hold several space-separated
    addresses, optionally wrapped in double quotes. Send failures are logged,
    not raised.
    """
    #admin_email_address = env.getenv('ADMIN_EMAIL_ADDRESS')
    nulladdr = ['\'\'', '\"\"', '']
    email_from_address = settings.get('EMAIL_FROM_ADDRESS')
    admin_email_address = settings.get('ADMIN_EMAIL_ADDRESS')
    if (email_from_address in nulladdr or admin_email_address in nulladdr):
        return
    #text = 'Dear '+ username + ':\n' + ' Your account in docklet has been activated'
    text = '<html><h4>Dear '+ 'admin' + ':</h4>'
    text += '''<p> An activating request for %s in <a href='%s'>%s</a> has been sent</p>
    <p> Please check it !</p>
    <br/><br/>
    <p> Docklet Team, SEI, PKU</p>
    ''' % (username, env.getenv("PORTAL_URL"), env.getenv("PORTAL_URL"))
    text += '<p>'+ str(datetime.utcnow()) + '</p>'
    text += '</html>'
    subject = 'An activating request in Docklet has been sent'
    # Strip surrounding double quotes before splitting the recipient list.
    if admin_email_address[0] == '"':
        admins_addr = admin_email_address[1:-1].split(" ")
    else:
        admins_addr = admin_email_address.split(" ")
    # Build the comma-separated To: header from the recipient list.
    alladdr=""
    for addr in admins_addr:
        alladdr = alladdr+addr+", "
    alladdr=alladdr[:-2]
    msg = MIMEMultipart()
    textmsg = MIMEText(text,'html','utf-8')
    msg['Subject'] = Header(subject, 'utf-8')
    msg['From'] = email_from_address
    msg['To'] = alladdr
    msg.attach(textmsg)
    s = smtplib.SMTP()
    s.connect()
    try:
        s.sendmail(email_from_address, admins_addr, msg.as_string())
    except Exception as e:
        logger.error(e)
    s.close()
class userManager:
    def __init__(self, username = 'root', password = None):
        '''
        Try to create the database when there is none
        initialize 'root' user and 'root' & 'primary' group

        Bootstrap order matters: tables first, then the root account, then
        the on-disk quota / quotainfo / lxc.default config files (each seeded
        only when missing).
        '''
        try:
            # Probe the DB; any failure is taken to mean tables are missing.
            User.query.all()
        except:
            db.create_all()
            if password == None:
                # No password supplied: generate a random one and persist it
                # for the operator under FS_PREFIX/local/.
                password = os.urandom(16)
                password = b64encode(password).decode('utf-8')
                fsdir = env.getenv('FS_PREFIX')
                f = open(fsdir + '/local/generated_password.txt', 'w')
                f.write("User=%s\nPass=%s\n"%(username, password))
                f.close()
            # Passwords are stored as SHA-512 hex digests, never plain text.
            sys_admin = User(username, hashlib.sha512(password.encode('utf-8')).hexdigest())
            sys_admin.status = 'normal'
            sys_admin.nickname = 'root'
            sys_admin.description = 'Root_User'
            sys_admin.user_group = 'root'
            sys_admin.auth_method = 'local'
            db.session.add(sys_admin)
            path = env.getenv('DOCKLET_LIB')
            # Create the root user's workspace on disk via the helper script.
            subprocess.call([path+"/master/userinit.sh", username])
            db.session.commit()
        if not os.path.exists(fspath+"/global/sys/quota"):
            # Seed per-group quotas for the four built-in groups.
            groupfile = open(fspath+"/global/sys/quota",'w')
            groups = []
            groups.append({'name':'root', 'quotas':{ 'cpu':'4', 'disk':'2000', 'data':'100', 'memory':'2000', 'image':'10', 'idletime':'24', 'vnode':'8', 'portmapping': '8', 'input_rate_limit':'10000', 'output_rate_limit':'10000'}})
            groups.append({'name':'admin', 'quotas':{'cpu':'4', 'disk':'2000', 'data':'100', 'memory':'2000', 'image':'10', 'idletime':'24', 'vnode':'8', 'portmapping': '8', 'input_rate_limit':'10000', 'output_rate_limit':'10000'}})
            groups.append({'name':'primary', 'quotas':{'cpu':'4', 'disk':'2000', 'data':'100', 'memory':'2000', 'image':'10', 'idletime':'24', 'vnode':'8', 'portmapping': '8', 'input_rate_limit':'10000', 'output_rate_limit':'10000'}})
            groups.append({'name':'foundation', 'quotas':{'cpu':'4', 'disk':'2000', 'data':'100', 'memory':'2000', 'image':'10', 'idletime':'24', 'vnode':'8', 'portmapping': '8', 'input_rate_limit':'10000', 'output_rate_limit':'10000'}})
            groupfile.write(json.dumps(groups))
            groupfile.close()
        if not os.path.exists(fspath+"/global/sys/quotainfo"):
            # Seed quota metadata: the default group for new users plus a
            # human-readable hint for every quota knob.
            quotafile = open(fspath+"/global/sys/quotainfo",'w')
            quotas = {}
            quotas['default'] = 'foundation'
            quotas['quotainfo'] = []
            quotas['quotainfo'].append({'name':'cpu', 'hint':'the cpu quota, number of cores, e.g. 4'})
            quotas['quotainfo'].append({'name':'memory', 'hint':'the memory quota, number of MB , e.g. 4000'})
            quotas['quotainfo'].append({'name':'disk', 'hint':'the disk quota, number of MB, e.g. 4000'})
            quotas['quotainfo'].append({'name':'data', 'hint':'the quota of data space, number of GB, e.g. 100'})
            quotas['quotainfo'].append({'name':'image', 'hint':'how many images the user can save, e.g. 10'})
            quotas['quotainfo'].append({'name':'idletime', 'hint':'will stop cluster after idletime, number of hours, e.g. 24'})
            quotas['quotainfo'].append({'name':'vnode', 'hint':'how many containers the user can have, e.g. 8'})
            quotas['quotainfo'].append({'name':'portmapping', 'hint':'how many ports the user can map, e.g. 8'})
            quotas['quotainfo'].append({'name':'input_rate_limit', 'hint':'the ingress speed of the network, number of kbps. 0 means the rate are unlimited.'})
            quotas['quotainfo'].append({'name':'output_rate_limit', 'hint':'the egress speed of the network, number of kbps. 0 means the rate are unlimited.'})
            quotafile.write(json.dumps(quotas))
            quotafile.close()
        if not os.path.exists(fspath+"/global/sys/lxc.default"):
            # Seed default lxc container settings.
            # NOTE(review): the local name 'settings' shadows the imported
            # master.settings for the rest of this method.
            settingfile = open(fspath+"/global/sys/lxc.default", 'w')
            settings = {}
            settings['cpu'] = "2"
            settings["memory"] = "2000"
            settings["disk"] = "2000"
            settingfile.write(json.dumps(settings))
            settingfile.close()
        try:
            # Same probe-and-create dance for the usage/login-history tables.
            UserUsage.query.all()
            LoginMsg.query.all()
            LoginFailMsg.query.all()
        except:
            db.create_all()
def auth_local(self, username, password):
password = hashlib.sha512(password.encode('utf-8')).hexdigest()
user = User.query.filter_by(username = username).first()
if (user == None):
return {"success":'false', "reason": "User does not exist!"}
if (user.password != password):
return {"success":'false', "reason": "Wrong password!"}
result = {
"success": 'true',
"data":{
"username" : user.username,
"avatar" : user.avatar,
"nickname" : user.nickname,
"description" : user.description,
"status" : user.status,
"group" : user.user_group,
"token" : user.generate_auth_token(),
}
}
return result
    def auth_pam(self, username, password):
        """Authenticate against the host's PAM stack.

        A PAM-authenticated user with no local record yet gets an account
        auto-registered (status 'init', group 'primary', placeholder
        password) before profile data and a token are returned.
        """
        user = User.query.filter_by(username = username).first()
        pamresult = PAM.authenticate(username, password)
        # Reject when PAM says no, or when a local account exists that is not
        # marked for PAM login (prevents hijacking a 'local' account via PAM).
        if (pamresult == False or (user != None and user.auth_method != 'pam')):
            return {"success":'false', "reason": "Wrong password or wrong login method!"}
        if (user == None):
            # First PAM login: create and register a matching Docklet account.
            newuser = self.newuser();
            newuser.username = username
            newuser.password = "no_password"
            newuser.nickname = username
            newuser.status = "init"
            newuser.user_group = "primary"
            newuser.auth_method = "pam"
            self.register(user = newuser)
            user = User.query.filter_by(username = username).first()
        result = {
            "success": 'true',
            "data":{
                "username" : user.username,
                "avatar" : user.avatar,
                "nickname" : user.nickname,
                "description" : user.description,
                "status" : user.status,
                "group" : user.user_group,
                "token" : user.generate_auth_token(),
            }
        }
        return result
def auth_external(self, form, userip=""):
if (env.getenv('EXTERNAL_LOGIN') != 'True'):
failed_result = {'success': 'false', 'reason' : 'external auth disabled'}
return failed_result
result = external_receive.external_auth_receive_request(form)
if (result['success'] != 'True'):
failed_result = {'success':'false', 'result': result}
return failed_result
username = result['username']
logger.info("External login success: username=%s, userip=%s" % (username, userip))
loginmsg = LoginMsg(username,userip)
db.session.add(loginmsg)
db.session.commit()
user = User.query.filter_by(username = username).first()
if (user != None and user.auth_method == result['auth_method']):
result = {
"success": 'true',
"data":{
"username" : user.username,
"avatar" : user.avatar,
"nickname" : user.nickname,
"description" : user.description,
"status" : user.status,
"group" : user.user_group,
"token" : user.generate_auth_token(),
}
}
return result
if (user != None and user.auth_method != result['auth_method']):
result = {'success': 'false', 'reason': 'other kinds of account already exists'}
return result
#user == None , register an account for external user
newuser = self.newuser();
newuser.username = result['username']
newuser.password = result['password']
newuser.avatar = result['avatar']
newuser.nickname = result['nickname']
newuser.description = result['description']
newuser.e_mail = result['e_mail']
newuser.truename = result['truename']
newuser.student_number = result['student_number']
newuser.status = result['status']
newuser.user_group = result['user_group']
newuser.auth_method = result['auth_method']
newuser.department = result['department']
newuser.tel = result['tel']
self.register(user = newuser)
user = User.query.filter_by(username = username).first()
result = {
"success": 'true',
"data":{
"username" : user.username,
"avatar" : user.avatar,
"nickname" : user.nickname,
"description" : user.description,
"status" : user.status,
"group" : user.user_group,
"token" : user.generate_auth_token(),
}
}
return result
def auth(self, username, password, userip=""):
'''
authenticate a user by username & password
return a token as well as some user information
'''
user = User.query.filter_by(username = username).first()
failmsg = LoginFailMsg.query.filter_by(username = username).first()
result = {}
if failmsg == None:
newfailmsg = LoginFailMsg(username)
db.session.add(newfailmsg)
db.session.commit()
failmsg = newfailmsg
elif failmsg.failcnt > 40:
reason = "You have been input wrong password over 40 times. You account will be locked. Please contact administrators for help."
logger.info("Login failed: userip=%s reason:%s" % (userip,reason))
return {'success':'false', 'reason':reason}
elif datetime.now() < failmsg.bantime:
reason = "You have been input wrong password %d times. Please try after %s." % (failmsg.failcnt, failmsg.bantime.strftime("%Y-%m-%d %H:%M:%S"))
logger.info("Login failed: userip=%s reason:%s" % (userip,reason))
return {'success':'false', 'reason':reason}
if (user == None or user.auth_method =='local'):
result = self.auth_local(username, password)
elif (user.auth_method == 'pam'):
result = self.auth_pam(username, password)
else:
result = {'success':'false', 'reason':'auth_method error!'}
if result['success'] == 'true':
loginmsg = LoginMsg(result['data']['username'],userip)
failmsg.failcnt = 0
db.session.add(loginmsg)
db.session.commit()
logger.info("Login success: username=%s, userip=%s" % (result['data']['username'], userip))
else:
logger.info("Login failed: userip=%s" % (userip))
failmsg.failcnt += 1
if failmsg.failcnt == 10:
failmsg.bantime = datetime.now() + timedelta(minutes=10)
elif failmsg.failcnt == 20:
failmsg.bantime = datetime.now() + timedelta(minutes=100)
elif failmsg.failcnt == 30:
failmsg.bantime = datetime.now() + timedelta(days=1)
db.session.commit()
return result
def auth_token(self, token):
'''
authenticate a user by a token
when succeeded, return the database iterator
otherwise return None
'''
user = User.verify_auth_token(token)
return user
def set_nfs_quota_bygroup(self,groupname, quota):
if not data_quota == "True":
return
users = User.query.filter_by(user_group = groupname).all()
for user in users:
self.set_nfs_quota(user.username, quota)
def set_nfs_quota(self, username, quota):
if not data_quota == "True":
return
nfspath = "/users/%s/data" % username
try:
cmd = data_quota_cmd % (nfspath,quota+"GB")
sys_run(cmd.strip('"'))
except Exception as e:
logger.error(e)
@administration_required
def query(*args, **kwargs):
'''
Usage: query(username = 'xxx', cur_user = token_from_auth)
|| query(ID = a_integer, cur_user = token_from_auth)
Provide information about one user that administrators need to use
'''
if ( 'ID' in kwargs):
user = User.query.filter_by(id = kwargs['ID']).first()
if (user == None):
return {"success":False, "reason":"User does not exist"}
result = {
"success":'true',
"data":{
"username" : user.username,
"id": user.id,
"password" : user.password,
"avatar" : user.avatar,
"nickname" : user.nickname,
"description" : user.description,
"status" : user.status,
"e_mail" : user.e_mail,
"student_number": user.student_number,
"department" : user.department,
"truename" : user.truename,
"tel" : user.tel,
"register_date" : "%s"%(user.register_date),
"group" : user.user_group,
"description" : user.description,
"beans" : user.beans,
},
"token": user
}
return result
if ( 'username' not in kwargs):
return {"success":'false', "reason":"Cannot get 'username'"}
username = kwargs['username']
user = User.query.filter_by(username = username).first()
if (user == None):
return {"success":'false', "reason":"User does not exist"}
result = {
"success": 'true',
"data":{
"username" : user.username,
"id": user.id,
"password" : user.password,
"avatar" : user.avatar,
"nickname" : user.nickname,
"description" : user.description,
"status" : user.status,
"e_mail" : user.e_mail,
"student_number": user.student_number,
"department" : user.department,
"truename" : user.truename,
"tel" : user.tel,
"register_date" : "%s"%(user.register_date),
"group" : user.user_group,
"beans" : user.beans,
},
"token": user
}
return result
@token_required
def selfQuery(*args, **kwargs):
'''
Usage: selfQuery(cur_user = token_from_auth)
List informantion for oneself
'''
user = kwargs['cur_user']
groupfile = open(fspath+"/global/sys/quota",'r')
groups = json.loads(groupfile.read())
groupfile.close()
group = None
for one_group in groups:
if one_group['name'] == user.user_group:
group = one_group['quotas']
break
else:
for one_group in groups:
if one_group['name'] == "primary":
group = one_group['quotas']
break
result = {
"success": 'true',
"data":{
"username" : user.username,
"id": user.id,
"password" : user.password,
"avatar" : user.avatar,
"nickname" : user.nickname,
"description" : user.description,
"status" : user.status,
"e_mail" : user.e_mail,
"student_number": user.student_number,
"department" : user.department,
"truename" : user.truename,
"tel" : user.tel,
"register_date" : "%s"%(user.register_date),
"group" : user.user_group,
"groupinfo": group,
"beans" : user.beans,
"auth_method": user.auth_method,
},
}
return result
@token_required
def selfModify(*args, **kwargs):
'''
Usage: selfModify(cur_user = token_from_auth, newValue = form)
Modify informantion for oneself
'''
form = kwargs['newValue']
name = form.get('name', None)
value = form.get('value', None)
if (name == None or value == None):
result = {'success': 'false'}
return result
user = User.query.filter_by(username = kwargs['cur_user'].username).first()
if (name == 'nickname'):
user.nickname = value
elif (name == 'description'):
user.description = value
elif (name == 'department'):
user.department = value
elif (name == 'e_mail'):
user.e_mail = value
elif (name == 'tel'):
user.tel = value
elif (name == 'password'):
old_password = hashlib.sha512(form.get('old_value', '').encode('utf-8')).hexdigest()
if (user.password != old_password):
result = {'success': 'false'}
return result
user.password = hashlib.sha512(value.encode('utf-8')).hexdigest()
else:
result = {'success': 'false'}
return result
db.session.commit()
result = {'success': 'true'}
return result
    @token_required
    def usageQuery(self, *args, **kwargs):
        '''
        Usage: usageQuery(cur_user = token_from_auth)
        Query the quota and usage of user

        Returns the caller's group quota, their current resource usage
        (an all-zero record is created on first query), and the lxc default
        settings.
        '''
        cur_user = kwargs['cur_user']
        groupname = cur_user.user_group
        groupinfo = self.groupQuery(name = groupname)['data']
        usage = UserUsage.query.filter_by(username = cur_user.username).first()
        if usage == None:
            # First query for this user: persist a zeroed usage record.
            new_usage = UserUsage(cur_user.username)
            db.session.add(new_usage)
            db.session.commit()
            usageinfo = {
                'username': cur_user.username,
                'cpu': '0',
                'memory': '0',
                'disk': '0'
            }
        else:
            usageinfo = {
                'username': usage.username,
                'cpu': usage.cpu,
                'memory': usage.memory,
                'disk': usage.disk
            }
        settingfile = open(fspath+"/global/sys/lxc.default" , 'r')
        defaultsetting = json.loads(settingfile.read())
        settingfile.close()
        return {'success': 'true', 'quota' : groupinfo, 'usage' : usageinfo, 'default': defaultsetting }
    @token_required
    def usageInc(self, *args, **kwargs):
        '''
        Usage: usageModify(cur_user = token_from_auth, modification = data_from_form)
        Modify the usage info of user

        Adds the requested cpu/memory/disk to the caller's usage record,
        rejecting requests with non-positive values or that would exceed the
        group quota.
        '''
        cur_user = kwargs['cur_user']
        modification = kwargs['modification']
        logger.info("record usage for user:%s" % cur_user.username)
        groupname = cur_user.user_group
        groupinfo = self.groupQuery(name = groupname)['data']
        usage = UserUsage.query.filter_by(username = cur_user.username).first()
        if usage == None:
            # No record yet: create a zeroed one, then re-read it.
            new_usage = UserUsage(cur_user.username)
            db.session.add(new_usage)
            db.session.commit()
            usage = UserUsage.query.filter_by(username = cur_user.username).first()
        if int(modification['cpu']) <= 0 or int(modification['memory']) <= 0 or int(modification['disk']) <= 0:
            return {'success':False, 'result':"cpu,memory and disk setting cannot less than zero"}
        # Usage columns are stored as strings; convert for arithmetic.
        cpu = int(usage.cpu) + int(modification['cpu'])
        memory = int(usage.memory) + int(modification['memory'])
        disk = int(usage.disk) + int(modification['disk'])
        if cpu > int(groupinfo['cpu']):
            logger.error("cpu quota exceed, user:%s" % cur_user.username)
            return {'success':False, 'result':"cpu quota exceed"}
        if memory > int(groupinfo['memory']):
            logger.error("memory quota exceed, user:%s" % cur_user.username)
            return {'success':False, 'result':"memory quota exceed"}
        if disk > int(groupinfo['disk']):
            logger.error("disk quota exceed, user:%s" % cur_user.username)
            return {'success':False, 'result':"disk quota exceed"}
        usage.cpu = str(cpu)
        usage.memory = str(memory)
        usage.disk = str(disk)
        db.session.commit()
        return {'success':True, 'result':"distribute the resource"}
@token_required
def usageRecover(self, *args, **kwargs):
'''
Usage: usageModify(cur_user = token_from_auth, modification = data_from_form)
Recover the usage info when create container failed
'''
cur_user = kwargs['cur_user']
modification = kwargs['modification']
logger.info("recover usage for user:%s" % cur_user.username)
usage = UserUsage.query.filter_by(username = cur_user.username).first()
if usage == None:
new_usage = UserUsage(cur_user.username)
db.session.add(new_usage)
db.session.commit()
usage = UserUsage.query.filter_by(username = cur_user.username).first()
return True
cpu = int(usage.cpu) - int(modification['cpu'])
memory = int(usage.memory) - int(modification['memory'])
disk = int(usage.disk) - int(modification['disk'])
if cpu < 0:
cpu = 0
if memory < 0:
memory = 0
if disk < 0:
disk = 0
usage.cpu = str(cpu)
usage.memory = str(memory)
usage.disk = str(disk)
db.session.commit()
return {'success':True}
@token_required
def usageRelease(self, *args, **kwargs):
cur_user = kwargs['cur_user']
cpu = kwargs['cpu']
memory = kwargs['memory']
disk = kwargs['disk']
usage = UserUsage.query.filter_by(username = cur_user.username).first()
if usage == None:
new_usage = UserUsage(cur_user.username)
db.session.add(new_usage)
db.session.commit()
return {'success':True}
nowcpu = int(usage.cpu) - int(cpu)
nowmemory = int(usage.memory) - int(memory)
nowdisk = int(usage.disk) - int(disk)
if nowcpu < 0:
nowcpu = 0
if nowmemory < 0:
nowmemory = 0
if nowdisk < 0:
nowdisk = 0
usage.cpu = str(nowcpu)
usage.memory = str(nowmemory)
usage.disk = str(nowdisk)
db.session.commit()
return {'success':True}
def initUsage(*args, **kwargs):
"""
init the usage info when start docklet with init mode
"""
usages = UserUsage.query.all()
for usage in usages:
usage.cpu = "0"
usage.memory = "0"
usage.disk = "0"
db.session.commit()
return True
@administration_required
def userList(*args, **kwargs):
'''
Usage: list(cur_user = token_from_auth)
List all users for an administrator
'''
alluser = User.query.all()
result = {
"success": 'true',
"data":[]
}
for user in alluser:
userinfo = [
user.id,
user.username,
user.truename,
user.e_mail,
user.tel,
"%s"%(user.register_date),
user.status,
user.user_group,
user.beans,
'',
]
result["data"].append(userinfo)
return result
@administration_required
def groupList(*args, **kwargs):
'''
Usage: list(cur_user = token_from_auth)
List all groups for an administrator
'''
groupfile = open(fspath+"/global/sys/quota",'r')
groups = json.loads(groupfile.read())
groupfile.close()
quotafile = open(fspath+"/global/sys/quotainfo",'r')
quotas = json.loads(quotafile.read())
quotafile.close()
result = {
"success": 'true',
"groups": groups,
"quotas": quotas['quotainfo'],
"default": quotas['default'],
}
return result
@administration_required
def change_default_group(*args, **kwargs):
form = kwargs['form']
default_group = form.get('defaultgroup')
quotafile = open(fspath+"/global/sys/quotainfo",'r')
quotas = json.loads(quotafile.read())
quotafile.close()
quotas['default'] = default_group
quotafile = open(fspath+"/global/sys/quotainfo",'w')
quotafile.write(json.dumps(quotas))
quotafile.close()
return { 'success':'true', 'action':'change default group' }
def groupQuery(self, *args, **kwargs):
'''
Usage: groupQuery(name = XXX, cur_user = token_from_auth)
List a group for an administrator
'''
groupfile = open(fspath+"/global/sys/quota",'r')
groups = json.loads(groupfile.read())
groupfile.close()
for group in groups:
if group['name'] == kwargs['name']:
result = {
"success":'true',
"data": group['quotas'],
}
return result
else:
return {"success":False, "reason":"Group does not exist"}
@administration_required
def groupListName(*args, **kwargs):
'''
Usage: grouplist(cur_user = token_from_auth)
List all group names for an administrator
'''
groupfile = open(fspath+"/global/sys/quota",'r')
groups = json.loads(groupfile.read())
groupfile.close()
result = {
"groups": [],
}
for group in groups:
result["groups"].append(group['name'])
return result
@administration_required
def groupModify(self, *args, **kwargs):
'''
Usage: groupModify(newValue = dict_from_form, cur_user = token_from_auth)
'''
groupfile = open(fspath+"/global/sys/quota",'r')
groups = json.loads(groupfile.read())
groupfile.close()
for group in groups:
if group['name'] == kwargs['newValue'].get('groupname',None):
form = kwargs['newValue']
for key in form.keys():
if key == "data":
if not group['quotas'][key] == form.get(key):
self.set_nfs_quota_bygroup(group['name'],form.get(key))
else:
pass
if key == "groupname" or key == "token":
pass
else:
if key == "vnode":
vnode = int(form['vnode'])
val = str(2**(round(math.log(vnode+3, 2))) - 3 )
group["quotas"][key] = val
else:
group['quotas'][key] = form.get(key)
groupfile = open(fspath+"/global/sys/quota",'w')
groupfile.write(json.dumps(groups))
groupfile.close()
return {"success":'true'}
else:
return {"success":'false', "reason":"UserGroup does not exist"}
    @administration_required
    def modify(self, *args, **kwargs):
        '''
        modify a user's information in database
        will send an e-mail when status is changed from 'applying' to 'normal'
        Usage: modify(newValue = dict_from_form, cur_user = token_from_auth)

        Three separate form shapes are handled, checked in this order:
        1. {'Instruction': 'Activate', 'ID': ...}  -- activate a user by id
        2. {'password': ..., 'username': ...}      -- change password only
        3. full profile form keyed by 'username'   -- general field update
        '''
        if ( kwargs['newValue'].get('Instruction', '') == 'Activate'):
            # activation path: flip status and notify the user by e-mail
            user_modify = User.query.filter_by(id = kwargs['newValue'].get('ID', None)).first()
            user_modify.status = 'normal'
            send_activated_email(user_modify.e_mail, user_modify.username)
            db.session.commit()
            return {"success": "true"}
        if ( kwargs['newValue'].get('password', '') != ''):
            # password-change path: store the sha512 hex digest, never plain text
            user_modify = User.query.filter_by(username = kwargs['newValue'].get('username', None)).first()
            new_password = kwargs['newValue'].get('password','')
            new_password = hashlib.sha512(new_password.encode('utf-8')).hexdigest()
            user_modify.password = new_password
            db.session.commit()
            return {"success": "true"}
        user_modify = User.query.filter_by(username = kwargs['newValue'].get('username', None)).first()
        if (user_modify == None):
            return {"success":'false', "reason":"User does not exist"}
        #try:
        form = kwargs['newValue']
        user_modify.truename = form.get('truename', '')
        user_modify.e_mail = form.get('e_mail', '')
        user_modify.department = form.get('department', '')
        user_modify.student_number = form.get('student_number', '')
        user_modify.tel = form.get('tel', '')
        user_modify.user_group = form.get('group', '')
        user_modify.auth_method = form.get('auth_method', '')
        # notify the user exactly on the applying -> normal transition
        if (user_modify.status == 'applying' and form.get('status', '') == 'normal'):
            send_activated_email(user_modify.e_mail, user_modify.username)
        user_modify.status = form.get('status', '')
        #if (form.get('password', '') != ''):
        #new_password = form.get('password','')
        #new_password = hashlib.sha512(new_password.encode('utf-8')).hexdigest()
        #user_modify.password = new_password
        #self.chpassword(cur_user = user_modify, password = form.get('password','no_password'))
        #modify password in another function now
        db.session.commit()
        # re-apply the NFS quota of the (possibly changed) group to the user
        res = self.groupQuery(name=user_modify.user_group)
        if res['success']:
            self.set_nfs_quota(user_modify.username,res['data']['data'])
        return {"success":'true'}
        #except:
        #return {"success":'false', "reason":"Something happened"}
@token_required
def chpassword(*args, **kwargs):
'''
Usage: chpassword(cur_user = token_from_auth, password = 'your_password')
'''
cur_user = kwargs['cur_user']
cur_user.password = hashlib.sha512(kwargs['password'].encode('utf-8')).hexdigest()
def newuser(*args, **kwargs):
'''
Usage : newuser()
The only method to create a new user
call this method first, modify the return value which is a database row instance,then call self.register()
'''
user_new = User('newuser', 'asdf1234')
quotafile = open(fspath+"/global/sys/quotainfo",'r')
quotas = json.loads(quotafile.read())
quotafile.close()
user_new.user_group = quotas['default']
user_new.avatar = 'default.png'
return user_new
    def register(self, *args, **kwargs):
        '''
        Usage: register(user = modified_from_newuser())
        Persist a user created via newuser() and provision its workspace
        (userinit.sh) and NFS quota.  A pre-existing row whose status is
        "init" is treated as a placeholder: it is deleted and the incoming
        row is stored as-is.
        '''
        if (kwargs['user'].username == None or kwargs['user'].username == ''):
            return {"success":'false', "reason": "Empty username"}
        user_check = User.query.filter_by(username = kwargs['user'].username).first()
        if (user_check != None and user_check.status != "init"):
            #for the activating form
            return {"success":'false', "reason": "Unauthorized action"}
        newuser = kwargs['user']
        if (user_check != None and (user_check.status == "init")):
            # replace the "init" placeholder row; password is kept unchanged
            db.session.delete(user_check)
            db.session.commit()
        else:
            # NOTE(review): only this branch hashes the password; the "init"
            # branch above stores newuser.password as-is -- presumably it is
            # already hashed in that flow.  Confirm against callers.
            newuser.password = hashlib.sha512(newuser.password.encode('utf-8')).hexdigest()
        db.session.add(newuser)
        db.session.commit()
        # if newuser status is normal, init some data for this user
        # now initialize for all kind of users
        #if newuser.status == 'normal':
        # create the user's workspace directories via the helper script
        path = env.getenv('DOCKLET_LIB')
        subprocess.call([path+"/master/userinit.sh", newuser.username])
        # apply the NFS quota configured for the user's group
        res = self.groupQuery(name=newuser.user_group)
        if res['success']:
            self.set_nfs_quota(newuser.username,res['data']['data'])
        return {"success":'true'}
@administration_required
def quotaadd(*args, **kwargs):
form = kwargs.get('form')
quotaname = form.get("quotaname")
default_value = form.get("default_value")
hint = form.get("hint")
if (quotaname == None):
return { "success":'false', "reason": "Empty quota name"}
if (default_value == None):
default_value = "--"
groupfile = open(fspath+"/global/sys/quota",'r')
groups = json.loads(groupfile.read())
groupfile.close()
for group in groups:
group['quotas'][quotaname] = default_value
groupfile = open(fspath+"/global/sys/quota",'w')
groupfile.write(json.dumps(groups))
groupfile.close()
quotafile = open(fspath+"/global/sys/quotainfo",'r')
quotas = json.loads(quotafile.read())
quotafile.close()
quotas['quotainfo'].append({'name':quotaname, 'hint':hint})
quotafile = open(fspath+"/global/sys/quotainfo",'w')
quotafile.write(json.dumps(quotas))
quotafile.close()
return {"success":'true'}
@administration_required
def groupadd(*args, **kwargs):
form = kwargs.get('form')
groupname = form.get("groupname")
if (groupname == None):
return {"success":'false', "reason": "Empty group name"}
groupfile = open(fspath+"/global/sys/quota",'r')
groups = json.loads(groupfile.read())
groupfile.close()
group = {
'name': groupname,
'quotas': {}
}
for key in form.keys():
if key == "groupname" or key == "token":
pass
else:
if key == "vnode":
vnode = int(form['vnode'])
val = str(2**(round(math.log(vnode+3, 2))) - 3 )
group['quotas'][key] = val
else:
group['quotas'][key] = form.get(key)
groups.append(group)
groupfile = open(fspath+"/global/sys/quota",'w')
groupfile.write(json.dumps(groups))
groupfile.close()
return {"success":'true'}
@administration_required
def groupdel(*args, **kwargs):
name = kwargs.get('name', None)
if (name == None):
return {"success":'false', "reason": "Empty group name"}
groupfile = open(fspath+"/global/sys/quota",'r')
groups = json.loads(groupfile.read())
groupfile.close()
for group in groups:
if group['name'] == name:
groups.remove(group)
break
groupfile = open(fspath+"/global/sys/quota",'w')
groupfile.write(json.dumps(groups))
groupfile.close()
return {"success":'true'}
@administration_required
def lxcsettingList(*args, **kwargs):
lxcsettingfile = open(fspath+"/global/sys/lxc.default", 'r')
lxcsetting = json.loads(lxcsettingfile.read())
lxcsettingfile.close()
return {"success": 'true', 'data':lxcsetting}
@administration_required
def chlxcsetting(*args, **kwargs):
form = kwargs['form']
lxcsetting = {}
lxcsetting['cpu'] = form['lxcCpu']
lxcsetting['memory'] = form['lxcMemory']
lxcsetting['disk'] = form['lxcDisk']
lxcsettingfile = open(fspath+"/global/sys/lxc.default", 'w')
lxcsettingfile.write(json.dumps(lxcsetting))
lxcsettingfile.close()
return {"success": 'true'}
def queryForDisplay(*args, **kwargs):
'''
Usage: queryForDisplay(user = token_from_auth)
Provide information about one user that administrators need to use
'''
if ( 'user' not in kwargs):
return {"success":'false', "reason":"Cannot get 'user'"}
user = kwargs['user']
if (user == None):
return {"success":'false', "reason":"User does not exist"}
result = {
"success": 'true',
"data":{
"username" : user.username,
"password" : user.password,
"avatar" : user.avatar,
"nickname" : user.nickname,
"description" : user.description,
"status" : user.status,
"e_mail" : user.e_mail,
"student_number": user.student_number,
"department" : user.department,
"truename" : user.truename,
"tel" : user.tel,
"register_date" : "%s"%(user.register_date),
"group" : user.user_group,
"auth_method": user.auth_method,
}
}
return result
# def usermodify(rowID, columnID, newValue, cur_user):
# '''not used now'''
# user = um.query(ID = request.form["rowID"], cur_user = root).get('token', None)
# result = um.modify(user = user, columnID = request.form["columnID"], newValue = request.form["newValue"], cur_user = root)
# return json.dumps(result)
| |
"""The Intel MPI Benchmarks
https://software.intel.com/en-us/articles/intel-mpi-benchmarks
"""
from abc import abstractmethod, abstractproperty
from operator import itemgetter
import re
from cached_property import cached_property
from hpcbench.api import Benchmark, Metrics, MetricsExtractor
from hpcbench.toolbox.process import find_executable
class IMBExtractor(MetricsExtractor):
    """Abstract base class for IMB benchmark metrics extractors.

    Subclasses parse the benchmark's stdout and report latency/bandwidth
    extrema; when ``with_all_data`` is set, every raw sample is reported too.
    """

    def __init__(self):
        # fixed: the class docstring used to sit below this assignment as a
        # dead string expression inside __init__, documenting nothing
        self.with_all_data = False

    @cached_property
    def metrics(self):
        """Metrics exposed by this extractor, keyed by metric name."""
        common = dict(
            minb_lat=Metrics.Microsecond,
            minb_lat_bytes=Metrics.Byte,
            min_lat=Metrics.Microsecond,
            min_lat_bytes=Metrics.Byte,
            maxb_bw=Metrics.MegaBytesPerSecond,
            maxb_bw_bytes=Metrics.Byte,
            max_bw=Metrics.MegaBytesPerSecond,
            max_bw_bytes=Metrics.Byte,
        )
        if self.with_all_data:
            common.update(
                raw=[
                    dict(
                        bytes=Metrics.Byte,
                        bandwidth=Metrics.MegaBytesPerSecond,
                        latency=Metrics.Microsecond,
                    )
                ]
            )
        return common

    @abstractproperty
    def stdout_ignore_prior(self):
        """Ignore stdout until this line"""

    @cached_property
    def metrics_names(self):
        """get metrics names"""
        return set(self.metrics)

    def extract_metrics(self, metas):
        # parse stdout and extract desired metrics
        self.prelude()
        with open(self.stdout) as istr:
            # skip everything up to and including the section marker
            for line in istr:
                if line.strip() == self.stdout_ignore_prior:
                    break
            for line in istr:
                self.process_line(line.strip())
        return self.epilog()

    @abstractmethod
    def process_line(self, line):
        """Process a line
        """

    def prelude(self):
        """method called before extracting metrics"""

    @abstractmethod
    def epilog(self):
        """:return: extracted metrics as a dictionary
        """
class IMBPingPongExtractor(IMBExtractor):
    """Metrics extractor for PingPong IMB benchmark"""

    LATENCY_BANDWIDTH_RE = re.compile(r'^\s*(\d+)\s+\d+\s+(\d*\.?\d+)[\s]+(\d*\.?\d+)')

    def __init__(self):
        super(IMBPingPongExtractor, self).__init__()
        self.s_bytes = []
        self.s_latency = []
        self.s_bandwidth = []
        self.with_all_data = True

    def prelude(self):
        # reset the sample series before every extraction
        self.s_bytes = []
        self.s_latency = []
        self.s_bandwidth = []

    @cached_property
    def stdout_ignore_prior(self):
        return "# Benchmarking PingPong"

    def process_line(self, line):
        """Collect one (bytes, latency, bandwidth) sample; zero-byte rows are skipped."""
        match = self.LATENCY_BANDWIDTH_RE.search(line)
        if not match:
            return
        size = int(match.group(1))
        if size == 0:
            return
        self.s_bytes.append(size)
        self.s_latency.append(float(match.group(2)))
        self.s_bandwidth.append(float(match.group(3)))

    def epilog(self):
        """Summarize the collected samples into the metric dictionary."""
        lat_by_value = min(zip(self.s_latency, self.s_bytes), key=itemgetter(0))
        bw_by_value = max(zip(self.s_bandwidth, self.s_bytes), key=itemgetter(0))
        samples = [
            dict(bytes=size, latency=lat, bandwidth=bw)
            for size, lat, bw in zip(self.s_bytes, self.s_latency, self.s_bandwidth)
        ]
        return dict(
            minb_lat=self.s_latency[0],
            minb_lat_bytes=self.s_bytes[0],
            min_lat=lat_by_value[0],
            min_lat_bytes=lat_by_value[1],
            maxb_bw=self.s_bandwidth[-1],
            maxb_bw_bytes=self.s_bytes[-1],
            max_bw=bw_by_value[0],
            max_bw_bytes=bw_by_value[1],
            raw=samples,
        )
class IMBAllToAllExtractor(IMBExtractor):
    """Metrics extractor for AllToAll IMB benchmark"""

    TIME_RE = re.compile(r'^\s*(\d+)\s+\d+\s+\d*\.?\d+[\s]+\d*\.?\d+[\s]+(\d*\.?\d+)')

    def __init__(self):
        super(IMBAllToAllExtractor, self).__init__()
        self.s_bytes = []
        self.s_latency = []
        self.s_bandwidth = []

    def prelude(self):
        # start from clean sample series on each extraction
        self.s_bytes = []
        self.s_latency = []
        self.s_bandwidth = []

    @cached_property
    def stdout_ignore_prior(self):
        return "# Benchmarking Alltoallv"

    def process_line(self, line):
        """Record one sample; bandwidth is derived in MB/s from bytes and usec."""
        match = self.TIME_RE.search(line)
        if not match:
            return
        size = int(match.group(1))
        if size == 0:
            return
        elapsed = float(match.group(2))
        derived_bw = round((size / 1024.0 ** 2) / (elapsed / 1.0e6), 2)
        self.s_bytes.append(size)
        self.s_latency.append(elapsed)
        self.s_bandwidth.append(derived_bw)

    def epilog(self):
        """Summarize the collected samples into the metric dictionary."""
        lat_by_value = min(zip(self.s_latency, self.s_bytes), key=itemgetter(0))
        bw_by_value = max(zip(self.s_bandwidth, self.s_bytes), key=itemgetter(0))
        return dict(
            minb_lat=self.s_latency[0],
            minb_lat_bytes=self.s_bytes[0],
            min_lat=lat_by_value[0],
            min_lat_bytes=lat_by_value[1],
            maxb_bw=self.s_bandwidth[-1],
            maxb_bw_bytes=self.s_bytes[-1],
            max_bw=bw_by_value[0],
            max_bw_bytes=bw_by_value[1],
        )
class IMBAllGatherExtractor(IMBAllToAllExtractor):
    """Metrics extractor for AllGather IMB benchmark.

    Identical to the Alltoall extractor except for the stdout section marker.
    """

    def __init__(self):
        super(IMBAllGatherExtractor, self).__init__()

    @cached_property
    def stdout_ignore_prior(self):
        return "# Benchmarking Allgather"
class IMB(Benchmark):
    """Provides latency/bandwidth of the network.

    Runs the Intel MPI Benchmarks (PingPong / Alltoallv / Allgather).
    the `srun_nodes` does not apply to the PingPong benchmark, which is
    instead executed once per node pair.
    """

    DEFAULT_EXECUTABLE = 'IMB-MPI1'
    PING_PONG = 'PingPong'
    ALL_TO_ALL = 'Alltoallv'
    ALL_GATHER = 'Allgather'
    DEFAULT_CATEGORIES = [PING_PONG, ALL_TO_ALL, ALL_GATHER]
    # per-category extra command-line arguments ({process_count} is a template)
    DEFAULT_ARGUMENTS = {
        ALL_GATHER: ["-npmin", "{process_count}"],
        ALL_TO_ALL: ["-npmin", "{process_count}"],
    }
    # valid strategies for choosing which node pairs run PingPong
    NODE_PAIRING = {'node', 'tag'}
    DEFAULT_NODE_PAIRING = 'node'

    def __init__(self):
        super(IMB, self).__init__(
            attributes=dict(
                executable=IMB.DEFAULT_EXECUTABLE,
                categories=IMB.DEFAULT_CATEGORIES,
                arguments=IMB.DEFAULT_ARGUMENTS,
                srun_nodes=0,
                node_pairing=IMB.DEFAULT_NODE_PAIRING,
            )
        )

    name = 'imb'

    @cached_property
    def executable(self):
        """Get path to Intel MPI Benchmark executable
        """
        return self.attributes['executable']

    @property
    def categories(self):
        """List of IMB benchmarks to test"""
        return self.attributes['categories']

    @property
    def arguments(self):
        """Dictionary providing the list of arguments for every
        benchmark"""
        return self.attributes['arguments']

    @property
    def srun_nodes(self):
        """Number of nodes the benchmark (other than PingPong)
        must be executed on"""
        return self.attributes['srun_nodes']

    @property
    def node_pairing(self):
        """if "node" then test current node and next one
        if "tag", then create tests for every pair of the current tag.
        Raises ValueError for any other configured value.
        """
        value = self.attributes['node_pairing']
        if value not in IMB.NODE_PAIRING:
            msg = 'Unexpected {0} value: got "{1}" but valid values are {2}'
            msg = msg.format('node_pairing', value, IMB.NODE_PAIRING)
            raise ValueError(msg)
        return value

    def _node_pairs(self, context):
        # resolve the node-pair list according to the configured strategy
        if self.node_pairing == 'node':
            return context.cluster.node_pairs
        elif self.node_pairing == 'tag':
            return context.cluster.tag_node_pairs
        assert False  # unreachable: node_pairing validates its value

    def execution_matrix(self, context):
        """Yield one execution per category; PingPong once per node pair."""
        for category in self.categories:
            arguments = self.arguments.get(category) or []
            if category == IMB.PING_PONG:
                for pair in self._node_pairs(context):
                    yield dict(
                        category=category,
                        command=[
                            find_executable(self.executable, required=False),
                            category,
                        ]
                        + arguments,
                        srun_nodes=pair,
                        metas=dict(from_node=pair[0], to_node=pair[1]),
                    )
            else:
                yield dict(
                    category=category,
                    command=[find_executable(self.executable, required=False), category]
                    + list(arguments),
                    srun_nodes=self.srun_nodes,
                )

    @cached_property
    def metrics_extractors(self):
        # one extractor instance per benchmark category
        return {
            IMB.PING_PONG: IMBPingPongExtractor(),
            IMB.ALL_TO_ALL: IMBAllToAllExtractor(),
            IMB.ALL_GATHER: IMBAllGatherExtractor(),
        }
| |
"""
Module for manipulating the tree structure of collections
Definitions:
- Tree: the root node being handled
- Branch: a branch inside the tree that contains other collections
- Leaf branch: the last branch that contains test collections
Example tree:
Abstract view:
root ("root", "tree", "branch", "collection")
/ \ (root contains plot title)
| |
(possible more levels) ("branch", "collection")
/ \
linkrate: 10 mbit 20 mbit ("branch", "leaf branch", "collection")
/ \ / \
rtt: 2 ms 10 ms 2 ms 10 ms ("collection", "leaf collection")
| | | | (only one collection inside leaf branches)
| | | |
test test test test ("test")
(only one test in leaf collections)
The reason for having tests as children similar as normal
branches is to allow easy manipulation of the tree, e.g.
swapping levels.
Actual structure:
{
'title': 'Plot title',
'titlelabel': '',
'subtitle': '',
'children': [
{
'title': '10 Mb/s',
'titlelabel': 'Linkrate',
'subtitle': '',
'children': [
{
'title': '2',
'titlelabel': 'RTT',
'subtitle': '',
'children': [
{'testcase': 'results/plot-tree/linkrate-10/rtt-2/test'}
],
},
{
'title': '10',
'titlelabel': 'RTT',
'subtitle': '',
'children': [
{'testcase': 'results/plot-tree/linkrate-10/rtt-10/test'}
],
},
],
},
{
'title': '20 Mb/s',
'titlelabel': 'Linkrate',
'subtitle': '',
'children': [
{
'title': '2',
'titlelabel': 'RTT',
'subtitle': '',
'children': [
{'testcase': 'results/plot-tree/linkrate-20/rtt-2/test'}
]
},
{
'title': '10',
'titlelabel': 'RTT',
'subtitle': '',
'children': [
{'testcase': 'results/plot-tree/linkrate-20/rtt-10/test'}
]
},
],
},
],
}
X offsets:
X offsets in the tree are increased so that they cause natural
gaps between test branches. So between branches at a deep level
there is a small gap, while close to the root branch there will
be more gap.
In the example above the tests would have the following x offsets
- test 1: 0
- test 2: 1
- test 3: 3 (new branch, so x is increased to form a gap)
- test 4: 4
"""
from collections import OrderedDict
def get_depth_sizes(tree):
    """
    Calculate the number of branches at each tree level
    """
    counts = {}

    def tally(item, x, depth):
        # walk_tree visits every branch once; count it at its depth
        counts[depth] = counts.get(depth, 0) + 1

    walk_tree(tree, tally)
    return counts
def walk_leaf(tree, fn):
    """
    Walks the tree and calls fn for every leaf branch
    The arguments to fn:
    - object: the leaf branch
    - bool: true if first leaf branch in tree
    - number: the x offset of this leaf branch
    """
    state = {'x': 0, 'first': True}

    def descend(branch):
        children = branch['children']
        if not children:
            return
        # a leaf branch is one whose first collection holds a testcase
        if 'testcase' in children[0]['children'][0]:
            fn(branch, state['first'], state['x'])
            state['first'] = False
            state['x'] += len(children)
        else:
            # a branch of branches: recurse, adding a gap after each child
            for child in children:
                descend(child)
                state['x'] += 1

    descend(tree)
def walk_tree_reverse(tree, fn):
    """
    Walks the tree and calls fn for every branch in reverse order
    The arguments to fn:
    - object: the branch
    - number: the x offset of this branch
    - number: depth of this branch, 0 being root
    - number: the number of tests inside this branch
    """
    state = {'x': 0}

    def descend(branch, depth=0):
        if 'testcase' in branch['children'][0]['children'][0]:
            # leaf branch: just advance the offset past its tests
            state['x'] += len(branch['children'])
        else:
            for child in branch['children']:
                start = state['x']
                descend(child, depth + 1)
                # fn is called after the subtree: hence "reverse" order
                fn(child, start, depth, state['x'] - start)
                state['x'] += 1

    descend(tree, 0)
def walk_tree(tree, fn, include_leaf_collection=False):
    """
    Walks the tree and calls fn for every branch, and also for every
    leaf collection if include_leaf_collection is True.
    The arguments given to fn:
    - object: the collection
    - number: the x offset related to number of tests/levels
    - number: depth of this collection, 0 being root
    """
    state = {'x': 0}

    def descend(collection, depth=0):
        for child in collection['children']:
            fn(child, state['x'], depth)
            if include_leaf_collection:
                if 'testcase' in child['children'][0]:
                    # leaf collection: one test inside, advance by one
                    state['x'] += 1
                    continue
            elif 'children' not in child['children'][0]:
                # walk_tree was handed a leaf branch itself; cannot look
                # for a leaf branch one level further down
                continue
            else:
                if 'testcase' in child['children'][0]['children'][0]:
                    # leaf branch: skip its tests plus the branch gap
                    state['x'] += len(child['children']) + 1
                    continue
            descend(child, depth + 1)
            state['x'] += 1

    descend(tree)
def swap_levels(tree, level=0):
    """
    Rearrange vertical position of elements in the tree.
    This swaps collections in the tree so their level
    in the tree is changed.
    For the plotting, this will change the way tests
    are grouped and presented.

    With level > 0 the swap is applied recursively to every subtree
    `level` branches below the root; with level == 0 the root's children
    are exchanged with their own children (a one-level rotation).
    """
    if level > 0:
        # descend to the requested level and apply the swap there instead
        def walk(branch, depth):
            if len(branch['children']) == 0:
                return
            # is this a set of tests?
            if 'testcase' in branch['children'][0]:
                return
            for index, item in enumerate(branch['children']):
                if depth + 1 == level:
                    branch['children'][index] = swap_levels(item)
                else:
                    walk(item, depth + 1)
        walk(tree, 0)
        return tree
    # collect the distinct titles one level below the root, in visit order
    titles = []
    def check_level(node, x, depth):
        nonlocal titles
        if depth == 1 and node['title'] not in titles:
            titles.append(node['title'])
    walk_tree(tree, check_level, include_leaf_collection=True)
    if len(titles) == 0:
        return tree
    # rebuild the root's children grouped by the level-1 titles: each former
    # grandchild title becomes a child holding shallow copies of its parents
    new_children = OrderedDict()
    parent = None
    def build_swap(node, x, depth):
        nonlocal parent, new_children
        if depth == 0:
            parent = node
        elif depth == 1:
            # shallow copy: the grandchild's children are re-attached below
            parentcopy = dict(parent)
            if node['title'] in new_children:
                new_children[node['title']]['children'].append(parentcopy)
            else:
                childcopy = dict(node)
                childcopy['children'] = [parentcopy]
                new_children[node['title']] = childcopy
            parentcopy['children'] = node['children']
    walk_tree(tree, build_swap, include_leaf_collection=True)
    tree['children'] = [val for key, val in new_children.items()]
    return tree
def build_swap_list(level_order):
"""
Build a list of levels that should be swapped to achieve
a specific ordering of levels.
"""
# assert the values
distinct = []
for val in level_order:
if val in distinct:
raise Exception("Duplicate value: %s" % val)
if not isinstance(val, int):
raise Exception("Invalid type: %s" % val)
if val < 0:
raise Exception("Value out of bounds: %s" % val)
distinct.append(val)
# fill any missing values
for i in range(max(level_order)):
if i not in level_order:
level_order.append(i)
# work through the list and build a swap list
swap_list = []
to_process = list(range(len(level_order))) # same as an sorted version of the list
for i in range(len(level_order)):
# find offset of this target
to_swap = 0
while level_order[i] != to_process[to_swap]:
to_swap += 1
# pull up the target so it become the current level
for x in range(to_swap):
swap_list.append(i + (to_swap - x - 1))
# remove the level we targeted
to_process.remove(level_order[i])
return swap_list
def reorder_levels(tree, level_order=None):
    """
    Order the tree based on an ordering of levels
    (number of branches in height in the tree)
    E.g. a tree of 3 levels where we want to reorder the levels
    so that the order is last level, then the first and then the
    second:
      level_order=[2,0,1]
    Example reversing the order of three levels:
      level_order=[2,1,0]
    """
    if not level_order:
        # nothing requested (None or empty): leave the tree untouched
        return tree
    # get the depth of the tree only counting branches
    depth_count = len(get_depth_sizes(tree))
    swaps = build_swap_list(level_order)
    if swaps and max(swaps) >= depth_count:
        raise Exception("Out of bound level: %d. Only have %d levels" % (max(swaps), depth_count))
    # apply the calculated node swapping to the tree
    for level in swaps:
        tree = swap_levels(tree, level)
    return tree
def skip_levels(tree, number_of_levels):
    """
    Select the left node number_of_levels deep and
    return the new tree.

    number_of_levels may also be a list of child indexes, allowing a
    specific branch (instead of the default first one) to be selected
    at every level.
    """
    # allow selecting specific branches in the tree instead of the first;
    # isinstance (rather than `type(...) is list`) also accepts subclasses
    if isinstance(number_of_levels, list):
        for branch in number_of_levels:
            tree = tree['children'][branch]
        return tree
    while number_of_levels > 0:
        tree = tree['children'][0]
        number_of_levels -= 1
    return tree
| |
from __future__ import division, absolute_import, print_function
import sys
import warnings
import functools
import operator
import pytest
import numpy as np
from numpy.core._multiarray_tests import array_indexing
from itertools import product
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_array_equal, assert_warns,
HAS_REFCOUNT, suppress_warnings,
)
class TestIndexing(object):
    def test_index_no_floats(self):
        # Float scalars (including negative floats) must be rejected as
        # indices in every position, alone, mixed with ints, and with slices.
        a = np.array([[[5]]])
        assert_raises(IndexError, lambda: a[0.0])
        assert_raises(IndexError, lambda: a[0, 0.0])
        assert_raises(IndexError, lambda: a[0.0, 0])
        assert_raises(IndexError, lambda: a[0.0,:])
        assert_raises(IndexError, lambda: a[:, 0.0])
        assert_raises(IndexError, lambda: a[:, 0.0,:])
        assert_raises(IndexError, lambda: a[0.0,:,:])
        assert_raises(IndexError, lambda: a[0, 0, 0.0])
        assert_raises(IndexError, lambda: a[0.0, 0, 0])
        assert_raises(IndexError, lambda: a[0, 0.0, 0])
        assert_raises(IndexError, lambda: a[-1.4])
        assert_raises(IndexError, lambda: a[0, -1.4])
        assert_raises(IndexError, lambda: a[-1.4, 0])
        assert_raises(IndexError, lambda: a[-1.4,:])
        assert_raises(IndexError, lambda: a[:, -1.4])
        assert_raises(IndexError, lambda: a[:, -1.4,:])
        assert_raises(IndexError, lambda: a[-1.4,:,:])
        assert_raises(IndexError, lambda: a[0, 0, -1.4])
        assert_raises(IndexError, lambda: a[-1.4, 0, 0])
        assert_raises(IndexError, lambda: a[0, -1.4, 0])
        # a float scalar alongside a float-bounded slice is IndexError too
        assert_raises(IndexError, lambda: a[0.0:, 0.0])
        assert_raises(IndexError, lambda: a[0.0:, 0.0,:])
    def test_slicing_no_floats(self):
        # Float slice bounds raise TypeError (unlike float indices, which
        # raise IndexError) -- checked for start, stop, step and mixtures.
        a = np.array([[5]])
        # start as float.
        assert_raises(TypeError, lambda: a[0.0:])
        assert_raises(TypeError, lambda: a[0:, 0.0:2])
        assert_raises(TypeError, lambda: a[0.0::2, :0])
        assert_raises(TypeError, lambda: a[0.0:1:2,:])
        assert_raises(TypeError, lambda: a[:, 0.0:])
        # stop as float.
        assert_raises(TypeError, lambda: a[:0.0])
        assert_raises(TypeError, lambda: a[:0, 1:2.0])
        assert_raises(TypeError, lambda: a[:0.0:2, :0])
        assert_raises(TypeError, lambda: a[:0.0,:])
        assert_raises(TypeError, lambda: a[:, 0:4.0:2])
        # step as float.
        assert_raises(TypeError, lambda: a[::1.0])
        assert_raises(TypeError, lambda: a[0:, :2:2.0])
        assert_raises(TypeError, lambda: a[1::4.0, :0])
        assert_raises(TypeError, lambda: a[::5.0,:])
        assert_raises(TypeError, lambda: a[:, 0:4:2.0])
        # mixed.
        assert_raises(TypeError, lambda: a[1.0:2:2.0])
        assert_raises(TypeError, lambda: a[1.0::2.0])
        assert_raises(TypeError, lambda: a[0:, :2.0:2.0])
        assert_raises(TypeError, lambda: a[1.0:1:4.0, :0])
        assert_raises(TypeError, lambda: a[1.0:5.0:5.0,:])
        assert_raises(TypeError, lambda: a[:, 0.4:4.0:2.0])
        # should still get the DeprecationWarning if step = 0.
        assert_raises(TypeError, lambda: a[::0.0])
    def test_index_no_array_to_index(self):
        # No non-scalar arrays.
        # A non-scalar array used as a slice bound must raise TypeError.
        a = np.array([[[1]]])
        assert_raises(TypeError, lambda: a[a:a:a])
    def test_none_index(self):
        # `None` index adds newaxis: equivalent to np.newaxis and increases
        # the result's ndim by exactly one.
        a = np.array([1, 2, 3])
        assert_equal(a[None], a[np.newaxis])
        assert_equal(a[None].ndim, a.ndim + 1)
    def test_empty_tuple_index(self):
        # Empty tuple index creates a view (base points at the original)
        a = np.array([1, 2, 3])
        assert_equal(a[()], a)
        assert_(a[()].base is a)
        # ...except on a 0-d array, where it returns a scalar
        a = np.array(0)
        assert_(isinstance(a[()], np.int_))
    def test_void_scalar_empty_tuple(self):
        # Void (structured/raw) 0-d arrays: `()` preserves dtype and value,
        # while `...` still yields an ndarray rather than a scalar.
        s = np.zeros((), dtype='V4')
        assert_equal(s[()].dtype, s.dtype)
        assert_equal(s[()], s)
        assert_equal(type(s[...]), np.ndarray)
    def test_same_kind_index_casting(self):
        # Indexes should be cast with same-kind and not safe, even if that
        # is somewhat unsafe. So test various different code paths.
        # intp and uintp index arrays must behave identically for getitem
        # and setitem, on 1-D and 2-D targets, including the (u, u) path.
        index = np.arange(5)
        u_index = index.astype(np.uintp)
        arr = np.arange(10)
        assert_array_equal(arr[index], arr[u_index])
        arr[u_index] = np.arange(5)
        assert_array_equal(arr, np.arange(10))
        arr = np.arange(10).reshape(5, 2)
        assert_array_equal(arr[index], arr[u_index])
        arr[u_index] = np.arange(5)[:,None]
        assert_array_equal(arr, np.arange(5)[:,None].repeat(2, axis=1))
        arr = np.arange(25).reshape(5, 5)
        assert_array_equal(arr[u_index, u_index], arr[index, index])
    def test_empty_fancy_index(self):
        # Empty list index creates an empty array
        # with the same dtype (but with weird shape)
        a = np.array([1, 2, 3])
        assert_equal(a[[]], [])
        assert_equal(a[[]].dtype, a.dtype)
        # an empty intp array index is also accepted
        b = np.array([], dtype=np.intp)
        assert_equal(a[[]], [])
        assert_equal(a[[]].dtype, a.dtype)
        # but an empty float array index raises IndexError
        b = np.array([])
        assert_raises(IndexError, a.__getitem__, b)
    def test_ellipsis_index(self):
        # `...` returns a new view with the same contents, never the
        # original object itself.
        a = np.array([[1, 2, 3],
                      [4, 5, 6],
                      [7, 8, 9]])
        assert_(a[...] is not a)
        assert_equal(a[...], a)
        # `a[...]` was `a` in numpy <1.9.
        assert_(a[...].base is a)
        # Slicing with ellipsis can skip an
        # arbitrary number of dimensions
        assert_equal(a[0, ...], a[0])
        assert_equal(a[0, ...], a[0,:])
        assert_equal(a[..., 0], a[:, 0])
        # Slicing with ellipsis always results
        # in an array, not a scalar
        assert_equal(a[0, ..., 1], np.array(2))
        # Assignment with `(Ellipsis,)` on 0-d arrays
        b = np.array(1)
        b[(Ellipsis,)] = 2
        assert_equal(b, 2)
    def test_single_int_index(self):
        # Single integer index selects one row
        a = np.array([[1, 2, 3],
                      [4, 5, 6],
                      [7, 8, 9]])
        assert_equal(a[0], [1, 2, 3])
        assert_equal(a[-1], [7, 8, 9])
        # Index out of bounds produces IndexError
        assert_raises(IndexError, a.__getitem__, 1 << 30)
        # Index overflow (beyond intp range) produces IndexError, not wraparound
        assert_raises(IndexError, a.__getitem__, 1 << 64)
    def test_single_bool_index(self):
        # Single boolean 0-d index: True behaves like adding a length-1
        # leading axis; False yields the same shape with length 0.
        a = np.array([[1, 2, 3],
                      [4, 5, 6],
                      [7, 8, 9]])
        assert_equal(a[np.array(True)], a[None])
        assert_equal(a[np.array(False)], a[None][0:0])
    def test_boolean_shape_mismatch(self):
        # Boolean index shapes must match the indexed axes exactly:
        # too short, too long, and wrong-shaped masks all raise IndexError,
        # in the first position or after a slice.
        arr = np.ones((5, 4, 3))
        index = np.array([True])
        assert_raises(IndexError, arr.__getitem__, index)
        index = np.array([False] * 6)
        assert_raises(IndexError, arr.__getitem__, index)
        index = np.zeros((4, 4), dtype=bool)
        assert_raises(IndexError, arr.__getitem__, index)
        assert_raises(IndexError, arr.__getitem__, (slice(None), index))
    def test_boolean_indexing_onedim(self):
        # Indexing a 2-dimensional array with
        # boolean array of length one
        a = np.array([[ 0.,  0.,  0.]])
        b = np.array([ True], dtype=bool)
        assert_equal(a[b], a)
        # boolean assignment broadcasts the scalar over the selected row
        a[b] = 1.
        assert_equal(a, [[1., 1., 1.]])
    def test_boolean_assignment_value_mismatch(self):
        # A boolean assignment should fail when the shape of the values
        # cannot be broadcast to the subscription. (see also gh-3458)
        a = np.arange(4)

        def f(a, v):
            a[a > -1] = v

        # too few values, too many values, and too many for a sliced target
        assert_raises(ValueError, f, a, [])
        assert_raises(ValueError, f, a, [1, 2, 3])
        assert_raises(ValueError, f, a[:1], [1, 2, 3])
def test_boolean_assignment_needs_api(self):
    """Regression test for gh-7666.

    Assigning object-dtype values through a boolean mask needs the
    Python API in the transfer function; this caused a segfault on
    Python 2 because the GIL was not held when the iterator itself did
    not need it.
    """
    arr = np.zeros(1000)
    indx = np.zeros(1000, dtype=bool)
    indx[:100] = True
    # object-dtype RHS forces a transfer function that needs the API
    arr[indx] = np.ones(100, dtype=object)

    expected = np.zeros(1000)
    expected[:100] = 1
    assert_array_equal(arr, expected)
def test_boolean_indexing_twodim(self):
# Indexing a 2-dimensional array with
# 2-dimensional boolean array
a = np.array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]])
b = np.array([[ True, False, True],
[False, True, False],
[ True, False, True]])
assert_equal(a[b], [1, 3, 5, 7, 9])
assert_equal(a[b[1]], [[4, 5, 6]])
assert_equal(a[b[0]], a[b[2]])
# boolean assignment
a[b] = 0
assert_equal(a, [[0, 2, 0],
[4, 0, 6],
[0, 8, 0]])
def test_reverse_strides_and_subspace_bufferinit(self):
    """Strides must not be reversed for simple and subspace fancy
    indexing, and the subspace buffer must be initialized."""
    a = np.ones(5)
    b = np.zeros(5, dtype=np.intp)[::-1]  # all-zero index with reversed strides
    c = np.arange(5)[::-1]                # values 4, 3, 2, 1, 0

    a[b] = c
    # If the strides are not reversed, the 0 in the arange comes last.
    assert_equal(a[0], 0)

    # This also tests that the subspace buffer is initialized:
    a = np.ones((5, 2))
    c = np.arange(10).reshape(5, 2)[::-1]
    a[b, :] = c
    assert_equal(a[0], [0, 1])
def test_reversed_strides_result_allocation(self):
# Test a bug when calculating the output strides for a result array
# when the subspace size was 1 (and test other cases as well)
a = np.arange(10)[:, None]
i = np.arange(10)[::-1]
assert_array_equal(a[i], a[i.copy('C')])
a = np.arange(20).reshape(-1, 2)
def test_uncontiguous_subspace_assignment(self):
# During development there was a bug activating a skip logic
# based on ndim instead of size.
a = np.full((3, 4, 2), -1)
b = np.full((3, 4, 2), -1)
a[[0, 1]] = np.arange(2 * 4 * 2).reshape(2, 4, 2).T
b[[0, 1]] = np.arange(2 * 4 * 2).reshape(2, 4, 2).T.copy()
assert_equal(a, b)
def test_too_many_fancy_indices_special_case(self):
    """Using NPY_MAXDIMS fancy indices at once is rejected; this just
    documents the (small) limitation."""
    a = np.ones((1,) * 32)  # 32 is NPY_MAXDIMS
    assert_raises(IndexError, a.__getitem__, (np.array([0]),) * 32)
def test_scalar_array_bool(self):
    """NumPy boolean scalars can be used as boolean indexes (plain
    Python bools, as integers, are handled by a separate deprecation)."""
    a = np.array(1)
    assert_equal(a[np.bool_(True)], a[np.array(True)])
    assert_equal(a[np.bool_(False)], a[np.array(False)])

    # After deprecating bools as integers:
    #a = np.array([0,1,2])
    #assert_equal(a[True, :], a[None, :])
    #assert_equal(a[:, True], a[:, None])
    #
    #assert_(not np.may_share_memory(a, a[True, :]))
def test_everything_returns_views(self):
# Before `...` would return a itself.
a = np.arange(5)
assert_(a is not a[()])
assert_(a is not a[...])
assert_(a is not a[:])
def test_broaderrors_indexing(self):
a = np.zeros((5, 5))
assert_raises(IndexError, a.__getitem__, ([0, 1], [0, 1, 2]))
assert_raises(IndexError, a.__setitem__, ([0, 1], [0, 1, 2]), 0)
def test_trivial_fancy_out_of_bounds(self):
a = np.zeros(5)
ind = np.ones(20, dtype=np.intp)
ind[-1] = 10
assert_raises(IndexError, a.__getitem__, ind)
assert_raises(IndexError, a.__setitem__, ind, 0)
ind = np.ones(20, dtype=np.intp)
ind[0] = 11
assert_raises(IndexError, a.__getitem__, ind)
assert_raises(IndexError, a.__setitem__, ind, 0)
def test_nonbaseclass_values(self):
    """Fancy assignment must use the values of an ndarray subclass even
    when its __array_finalize__ corrupts intermediate copies."""
    class SubClass(np.ndarray):
        def __array_finalize__(self, old):
            # Have array finalize do funny things
            self.fill(99)

    a = np.zeros((5, 5))
    s = a.copy().view(type=SubClass)
    s.fill(1)

    a[[0, 1, 2, 3, 4], :] = s
    assert_((a == 1).all())

    # Subspace is last, so transposing might want to finalize
    a[:, [0, 1, 2, 3, 4]] = s
    assert_((a == 1).all())

    a.fill(0)
    a[...] = s
    assert_((a == 1).all())
def test_subclass_writeable(self):
    """Boolean, fancy, ellipsis and scalar indexing of an ndarray
    subclass (here a recarray) must return writeable results."""
    d = np.rec.array([('NGC1001', 11), ('NGC1002', 1.), ('NGC1003', 1.)],
                     dtype=[('target', 'S20'), ('V_mag', '>f4')])
    ind = np.array([False, True, True], dtype=bool)
    assert_(d[ind].flags.writeable)
    ind = np.array([0, 1])
    assert_(d[ind].flags.writeable)
    assert_(d[...].flags.writeable)
    assert_(d[0].flags.writeable)
def test_memory_order(self):
    """Fancy indexing with an F-contiguous index array currently yields
    an F-contiguous result.  This is not necessary to preserve; memory
    layouts for more complex indices are not as simple."""
    a = np.arange(10)
    b = np.arange(10).reshape(5, 2).T  # F-contiguous index array
    assert_(a[b].flags.f_contiguous)

    # Takes a different implementation branch:
    a = a.reshape(-1, 1)
    assert_(a[b, 0].flags.f_contiguous)
def test_scalar_return_type(self):
    """Full scalar indices should return scalars, and object arrays
    should not call PyArray_Return on their items."""
    class Zero(object):
        # The most basic valid indexing
        def __index__(self):
            return 0

    z = Zero()

    class ArrayLike(object):
        # Simple array, should behave like the array
        def __array__(self):
            return np.array(0)

    a = np.zeros(())
    assert_(isinstance(a[()], np.float_))
    a = np.zeros(1)
    assert_(isinstance(a[z], np.float_))
    a = np.zeros((1, 1))
    assert_(isinstance(a[z, np.array(0)], np.float_))
    assert_(isinstance(a[z, ArrayLike()], np.float_))

    # And object arrays do not call it too often:
    b = np.array(0)
    a = np.array(0, dtype=object)
    a[()] = b
    assert_(isinstance(a[()], np.ndarray))

    a = np.array([b, None])
    assert_(isinstance(a[z], np.ndarray))

    a = np.array([[b, None]])
    assert_(isinstance(a[z, np.array(0)], np.ndarray))
    assert_(isinstance(a[z, ArrayLike()], np.ndarray))
def test_small_regressions(self):
    """Index checking must not leak references to the intp dtype."""
    a = np.array([0])
    if HAS_REFCOUNT:
        refcount = sys.getrefcount(np.dtype(np.intp))
    # item setting always checks indices in separate function:
    a[np.array([0], dtype=np.intp)] = 1
    a[np.array([0], dtype=np.uint8)] = 1
    assert_raises(IndexError, a.__setitem__,
                  np.array([1], dtype=np.intp), 1)
    assert_raises(IndexError, a.__setitem__,
                  np.array([1], dtype=np.uint8), 1)

    if HAS_REFCOUNT:
        # refcount of the intp dtype must be unchanged
        assert_equal(sys.getrefcount(np.dtype(np.intp)), refcount)
def test_unaligned(self):
    """Trivial, fancy and boolean indexing must all work with unaligned
    data buffers and unaligned index arrays (the [1:...] slices of the
    int8 buffers deliberately misalign the views)."""
    v = (np.zeros(64, dtype=np.int8) + ord('a'))[1:-7]
    d = v.view(np.dtype("S8"))
    # unaligned source
    x = (np.zeros(16, dtype=np.int8) + ord('a'))[1:-7]
    x = x.view(np.dtype("S8"))
    x[...] = np.array("b" * 8, dtype="S")
    b = np.arange(d.size)
    # trivial
    assert_equal(d[b], d)
    d[b] = x
    # nontrivial
    # unaligned index array
    b = np.zeros(d.size + 1).view(np.int8)[1:-(np.intp(0).itemsize - 1)]
    b = b.view(np.intp)[:d.size]
    b[...] = np.arange(d.size)
    assert_equal(d[b.astype(np.int16)], d)
    d[b.astype(np.int16)] = x
    # boolean
    d[b % 2 == 0]
    d[b % 2 == 0] = x[::2]
def test_tuple_subclass(self):
arr = np.ones((5, 5))
# A tuple subclass should also be an nd-index
class TupleSubclass(tuple):
pass
index = ([1], [1])
index = TupleSubclass(index)
assert_(arr[index].shape == (1,))
# Unlike the non nd-index:
assert_(arr[index,].shape != (1,))
def test_broken_sequence_not_nd_index(self):
    """See gh-5063.

    If an object claims to be a sequence but fails on item getting, it
    must not be converted to an nd-index (tuple).  If the object is a
    valid index otherwise (here via __index__), it should still work.
    This object is very dubious and probably bad though.
    """
    class SequenceLike(object):
        def __index__(self):
            return 0

        def __len__(self):
            return 1

        def __getitem__(self, item):
            raise IndexError('Not possible')

    arr = np.arange(10)
    assert_array_equal(arr[SequenceLike()], arr[SequenceLike(),])

    # also test that field indexing does not segfault
    # for a similar reason, by indexing a structured array
    arr = np.zeros((1,), dtype=[('f1', 'i8'), ('f2', 'i8')])
    assert_array_equal(arr[SequenceLike()], arr[SequenceLike(),])
def test_indexing_array_weird_strides(self):
    """See gh-6221.

    Broadcast (0-strided) index arrays must index identically to their
    contiguous copies; the shapes come from the issue and match the
    iterator buffering size.
    """
    x = np.ones(10)
    x2 = np.ones((10, 2))
    ind = np.arange(10)[:, None, None, None]
    ind = np.broadcast_to(ind, (10, 55, 4, 4))

    # single advanced index case
    assert_array_equal(x[ind], x[ind.copy()])
    # higher dimensional advanced index
    zind = np.zeros(4, dtype=np.intp)
    assert_array_equal(x2[ind, zind], x2[ind.copy(), zind])
def test_indexing_array_negative_strides(self):
# From gh-8264,
# core dumps if negative strides are used in iteration
arro = np.zeros((4, 4))
arr = arro[::-1, ::-1]
slices = (slice(None), [0, 1, 2, 3])
arr[slices] = 10
assert_array_equal(arr, 10.)
class TestFieldIndexing(object):
    def test_scalar_return_type(self):
        """Field access always yields an ndarray, even on 0-d arrays."""
        rec = np.zeros((), [('a', 'f8')])
        single_field = rec['a']
        field_list = rec[['a']]
        assert_(isinstance(single_field, np.ndarray))
        assert_(isinstance(field_list, np.ndarray))
class TestBroadcastedAssignments(object):
    """Broadcasting behavior of indexed assignments."""

    def assign(self, a, ind, val):
        # Helper so assert_raises can exercise `a[ind] = val`.
        a[ind] = val
        return a

    def test_prepending_ones(self):
        """Leading 1-sized dimensions of the value broadcast away."""
        a = np.zeros((3, 2))

        a[...] = np.ones((1, 3, 2))
        # Fancy with subspace with and without transpose
        a[[0, 1, 2], :] = np.ones((1, 3, 2))
        a[:, [0, 1]] = np.ones((1, 3, 2))
        # Fancy without subspace (with broadcasting)
        a[[[0], [1], [2]], [0, 1]] = np.ones((1, 3, 2))

    def test_prepend_not_one(self):
        """Prepended dimensions larger than one must raise ValueError."""
        assign = self.assign
        s_ = np.s_
        a = np.zeros(5)

        # Too large and not only ones.
        assert_raises(ValueError, assign, a, s_[...], np.ones((2, 1)))
        assert_raises(ValueError, assign, a, s_[[1, 2, 3],], np.ones((2, 1)))
        assert_raises(ValueError, assign, a, s_[[[1], [2]],], np.ones((2, 2, 1)))

    def test_simple_broadcasting_errors(self):
        """Shape mismatches raise ValueError for slices and fancy indexes."""
        assign = self.assign
        s_ = np.s_
        a = np.zeros((5, 1))

        assert_raises(ValueError, assign, a, s_[...], np.zeros((5, 2)))
        assert_raises(ValueError, assign, a, s_[...], np.zeros((5, 0)))
        assert_raises(ValueError, assign, a, s_[:, [0]], np.zeros((5, 2)))
        assert_raises(ValueError, assign, a, s_[:, [0]], np.zeros((5, 0)))
        assert_raises(ValueError, assign, a, s_[[0], :], np.zeros((2, 1)))

    def test_index_is_larger(self):
        """The value broadcasts against the (larger) fancy index."""
        # Simple case of fancy index broadcasting of the index.
        a = np.zeros((5, 5))
        a[[[0], [1], [2]], [0, 1, 2]] = [2, 3, 4]

        assert_((a[:3, :3] == [2, 3, 4]).all())

    def test_broadcast_subspace(self):
        """A column value broadcasts over the subspace of indexed rows."""
        a = np.zeros((100, 100))
        v = np.arange(100)[:, None]
        b = np.arange(100)[::-1]
        a[b] = v
        assert_((a[::-1] == v).all())
class TestSubclasses(object):
    """Indexing behavior for ndarray subclasses."""

    def test_basic(self):
        """Slicing, fancy and boolean indexing all preserve the subclass."""
        class SubClass(np.ndarray):
            pass

        s = np.arange(5).view(SubClass)
        assert_(isinstance(s[:3], SubClass))
        assert_(s[:3].base is s)

        assert_(isinstance(s[[0, 1, 2]], SubClass))
        assert_(isinstance(s[s > 0], SubClass))

    def test_matrix_fancy(self):
        """Fancy indexing on np.matrix (tests gh-3110)."""
        # The matrix class messes with the shape. While this is always
        # weird (getitem is not used, it does not have setitem nor knows
        # about fancy indexing), this tests gh-3110
        m = np.matrix([[1, 2], [3, 4]])
        assert_(isinstance(m[[0, 1, 0], :], np.matrix))

        # gh-3110. Note the transpose currently because matrices do *not*
        # support dimension fixing for fancy indexing correctly.
        x = np.asmatrix(np.arange(50).reshape(5, 10))
        assert_equal(x[:2, np.array(-1)], x[:2, -1].T)

    def test_finalize_gets_full_info(self):
        """__array_finalize__ must be called on the already-filled array."""
        class SubClass(np.ndarray):
            def __array_finalize__(self, old):
                self.finalize_status = np.array(self)
                self.old = old

        s = np.arange(10).view(SubClass)
        new_s = s[:3]
        assert_array_equal(new_s.finalize_status, new_s)
        assert_array_equal(new_s.old, s)

        new_s = s[[0, 1, 2, 3]]
        assert_array_equal(new_s.finalize_status, new_s)
        assert_array_equal(new_s.old, s)

        new_s = s[s > 0]
        assert_array_equal(new_s.finalize_status, new_s)
        assert_array_equal(new_s.old, s)

    @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
    def test_slice_decref_getsetslice(self):
        """See gh-10066: a temporary slice object should be discarded.

        This test is only really interesting on Python 2 since it goes
        through __set/getslice__ there and can probably be removed.
        Uses 0:7 to make sure it is never None:7.
        """
        class KeepIndexObject(np.ndarray):
            def __getitem__(self, indx):
                self.indx = indx
                if indx == slice(0, 7):
                    raise ValueError

            def __setitem__(self, indx, val):
                self.indx = indx
                if indx == slice(0, 4):
                    raise ValueError

        k = np.array([1]).view(KeepIndexObject)
        k[0:5]
        assert_equal(k.indx, slice(0, 5))
        assert_equal(sys.getrefcount(k.indx), 2)

        try:
            k[0:7]
            raise AssertionError
        except ValueError:
            # The exception holds a reference to the slice so clear on Py2
            if hasattr(sys, 'exc_clear'):
                with suppress_warnings() as sup:
                    sup.filter(DeprecationWarning)
                    sys.exc_clear()
        assert_equal(k.indx, slice(0, 7))
        assert_equal(sys.getrefcount(k.indx), 2)

        k[0:3] = 6
        assert_equal(k.indx, slice(0, 3))
        assert_equal(sys.getrefcount(k.indx), 2)

        try:
            k[0:4] = 2
            raise AssertionError
        except ValueError:
            # The exception holds a reference to the slice so clear on Py2
            if hasattr(sys, 'exc_clear'):
                with suppress_warnings() as sup:
                    sup.filter(DeprecationWarning)
                    sys.exc_clear()
        assert_equal(k.indx, slice(0, 4))
        assert_equal(sys.getrefcount(k.indx), 2)
class TestFancyIndexingCast(object):
    def test_boolean_index_cast_assign(self):
        """Boolean/fancy assignment casts the value to the array dtype;
        complex -> float warns (ComplexWarning) and drops the imaginary
        part."""
        # Setup the boolean index and float arrays.
        shape = (8, 63)
        bool_index = np.zeros(shape).astype(bool)
        bool_index[0, 1] = True
        zero_array = np.zeros(shape)

        # Assigning float is fine.
        zero_array[bool_index] = np.array([1])
        assert_equal(zero_array[0, 1], 1)

        # Fancy indexing works, although we get a cast warning.
        assert_warns(np.ComplexWarning,
                     zero_array.__setitem__, ([0], [1]), np.array([2 + 1j]))
        assert_equal(zero_array[0, 1], 2)  # No complex part

        # Cast complex to float, throwing away the imaginary portion.
        assert_warns(np.ComplexWarning,
                     zero_array.__setitem__, bool_index, np.array([1j]))
        assert_equal(zero_array[0, 1], 0)
class TestFancyIndexingEquivalence(object):
    def test_object_assign(self):
        """Fancy assignment of object arrays must match the equivalent
        slice assignment (the field/object copyto special case must be
        active)."""
        # The right hand side cannot be converted to an array here.
        a = np.arange(5, dtype=object)
        b = a.copy()
        a[:3] = [1, (1, 2), 3]
        b[[0, 1, 2]] = [1, (1, 2), 3]
        assert_array_equal(a, b)

        # test same for subspace fancy indexing
        b = np.arange(5, dtype=object)[None, :]
        b[[0], :3] = [[1, (1, 2), 3]]
        assert_array_equal(a, b[0])

        # Check that swapping of axes works.
        # There was a bug that made the later assignment throw a ValueError
        # do to an incorrectly transposed temporary right hand side (gh-5714)
        b = b.T
        b[:3, [0]] = [[1], [(1, 2)], [3]]
        assert_array_equal(a, b[:, 0])

        # Another test for the memory order of the subspace
        arr = np.ones((3, 4, 5), dtype=object)
        # Equivalent slicing assignment for comparison
        cmp_arr = arr.copy()
        cmp_arr[:1, ...] = [[[1], [2], [3], [4]]]
        arr[[0], ...] = [[[1], [2], [3], [4]]]
        assert_array_equal(arr, cmp_arr)
        arr = arr.copy('F')
        arr[[0], ...] = [[[1], [2], [3], [4]]]
        assert_array_equal(arr, cmp_arr)

    def test_cast_equivalence(self):
        """Fancy assignment casts like slice assignment does."""
        # Yes, normal slicing uses unsafe casting.
        a = np.arange(5)
        b = a.copy()
        a[:3] = np.array(['2', '-3', '-1'])
        b[[0, 2, 1]] = np.array(['2', '-1', '-3'])
        assert_array_equal(a, b)

        # test the same for subspace fancy indexing
        b = np.arange(5)[None, :]
        b[[0], :3] = np.array([['2', '-3', '-1']])
        assert_array_equal(a, b[0])
class TestMultiIndexingAutomated(object):
    """
    These tests use code to mimic the C-Code indexing for selection.

    NOTE:

        * This still lacks tests for complex item setting.
        * If you change behavior of indexing, you might want to modify
          these tests to try more combinations.
        * Behavior was written to match numpy version 1.8. (though a
          first version matched 1.7.)
        * Only tuple indices are supported by the mimicking code.
          (and tested as of writing this)
        * Error types should match most of the time as long as there
          is only one error. For multiple errors, what gets raised
          will usually not be the same one. They are *not* tested.

    Update 2016-11-30: It is probably not worth maintaining this test
    indefinitely and it can be dropped if maintenance becomes a burden.

    """

    def setup(self):
        # Arrays indexed by the tests; `b` has a 0-sized dimension.
        self.a = np.arange(np.prod([3, 1, 5, 6])).reshape(3, 1, 5, 6)
        self.b = np.empty((3, 0, 5, 6))
        self.complex_indices = ['skip', Ellipsis,
            0,
            # Boolean indices, up to 3-d for some special cases of eating up
            # dimensions, also need to test all False
            np.array([True, False, False]),
            np.array([[True, False], [False, True]]),
            np.array([[[False, False], [False, False]]]),
            # Some slices:
            slice(-5, 5, 2),
            slice(1, 1, 100),
            slice(4, -1, -2),
            slice(None, None, -3),
            # Some Fancy indexes:
            np.empty((0, 1, 1), dtype=np.intp),  # empty and can be broadcast
            np.array([0, 1, -2]),
            np.array([[2], [0], [1]]),
            np.array([[0, -1], [0, 1]], dtype=np.dtype('intp').newbyteorder()),
            np.array([2, -1], dtype=np.int8),
            np.zeros([1]*31, dtype=int),  # trigger too large array.
            np.array([0., 1.])]  # invalid datatype
        # Some simpler indices that still cover a bit more
        self.simple_indices = [Ellipsis, None, -1, [1], np.array([True]),
                               'skip']
        # Very simple ones to fill the rest:
        self.fill_indices = [slice(None, None), 0]

    def _get_multi_index(self, arr, indices):
        """Mimic multi dimensional indexing.

        Parameters
        ----------
        arr : ndarray
            Array to be indexed.
        indices : tuple of index objects

        Returns
        -------
        out : ndarray
            An array equivalent to the indexing operation (but always a
            copy). `arr[indices]` should be identical.
        no_copy : bool
            Whether the indexing operation requires a copy. If this is
            `True`, `np.may_share_memory(arr, arr[indices])` should be
            `True` (with some exceptions for scalars and possibly 0-d
            arrays).

        Notes
        -----
        While the function may mostly match the errors of normal
        indexing this is generally not the case.
        """
        in_indices = list(indices)
        indices = []
        # if False, this is a fancy or boolean index
        no_copy = True
        # number of fancy/scalar indexes that are not consecutive
        num_fancy = 0
        # number of dimensions indexed by a "fancy" index
        fancy_dim = 0
        # NOTE: This is a funny twist (and probably OK to change).
        # The boolean array has illegal indexes, but this is
        # allowed if the broadcast fancy-indices are 0-sized.
        # This variable is to catch that case.
        error_unless_broadcast_to_empty = False

        # We need to handle Ellipsis and make arrays from indices, also
        # check if this is fancy indexing (set no_copy).
        ndim = 0
        ellipsis_pos = None  # define here mostly to replace all but first.
        for i, indx in enumerate(in_indices):
            if indx is None:
                continue
            if isinstance(indx, np.ndarray) and indx.dtype == bool:
                no_copy = False
                if indx.ndim == 0:
                    raise IndexError
                # boolean indices can have higher dimensions
                ndim += indx.ndim
                fancy_dim += indx.ndim
                continue
            if indx is Ellipsis:
                if ellipsis_pos is None:
                    ellipsis_pos = i
                    continue  # do not increment ndim counter
                raise IndexError
            if isinstance(indx, slice):
                ndim += 1
                continue
            if not isinstance(indx, np.ndarray):
                # This could be open for changes in numpy.
                # numpy should maybe raise an error if casting to intp
                # is not safe. It rejects np.array([1., 2.]) but not
                # [1., 2.] as index (same for ie. np.take).
                # (Note the importance of empty lists if changing this here)
                indx = np.array(indx, dtype=np.intp)
                in_indices[i] = indx
            elif indx.dtype.kind != 'b' and indx.dtype.kind != 'i':
                raise IndexError('arrays used as indices must be of '
                                 'integer (or boolean) type')
            if indx.ndim != 0:
                no_copy = False
            ndim += 1
            fancy_dim += 1

        if arr.ndim - ndim < 0:
            # we can't take more dimensions then we have, not even for 0-d
            # arrays.  since a[()] makes sense, but not a[(),]. We will
            # raise an error later on, unless a broadcasting error occurs
            # first.
            raise IndexError

        if ndim == 0 and None not in in_indices:
            # Well we have no indexes or one Ellipsis. This is legal.
            return arr.copy(), no_copy

        if ellipsis_pos is not None:
            in_indices[ellipsis_pos:ellipsis_pos+1] = ([slice(None, None)] *
                                                       (arr.ndim - ndim))

        for ax, indx in enumerate(in_indices):
            if isinstance(indx, slice):
                # convert to an index array
                indx = np.arange(*indx.indices(arr.shape[ax]))
                indices.append(['s', indx])
                continue
            elif indx is None:
                # this is like taking a slice with one element from a new axis:
                indices.append(['n', np.array([0], dtype=np.intp)])
                arr = arr.reshape((arr.shape[:ax] + (1,) + arr.shape[ax:]))
                continue
            if isinstance(indx, np.ndarray) and indx.dtype == bool:
                if indx.shape != arr.shape[ax:ax+indx.ndim]:
                    raise IndexError

                try:
                    flat_indx = np.ravel_multi_index(np.nonzero(indx),
                                    arr.shape[ax:ax+indx.ndim], mode='raise')
                except Exception:
                    error_unless_broadcast_to_empty = True
                    # fill with 0s instead, and raise error later
                    flat_indx = np.array([0]*indx.sum(), dtype=np.intp)
                # concatenate axis into a single one:
                if indx.ndim != 0:
                    arr = arr.reshape((arr.shape[:ax]
                                  + (np.prod(arr.shape[ax:ax+indx.ndim]),)
                                  + arr.shape[ax+indx.ndim:]))
                    indx = flat_indx
                else:
                    # This could be changed, a 0-d boolean index can
                    # make sense (even outside the 0-d indexed array case)
                    # Note that originally this is could be interpreted as
                    # integer in the full integer special case.
                    raise IndexError
            else:
                # If the index is a singleton, the bounds check is done
                # before the broadcasting. This used to be different in <1.9
                if indx.ndim == 0:
                    if indx >= arr.shape[ax] or indx < -arr.shape[ax]:
                        raise IndexError
            if indx.ndim == 0:
                # The index is a scalar. This used to be two fold, but if
                # fancy indexing was active, the check was done later,
                # possibly after broadcasting it away (1.7. or earlier).
                # Now it is always done.
                if indx >= arr.shape[ax] or indx < - arr.shape[ax]:
                    raise IndexError
            if (len(indices) > 0 and
                    indices[-1][0] == 'f' and
                    ax != ellipsis_pos):
                # NOTE: There could still have been a 0-sized Ellipsis
                # between them. Checked that with ellipsis_pos.
                indices[-1].append(indx)
            else:
                # We have a fancy index that is not after an existing one.
                # NOTE: A 0-d array triggers this as well, while one may
                # expect it to not trigger it, since a scalar would not be
                # considered fancy indexing.
                num_fancy += 1
                indices.append(['f', indx])

        if num_fancy > 1 and not no_copy:
            # We have to flush the fancy indexes left
            new_indices = indices[:]

            axes = list(range(arr.ndim))
            fancy_axes = []
            new_indices.insert(0, ['f'])
            ni = 0
            ai = 0
            for indx in indices:
                ni += 1
                if indx[0] == 'f':
                    new_indices[0].extend(indx[1:])
                    del new_indices[ni]
                    ni -= 1
                    for ax in range(ai, ai + len(indx[1:])):
                        fancy_axes.append(ax)
                        axes.remove(ax)
                ai += len(indx) - 1  # axis we are at
            indices = new_indices
            # and now we need to transpose arr:
            arr = arr.transpose(*(fancy_axes + axes))

        # We only have one 'f' index now and arr is transposed accordingly.
        # Now handle newaxis by reshaping...
        ax = 0
        for indx in indices:
            if indx[0] == 'f':
                if len(indx) == 1:
                    continue
                # First of all, reshape arr to combine fancy axes into one:
                orig_shape = arr.shape
                orig_slice = orig_shape[ax:ax + len(indx[1:])]
                arr = arr.reshape((arr.shape[:ax]
                                      + (np.prod(orig_slice).astype(int),)
                                      + arr.shape[ax + len(indx[1:]):]))

                # Check if broadcasting works
                res = np.broadcast(*indx[1:])
                # unfortunately the indices might be out of bounds. So check
                # that first, and use mode='wrap' then. However only if
                # there are any indices...
                if res.size != 0:
                    if error_unless_broadcast_to_empty:
                        raise IndexError
                    for _indx, _size in zip(indx[1:], orig_slice):
                        if _indx.size == 0:
                            continue
                        if np.any(_indx >= _size) or np.any(_indx < -_size):
                            raise IndexError
                if len(indx[1:]) == len(orig_slice):
                    # NOTE: `np.prod` here (was the deprecated `np.product`
                    # alias), consistent with the rest of this class.
                    if np.prod(orig_slice) == 0:
                        # Work around for a crash or IndexError with 'wrap'
                        # in some 0-sized cases.
                        try:
                            mi = np.ravel_multi_index(indx[1:], orig_slice,
                                                      mode='raise')
                        except Exception:
                            # This happens with 0-sized orig_slice (sometimes?)
                            # here it is a ValueError, but indexing gives a:
                            raise IndexError('invalid index into 0-sized')
                    else:
                        mi = np.ravel_multi_index(indx[1:], orig_slice,
                                                  mode='wrap')
                else:
                    # Maybe never happens...
                    raise ValueError

                arr = arr.take(mi.ravel(), axis=ax)
                arr = arr.reshape((arr.shape[:ax]
                                      + mi.shape
                                      + arr.shape[ax+1:]))
                ax += mi.ndim
                continue

            # If we are here, we have a 1D array for take:
            arr = arr.take(indx[1], axis=ax)
            ax += 1

        return arr, no_copy

    def _check_multi_index(self, arr, index):
        """Check a multi index item getting and simple setting.

        Parameters
        ----------
        arr : ndarray
            Array to be indexed, must be a reshaped arange.
        index : tuple of indexing objects
            Index being tested.
        """
        # Test item getting
        try:
            mimic_get, no_copy = self._get_multi_index(arr, index)
        except Exception:
            if HAS_REFCOUNT:
                prev_refcount = sys.getrefcount(arr)
            assert_raises(Exception, arr.__getitem__, index)
            assert_raises(Exception, arr.__setitem__, index, 0)
            if HAS_REFCOUNT:
                assert_equal(prev_refcount, sys.getrefcount(arr))
            return

        self._compare_index_result(arr, index, mimic_get, no_copy)

    def _check_single_index(self, arr, index):
        """Check a single index item getting and simple setting.

        Parameters
        ----------
        arr : ndarray
            Array to be indexed, must be an arange.
        index : indexing object
            Index being tested. Must be a single index and not a tuple
            of indexing objects (see also `_check_multi_index`).
        """
        try:
            mimic_get, no_copy = self._get_multi_index(arr, (index,))
        except Exception:
            if HAS_REFCOUNT:
                prev_refcount = sys.getrefcount(arr)
            assert_raises(Exception, arr.__getitem__, index)
            assert_raises(Exception, arr.__setitem__, index, 0)
            if HAS_REFCOUNT:
                assert_equal(prev_refcount, sys.getrefcount(arr))
            return

        self._compare_index_result(arr, index, mimic_get, no_copy)

    def _compare_index_result(self, arr, index, mimic_get, no_copy):
        """Compare mimicked result to indexing result."""
        arr = arr.copy()
        indexed_arr = arr[index]
        assert_array_equal(indexed_arr, mimic_get)
        # Check if we got a view, unless its a 0-sized or 0-d array.
        # (then its not a view, and that does not matter)
        if indexed_arr.size != 0 and indexed_arr.ndim != 0:
            assert_(np.may_share_memory(indexed_arr, arr) == no_copy)
            # Check reference count of the original array
            if HAS_REFCOUNT:
                if no_copy:
                    # refcount increases by one:
                    assert_equal(sys.getrefcount(arr), 3)
                else:
                    assert_equal(sys.getrefcount(arr), 2)

        # Test non-broadcast setitem:
        b = arr.copy()
        b[index] = mimic_get + 1000
        if b.size == 0:
            return  # nothing to compare here...
        if no_copy and indexed_arr.ndim != 0:
            # change indexed_arr in-place to manipulate original:
            indexed_arr += 1000
            assert_array_equal(arr, b)
            return
        # Use the fact that the array is originally an arange:
        arr.flat[indexed_arr.ravel()] += 1000
        assert_array_equal(arr, b)

    def test_boolean(self):
        a = np.array(5)
        assert_equal(a[np.array(True)], 5)
        a[np.array(True)] = 1
        assert_equal(a, 1)
        # NOTE: This is different from normal broadcasting, as
        # arr[boolean_array] works like in a multi index. Which means
        # it is aligned to the left. This is probably correct for
        # consistency with arr[boolean_array,] also no broadcasting
        # is done at all
        self._check_multi_index(
            self.a, (np.zeros_like(self.a, dtype=bool),))
        self._check_multi_index(
            self.a, (np.zeros_like(self.a, dtype=bool)[..., 0],))
        self._check_multi_index(
            self.a, (np.zeros_like(self.a, dtype=bool)[None, ...],))

    def test_multidim(self):
        # Automatically test combinations with complex indexes on 2nd (or 1st)
        # spot and the simple ones in one other spot.
        with warnings.catch_warnings():
            # This is so that np.array(True) is not accepted in a full integer
            # index, when running the file separately.
            warnings.filterwarnings('error', '', DeprecationWarning)
            warnings.filterwarnings('error', '', np.VisibleDeprecationWarning)

            def isskip(idx):
                return isinstance(idx, str) and idx == "skip"

            for simple_pos in [0, 2, 3]:
                tocheck = [self.fill_indices, self.complex_indices,
                           self.fill_indices, self.fill_indices]
                tocheck[simple_pos] = self.simple_indices
                for index in product(*tocheck):
                    index = tuple(i for i in index if not isskip(i))
                    self._check_multi_index(self.a, index)
                    self._check_multi_index(self.b, index)

        # Check very simple item getting:
        self._check_multi_index(self.a, (0, 0, 0, 0))
        self._check_multi_index(self.b, (0, 0, 0, 0))
        # Also check (simple cases of) too many indices:
        assert_raises(IndexError, self.a.__getitem__, (0, 0, 0, 0, 0))
        assert_raises(IndexError, self.a.__setitem__, (0, 0, 0, 0, 0), 0)
        assert_raises(IndexError, self.a.__getitem__, (0, 0, [1], 0, 0))
        assert_raises(IndexError, self.a.__setitem__, (0, 0, [1], 0, 0), 0)

    def test_1d(self):
        a = np.arange(10)
        with warnings.catch_warnings():
            warnings.filterwarnings('error', '', np.VisibleDeprecationWarning)
            for index in self.complex_indices:
                self._check_single_index(a, index)
class TestFloatNonIntegerArgument(object):
    """
    These test that ``TypeError`` is raised when you try to use
    non-integers as arguments to for indexing and slicing e.g. ``a[0.0:5]``
    and ``a[0.5]``, or other functions like ``array.reshape(1., -1)``.

    """
    def test_valid_indexing(self):
        """Integer-based indexing keeps working (should raise no errors)."""
        a = np.array([[[5]]])

        a[np.array([0])]
        a[[0, 0]]
        a[:, [0, 0]]
        a[:, 0, :]
        a[:, :, :]

    def test_valid_slicing(self):
        """Integer-based slicing keeps working (should raise no errors)."""
        a = np.array([[[5]]])

        a[::]
        a[0:]
        a[:2]
        a[0:2]
        a[::2]
        a[1::2]
        a[:2:2]
        a[1:2:2]

    def test_non_integer_argument_errors(self):
        """Float shapes and axis numbers raise TypeError."""
        a = np.array([[5]])

        assert_raises(TypeError, np.reshape, a, (1., 1., -1))
        assert_raises(TypeError, np.reshape, a, (np.array(1.), -1))
        assert_raises(TypeError, np.take, a, [0], 1.)
        assert_raises(TypeError, np.take, a, [0], np.float64(1.))

    def test_non_integer_sequence_multiplication(self):
        """Sequence repetition requires an integral NumPy scalar."""
        # NumPy scalar sequence multiply should not work with non-integers
        def mult(a, b):
            return a * b

        assert_raises(TypeError, mult, [1], np.float_(3))
        # following should be OK
        mult([1], np.int_(3))

    def test_reduce_axis_float_index(self):
        """Float axis arguments to reductions raise TypeError."""
        d = np.zeros((3, 3, 3))
        assert_raises(TypeError, np.min, d, 0.5)
        assert_raises(TypeError, np.min, d, (0.5, 1))
        assert_raises(TypeError, np.min, d, (1, 2.2))
        assert_raises(TypeError, np.min, d, (.2, 1.2))
class TestBooleanIndexing(object):
    """Using a boolean as an integer argument/index is an error."""

    def test_bool_as_int_argument_errors(self):
        a = np.array([[[1]]])

        assert_raises(TypeError, np.reshape, a, (True, -1))
        assert_raises(TypeError, np.reshape, a, (np.bool_(True), -1))
        # Note that operator.index(np.array(True)) does not work, a boolean
        # array is thus also deprecated, but not with the same message:
        assert_raises(TypeError, operator.index, np.array(True))
        assert_warns(DeprecationWarning, operator.index, np.True_)
        assert_raises(TypeError, np.take, args=(a, [0], False))

    def test_boolean_indexing_weirdness(self):
        # Weird boolean indexing things
        a = np.ones((2, 3, 4))
        # These two checks were previously bare comparison expressions whose
        # results were discarded, so the test could never fail; assert them.
        assert_equal(a[False, True, ...].shape, (0, 2, 3, 4))
        assert_equal(a[True, [0, 1], True, True, [1], [[2]]],
                     np.ones((1, 2)))
        assert_raises(IndexError, lambda: a[False, [0, 1], ...])
class TestArrayToIndexDeprecation(object):
    """Creating an index from a non-0-d array is an error."""

    def test_array_to_index_error(self):
        # so no exception is expected. The raising is effectively tested above.
        arr = np.array([[[1]]])

        assert_raises(TypeError, operator.index, np.array([1]))
        assert_raises(TypeError, np.reshape, arr, (arr, -1))
        assert_raises(TypeError, np.take, arr, [0], arr)
class TestNonIntegerArrayLike(object):
    """Array-likes are only valid indices if they can be safely cast to
    integer; for instance, lists of floats or strings give IndexError.
    """

    def test_basic(self):
        a = np.arange(10)

        for bad_index in ([0.5, 1.5], (['1', '2'],)):
            assert_raises(IndexError, a.__getitem__, bad_index)

        # An empty list is a valid (empty) integer index.
        a.__getitem__([])
class TestMultipleEllipsisError(object):
    """An index may contain at most one ellipsis."""

    def test_basic(self):
        a = np.arange(10)

        assert_raises(IndexError, lambda: a[..., ...])
        for count in (2, 3):
            assert_raises(IndexError, a.__getitem__, ((Ellipsis,) * count,))
class TestCApiAccess(object):
    """Exercise item getting/setting through the C-API test helper
    `array_indexing` (mode 0 = get, mode 1 = set)."""

    def test_getitem(self):
        subscript = functools.partial(array_indexing, 0)

        # 0-d arrays don't work:
        assert_raises(IndexError, subscript, np.ones(()), 0)
        # Out of bound values:
        assert_raises(IndexError, subscript, np.ones(10), 11)
        assert_raises(IndexError, subscript, np.ones(10), -11)
        assert_raises(IndexError, subscript, np.ones((10, 10)), 11)
        assert_raises(IndexError, subscript, np.ones((10, 10)), -11)

        a = np.arange(10)
        assert_array_equal(a[4], subscript(a, 4))
        a = a.reshape(5, 2)
        assert_array_equal(a[-4], subscript(a, -4))

    def test_setitem(self):
        assign = functools.partial(array_indexing, 1)

        # Deletion is impossible:
        assert_raises(ValueError, assign, np.ones(10), 0)
        # 0-d arrays don't work:
        assert_raises(IndexError, assign, np.ones(()), 0, 0)
        # Out of bound values:
        assert_raises(IndexError, assign, np.ones(10), 11, 0)
        assert_raises(IndexError, assign, np.ones(10), -11, 0)
        assert_raises(IndexError, assign, np.ones((10, 10)), 11, 0)
        assert_raises(IndexError, assign, np.ones((10, 10)), -11, 0)

        a = np.arange(10)
        assign(a, 4, 10)
        assert_(a[4] == 10)

        a = a.reshape(5, 2)
        assign(a, 4, 10)
        assert_array_equal(a[-1], [10, 10])
import dis
import re
import sys
from io import StringIO
import unittest
from math import copysign
def disassemble(func):
    """Return the dis.dis() output for *func* as a string.

    dis.dis() writes to sys.stdout, so stdout is temporarily redirected
    into an in-memory buffer to capture the listing.
    """
    capture = StringIO()
    saved_stdout = sys.stdout
    sys.stdout = capture
    try:
        dis.dis(func)
    finally:
        # Always restore stdout, even if dis.dis() raises.
        sys.stdout = saved_stdout
    text = capture.getvalue()
    capture.close()
    return text
def dis_single(line):
    """Compile *line* in 'single' (interactive) mode and disassemble it."""
    code = compile(line, '', 'single')
    return disassemble(code)
class TestTranforms(unittest.TestCase):
    """Verify bytecode transformations made by the peephole optimizer.

    Each test disassembles a small snippet and asserts the presence or
    absence of specific opcodes.  NOTE(review): these assertions are tied
    to the bytecode emitted by the CPython version this suite targets;
    they are expected to break on other versions.
    """
    def test_unot(self):
        # UNARY_NOT POP_JUMP_IF_FALSE --> POP_JUMP_IF_TRUE
        def unot(x):
            if not x == 2:
                del x
        asm = disassemble(unot)
        for elem in ('UNARY_NOT', 'POP_JUMP_IF_FALSE'):
            self.assertNotIn(elem, asm)
        for elem in ('POP_JUMP_IF_TRUE',):
            self.assertIn(elem, asm)
    def test_elim_inversion_of_is_or_in(self):
        # `not (a is b)` etc. fold into the single inverted comparison op.
        for line, elem in (
            ('not a is b', '(is not)',),
            ('not a in b', '(not in)',),
            ('not a is not b', '(is)',),
            ('not a not in b', '(in)',),
            ):
            asm = dis_single(line)
            self.assertIn(elem, asm)
    def test_global_as_constant(self):
        # LOAD_GLOBAL None/True/False --> LOAD_CONST None/True/False
        def f(x):
            None
            None
            return x
        def g(x):
            True
            return x
        def h(x):
            False
            return x
        for func, name in ((f, 'None'), (g, 'True'), (h, 'False')):
            asm = disassemble(func)
            for elem in ('LOAD_GLOBAL',):
                self.assertNotIn(elem, asm)
            for elem in ('LOAD_CONST', '('+name+')'):
                self.assertIn(elem, asm)
        def f():
            'Adding a docstring made this test fail in Py2.5.0'
            return None
        self.assertIn('LOAD_CONST', disassemble(f))
        self.assertNotIn('LOAD_GLOBAL', disassemble(f))
    def test_while_one(self):
        # Skip over: LOAD_CONST trueconst POP_JUMP_IF_FALSE xx
        def f():
            while 1:
                pass
            return list
        asm = disassemble(f)
        for elem in ('LOAD_CONST', 'POP_JUMP_IF_FALSE'):
            self.assertNotIn(elem, asm)
        for elem in ('JUMP_ABSOLUTE',):
            self.assertIn(elem, asm)
    def test_pack_unpack(self):
        # BUILD_TUPLE n UNPACK_TUPLE n is elided (via LOAD_CONST/ROT ops).
        for line, elem in (
            ('a, = a,', 'LOAD_CONST',),
            ('a, b = a, b', 'ROT_TWO',),
            ('a, b, c = a, b, c', 'ROT_THREE',),
            ):
            asm = dis_single(line)
            self.assertIn(elem, asm)
            self.assertNotIn('BUILD_TUPLE', asm)
            self.assertNotIn('UNPACK_TUPLE', asm)
    def test_folding_of_tuples_of_constants(self):
        for line, elem in (
            ('a = 1,2,3', '((1, 2, 3))'),
            ('("a","b","c")', "(('a', 'b', 'c'))"),
            ('a,b,c = 1,2,3', '((1, 2, 3))'),
            ('(None, 1, None)', '((None, 1, None))'),
            ('((1, 2), 3, 4)', '(((1, 2), 3, 4))'),
            ):
            asm = dis_single(line)
            self.assertIn(elem, asm)
            self.assertNotIn('BUILD_TUPLE', asm)
        # Long tuples should be folded too.
        asm = dis_single(repr(tuple(range(10000))))
        # One LOAD_CONST for the tuple, one for the None return value
        self.assertEqual(asm.count('LOAD_CONST'), 2)
        self.assertNotIn('BUILD_TUPLE', asm)
        # Bug 1053819:  Tuple of constants misidentified when presented with:
        # . . . opcode_with_arg 100   unary_opcode   BUILD_TUPLE 1  . . .
        # The following would segfault upon compilation
        def crater():
            (~[
                0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
                0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
                0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
                0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
                0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
                0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
                0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
                0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
                0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
                0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
            ],)
    def test_folding_of_lists_of_constants(self):
        for line, elem in (
            # in/not in constants with BUILD_LIST should be folded to a tuple:
            ('a in [1,2,3]', '(1, 2, 3)'),
            ('a not in ["a","b","c"]', "(('a', 'b', 'c'))"),
            ('a in [None, 1, None]', '((None, 1, None))'),
            ('a not in [(1, 2), 3, 4]', '(((1, 2), 3, 4))'),
            ):
            asm = dis_single(line)
            self.assertIn(elem, asm)
            self.assertNotIn('BUILD_LIST', asm)
    def test_folding_of_sets_of_constants(self):
        for line, elem in (
            # in/not in constants with BUILD_SET should be folded to a frozenset:
            ('a in {1,2,3}', frozenset({1, 2, 3})),
            ('a not in {"a","b","c"}', frozenset({'a', 'c', 'b'})),
            ('a in {None, 1, None}', frozenset({1, None})),
            ('a not in {(1, 2), 3, 4}', frozenset({(1, 2), 3, 4})),
            ('a in {1, 2, 3, 3, 2, 1}', frozenset({1, 2, 3})),
            ):
            asm = dis_single(line)
            self.assertNotIn('BUILD_SET', asm)
            # Verify that the frozenset 'elem' is in the disassembly
            # The ordering of the elements in repr( frozenset ) isn't
            # guaranteed, so we jump through some hoops to ensure that we have
            # the frozenset we expect:
            self.assertIn('frozenset', asm)
            # Extract the frozenset literal from the disassembly:
            m = re.match(r'.*(frozenset\({.*}\)).*', asm, re.DOTALL)
            self.assertTrue(m)
            self.assertEqual(eval(m.group(1)), elem)
        # Ensure that the resulting code actually works:
        def f(a):
            return a in {1, 2, 3}
        def g(a):
            return a not in {1, 2, 3}
        self.assertTrue(f(3))
        self.assertTrue(not f(4))
        self.assertTrue(not g(3))
        self.assertTrue(g(4))
    def test_folding_of_binops_on_constants(self):
        for line, elem in (
            ('a = 2+3+4', '(9)'),                   # chained fold
            ('"@"*4', "('@@@@')"),                  # check string ops
            ('a="abc" + "def"', "('abcdef')"),      # check string ops
            ('a = 3**4', '(81)'),                   # binary power
            ('a = 3*4', '(12)'),                    # binary multiply
            ('a = 13//4', '(3)'),                   # binary floor divide
            ('a = 14%4', '(2)'),                    # binary modulo
            ('a = 2+3', '(5)'),                     # binary add
            ('a = 13-4', '(9)'),                    # binary subtract
            ('a = (12,13)[1]', '(13)'),             # binary subscr
            ('a = 13 << 2', '(52)'),                # binary lshift
            ('a = 13 >> 2', '(3)'),                 # binary rshift
            ('a = 13 & 7', '(5)'),                  # binary and
            ('a = 13 ^ 7', '(10)'),                 # binary xor
            ('a = 13 | 7', '(15)'),                 # binary or
            ):
            asm = dis_single(line)
            self.assertIn(elem, asm, asm)
            self.assertNotIn('BINARY_', asm)
        # Verify that unfoldables are skipped
        asm = dis_single('a=2+"b"')
        self.assertIn('(2)', asm)
        self.assertIn("('b')", asm)
        # Verify that large sequences do not result from folding
        asm = dis_single('a="x"*1000')
        self.assertIn('(1000)', asm)
    def test_binary_subscr_on_unicode(self):
        # valid code get optimized
        asm = dis_single('"foo"[0]')
        self.assertIn("('f')", asm)
        self.assertNotIn('BINARY_SUBSCR', asm)
        asm = dis_single('"\u0061\uffff"[1]')
        self.assertIn("('\\uffff')", asm)
        self.assertNotIn('BINARY_SUBSCR', asm)
        # Non-BMP literal: subscripting counts code points, not UTF-16 units.
        asm = dis_single('"\U00012345abcdef"[3]')
        self.assertIn("('c')", asm)
        self.assertNotIn('BINARY_SUBSCR', asm)
        # invalid code doesn't get optimized
        # out of range
        asm = dis_single('"fuu"[10]')
        self.assertIn('BINARY_SUBSCR', asm)
    def test_folding_of_unaryops_on_constants(self):
        for line, elem in (
            ('-0.5', '(-0.5)'),                     # unary negative
            ('-0.0', '(-0.0)'),                     # -0.0
            ('-(1.0-1.0)','(-0.0)'),                # -0.0 after folding
            ('-0', '(0)'),                          # -0
            ('~-2', '(1)'),                         # unary invert
            ('+1', '(1)'),                          # unary positive
            ):
            asm = dis_single(line)
            self.assertIn(elem, asm, asm)
            self.assertNotIn('UNARY_', asm)
        # Check that -0.0 works after marshaling
        def negzero():
            return -(1.0-1.0)
        self.assertNotIn('UNARY_', disassemble(negzero))
        # copysign distinguishes -0.0 from +0.0, which == cannot.
        self.assertTrue(copysign(1.0, negzero()) < 0)
        # Verify that unfoldables are skipped
        for line, elem in (
            ('-"abc"', "('abc')"),                  # unary negative
            ('~"abc"', "('abc')"),                  # unary invert
            ):
            asm = dis_single(line)
            self.assertIn(elem, asm, asm)
            self.assertIn('UNARY_', asm)
    def test_elim_extra_return(self):
        # RETURN LOAD_CONST None RETURN  -->  RETURN
        def f(x):
            return x
        asm = disassemble(f)
        self.assertNotIn('LOAD_CONST', asm)
        self.assertNotIn('(None)', asm)
        self.assertEqual(asm.split().count('RETURN_VALUE'), 1)
    def test_elim_jump_to_return(self):
        # JUMP_FORWARD to RETURN -->  RETURN
        def f(cond, true_value, false_value):
            return true_value if cond else false_value
        asm = disassemble(f)
        self.assertNotIn('JUMP_FORWARD', asm)
        self.assertNotIn('JUMP_ABSOLUTE', asm)
        self.assertEqual(asm.split().count('RETURN_VALUE'), 2)
    def test_elim_jump_after_return1(self):
        # Eliminate dead code: jumps immediately after returns can't be reached
        def f(cond1, cond2):
            if cond1: return 1
            if cond2: return 2
            while 1:
                return 3
            while 1:
                if cond1: return 4
                return 5
            return 6
        asm = disassemble(f)
        self.assertNotIn('JUMP_FORWARD', asm)
        self.assertNotIn('JUMP_ABSOLUTE', asm)
        self.assertEqual(asm.split().count('RETURN_VALUE'), 6)
    def test_elim_jump_after_return2(self):
        # Eliminate dead code: jumps immediately after returns can't be reached
        def f(cond1, cond2):
            while 1:
                if cond1: return 4
        asm = disassemble(f)
        self.assertNotIn('JUMP_FORWARD', asm)
        # There should be one jump for the while loop.
        self.assertEqual(asm.split().count('JUMP_ABSOLUTE'), 1)
        self.assertEqual(asm.split().count('RETURN_VALUE'), 2)
    def test_make_function_doesnt_bail(self):
        # Constant folding must still happen inside annotation expressions.
        def f():
            def g()->1+1:
                pass
            return g
        asm = disassemble(f)
        self.assertNotIn('BINARY_ADD', asm)
    def test_constant_folding(self):
        # Issue #11244: aggressive constant folding.
        exprs = [
            "3 * -5",
            "-3 * 5",
            "2 * (3 * 4)",
            "(2 * 3) * 4",
            "(-1, 2, 3)",
            "(1, -2, 3)",
            "(1, 2, -3)",
            "(1, 2, -3) * 6",
            "lambda x: x in {(3 * -5) + (-1 - 6), (1, -2, 3) * 2, None}",
        ]
        for e in exprs:
            asm = dis_single(e)
            self.assertNotIn('UNARY_', asm, e)
            self.assertNotIn('BINARY_', asm, e)
            self.assertNotIn('BUILD_', asm, e)
class TestBuglets(unittest.TestCase):
    """Regression tests for individual optimizer bugs."""
    def test_bug_11510(self):
        # Issue 11510: the folded-constant-set optimization was commingled
        # with the tuple unpacking optimization; a set literal containing
        # duplicates has fewer elements than the unpacking target expects,
        # so unpacking must still raise at runtime.
        def broken():
            first, second = {1, 1}
            return first, second
        self.assertRaises(ValueError, broken)
def test_main(verbose=None):
    """Run the optimizer test classes; optionally check refcount stability."""
    import sys
    from test import support
    suite = (TestTranforms, TestBuglets)
    support.run_unittest(*suite)
    # On debug builds, rerun the suite several times and record the total
    # reference count after each pass to surface reference leaks.
    if verbose and hasattr(sys, "gettotalrefcount"):
        import gc
        totals = []
        for _ in range(5):
            support.run_unittest(*suite)
            gc.collect()
            totals.append(sys.gettotalrefcount())
        print(totals)
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    test_main(verbose=True)
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._tasks_operations import build_create_request_initial, build_delete_request_initial, build_get_details_request, build_get_request, build_list_request, build_update_request_initial
# Generic type of the deserialized response passed to the optional `cls` hook.
T = TypeVar('T')
# Signature of the optional `cls` callback: (pipeline response, deserialized
# model, response headers) -> arbitrary caller-defined result.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class TasksOperations:
    """TasksOperations async operations.
    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.
    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.containerregistry.v2018_09_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    # NOTE(review): auto-generated by AutoRest; regenerate rather than
    # hand-editing, per the file header.
    models = _models
    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    @distributed_trace
    def list(
        self,
        resource_group_name: str,
        registry_name: str,
        **kwargs: Any
    ) -> AsyncIterable["_models.TaskListResult"]:
        """Lists all the tasks for a specified container registry.
        :param resource_group_name: The name of the resource group to which the container registry
         belongs.
        :type resource_group_name: str
        :param registry_name: The name of the container registry.
        :type registry_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either TaskListResult or the result of cls(response)
        :rtype:
         ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerregistry.v2018_09_01.models.TaskListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.TaskListResult"]
        # Map well-known auth/404/409 status codes to typed exceptions;
        # callers may override or extend via the `error_map` kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        def prepare_request(next_link=None):
            # Build the first-page request, or follow the server-supplied
            # continuation link for subsequent pages.
            if not next_link:
                request = build_list_request(
                    subscription_id=self._config.subscription_id,
                    resource_group_name=resource_group_name,
                    registry_name=registry_name,
                    template_url=self.list.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                request = build_list_request(
                    subscription_id=self._config.subscription_id,
                    resource_group_name=resource_group_name,
                    registry_name=registry_name,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request
        async def extract_data(pipeline_response):
            # Returns (continuation token, async iterable of page items).
            deserialized = self._deserialize("TaskListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/tasks'}  # type: ignore
    @distributed_trace_async
    async def get(
        self,
        resource_group_name: str,
        registry_name: str,
        task_name: str,
        **kwargs: Any
    ) -> "_models.Task":
        """Get the properties of a specified task.
        :param resource_group_name: The name of the resource group to which the container registry
         belongs.
        :type resource_group_name: str
        :param registry_name: The name of the container registry.
        :type registry_name: str
        :param task_name: The name of the container registry task.
        :type task_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: Task, or the result of cls(response)
        :rtype: ~azure.mgmt.containerregistry.v2018_09_01.models.Task
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.Task"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        request = build_get_request(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            registry_name=registry_name,
            task_name=task_name,
            template_url=self.get.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('Task', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/tasks/{taskName}'}  # type: ignore
    async def _create_initial(
        self,
        resource_group_name: str,
        registry_name: str,
        task_name: str,
        task_create_parameters: "_models.Task",
        **kwargs: Any
    ) -> "_models.Task":
        # Initial PUT of the create long-running operation; begin_create polls
        # the result.  Returns the deserialized Task from the first response.
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.Task"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        _json = self._serialize.body(task_create_parameters, 'Task')
        request = build_create_request_initial(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            registry_name=registry_name,
            task_name=task_name,
            content_type=content_type,
            json=_json,
            template_url=self._create_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        # Both 200 (updated) and 201 (created) carry a Task body.
        if response.status_code == 200:
            deserialized = self._deserialize('Task', pipeline_response)
        if response.status_code == 201:
            deserialized = self._deserialize('Task', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _create_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/tasks/{taskName}'}  # type: ignore
    @distributed_trace_async
    async def begin_create(
        self,
        resource_group_name: str,
        registry_name: str,
        task_name: str,
        task_create_parameters: "_models.Task",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.Task"]:
        """Creates a task for a container registry with the specified parameters.
        :param resource_group_name: The name of the resource group to which the container registry
         belongs.
        :type resource_group_name: str
        :param registry_name: The name of the container registry.
        :type registry_name: str
        :param task_name: The name of the container registry task.
        :type task_name: str
        :param task_create_parameters: The parameters for creating a task.
        :type task_create_parameters: ~azure.mgmt.containerregistry.v2018_09_01.models.Task
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
         this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either Task or the result of cls(response)
        :rtype:
         ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerregistry.v2018_09_01.models.Task]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        polling = kwargs.pop('polling', True)  # type: Union[bool, azure.core.polling.AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.Task"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only issue the initial request when not resuming from a saved token.
        if cont_token is None:
            raw_result = await self._create_initial(
                resource_group_name=resource_group_name,
                registry_name=registry_name,
                task_name=task_name,
                task_create_parameters=task_create_parameters,
                content_type=content_type,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        def get_long_running_output(pipeline_response):
            response = pipeline_response.http_response
            deserialized = self._deserialize('Task', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/tasks/{taskName}'}  # type: ignore
    async def _delete_initial(
        self,
        resource_group_name: str,
        registry_name: str,
        task_name: str,
        **kwargs: Any
    ) -> None:
        # Initial DELETE of the delete long-running operation; begin_delete
        # polls completion.  Success statuses: 200/202 (accepted), 204 (gone).
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        request = build_delete_request_initial(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            registry_name=registry_name,
            task_name=task_name,
            template_url=self._delete_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/tasks/{taskName}'}  # type: ignore
    @distributed_trace_async
    async def begin_delete(
        self,
        resource_group_name: str,
        registry_name: str,
        task_name: str,
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """Deletes a specified task.
        :param resource_group_name: The name of the resource group to which the container registry
         belongs.
        :type resource_group_name: str
        :param registry_name: The name of the container registry.
        :type registry_name: str
        :param task_name: The name of the container registry task.
        :type task_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
         this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, azure.core.polling.AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only issue the initial request when not resuming from a saved token.
        if cont_token is None:
            raw_result = await self._delete_initial(
                resource_group_name=resource_group_name,
                registry_name=registry_name,
                task_name=task_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        def get_long_running_output(pipeline_response):
            if cls:
                return cls(pipeline_response, None, {})
        if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/tasks/{taskName}'}  # type: ignore
    async def _update_initial(
        self,
        resource_group_name: str,
        registry_name: str,
        task_name: str,
        task_update_parameters: "_models.TaskUpdateParameters",
        **kwargs: Any
    ) -> "_models.Task":
        # Initial PATCH of the update long-running operation; begin_update
        # polls the result.
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.Task"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        _json = self._serialize.body(task_update_parameters, 'TaskUpdateParameters')
        request = build_update_request_initial(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            registry_name=registry_name,
            task_name=task_name,
            content_type=content_type,
            json=_json,
            template_url=self._update_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        # Both 200 and 201 carry a Task body.
        if response.status_code == 200:
            deserialized = self._deserialize('Task', pipeline_response)
        if response.status_code == 201:
            deserialized = self._deserialize('Task', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/tasks/{taskName}'}  # type: ignore
    @distributed_trace_async
    async def begin_update(
        self,
        resource_group_name: str,
        registry_name: str,
        task_name: str,
        task_update_parameters: "_models.TaskUpdateParameters",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.Task"]:
        """Updates a task with the specified parameters.
        :param resource_group_name: The name of the resource group to which the container registry
         belongs.
        :type resource_group_name: str
        :param registry_name: The name of the container registry.
        :type registry_name: str
        :param task_name: The name of the container registry task.
        :type task_name: str
        :param task_update_parameters: The parameters for updating a task.
        :type task_update_parameters:
         ~azure.mgmt.containerregistry.v2018_09_01.models.TaskUpdateParameters
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
         this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either Task or the result of cls(response)
        :rtype:
         ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerregistry.v2018_09_01.models.Task]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        polling = kwargs.pop('polling', True)  # type: Union[bool, azure.core.polling.AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.Task"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only issue the initial request when not resuming from a saved token.
        if cont_token is None:
            raw_result = await self._update_initial(
                resource_group_name=resource_group_name,
                registry_name=registry_name,
                task_name=task_name,
                task_update_parameters=task_update_parameters,
                content_type=content_type,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        def get_long_running_output(pipeline_response):
            response = pipeline_response.http_response
            deserialized = self._deserialize('Task', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/tasks/{taskName}'}  # type: ignore
    @distributed_trace_async
    async def get_details(
        self,
        resource_group_name: str,
        registry_name: str,
        task_name: str,
        **kwargs: Any
    ) -> "_models.Task":
        """Returns a task with extended information that includes all secrets.
        :param resource_group_name: The name of the resource group to which the container registry
         belongs.
        :type resource_group_name: str
        :param registry_name: The name of the container registry.
        :type registry_name: str
        :param task_name: The name of the container registry task.
        :type task_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: Task, or the result of cls(response)
        :rtype: ~azure.mgmt.containerregistry.v2018_09_01.models.Task
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.Task"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        # POST to .../listDetails so secrets are returned only on request.
        request = build_get_details_request(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            registry_name=registry_name,
            task_name=task_name,
            template_url=self.get_details.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('Task', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get_details.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/tasks/{taskName}/listDetails'}  # type: ignore
| |
#!/usr/bin/env python3
# portions copyright 2001, Autonomous Zones Industries, Inc., all rights...
# err... reserved and offered to the public under the terms of the
# Python 2.2 license.
# Author: Zooko O'Whielacronx
# http://zooko.com/
# mailto:zooko@zooko.com
#
# Copyright 2000, Mojam Media, Inc., all rights reserved.
# Author: Skip Montanaro
#
# Copyright 1999, Bioreason, Inc., all rights reserved.
# Author: Andrew Dalke
#
# Copyright 1995-1997, Automatrix, Inc., all rights reserved.
# Author: Skip Montanaro
#
# Copyright 1991-1995, Stichting Mathematisch Centrum, all rights reserved.
#
#
# Permission to use, copy, modify, and distribute this Python software and
# its associated documentation for any purpose without fee is hereby
# granted, provided that the above copyright notice appears in all copies,
# and that both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of neither Automatrix,
# Bioreason or Mojam Media be used in advertising or publicity pertaining to
# distribution of the software without specific, written prior permission.
#
"""program/module to trace Python program or function execution
Sample use, command line:
trace.py -c -f counts --ignore-dir '$prefix' spam.py eggs
trace.py -t --ignore-dir '$prefix' spam.py eggs
trace.py --trackcalls spam.py eggs
Sample use, programmatically
import sys
# create a Trace object, telling it what to ignore, and whether to
# do tracing or line-counting or both.
tracer = trace.Trace(ignoredirs=[sys.base_prefix, sys.base_exec_prefix,],
trace=0, count=1)
# run the new command using the given tracer
tracer.run('main()')
# make a report, placing output in /tmp
r = tracer.results()
r.write_results(show_missing=True, coverdir="/tmp")
"""
__all__ = ['Trace', 'CoverageResults']
import argparse
import linecache
import os
import re
import sys
import token
import tokenize
import inspect
import gc
import dis
import pickle
from time import monotonic as _time
try:
    import threading
except ImportError:
    # No threading support: tracing only needs to hook the main thread.
    _settrace = sys.settrace

    def _unsettrace():
        sys.settrace(None)
else:
    # With threading available, install/remove the trace function both on
    # the current thread and on every thread started afterwards.
    def _settrace(func):
        threading.settrace(func)
        sys.settrace(func)

    def _unsettrace():
        sys.settrace(None)
        threading.settrace(None)
# Magic comment that excludes a source line from "missing" annotation.
PRAGMA_NOCOVER = "#pragma NO COVER"

# Simple rx to find lines with no code.
rx_blank = re.compile(r'^\s*(#.*)?$')
class _Ignore:
def __init__(self, modules=None, dirs=None):
self._mods = set() if not modules else set(modules)
self._dirs = [] if not dirs else [os.path.normpath(d)
for d in dirs]
self._ignore = { '<string>': 1 }
def names(self, filename, modulename):
if modulename in self._ignore:
return self._ignore[modulename]
# haven't seen this one before, so see if the module name is
# on the ignore list.
if modulename in self._mods: # Identical names, so ignore
self._ignore[modulename] = 1
return 1
# check if the module is a proper submodule of something on
# the ignore list
for mod in self._mods:
# Need to take some care since ignoring
# "cmp" mustn't mean ignoring "cmpcache" but ignoring
# "Spam" must also mean ignoring "Spam.Eggs".
if modulename.startswith(mod + '.'):
self._ignore[modulename] = 1
return 1
# Now check that filename isn't in one of the directories
if filename is None:
# must be a built-in, so we must ignore
self._ignore[modulename] = 1
return 1
# Ignore a file when it contains one of the ignorable paths
for d in self._dirs:
# The '+ os.sep' is to ensure that d is a parent directory,
# as compared to cases like:
# d = "/usr/local"
# filename = "/usr/local.py"
# or
# d = "/usr/local.py"
# filename = "/usr/local.py"
if filename.startswith(d + os.sep):
self._ignore[modulename] = 1
return 1
# Tried the different ways, so we don't ignore this module
self._ignore[modulename] = 0
return 0
def _modname(path):
"""Return a plausible module name for the patch."""
base = os.path.basename(path)
filename, ext = os.path.splitext(base)
return filename
def _fullmodname(path):
"""Return a plausible module name for the path."""
# If the file 'path' is part of a package, then the filename isn't
# enough to uniquely identify it. Try to do the right thing by
# looking in sys.path for the longest matching prefix. We'll
# assume that the rest is the package name.
comparepath = os.path.normcase(path)
longest = ""
for dir in sys.path:
dir = os.path.normcase(dir)
if comparepath.startswith(dir) and comparepath[len(dir)] == os.sep:
if len(dir) > len(longest):
longest = dir
if longest:
base = path[len(longest) + 1:]
else:
base = path
# the drive letter is never part of the module name
drive, base = os.path.splitdrive(base)
base = base.replace(os.sep, ".")
if os.altsep:
base = base.replace(os.altsep, ".")
filename, ext = os.path.splitext(base)
return filename.lstrip(".")
class CoverageResults:
    """Container for trace results: per-line execution counts, the set of
    called functions, and caller/callee relationships.  Can merge, report,
    and pickle its data."""

    def __init__(self, counts=None, calledfuncs=None, infile=None,
                 callers=None, outfile=None):
        """
        :param counts: dict mapping (filename, lineno) -> execution count
        :param calledfuncs: dict whose keys are (filename, modulename,
                            funcname) triples of called functions
        :param infile: pickle file of previously stored results to merge in
        :param callers: dict whose keys are (caller, callee) pairs of such
                        triples
        :param outfile: pickle file write_results() stores the data in
        """
        self.counts = counts
        if self.counts is None:
            self.counts = {}
        self.counter = self.counts.copy()  # map (filename, lineno) to count
        self.calledfuncs = calledfuncs
        if self.calledfuncs is None:
            self.calledfuncs = {}
        self.calledfuncs = self.calledfuncs.copy()
        self.callers = callers
        if self.callers is None:
            self.callers = {}
        self.callers = self.callers.copy()
        self.infile = infile
        self.outfile = outfile
        if self.infile:
            # Try to merge existing counts file.
            try:
                with open(self.infile, 'rb') as f:
                    counts, calledfuncs, callers = pickle.load(f)
                self.update(self.__class__(counts, calledfuncs, callers))
            except (OSError, EOFError, ValueError) as err:
                print(("Skipping counts file %r: %s"
                       % (self.infile, err)), file=sys.stderr)

    def is_ignored_filename(self, filename):
        """Return True if the filename does not refer to a file
        we want to have reported.
        """
        return ((filename.startswith('<') and filename.endswith('>')) or
                # XXX PyPy freezes some (pure-Python) modules at
                # translation-time.  These contain filenames starting with
                # "<builtin>/" instead of their actual filenames. Ignore them
                # for now.
                filename.startswith("<builtin>/"))

    def update(self, other):
        """Merge in the data from another CoverageResults"""
        counts = self.counts
        calledfuncs = self.calledfuncs
        callers = self.callers
        other_counts = other.counts
        other_calledfuncs = other.calledfuncs
        other_callers = other.callers

        # Line counts accumulate; the two key sets are simply unioned.
        for key in other_counts:
            counts[key] = counts.get(key, 0) + other_counts[key]
        for key in other_calledfuncs:
            calledfuncs[key] = 1
        for key in other_callers:
            callers[key] = 1

    def write_results(self, show_missing=True, summary=False, coverdir=None):
        """
        Write the coverage results.

        :param show_missing: Show lines that had no hits.
        :param summary: Include coverage summary per module.
        :param coverdir: If None, the results of each module are placed in its
                         directory, otherwise it is included in the directory
                         specified.
        """
        if self.calledfuncs:
            print()
            print("functions called:")
            calls = self.calledfuncs
            for filename, modulename, funcname in sorted(calls):
                print(("filename: %s, modulename: %s, funcname: %s"
                       % (filename, modulename, funcname)))

        if self.callers:
            print()
            print("calling relationships:")
            lastfile = lastcfile = ""
            for ((pfile, pmod, pfunc), (cfile, cmod, cfunc)) \
                    in sorted(self.callers):
                if pfile != lastfile:
                    print()
                    print("***", pfile, "***")
                    lastfile = pfile
                    lastcfile = ""
                if cfile != pfile and lastcfile != cfile:
                    print(" -->", cfile)
                    lastcfile = cfile
                print(" %s.%s -> %s.%s" % (pmod, pfunc, cmod, cfunc))

        # turn the counts data ("(filename, lineno) = count") into something
        # accessible on a per-file basis
        per_file = {}
        for filename, lineno in self.counts:
            lines_hit = per_file[filename] = per_file.get(filename, {})
            lines_hit[lineno] = self.counts[(filename, lineno)]

        # accumulate summary info, if needed
        sums = {}

        for filename, count in per_file.items():
            if self.is_ignored_filename(filename):
                continue

            if filename.endswith(".pyc"):
                filename = filename[:-1]

            if coverdir is None:
                dir = os.path.dirname(os.path.abspath(filename))
                modulename = _modname(filename)
            else:
                dir = coverdir
                if not os.path.exists(dir):
                    os.makedirs(dir)
                modulename = _fullmodname(filename)

            # If desired, get a list of the line numbers which represent
            # executable content (returned as a dict for better lookup speed)
            if show_missing:
                lnotab = _find_executable_linenos(filename)
            else:
                lnotab = {}
            if lnotab:
                source = linecache.getlines(filename)
                coverpath = os.path.join(dir, modulename + ".cover")
                with open(filename, 'rb') as fp:
                    encoding, _ = tokenize.detect_encoding(fp.readline)
                n_hits, n_lines = self.write_results_file(coverpath, source,
                                                          lnotab, count, encoding)
                if summary and n_lines:
                    percent = int(100 * n_hits / n_lines)
                    sums[modulename] = n_lines, percent, modulename, filename

        if summary and sums:
            print("lines cov% module (path)")
            for m in sorted(sums):
                n_lines, percent, modulename, filename = sums[m]
                print("%5d %3d%% %s (%s)" % sums[m])

        if self.outfile:
            # try and store counts and module info into self.outfile
            try:
                # BUG FIX: the file object passed to pickle.dump was never
                # closed; use a context manager so it is flushed and closed
                # even when pickling fails.
                with open(self.outfile, 'wb') as f:
                    pickle.dump((self.counts, self.calledfuncs, self.callers),
                                f, 1)
            except OSError as err:
                print("Can't save counts files because %s" % err, file=sys.stderr)

    def write_results_file(self, path, lines, lnotab, lines_hit, encoding=None):
        """Return a coverage results file in path.

        :param lines: source lines of the module
        :param lnotab: dict of executable line numbers (for ">>>>>>" marks)
        :param lines_hit: dict mapping lineno -> hit count
        :returns: (n_hits, n_executable_lines) for the summary
        """
        try:
            outfile = open(path, "w", encoding=encoding)
        except OSError as err:
            print(("trace: Could not open %r for writing: %s"
                   "- skipping" % (path, err)), file=sys.stderr)
            return 0, 0

        n_lines = 0
        n_hits = 0
        with outfile:
            for lineno, line in enumerate(lines, 1):
                # do the blank/comment match to try to mark more lines
                # (help the reader find stuff that hasn't been covered)
                if lineno in lines_hit:
                    outfile.write("%5d: " % lines_hit[lineno])
                    n_hits += 1
                    n_lines += 1
                elif rx_blank.match(line):
                    outfile.write(" ")
                else:
                    # lines preceded by no marks weren't hit
                    # Highlight them if so indicated, unless the line contains
                    # #pragma: NO COVER
                    if lineno in lnotab and not PRAGMA_NOCOVER in line:
                        outfile.write(">>>>>> ")
                        n_lines += 1
                    else:
                        outfile.write(" ")
                    outfile.write(line.expandtabs(8))
        return n_hits, n_lines
def _find_lines_from_code(code, strs):
"""Return dict where keys are lines in the line number table."""
linenos = {}
for _, lineno in dis.findlinestarts(code):
if lineno not in strs:
linenos[lineno] = 1
return linenos
def _find_lines(code, strs):
    """Return lineno dict for all code objects reachable from code."""
    # Line numbers of this scope itself...
    linenos = _find_lines_from_code(code, strs)
    # ...plus those of any nested code objects (functions, classes,
    # comprehensions) stored in the constants.
    for const in code.co_consts:
        if inspect.iscode(const):
            linenos.update(_find_lines(const, strs))
    return linenos
def _find_strings(filename, encoding=None):
"""Return a dict of possible docstring positions.
The dict maps line numbers to strings. There is an entry for
line that contains only a string or a part of a triple-quoted
string.
"""
d = {}
# If the first token is a string, then it's the module docstring.
# Add this special case so that the test in the loop passes.
prev_ttype = token.INDENT
with open(filename, encoding=encoding) as f:
tok = tokenize.generate_tokens(f.readline)
for ttype, tstr, start, end, line in tok:
if ttype == token.STRING:
if prev_ttype == token.INDENT:
sline, scol = start
eline, ecol = end
for i in range(sline, eline + 1):
d[i] = 1
prev_ttype = ttype
return d
def _find_executable_linenos(filename):
    """Return dict where keys are line numbers in the line number table."""
    try:
        with tokenize.open(filename) as f:
            prog = f.read()
            encoding = f.encoding
    except OSError as err:
        print(("Not printing coverage data for %r: %s"
               % (filename, err)), file=sys.stderr)
        return {}
    # Compile the module, then drop docstring-only lines: they appear in
    # the line table but are not interesting as "executable" content.
    code = compile(prog, filename, "exec")
    return _find_lines(code, _find_strings(filename, encoding))
class Trace:
    """Driver object: installs the trace hooks and routes events to the
    handler pair selected by the constructor flags."""

    def __init__(self, count=1, trace=1, countfuncs=0, countcallers=0,
                 ignoremods=(), ignoredirs=(), infile=None, outfile=None,
                 timing=False):
        """
        @param count true iff it should count number of times each
                     line is executed
        @param trace true iff it should print out each line that is
                     being counted
        @param countfuncs true iff it should just output a list of
                     (filename, modulename, funcname,) for functions
                     that were called at least once;  This overrides
                     `count' and `trace'
        @param ignoremods a list of the names of modules to ignore
        @param ignoredirs a list of the names of directories to ignore
                     all of the (recursive) contents of
        @param infile file from which to read stored counts to be
                     added into the results
        @param outfile file in which to write the results
        @param timing true iff timing information be displayed
        """
        self.infile = infile
        self.outfile = outfile
        self.ignore = _Ignore(ignoremods, ignoredirs)
        self.counts = {}   # keys are (filename, linenumber)
        self.pathtobasename = {}  # for memoizing os.path.basename
        self.donothing = 0
        self.trace = trace
        self._calledfuncs = {}
        self._callers = {}
        self._caller_cache = {}
        self.start_time = None
        if timing:
            self.start_time = _time()
        # Pick the global/local handler pair for the requested mode;
        # countcallers and countfuncs need no per-line (local) handler.
        if countcallers:
            self.globaltrace = self.globaltrace_trackcallers
        elif countfuncs:
            self.globaltrace = self.globaltrace_countfuncs
        elif trace and count:
            self.globaltrace = self.globaltrace_lt
            self.localtrace = self.localtrace_trace_and_count
        elif trace:
            self.globaltrace = self.globaltrace_lt
            self.localtrace = self.localtrace_trace
        elif count:
            self.globaltrace = self.globaltrace_lt
            self.localtrace = self.localtrace_count
        else:
            # Ahem -- do nothing?  Okay.
            self.donothing = 1

    def run(self, cmd):
        """Trace *cmd* in the __main__ namespace."""
        import __main__
        dict = __main__.__dict__
        self.runctx(cmd, dict, dict)

    def runctx(self, cmd, globals=None, locals=None):
        """Exec *cmd* under the trace hooks with the given namespaces."""
        if globals is None: globals = {}
        if locals is None: locals = {}
        if not self.donothing:
            _settrace(self.globaltrace)
        try:
            exec(cmd, globals, locals)
        finally:
            # Always remove the hooks, even when cmd raises.
            if not self.donothing:
                _unsettrace()

    def runfunc(self, func, *args, **kw):
        """Call func(*args, **kw) under the trace hooks; return its result."""
        result = None
        if not self.donothing:
            # NOTE(review): uses sys.settrace directly (current thread only),
            # unlike runctx which hooks new threads via _settrace as well --
            # confirm whether this asymmetry is intended.
            sys.settrace(self.globaltrace)
        try:
            result = func(*args, **kw)
        finally:
            if not self.donothing:
                sys.settrace(None)
        return result

    def file_module_function_of(self, frame):
        """Return (filename, modulename, funcname) for the frame's code,
        qualifying funcname with a class name when one can be discovered
        via gc.get_referrers()."""
        code = frame.f_code
        filename = code.co_filename
        if filename:
            modulename = _modname(filename)
        else:
            modulename = None

        funcname = code.co_name
        clsname = None
        if code in self._caller_cache:
            if self._caller_cache[code] is not None:
                clsname = self._caller_cache[code]
        else:
            self._caller_cache[code] = None
            ## use of gc.get_referrers() was suggested by Michael Hudson
            # all functions which refer to this code object
            funcs = [f for f in gc.get_referrers(code)
                     if inspect.isfunction(f)]
            # require len(func) == 1 to avoid ambiguity caused by calls to
            # new.function(): "In the face of ambiguity, refuse the
            # temptation to guess."
            if len(funcs) == 1:
                dicts = [d for d in gc.get_referrers(funcs[0])
                         if isinstance(d, dict)]
                if len(dicts) == 0:
                    # PyPy may store functions directly on the class
                    # (more exactly: the container is not a Python object)
                    dicts = funcs
                if len(dicts) == 1:
                    classes = [c for c in gc.get_referrers(dicts[0])
                               if hasattr(c, "__bases__")]
                    if len(classes) == 1:
                        # ditto for new.classobj()
                        clsname = classes[0].__name__
                        # cache the result - assumption is that new.* is
                        # not called later to disturb this relationship
                        # _caller_cache could be flushed if functions in
                        # the new module get called.
                        self._caller_cache[code] = clsname

        if clsname is not None:
            funcname = "%s.%s" % (clsname, funcname)

        return filename, modulename, funcname

    def globaltrace_trackcallers(self, frame, why, arg):
        """Handler for call events.

        Adds information about who called who to the self._callers dict.
        """
        if why == 'call':
            # XXX Should do a better job of identifying methods
            this_func = self.file_module_function_of(frame)
            parent_func = self.file_module_function_of(frame.f_back)
            self._callers[(parent_func, this_func)] = 1

    def globaltrace_countfuncs(self, frame, why, arg):
        """Handler for call events.

        Adds (filename, modulename, funcname) to the self._calledfuncs dict.
        """
        if why == 'call':
            this_func = self.file_module_function_of(frame)
            self._calledfuncs[this_func] = 1

    def globaltrace_lt(self, frame, why, arg):
        """Handler for call events.

        If the code block being entered is to be ignored, returns `None',
        else returns self.localtrace.
        """
        if why == 'call':
            code = frame.f_code
            filename = frame.f_globals.get('__file__', None)
            if filename:
                # XXX _modname() doesn't work right for packages, so
                # the ignore support won't work right for packages
                modulename = _modname(filename)
                if modulename is not None:
                    ignore_it = self.ignore.names(filename, modulename)
                    if not ignore_it:
                        if self.trace:
                            print((" --- modulename: %s, funcname: %s"
                                   % (modulename, code.co_name)))
                        return self.localtrace
            else:
                return None

    def localtrace_trace_and_count(self, frame, why, arg):
        # Per-line handler: count the line AND echo it (timestamped when
        # timing is enabled).
        if why == "line":
            # record the file name and line number of every trace
            filename = frame.f_code.co_filename
            lineno = frame.f_lineno
            key = filename, lineno
            self.counts[key] = self.counts.get(key, 0) + 1

            if self.start_time:
                print('%.2f' % (_time() - self.start_time), end=' ')
            bname = os.path.basename(filename)
            print("%s(%d): %s" % (bname, lineno,
                                  linecache.getline(filename, lineno)), end='')
        return self.localtrace

    def localtrace_trace(self, frame, why, arg):
        # Per-line handler: echo each executed line without counting.
        if why == "line":
            # record the file name and line number of every trace
            filename = frame.f_code.co_filename
            lineno = frame.f_lineno

            if self.start_time:
                print('%.2f' % (_time() - self.start_time), end=' ')
            bname = os.path.basename(filename)
            print("%s(%d): %s" % (bname, lineno,
                                  linecache.getline(filename, lineno)), end='')
        return self.localtrace

    def localtrace_count(self, frame, why, arg):
        # Per-line handler: count executions of (filename, lineno) silently.
        if why == "line":
            filename = frame.f_code.co_filename
            lineno = frame.f_lineno
            key = filename, lineno
            self.counts[key] = self.counts.get(key, 0) + 1
        return self.localtrace

    def results(self):
        """Return a CoverageResults snapshot of everything gathered so far."""
        return CoverageResults(self.counts, infile=self.infile,
                               outfile=self.outfile,
                               calledfuncs=self._calledfuncs,
                               callers=self._callers)
def main():
    """Command-line entry point: parse options, run the target script under
    the requested tracing mode, then write or accumulate the results."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--version', action='version', version='trace 2.0')

    grp = parser.add_argument_group('Main options',
            'One of these (or --report) must be given')

    grp.add_argument('-c', '--count', action='store_true',
            help='Count the number of times each line is executed and write '
                 'the counts to <module>.cover for each module executed, in '
                 'the module\'s directory. See also --coverdir, --file, '
                 '--no-report below.')
    grp.add_argument('-t', '--trace', action='store_true',
            help='Print each line to sys.stdout before it is executed')
    grp.add_argument('-l', '--listfuncs', action='store_true',
            help='Keep track of which functions are executed at least once '
                 'and write the results to sys.stdout after the program exits. '
                 'Cannot be specified alongside --trace or --count.')
    grp.add_argument('-T', '--trackcalls', action='store_true',
            help='Keep track of caller/called pairs and write the results to '
                 'sys.stdout after the program exits.')

    grp = parser.add_argument_group('Modifiers')

    _grp = grp.add_mutually_exclusive_group()
    _grp.add_argument('-r', '--report', action='store_true',
            help='Generate a report from a counts file; does not execute any '
                 'code. --file must specify the results file to read, which '
                 'must have been created in a previous run with --count '
                 '--file=FILE')
    _grp.add_argument('-R', '--no-report', action='store_true',
            help='Do not generate the coverage report files. '
                 'Useful if you want to accumulate over several runs.')

    grp.add_argument('-f', '--file',
            help='File to accumulate counts over several runs')
    grp.add_argument('-C', '--coverdir',
            help='Directory where the report files go. The coverage report '
                 'for <package>.<module> will be written to file '
                 '<dir>/<package>/<module>.cover')
    grp.add_argument('-m', '--missing', action='store_true',
            help='Annotate executable lines that were not executed with '
                 '">>>>>> "')
    grp.add_argument('-s', '--summary', action='store_true',
            help='Write a brief summary for each file to sys.stdout. '
                 'Can only be used with --count or --report')
    grp.add_argument('-g', '--timing', action='store_true',
            help='Prefix each line with the time since the program started. '
                 'Only used while tracing')

    grp = parser.add_argument_group('Filters',
            'Can be specified multiple times')
    grp.add_argument('--ignore-module', action='append', default=[],
            help='Ignore the given module(s) and its submodules'
                 '(if it is a package). Accepts comma separated list of '
                 'module names.')
    grp.add_argument('--ignore-dir', action='append', default=[],
            help='Ignore files in the given directory '
                 '(multiple directories can be joined by os.pathsep).')

    parser.add_argument('filename', nargs='?',
            help='file to run as main program')
    parser.add_argument('arguments', nargs=argparse.REMAINDER,
            help='arguments to the program')

    opts = parser.parse_args()

    if opts.ignore_dir:
        # Compute this interpreter's stdlib directories so that the
        # documented '$prefix' / '$exec_prefix' placeholders can be expanded.
        rel_path = 'lib', 'python{0.major}.{0.minor}'.format(sys.version_info)
        _prefix = os.path.join(sys.base_prefix, *rel_path)
        _exec_prefix = os.path.join(sys.base_exec_prefix, *rel_path)

    def parse_ignore_dir(s):
        s = os.path.expanduser(os.path.expandvars(s))
        s = s.replace('$prefix', _prefix).replace('$exec_prefix', _exec_prefix)
        return os.path.normpath(s)

    # Flatten the comma/os.pathsep-separated repeatable options.
    opts.ignore_module = [mod.strip()
                          for i in opts.ignore_module for mod in i.split(',')]
    opts.ignore_dir = [parse_ignore_dir(s)
                       for i in opts.ignore_dir for s in i.split(os.pathsep)]

    if opts.report:
        if not opts.file:
            parser.error('-r/--report requires -f/--file')
        results = CoverageResults(infile=opts.file, outfile=opts.file)
        return results.write_results(opts.missing, opts.summary, opts.coverdir)

    if not any([opts.trace, opts.count, opts.listfuncs, opts.trackcalls]):
        parser.error('must specify one of --trace, --count, --report, '
                     '--listfuncs, or --trackcalls')

    if opts.listfuncs and (opts.count or opts.trace):
        parser.error('cannot specify both --listfuncs and (--trace or --count)')

    if opts.summary and not opts.count:
        parser.error('--summary can only be used with --count or --report')

    if opts.filename is None:
        parser.error('filename is missing: required with the main options')

    # Make the traced program see itself as the main script.
    sys.argv = opts.filename, *opts.arguments
    sys.path[0] = os.path.dirname(opts.filename)

    t = Trace(opts.count, opts.trace, countfuncs=opts.listfuncs,
              countcallers=opts.trackcalls, ignoremods=opts.ignore_module,
              ignoredirs=opts.ignore_dir, infile=opts.file,
              outfile=opts.file, timing=opts.timing)
    try:
        with open(opts.filename) as fp:
            code = compile(fp.read(), opts.filename, 'exec')
        # try to emulate __main__ namespace as much as possible
        globs = {
            '__file__': opts.filename,
            '__name__': '__main__',
            '__package__': None,
            '__cached__': None,
        }
        t.runctx(code, globs, globs)
    except OSError as err:
        sys.exit("Cannot run file %r because: %s" % (sys.argv[0], err))
    except SystemExit:
        pass

    results = t.results()

    if not opts.no_report:
        results.write_results(opts.missing, opts.summary, opts.coverdir)

if __name__=='__main__':
    main()
| |
# ===============================================================================
# Copyright 2012 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
from __future__ import print_function
import weakref
# ============= standard library imports ========================
from itertools import groupby
import six
from apptools.preferences.preference_binding import bind_preference
from six.moves import map
from traits.api import Property, Event, cached_property, Any, Int, Str
from traits.has_traits import provides
from pychron.core.helpers.iterfuncs import partition
# ============= local library imports ==========================
from pychron.core.i_datastore import IDatastore
from pychron.core.progress import progress_loader, CancelLoadingError
from pychron.database.adapters.isotope_adapter import IsotopeAdapter
from pychron.database.orms.isotope.meas import meas_AnalysisTable
from pychron.experiment.utilities.identifier import make_runid
from pychron.loggable import Loggable
from pychron.processing.analyses.dbanalysis import DBAnalysis
# Module-level cache of constructed analyses keyed by uuid, with a parallel
# per-uuid usage counter.  CACHE_LIMIT presumably bounds the cache size --
# confirm in _add_to_cache (not shown in this file chunk).
ANALYSIS_CACHE = {}
ANALYSIS_CACHE_COUNT = {}
CACHE_LIMIT = 1000
@provides(IDatastore)
class BaseIsotopeDatabaseManager(Loggable):
    """Base manager that wraps an isotope database adapter and implements
    the IDatastore protocol."""

    # Adapter instance; created lazily from _db_klass (see _db_default).
    db = Any
    _db_klass = Any
    datasource_url = Property
    precedence = Int(0)

    def __init__(
        self,
        bind=True,
        connect=True,
        warn=True,
        version_warn=False,
        attribute_warn=False,
        *args,
        **kw
    ):
        """
        :param bind: bind the adapter's connection traits to the application
                     preferences before connecting
        :param connect: open the database connection immediately
        :param warn, version_warn, attribute_warn: forwarded to db.connect()
        """
        super(BaseIsotopeDatabaseManager, self).__init__(*args, **kw)
        if bind:
            try:
                self.bind_preferences()
            except AttributeError:
                # Preferences unavailable; log the traceback and continue
                # with an unbound adapter.
                import traceback

                traceback.print_exc()

        if connect:
            self.db.connect(
                warn=warn, version_warn=version_warn, attribute_warn=attribute_warn
            )

    # IDatastore protocol
    def get_greatest_aliquot(self, identifier):
        # Returns 0 when there is no db or the adapter returns a falsy value.
        ret = 0
        if self.db:
            ret = self.db.get_greatest_aliquot(identifier)
        return ret or 0

    def get_greatest_step(self, identifier, aliquot):
        # NOTE(review): unlike get_greatest_aliquot, the adapter's return
        # value is passed through unchanged (may be None) -- confirm callers
        # handle that.
        ret = 0
        if self.db:
            ret = self.db.get_greatest_step(identifier, aliquot)
        return ret

    def connect(self, *args, **kw):
        # Delegate to the adapter; returns None when no adapter is set.
        if self.db:
            return self.db.connect(*args, **kw)

    def is_connected(self):
        if self.db:
            return self.db.connected

    def load(self):
        self.populate_default_tables()
        return True

    def populate_default_tables(self, db=None):
        """Seed the default rows into *db* (defaults to self.db)."""
        self.debug("populating default tables")
        if db is None:
            db = self.db

        if db:
            if db.connect(force=False):
                # Deferred import: keeps module import light and avoids a
                # potential circular import.
                from pychron.database.defaults import load_isotopedb_defaults

                load_isotopedb_defaults(db)
                self.debug("defaults finished")
                return True

    def bind_preferences(self):
        """Create the adapter if needed and bind its connection traits to
        the 'pychron.database' preference nodes."""
        if self.db is None:
            self.db = self._db_factory()

        prefid = "pychron.database"
        bind_preference(self.db, "kind", "{}.kind".format(prefid))
        # Host credentials only apply to networked (mysql) backends.
        if self.db.kind == "mysql":
            bind_preference(self.db, "host", "{}.host".format(prefid))
            bind_preference(self.db, "username", "{}.username".format(prefid))
            bind_preference(self.db, "password", "{}.password".format(prefid))

        bind_preference(self.db, "name", "{}.db_name".format(prefid))
        # bind_preference(self.db, 'save_username', '{}.save_username'.format(prefid))

    def open_progress(self, n=2, **kw):
        return self._open_progress(n, **kw)

    def _open_progress(self, n, close_at_end=True):
        # Deferred import: UI toolkit is only needed when a dialog is shown.
        from pychron.core.ui.progress_dialog import myProgressDialog

        pd = myProgressDialog(
            max=n - 1, close_at_end=close_at_end, can_cancel=True, can_ok=True
        )
        pd.open()
        # pd.on_trait_change(self._progress_closed, 'closed')
        return pd

    def _progress_closed(self):
        if self.application:
            win = self.application.windows[-1]
            win.activate()

    def _db_factory(self):
        db = self._db_klass(application=self.application)
        return db

    def _get_datasource_url(self):
        # Traits property getter for datasource_url.
        if self.db:
            return self.db.datasource_url

    # ===============================================================================
    # defaults
    # ===============================================================================
    def _db_default(self):
        return self._db_factory()
class IsotopeDatabaseManager(BaseIsotopeDatabaseManager):
    # Concrete adapter class used by the base class's _db_factory().
    _db_klass = IsotopeAdapter

    # Currently selected irradiation/level names.
    irradiation = Str
    level = Str

    # Derived trait properties; recomputed when the listed traits fire.
    irradiations = Property(depends_on="saved, updated")
    levels = Property(depends_on="irradiation, saved, updated")

    # Events; firing them invalidates the cached properties above.
    saved = Event
    updated = Event
    def save_flux(self, labnumber, v, e):
        """Persist flux value *v* +/- error *e* for *labnumber* and select
        the new flux history for its irradiation position, then notify the
        user.  No-op (silently) when the labnumber is not found."""
        db = self.db
        with db.session_ctx():
            dbln = db.get_labnumber(labnumber)
            if dbln:
                dbpos = dbln.irradiation_position
                dbhist = db.add_flux_history(dbpos)
                dbflux = db.add_flux(float(v), float(e))
                dbflux.history = dbhist
                dbln.selected_flux_history = dbhist
                self.information_dialog(
                    u"Flux for {} {} \u00b1{} saved to database".format(labnumber, v, e)
                )
def filter_analysis_tag(self, ans, exclude):
if not isinstance(exclude, (list, tuple)):
exclude = (exclude,)
return [x for x in ans if x.tag not in exclude]
    def load_raw_data(self, ai):
        """Unpack signal/baseline/sniff data blobs from the database into
        the analysis *ai*.  No-op if the raw data is already loaded."""
        if not ai.has_raw_data:
            db = self.db
            with db.session_ctx():
                dban = db.get_analysis_uuid(ai.uuid)
                for dbiso in dban.isotopes:
                    name = dbiso.molecular_weight.name
                    try:
                        iso = ai.isotopes[name]
                        blob = dbiso.signal.data
                        # Route the blob to the matching series by kind.
                        if dbiso.kind == "signal":
                            iso.unpack_data(blob)
                        elif dbiso.kind == "baseline":
                            iso.baseline.unpack_data(blob)
                        elif dbiso.kind == "sniff":
                            iso.sniff.unpack_data(blob)
                    except KeyError:
                        self.debug(
                            "load_raw_data: no matching isotope for {}, {}".format(
                                name, ",".join(ai.isotope_keys)
                            )
                        )

                # !!using db.get_analysis_isotopes is extremely slow!! why is unknown
                # dbisos = db.get_analysis_isotopes(ai.uuid)
                # isos = ai.isotopes
                # for dban, dbiso, dbmw in dbisos:
                #     name = dbmw.name
                #     if name in isos:
                #         blob = dbiso.signal.data
                #         iso = isos[name]
                #         if dbiso.kind == 'signal':
                #             iso.unpack_data(blob)
                #         elif dbiso.kind == 'baseline':
                #             iso.baseline.unpack_data(blob)
                #         elif dbiso.kind == 'sniff':
                #             iso.sniff.unpack_data(blob)

            ai.has_raw_data = True
def make_analysis(self, ai, **kw):
return self.make_analyses((ai,), **kw)[0]
def _calculate_cached_ages(self, ans, calculate_age, calculate_F):
if ans:
if calculate_age:
self.debug("calculated cached analysis ages")
for ca in ans:
ca.calculate_age()
elif calculate_F:
self.debug("calculated cached analysis F")
for ca in ans:
ca.calculate_age()
def _load_aux_cached_analyses(self, ans):
db_ans = []
no_db_ans = []
for ca in ans:
if not ca.has_changes:
no_db_ans.append(ca)
else:
db_ans.append(ca)
return db_ans, no_db_ans
def _unpack_cached_analyses(self, ans, calculate_age, calculate_F):
no_db_ans = []
db_ans = []
for ca in ans:
if not ca.has_raw_data:
print(ca.record_id, "no rawasffas")
no_db_ans.append(ca)
else:
if calculate_age:
ca.calculate_age()
elif calculate_F:
ca.calculate_f()
db_ans.append(ca)
return db_ans, no_db_ans
def _increment_cache(self, cached_ans, use_cache):
if use_cache:
for ci in cached_ans:
self._add_to_cache(ci)
# def _clone_vcs_repos(self, no_db_ans):
# if self.use_vcs:
# #clone the necessary project repositories
# def f(x):
# try:
# return x.labnumber.sample.project.name
# except AttributeError:
# pass
#
# prs = filter(lambda x: not x is None, (f(ai) for ai in no_db_ans))
# self.vcs.clone_project_repos(prs)
def _setup_progress(self, n, progress, use_progress):
if n > 1:
if progress is not None:
if progress.max < (n + progress.get_value()):
progress.increase_max(n + 2)
elif use_progress:
progress = self._open_progress(n + 2)
return progress
    def make_analyses(
        self,
        ans,
        progress=None,
        use_progress=True,
        exclude=None,
        use_cache=True,
        unpack=False,
        calculate_age=False,
        calculate_F=False,
        load_aux=False,
        **kw
    ):
        """Turn analysis records into fully constructed DBAnalysis objects.

        loading the analysis' signals appears to be the most expensive operation.
        the majority of the load time is in _construct_analysis

        :param ans: analysis records (DBAnalysis instances or record views)
        :param exclude: tag or tags to filter out before loading
        :param use_cache: serve from / populate the module-level ANALYSIS_CACHE
        :param unpack: require raw signal data to be unpacked
        :param load_aux: require auxiliary (changeable) data to be loaded
        :returns: list of constructed analyses (possibly empty)
        """
        if exclude:
            ans = self.filter_analysis_tag(ans, exclude)
        if not ans:
            self.debug("no analyses to load")
            return []

        db = self.db
        with db.session_ctx():
            # partition into DBAnalysis vs IsotopeRecordView
            db_ans, no_db_ans = list(
                map(list, partition(ans, lambda x: isinstance(x, DBAnalysis)))
            )

            self._calculate_cached_ages(db_ans, calculate_age, calculate_F)

            # Demote constructed analyses that still lack required raw data
            # back into the "needs construction" list.
            # NOTE(review): removing from db_ans while iterating it can skip
            # elements -- consider iterating a copy; confirm intent.
            if unpack:
                for di in db_ans:
                    if not di.has_raw_data:
                        no_db_ans.append(di)
                        db_ans.remove(di)

            # Same demotion for analyses missing auxiliary data.
            if load_aux:
                for di in db_ans:
                    if not di.has_changes:
                        if di not in no_db_ans:
                            no_db_ans.append(di)
                        db_ans.remove(di)

            if no_db_ans:
                if use_cache:
                    # partition into cached and non cached analyses
                    cached_ans, no_db_ans = partition(
                        no_db_ans, lambda x: x.uuid in ANALYSIS_CACHE
                    )
                    cached_ans = list(cached_ans)
                    no_db_ans = list(no_db_ans)

                    cns = [ANALYSIS_CACHE[ci.uuid] for ci in cached_ans]

                    # if unpack is true make sure cached analyses have raw data
                    if unpack or load_aux:
                        if unpack:
                            a, b = self._unpack_cached_analyses(
                                cns, calculate_age, calculate_F
                            )
                            db_ans.extend(a)
                            no_db_ans.extend(b)
                        if load_aux:
                            a, b = self._load_aux_cached_analyses(cns)
                            db_ans.extend(a)
                            no_db_ans.extend(b)
                    else:
                        self._calculate_cached_ages(cns, calculate_age, calculate_F)
                        # add analyses from cache to db_ans
                        db_ans.extend(cns)

                    # increment value in cache_count
                    self._increment_cache(cached_ans, use_cache)

                # load remaining analyses
                n = len(no_db_ans)
                if n:
                    # self._clone_vcs_repos(no_db_ans)
                    progress = self._setup_progress(n, progress, use_progress)
                    db_ans, new_ans = self._construct_analyses(
                        no_db_ans,
                        db_ans,
                        progress,
                        calculate_age,
                        calculate_F,
                        unpack,
                        use_cache,
                        use_progress,
                        load_aux=load_aux,
                        **kw
                    )
                    db_ans.extend(new_ans)

                    # self.debug('use vcs {}'.format(self.use_vcs))
                    # if self.use_vcs:
                    # if progress:
                    #         progress.increase_max(len(new_ans)+1)
                    #         progress.change_message('Adding analyses to vcs')
                    #
                    #     self.vcs.add_analyses(new_ans, progress=progress)

                    # self.debug('use offline database {}'.format(self.use_offline_database))
                    # if self.use_offline_database:
                    # if progress:
                    #         progress.increase_max(len(new_ans) + 1)
                    #         progress.change_message('Transferring analyses for offline usage')
                    #     self.offline_bridge.add_analyses(db, new_ans, progress=progress)

            if progress:
                progress.soft_close()

            return db_ans
def get_level(self, level, irradiation=None):
if irradiation is None:
irradiation = self.irradiation
return self.db.get_irradiation_level(irradiation, level)
def remove_from_cache(self, ans):
if not isinstance(ans, (list, tuple)):
ans = (ans,)
for ai in ans:
uuid = ai.uuid
if uuid in ANALYSIS_CACHE:
self.debug("remove {} from cache".format(ai.record_id))
ANALYSIS_CACHE.pop(uuid)
ANALYSIS_CACHE_COUNT.pop(uuid)
def verify_database_connection(self, inform=True):
    """Force a (re)connect to the database.

    Returns True when the connection succeeds, False when there is no
    database or connecting fails.  When ``inform`` is set and no database
    is configured, a warning dialog is shown.
    """
    db = self.db
    if db is not None:
        if db.connect(force=True):
            return True
    elif inform:
        # bug fix: message was garbled ("Not Database available")
        self.warning_dialog("No database available")
    # explicit falsy return instead of implicit None (same truthiness)
    return False
# ===============================================================================
# private
# ===============================================================================
def _construct_analyses(
    self,
    no_db_ans,
    db_ans,
    progress,
    calculate_age,
    calculate_F,
    unpack,
    use_cache,
    use_progress,
    **kw
):
    """Construct analysis objects for the records in ``no_db_ans``.

    All database rows are fetched with a single ``get_analyses_uuid`` call,
    grouped, and consumed one group per input record inside ``func``.
    Returns ``(db_ans, new_ans)``; returns ``([], [])`` if the user cancels
    the progress dialog.

    NOTE(review): the lockstep ``next(dbrecords)`` pairing assumes
    ``get_analyses_uuid`` returns rows in the same order as ``uuids`` —
    confirm against the db layer.
    """
    uuids = [ri.uuid for ri in no_db_ans]
    # get all dbrecords with one call
    ms = self.db.get_analyses_uuid(uuids)

    # bind once; ``func`` is called per record by progress_loader
    construct = self._construct_analysis
    add_to_cache = self._add_to_cache

    key = lambda x: x[0]
    dbrecords = groupby(ms, key=key)

    def func(x, prog, i, n):
        # consume exactly one group per input record (order-coupled, see above)
        _, gi = next(dbrecords)
        self.debug("constructing {}/{} {} {}".format(i + 1, n, x.record_id, x.uuid))
        a = construct(
            x,
            gi,
            prog,
            unpack=unpack,
            calculate_age=calculate_age,
            calculate_F=calculate_F,
            **kw
        )
        if use_cache:
            add_to_cache(a)
        return a

    try:
        return db_ans, progress_loader(
            no_db_ans,
            func,
            progress=progress,
            use_progress=use_progress,
            reraise_cancel=True,
        )
    except CancelLoadingError:
        # user cancelled; caller gets empty results rather than partial ones
        return [], []
def _construct_analysis(
    self,
    rec,
    group,
    prog,
    calculate_age=True,
    calculate_F=False,
    unpack=False,
    load_aux=False,
):
    """Create (or refresh) a DBAnalysis from a database record group.

    ``rec`` may be a ``meas_AnalysisTable`` row, any object exposing
    ``record_id``, or an already-built ``DBAnalysis`` (which is synced in
    place).  Age/F calculation is attempted only for unknown/cocktail
    analysis types.  Returns the (possibly new) DBAnalysis.
    """
    atype = None
    if isinstance(rec, meas_AnalysisTable):
        rid = make_runid(rec.labnumber.identifier, rec.aliquot, rec.step)
        atype = rec.measurement.analysis_type.name
    elif hasattr(rec, "record_id"):
        rid = rec.record_id
    else:
        # fall back to a unique-enough label for the progress message
        rid = id(rec)

    # cleanup: removed unused graph_id/group_id locals and the dead
    # commented-out code that referenced them
    if atype is None:
        atype = rec.analysis_type

    if prog:
        m = ""
        if calculate_age:
            show_age = atype in ("unknown", "cocktail")
            m = "calculating age" if show_age else ""
        elif calculate_F:
            m = "calculating F"
        msg = "loading {}. {}".format(rid, m)
        prog.change_message(msg)

    if isinstance(rec, DBAnalysis):
        # already constructed; just refresh from the db group
        ai = rec
        if load_aux:
            ai.sync_aux(group)
        else:
            ai.sync(group, unpack=unpack, load_aux=load_aux)
    else:
        ai = DBAnalysis()
        ai.sync(group, unpack=unpack, load_aux=load_aux)

    if atype in ("unknown", "cocktail"):
        if calculate_age:
            ai.calculate_age()
        if calculate_F:
            ai.calculate_f()
    return ai
def _add_to_cache(self, rec):
    """Insert ``rec`` into the module-level analysis cache and bump its
    use count.

    When the cache grows beyond ``CACHE_LIMIT`` the least-used entry is
    evicted.  ``None`` records are ignored.
    """
    if rec is None:
        self.debug("cannot add None to cache")
        # bug fix: previously execution fell through and raised
        # AttributeError on rec.uuid
        return

    if rec.uuid not in ANALYSIS_CACHE:
        # NOTE: the original stored weakref.ref(rec)(), which immediately
        # dereferences the weakref and stores a strong reference anyway
        ANALYSIS_CACHE[rec.uuid] = rec
        ANALYSIS_CACHE_COUNT[rec.uuid] = 1
    else:
        ANALYSIS_CACHE_COUNT[rec.uuid] += 1

    # remove items from cache based on frequency of use
    if len(ANALYSIS_CACHE) > CACHE_LIMIT:
        k, v = min(six.iteritems(ANALYSIS_CACHE_COUNT), key=lambda x: x[1])
        ANALYSIS_CACHE.pop(k)
        ANALYSIS_CACHE_COUNT.pop(k)
        self.debug(
            "Cache limit exceeded {}. removing {} n uses={}".format(
                CACHE_LIMIT, k, v
            )
        )
# ===============================================================================
# property get/set
# ===============================================================================
@cached_property
def _get_irradiations(self):
    """Names of all irradiations in the database (cached).

    Seeds ``self.irradiation`` with the first name when none is selected.
    """
    names = []
    db = self.db
    if db and db.connected:
        with db.session_ctx():
            names = [irr.name for irr in db.get_irradiations() if irr.name]
    if names and not self.irradiation:
        self.irradiation = names[0]
    return names
@cached_property
def _get_levels(self):
    """Sorted level names for the current irradiation (cached).

    Seeds ``self.level`` with the first name when none is selected.
    """
    names = []
    db = self.db
    if db and db.connected:
        with db.session_ctx():
            irrad = db.get_irradiation(self.irradiation)
            if irrad:
                names = sorted(li.name for li in irrad.levels if li.name)
    if names and not self.level:
        self.level = names[0]
    return names
# ===============================================================================
# handlers
# ===============================================================================
def _irradiation_changed(self):
    """Change handler: clear the selected level when the irradiation changes."""
    self.level = ""
# ============= EOF =============================================
| |
# Authors: Marijn van Vliet <w.m.vanvliet@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Teon Brooks <teon.brooks@gmail.com>
#
# License: BSD (3-clause)
from copy import deepcopy
import numpy as np
from scipy import linalg
from .constants import FIFF
from .meas_info import _check_ch_keys
from .proj import _has_eeg_average_ref_proj, make_eeg_average_ref_proj
from .proj import setup_proj
from .pick import pick_types, pick_channels, pick_channels_forward
from .base import BaseRaw
from ..evoked import Evoked
from ..epochs import BaseEpochs
from ..utils import (logger, warn, verbose, _validate_type, _check_preload,
_check_option)
from ..defaults import DEFAULTS
def _copy_channel(inst, ch_name, new_ch_name):
"""Add a copy of a channel specified by ch_name.
Input data can be in the form of Raw, Epochs or Evoked.
The instance object is modified inplace.
Parameters
----------
inst : instance of Raw | Epochs | Evoked
Data containing the EEG channels
ch_name : str
Name of the channel to copy.
new_ch_name : str
Name given to the copy of the channel.
Returns
-------
inst : instance of Raw | Epochs | Evoked
The data with a copy of a given channel.
"""
new_inst = inst.copy().pick_channels([ch_name])
new_inst.rename_channels({ch_name: new_ch_name})
inst.add_channels([new_inst], force_update_info=True)
return inst
def _apply_reference(inst, ref_from, ref_to=None, forward=None,
                     ch_type='auto'):
    """Apply a custom EEG referencing scheme.

    Subtracts the mean of the ``ref_from`` channels from the ``ref_to``
    channels in place (all EEG-like channels when ``ref_to`` is None).
    When ``forward`` is given, the REST correction is also applied.
    Returns ``(inst, ref_data)`` where ``ref_data`` is the subtracted
    reference signal or None when ``ref_from`` is empty.
    """
    # Check to see that data is preloaded
    _check_preload(inst, "Applying a reference")

    ch_type = _get_ch_type(inst, ch_type)
    ch_dict = {ch_type: True, 'meg': False, 'ref_meg': False}
    eeg_idx = pick_types(inst.info, **ch_dict)

    if ref_to is None:
        ref_to = [inst.ch_names[i] for i in eeg_idx]
        extra = 'EEG channels found'
    else:
        extra = 'channels supplied'
    if len(ref_to) == 0:
        raise ValueError('No %s to apply the reference to' % (extra,))

    # After referencing, existing SSPs might not be valid anymore.
    projs_to_remove = []
    for i, proj in enumerate(inst.info['projs']):
        # Remove any average reference projections
        if proj['desc'] == 'Average EEG reference' or \
                proj['kind'] == FIFF.FIFFV_PROJ_ITEM_EEG_AVREF:
            logger.info('Removing existing average EEG reference '
                        'projection.')
            # Don't remove the projection right away, but do this at the end
            # of this loop.
            projs_to_remove.append(i)

        # Inactive SSPs may block re-referencing
        elif (not proj['active'] and
              len([ch for ch in (ref_from + ref_to)
                   if ch in proj['data']['col_names']]) > 0):
            raise RuntimeError(
                'Inactive signal space projection (SSP) operators are '
                'present that operate on sensors involved in the desired '
                'referencing scheme. These projectors need to be applied '
                'using the apply_proj() method function before the desired '
                'reference can be set.'
            )

    # Bug fix: delete from the highest index down -- deleting in ascending
    # order shifts the remaining indices and removes the wrong projections
    # when more than one matches.
    for i in sorted(projs_to_remove, reverse=True):
        del inst.info['projs'][i]

    # Need to call setup_proj after changing the projs:
    inst._projector, _ = \
        setup_proj(inst.info, add_eeg_ref=False, activate=False)

    # Compute reference
    if len(ref_from) > 0:
        # this is guaranteed below, but we should avoid the crazy
        # pick_channels behavior that [] gives all. Also use ordered=True
        # just to make sure that all supplied channels actually exist.
        assert len(ref_to) > 0

        # keep the names around for the REST forward selection below
        ref_names = ref_from
        ref_from = pick_channels(inst.ch_names, ref_from, ordered=True)
        ref_to = pick_channels(inst.ch_names, ref_to, ordered=True)

        data = inst._data
        ref_data = data[..., ref_from, :].mean(-2, keepdims=True)
        data[..., ref_to, :] -= ref_data
        ref_data = ref_data[..., 0, :]

        # If the reference touches EEG/ECoG/sEEG electrodes, note in the
        # info that a non-CAR has been applied.
        if len(np.intersect1d(ref_to, eeg_idx)) > 0:
            inst.info['custom_ref_applied'] = FIFF.FIFFV_MNE_CUSTOM_REF_ON

        # REST
        if forward is not None:
            # use ch_sel and the given forward
            forward = pick_channels_forward(forward, ref_names, ordered=True)
            # 1-3. Compute a forward (G) and avg-ref'ed data (done above)
            G = forward['sol']['data']
            assert G.shape[0] == len(ref_names)
            # 4. Compute the forward (G) and average-reference it (Ga):
            Ga = G - np.mean(G, axis=0, keepdims=True)
            # 5. Compute the Ga_inv by SVD
            Ga_inv = linalg.pinv(Ga, rcond=1e-6)
            # 6. Compute Ra = (G @ Ga_inv) in eq (8) from G and Ga_inv
            Ra = G @ Ga_inv
            # 7-8. Compute Vp = Ra @ Va; then Vpa=average(Vp)
            Vpa = np.mean(Ra @ data[..., ref_from, :], axis=-2, keepdims=True)
            data[..., ref_to, :] += Vpa
    else:
        ref_data = None

    return inst, ref_data
def add_reference_channels(inst, ref_channels, copy=True):
    """Add reference channels to data that consists of all zeros.

    Adds reference channels to data that were not included during recording.
    This is useful when you need to re-reference your data to different
    channels. These added channels will consist of all zeros.

    Parameters
    ----------
    inst : instance of Raw | Epochs | Evoked
        Instance of Raw or Epochs with EEG channels and reference channel(s).
    ref_channels : str | list of str
        Name of the electrode(s) which served as the reference in the
        recording. If a name is provided, a corresponding channel is added
        and its data is set to 0. This is useful for later re-referencing.
    copy : bool
        Specifies whether the data will be copied (True) or modified in-place
        (False). Defaults to True.

    Returns
    -------
    inst : instance of Raw | Epochs | Evoked
        Data with added EEG reference channels.
    """
    # Check to see that data is preloaded
    _check_preload(inst, 'add_reference_channels')

    _validate_type(ref_channels, (list, tuple, str), 'ref_channels')
    if isinstance(ref_channels, str):
        ref_channels = [ref_channels]

    for ch in ref_channels:
        if ch in inst.info['ch_names']:
            raise ValueError("Channel %s already specified in inst." % ch)

    # Once CAR is applied (active), don't allow adding channels
    if _has_eeg_average_ref_proj(inst.info['projs'], check_active=True):
        raise RuntimeError('Average reference already applied to data.')

    if copy:
        inst = inst.copy()

    # Append a zero row per new channel to the data array.
    if isinstance(inst, (BaseRaw, Evoked)):
        data = inst._data
        refs = np.zeros((len(ref_channels), data.shape[1]))
        data = np.vstack((data, refs))
        inst._data = data
    elif isinstance(inst, BaseEpochs):
        data = inst._data
        x, y, z = data.shape
        refs = np.zeros((x * len(ref_channels), z))
        # Fortran-order reshape appends the zero rows to every epoch's
        # channel axis rather than to the end of the stacked array.
        data = np.vstack((data.reshape((x * y, z), order='F'), refs))
        data = data.reshape(x, y + len(ref_channels), z, order='F')
        inst._data = data
    else:
        raise TypeError("inst should be Raw, Epochs, or Evoked instead of %s."
                        % type(inst))
    nchan = len(inst.info['ch_names'])

    # only do this if we actually have digitisation points
    if inst.info.get('dig', None) is not None:
        # "zeroth" EEG electrode dig points is reference
        ref_dig_loc = [dl for dl in inst.info['dig'] if (
                       dl['kind'] == FIFF.FIFFV_POINT_EEG and
                       dl['ident'] == 0)]
        if len(ref_channels) > 1 or len(ref_dig_loc) != len(ref_channels):
            ref_dig_array = np.zeros(12)
            warn('The locations of multiple reference channels are ignored '
                 '(set to zero).')
        else:  # n_ref_channels == 1 and a single ref digitization exists
            ref_dig_array = np.concatenate((ref_dig_loc[0]['r'],
                                            ref_dig_loc[0]['r'], np.zeros(6)))
            # Replace the (possibly new) Ref location for each channel
            for idx in pick_types(inst.info, meg=False, eeg=True, exclude=[]):
                inst.info['chs'][idx]['loc'][3:6] = ref_dig_loc[0]['r']
    else:
        # we should actually be able to do this from the montage, but
        # it looks like the montage isn't stored, so we can't extract
        # this information. The user will just have to call set_montage()
        # by setting this to zero, we fall back to the old behavior
        # when missing digitisation
        ref_dig_array = np.zeros(12)

    # Build an info entry for every new (all-zero) reference channel.
    # NOTE(review): logno/scanno use the same nchan + 1 value for every
    # added channel -- presumably harmless, but confirm against FIFF usage.
    for ch in ref_channels:
        chan_info = {'ch_name': ch,
                     'coil_type': FIFF.FIFFV_COIL_EEG,
                     'kind': FIFF.FIFFV_EEG_CH,
                     'logno': nchan + 1,
                     'scanno': nchan + 1,
                     'cal': 1,
                     'range': 1.,
                     'unit_mul': 0.,
                     'unit': FIFF.FIFF_UNIT_V,
                     'coord_frame': FIFF.FIFFV_COORD_HEAD,
                     'loc': ref_dig_array}
        inst.info['chs'].append(chan_info)
        inst.info._update_redundant()
    if isinstance(inst, BaseRaw):
        # keep calibration and on-disk read picks in sync with the new rows
        inst._cals = np.hstack((inst._cals, [1] * len(ref_channels)))
        range_ = np.arange(1, len(ref_channels) + 1)
        for pi, picks in enumerate(inst._read_picks):
            inst._read_picks[pi] = np.concatenate(
                [picks, np.max(picks) + range_])
    inst.info._check_consistency()
    set_eeg_reference(inst, ref_channels=ref_channels, copy=False,
                      verbose=False)
    return inst
# Human-readable names for the FIFF custom_ref_applied constants, used in
# the _check_can_reref error message.
_ref_dict = {
    FIFF.FIFFV_MNE_CUSTOM_REF_ON: 'on',
    FIFF.FIFFV_MNE_CUSTOM_REF_OFF: 'off',
    FIFF.FIFFV_MNE_CUSTOM_REF_CSD: 'CSD',
}
def _check_can_reref(inst):
    """Raise if ``inst`` carries a custom reference type (e.g. CSD) that
    cannot be overridden by a new reference."""
    _validate_type(inst, (BaseRaw, BaseEpochs, Evoked), "Instance")
    current_custom = inst.info['custom_ref_applied']
    allowed = (FIFF.FIFFV_MNE_CUSTOM_REF_ON, FIFF.FIFFV_MNE_CUSTOM_REF_OFF)
    if current_custom not in allowed:
        raise RuntimeError('Cannot set new reference on data with custom '
                           'reference type %r' % (_ref_dict[current_custom],))
@verbose
def set_eeg_reference(inst, ref_channels='average', copy=True,
                      projection=False, ch_type='auto', forward=None,
                      verbose=None):
    """Specify which reference to use for EEG data.

    Use this function to explicitly specify the desired reference for EEG.
    This can be either an existing electrode or a new virtual channel.
    This function will re-reference the data according to the desired
    reference.

    Parameters
    ----------
    inst : instance of Raw | Epochs | Evoked
        Instance of Raw or Epochs with EEG channels and reference channel(s).
    %(set_eeg_reference_ref_channels)s
    copy : bool
        Specifies whether the data will be copied (True) or modified in-place
        (False). Defaults to True.
    %(set_eeg_reference_projection)s
    %(set_eeg_reference_ch_type)s
    %(set_eeg_reference_forward)s
    %(verbose)s

    Returns
    -------
    inst : instance of Raw | Epochs | Evoked
        Data with EEG channels re-referenced. If ``ref_channels='average'`` and
        ``projection=True`` a projection will be added instead of directly
        re-referencing the data.
    ref_data : array
        Array of reference data subtracted from EEG channels. This will be
        ``None`` if ``projection=True`` or ``ref_channels='REST'``.
    %(set_eeg_reference_see_also_notes)s
    """
    from ..forward import Forward
    _check_can_reref(inst)

    if projection:  # average reference projector
        if ref_channels != 'average':
            raise ValueError('Setting projection=True is only supported for '
                             'ref_channels="average", got %r.'
                             % (ref_channels,))
        if _has_eeg_average_ref_proj(inst.info['projs']):
            warn('An average reference projection was already added. The data '
                 'has been left untouched.')
        else:
            # Creating an average reference may fail. In this case, make
            # sure that the custom_ref_applied flag is left untouched.
            custom_ref_applied = inst.info['custom_ref_applied']
            try:
                inst.info['custom_ref_applied'] = FIFF.FIFFV_MNE_CUSTOM_REF_OFF
                inst.add_proj(make_eeg_average_ref_proj(inst.info,
                                                        activate=False))
            except Exception:
                # restore the flag before propagating the failure
                inst.info['custom_ref_applied'] = custom_ref_applied
                raise
            # If the data has been preloaded, projections will no
            # longer be automatically applied.
            if inst.preload:
                logger.info('Average reference projection was added, '
                            'but has not been applied yet. Use the '
                            'apply_proj method to apply it.')
        # projection path never touches the data directly
        return inst, None
    del projection  # not used anymore

    inst = inst.copy() if copy else inst
    ch_type = _get_ch_type(inst, ch_type)
    ch_dict = {ch_type: True, 'meg': False, 'ref_meg': False}
    eeg_idx = pick_types(inst.info, **ch_dict)
    ch_sel = [inst.ch_names[i] for i in eeg_idx]

    if ref_channels == 'REST':
        _validate_type(forward, Forward, 'forward when ref_channels="REST"')
    else:
        forward = None  # signal to _apply_reference not to do REST

    if ref_channels in ('average', 'REST'):
        logger.info(f'Applying {ref_channels} reference.')
        ref_channels = ch_sel

    if ref_channels == []:
        logger.info('EEG data marked as already having the desired reference.')
    else:
        logger.info('Applying a custom %s '
                    'reference.' % DEFAULTS['titles'][ch_type])

    return _apply_reference(inst, ref_channels, ch_sel, forward,
                            ch_type=ch_type)
def _get_ch_type(inst, ch_type):
    """Resolve ``'auto'`` to the first referenceable channel type present
    in ``inst`` (eeg, then ecog, then seeg); validate explicit values."""
    _validate_type(ch_type, str, 'ch_type')
    _check_option('ch_type', ch_type, ('auto', 'eeg', 'ecog', 'seeg'))
    if ch_type != 'auto':
        return ch_type
    # search in priority order for the first reasonable reference-able type
    for candidate in ('eeg', 'ecog', 'seeg'):
        if candidate in inst:
            logger.info('%s channel type selected for '
                        're-referencing' % DEFAULTS['titles'][candidate])
            return candidate
    # auto came up empty
    raise ValueError('No EEG, ECoG or sEEG channels found '
                     'to rereference.')
@verbose
def set_bipolar_reference(inst, anode, cathode, ch_name=None, ch_info=None,
                          drop_refs=True, copy=True, verbose=None):
    """Re-reference selected channels using a bipolar referencing scheme.

    A bipolar reference takes the difference between two channels (the anode
    minus the cathode) and adds it as a new virtual channel. The original
    channels will be dropped.

    Multiple anodes and cathodes can be specified, in which case multiple
    virtual channels will be created. The 1st anode will be subtracted from the
    1st cathode, the 2nd anode from the 2nd cathode, etc.

    By default, the virtual channels will be annotated with channel info of
    the anodes, their locations set to (0, 0, 0) and coil types set to
    EEG_BIPOLAR.

    Parameters
    ----------
    inst : instance of Raw | Epochs | Evoked
        Data containing the unreferenced channels.
    anode : str | list of str
        The name(s) of the channel(s) to use as anode in the bipolar reference.
    cathode : str | list of str
        The name(s) of the channel(s) to use as cathode in the bipolar
        reference.
    ch_name : str | list of str | None
        The channel name(s) for the virtual channel(s) containing the resulting
        signal. By default, bipolar channels are named after the anode and
        cathode, but it is recommended to supply a more meaningful name.
    ch_info : dict | list of dict | None
        This parameter can be used to supply a dictionary (or a dictionary for
        each bipolar channel) containing channel information to merge in,
        overwriting the default values. Defaults to None.
    drop_refs : bool
        Whether to drop the anode/cathode channels from the instance.
    copy : bool
        Whether to operate on a copy of the data (True) or modify it in-place
        (False). Defaults to True.
    %(verbose)s

    Returns
    -------
    inst : instance of Raw | Epochs | Evoked
        Data with the specified channels re-referenced.

    See Also
    --------
    set_eeg_reference : Convenience function for creating an EEG reference.

    Notes
    -----
    1. If the anodes contain any EEG channels, this function removes
       any pre-existing average reference projections.

    2. During source localization, the EEG signal should have an average
       reference.

    3. The data must be preloaded.

    .. versionadded:: 0.9.0
    """
    _check_can_reref(inst)
    if not isinstance(anode, list):
        anode = [anode]

    if not isinstance(cathode, list):
        cathode = [cathode]

    if len(anode) != len(cathode):
        raise ValueError('Number of anodes (got %d) must equal the number '
                         'of cathodes (got %d).' % (len(anode), len(cathode)))

    if ch_name is None:
        # default name: "<anode>-<cathode>"
        ch_name = ['%s-%s' % ac for ac in zip(anode, cathode)]
    elif not isinstance(ch_name, list):
        ch_name = [ch_name]
    if len(ch_name) != len(anode):
        raise ValueError('Number of channel names must equal the number of '
                         'anodes/cathodes (got %d).' % len(ch_name))

    # Check for duplicate channel names (it is allowed to give the name of the
    # anode or cathode channel, as they will be replaced).
    for ch, a, c in zip(ch_name, anode, cathode):
        if ch not in [a, c] and ch in inst.ch_names:
            raise ValueError('There is already a channel named "%s", please '
                             'specify a different name for the bipolar '
                             'channel using the ch_name parameter.' % ch)

    if ch_info is None:
        ch_info = [{} for _ in anode]
    elif not isinstance(ch_info, list):
        ch_info = [ch_info]
    if len(ch_info) != len(anode):
        raise ValueError('Number of channel info dictionaries must equal the '
                         'number of anodes/cathodes.')

    # Merge specified and anode channel information dictionaries
    new_chs = []
    for ci, (an, ch) in enumerate(zip(anode, ch_info)):
        _check_ch_keys(ch, ci, name='ch_info', check_min=False)
        an_idx = inst.ch_names.index(an)
        this_chs = deepcopy(inst.info['chs'][an_idx])

        # Set channel location and coil type
        this_chs['loc'] = np.zeros(12)
        this_chs['coil_type'] = FIFF.FIFFV_COIL_EEG_BIPOLAR

        this_chs.update(ch)
        new_chs.append(this_chs)

    if copy:
        inst = inst.copy()

    for i, (an, ca, name, chs) in enumerate(
            zip(anode, cathode, ch_name, new_chs)):
        if an in anode[i + 1:] or an in cathode[i + 1:] or not drop_refs:
            # Make a copy of the channel if it's still needed later
            # otherwise it's modified inplace
            # (the 'TMP' channel is renamed to ``name`` below, so the
            # temporary name is free again on the next iteration)
            _copy_channel(inst, an, 'TMP')
            an = 'TMP'
        _apply_reference(inst, [ca], [an])  # ensures preloaded
        an_idx = inst.ch_names.index(an)
        inst.info['chs'][an_idx] = chs
        inst.info['chs'][an_idx]['ch_name'] = name
        logger.info('Bipolar channel added as "%s".' % name)
        inst.info._update_redundant()

    # Drop remaining channels.
    if drop_refs:
        drop_channels = list((set(anode) | set(cathode)) & set(inst.ch_names))
        inst.drop_channels(drop_channels)

    return inst
| |
"""Create publication class and contain methods for data fetching."""
import time
import requests
import json
from snovault import (
collection,
load_schema,
calculated_property,
CONNECTION
)
from snovault.crud_views import (
collection_add,
item_edit,
)
from snovault.attachment import ItemWithAttachment
from snovault.util import debug_log
from .base import (
Item,
lab_award_attribution_embed_list
)
from pyramid.view import (
view_config
)
from html.parser import HTMLParser
from snovault.validators import (
validate_item_content_post,
validate_item_content_put,
validate_item_content_patch,
validate_item_content_in_place,
no_validate_item_content_post,
no_validate_item_content_put,
no_validate_item_content_patch
)
################################################
# Outside methods for online data fetch
################################################
def find_best_date(date_data):
    """Pick the most reliable publication date from raw medline date fields.

    Prefers the human-readable 'DP' field (e.g. "2019 Jan 15"), falling back
    to the 8-digit 'DEP' and then 'DA' fields.  Returns 'YYYY', 'YYYY-MM' or
    'YYYY-MM-DD', or None when nothing usable is present.
    """
    month_abbrev = {'Jan': '01', 'Feb': '02', 'Mar': '03', 'Apr': '04',
                    'May': '05', 'Jun': '06', 'Jul': '07', 'Aug': '08',
                    'Sep': '09', 'Oct': '10', 'Nov': '11', 'Dec': '12'}
    if 'DP' in date_data:
        parts = [piece.strip() for piece in date_data['DP'].split(' ')]
        # only trust 'DP' when it starts with a 4-character year; otherwise
        # fall through to the numeric fields below
        if parts and len(parts[0]) == 4:
            best = parts[0]
            if len(parts) > 1:
                month = month_abbrev.get(parts[1])
                if month:
                    best += '-' + month
                    if len(parts) > 2 and len(parts[2]) <= 2:
                        best += '-' + parts[2].zfill(2)
            return best
    compact = None
    if 'DEP' in date_data and len(date_data['DEP']) == 8:
        compact = date_data['DEP']
    if compact is None and 'DA' in date_data:
        compact = date_data['DA']
    if compact:
        formatted = '-'.join([compact[:4], compact[4:6], compact[6:8]])
        if len(formatted) == 10:
            return formatted
    return None
def fetch_pubmed(PMID):
    """Fetch publication metadata from NCBI eutils for a PubMed id.

    ``PMID`` is the numeric part of the id (string).  Returns a dict with
    title/abstract/date_published/journal/authors/url keys (None values
    removed), or {} when the service cannot be reached after 5 attempts.
    """
    # medline field tag -> our property name
    field2prop = {'TI': 'title', 'AB': 'abstract', 'DP': 'date_published',
                  'DEP': 'date_published', 'DA': 'date_published', 'JT': 'journal',
                  'AU': 'authors', 'CN': 'authors'}
    pub_data = {v: None for v in field2prop.values()}
    pub_data['authors'] = []
    # raw date tags are collected here and resolved by find_best_date below
    pub_data['date_published'] = {}
    NIHe = "https://eutils.ncbi.nlm.nih.gov/entrez/eutils/"
    NIHw = "https://www.ncbi.nlm.nih.gov/pubmed/"
    url = NIHw + PMID
    pub_data['url'] = url
    www = "{NIH}efetch.fcgi?db=pubmed&id={id}&rettype=medline".format(NIH=NIHe, id=PMID)
    # try fetching data 5 times; back off 5s on HTTP 429 (rate-limited)
    for count in range(5):
        resp = requests.get(www)
        if resp.status_code == 200:
            break
        if resp.status_code == 429:
            time.sleep(5)
            continue
        if count == 4:
            return {}
    # parse the text to get the fields
    r = resp.text
    # join medline continuation lines onto their field line
    full_text = r.replace('\n ', ' ')
    for line in full_text.split('\n'):
        if line.strip():
            # NOTE(review): a non-blank line without '-' would raise
            # ValueError here -- presumably the medline format guarantees
            # 'TAG- value' lines; confirm
            key, val = [a.strip() for a in line.split('-', 1)]
            if key in field2prop:
                if key in ['DP', 'DEP', 'DA']:
                    pub_data[field2prop[key]][key] = val
                elif key in ['AU', 'CN']:
                    pub_data[field2prop[key]].append(val)
                else:
                    pub_data[field2prop[key]] = val
    # deal with date
    if pub_data['date_published']:  # there is some date data
        pub_data['date_published'] = find_best_date(pub_data['date_published'])
    return {k: v for k, v in pub_data.items() if v is not None}
class BioRxivExtractor(HTMLParser):
    """Collect title, abstract, authors and date from the Dublin Core
    ``<meta>`` tags of a bioRxiv article page."""

    def __init__(self):
        HTMLParser.__init__(self)
        self.title = ''
        self.abstract = ''
        self.authors = []
        self.date_published = ''

    def handle_starttag(self, tag, attrs):
        # only DC meta tags carry the metadata we care about
        if tag != 'meta':
            return
        meta = dict(attrs)
        name = meta.get('name')
        content = meta.get('content')
        if name == "DC.Title":
            self.title = content
        elif name == "DC.Description":
            self.abstract = content
        elif name == "DC.Contributor":
            self.authors.append(content)
        elif name == "DC.Date":
            self.date_published = content
def fetch_biorxiv(url):
    """Fetch title/abstract/authors/date for a bioRxiv preprint page.

    Uses the BioRxivExtractor class on the page HTML.  Returns a dict of
    the parsed fields plus 'url' and 'journal', or {} when the page cannot
    be retrieved after 5 attempts (consistent with fetch_pubmed, which
    also returns {} on failure; previously this returned None).
    """
    parserfields = ['title', 'abstract', 'authors', 'date_published']
    # try fetching data 5 times and return empty if it fails
    for count in range(5):
        r = requests.get(url)
        if r.status_code == 200:
            break
    else:
        # never received a 200 response
        return {}
    # drop non-ascii bytes before feeding the parser
    resp = r.text.encode('utf-8').decode('ascii', 'ignore')
    parser = BioRxivExtractor()
    parser.feed(resp)
    pub_data = {f: getattr(parser, f) for f in parserfields if hasattr(parser, f)}
    pub_data['url'] = url
    pub_data['journal'] = 'bioRxiv'
    return pub_data
def map_doi_pmid(doi):
    """If a doi is given, check whether it maps to a PubMed id.

    Queries the NCBI id-converter service and returns the pmid string,
    or None when the doi is unknown or the service response is unusable.
    """
    NIHid = "https://www.ncbi.nlm.nih.gov/pmc/utils/idconv/v1.0/"
    www = "{NIH}?ids={id}&versions=no&format=json".format(NIH=NIHid, id=doi)
    # try fetching data 5 times
    for count in range(5):
        resp = requests.get(www)
        if resp.status_code == 200:
            break
    # bug fix: the service was previously queried a second time after the
    # retry loop (r = requests.get(www).text), discarding the retried
    # response and doubling the request traffic
    r = resp.text
    try:
        res = json.loads(r)
        return res['records'][0]['pmid']
    except Exception:
        # malformed/empty payload (or a non-200 final response) -> no mapping
        return
def map_doi_biox(doi):
    """If a doi is not mapped to pubmed, resolve it and report the landing
    page when it is hosted on bioRxiv (None otherwise)."""
    resolver = "https://doi.org/"
    target = "{DOIad}{doi}".format(DOIad=resolver, doi=doi)
    for attempt in range(5):
        resp = requests.get(target)
        if resp.status_code == 200:
            break
    final_url = resp.url
    return final_url if "biorxiv" in final_url.lower() else None
################################################
# Outside methods for online data fetch
################################################
def _build_publication_embedded_list():
    """Helper function intended to be used to create the embedded list for
    publication.  All types should implement a function like this going
    forward.
    """
    publication_specific = [
        # ExperimentSet linkTo
        "exp_sets_prod_in_pub.accession",
        # ExperimentType linkTo
        "exp_sets_prod_in_pub.experimentset_type",
        # ExperimentType linkTo
        "exp_sets_prod_in_pub.experiments_in_set.experiment_type.title",
    ]
    return Item.embedded_list + lab_award_attribution_embed_list + publication_specific
@collection(
    name='publications',
    properties={
        'title': 'Publications',
        'description': 'Publication pages',
    })
class Publication(Item, ItemWithAttachment):
    """Publication item: metadata auto-populated from PubMed/bioRxiv."""

    item_type = 'publication'
    schema = load_schema('encoded:schemas/publication.json')
    embedded_list = _build_publication_embedded_list()

    class Collection(Item.Collection):
        pass

    def _update(self, properties, sheets=None):
        """Populate publication metadata from the external 'ID' before saving.

        'PMID:...' ids are fetched from PubMed; 'doi:...' ids are first
        mapped to a pmid and otherwise resolved to bioRxiv.  Fetched values
        overwrite submitted ones, except that an explicitly changed
        ``date_published`` wins over the fetched date.  Fetch errors are
        swallowed so a save never fails because an external service is down.
        """
        # logic for determing whether to use manually-provided date_published
        try:
            prev_date_published = self.properties.get('date_published')
        except KeyError:  # if new user, previous properties do not exist
            prev_date_published = None
        new_date_published = properties.get('date_published')

        self.upgrade_properties()
        pub_data = {}
        p_id = properties['ID']
        # parse if id is from pubmed
        try:
            if p_id.startswith('PMID'):
                pubmed_id = p_id[5:]
                pub_data = fetch_pubmed(pubmed_id)
            # if id is doi, first check if it maps to pubmed id, else see where it goes
            elif p_id.startswith('doi'):
                doi_id = p_id[4:]
                pubmed_id = map_doi_pmid(doi_id)
                if pubmed_id:
                    pub_data = fetch_pubmed(pubmed_id)
                # if it goes to biorxiv fetch from biorxiv
                else:
                    biox_url = map_doi_biox(doi_id)
                    if biox_url:
                        pub_data = fetch_biorxiv(biox_url)
                    else:
                        pass
        except Exception:
            # best-effort fetch: keep whatever the user submitted
            pass

        if pub_data:
            for k, v in pub_data.items():
                properties[k] = v
        # allow override of date_published
        if new_date_published is not None and prev_date_published != new_date_published:
            properties['date_published'] = new_date_published

        super(Publication, self)._update(properties, sheets)
        return

    @calculated_property(schema={
        "title": "Short Attribution",
        "description": "Short string containing <= 2 authors & year published.",
        "type": "string"
    })
    def short_attribution(self, authors=None, date_published=None):
        """Return e.g. 'Smith et al. (2019)' built from authors and date."""
        minipub = ''
        if authors:
            minipub = authors[0]
            if len(authors) > 2:
                minipub = minipub + ' et al.'
            elif len(authors) == 2:
                minipub = minipub + ' and ' + authors[1]
        if date_published:
            # date_published starts with the 4-digit year
            minipub = minipub + ' (' + date_published[0:4] + ')'
        return minipub

    @calculated_property(schema={
        "title": "Display Title",
        "description": "Publication short attribution, year, and ID (if available).",
        "type": "string"
    })
    def display_title(self, ID, authors=None, date_published=None):
        """Short attribution followed by the external publication ID."""
        minipub = self.short_attribution(authors, date_published)
        if minipub:
            return minipub + ' ' + ID
        return ID

    @calculated_property(schema={
        "title": "Number of Experiment Sets",
        "description": "The number of experiment sets produced by this publication.",
        "type": "integer"
    })
    def number_of_experiment_sets(self, request, exp_sets_prod_in_pub=None):
        """Count of linked production experiment sets (None when absent)."""
        if exp_sets_prod_in_pub:
            return len(exp_sets_prod_in_pub)
#### Add validator to ensure ID field is unique
def validate_unique_pub_id(context, request):
    """Validator to ensure the publication 'ID' field is unique."""
    data = request.json
    # ID is required; validate_item_content_post/put/patch will handle
    # the missing-field case
    if 'ID' not in data:
        return
    pub_id = data['ID']
    found = request.registry[CONNECTION].storage.get_by_json('ID', pub_id, 'publication')
    if not found:
        return
    # check_only + POST happens on GUI edit; we cannot confirm if found
    # item is the same item. Let the PATCH take care of validation
    if request.method == 'POST' and request.params.get('check_only', False):
        return
    # editing an item will cause it to find itself. That's okay
    if hasattr(context, 'uuid') and getattr(found, 'uuid', None) == context.uuid:
        return
    error_msg = ("publication %s already exists with ID '%s'. This field must be unique"
                 % (found.uuid, pub_id))
    request.errors.add('body', 'Publication: non-unique ID', error_msg)
    return
@view_config(context=Publication.Collection, permission='add', request_method='POST',
             validators=[validate_item_content_post, validate_unique_pub_id])
@view_config(context=Publication.Collection, permission='add_unvalidated', request_method='POST',
             validators=[no_validate_item_content_post],
             request_param=['validate=false'])
@debug_log
def publication_add(context, request, render=None):
    """POST view for the Publication collection.

    Thin wrapper delegating to snovault's ``collection_add``; the uniqueness
    of 'ID' is enforced by the ``validate_unique_pub_id`` validator above.
    """
    return collection_add(context, request, render)
@view_config(context=Publication, permission='edit', request_method='PUT',
             validators=[validate_item_content_put, validate_unique_pub_id])
@view_config(context=Publication, permission='edit', request_method='PATCH',
             validators=[validate_item_content_patch, validate_unique_pub_id])
@view_config(context=Publication, permission='edit_unvalidated', request_method='PUT',
             validators=[no_validate_item_content_put],
             request_param=['validate=false'])
@view_config(context=Publication, permission='edit_unvalidated', request_method='PATCH',
             validators=[no_validate_item_content_patch],
             request_param=['validate=false'])
@view_config(context=Publication, permission='index', request_method='GET',
             validators=[validate_item_content_in_place, validate_unique_pub_id],
             request_param=['check_only=true'])
@debug_log
def publication_edit(context, request, render=None):
    """PUT/PATCH (and check-only GET) view for a Publication item.

    Thin wrapper delegating to snovault's ``item_edit``; 'ID' uniqueness is
    enforced by ``validate_unique_pub_id`` on the validated routes.
    """
    return item_edit(context, request, render)
| |
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import itertools
import socket
import ssl
import time
import uuid
import eventlet
import greenlet
import kombu
import kombu.connection
import kombu.entity
import kombu.messaging
from oslo.config import cfg
import six
from messager.common import excutils
from messager.common.gettextutils import _, _LE, _LI
from messager.common import network_utils
from messager.common.rpc import amqp as rpc_amqp
from messager.common.rpc import common as rpc_common
from messager.common import sslutils
# Configuration options for the kombu/RabbitMQ RPC driver.  Registered on
# the global CONF below.  The kombu_ssl_* options only take effect when
# rabbit_use_ssl is True.
kombu_opts = [
    cfg.StrOpt('kombu_ssl_version',
               default='',
               help='If SSL is enabled, the SSL version to use. Valid '
                    'values are TLSv1, SSLv23 and SSLv3. SSLv2 might '
                    'be available on some distributions.'
               ),
    cfg.StrOpt('kombu_ssl_keyfile',
               default='',
               help='SSL key file (valid only if SSL enabled)'),
    cfg.StrOpt('kombu_ssl_certfile',
               default='',
               help='SSL cert file (valid only if SSL enabled)'),
    cfg.StrOpt('kombu_ssl_ca_certs',
               default='',
               help=('SSL certification authority file '
                     '(valid only if SSL enabled)')),
    cfg.StrOpt('rabbit_host',
               default='localhost',
               help='The RabbitMQ broker address where a single node is used'),
    cfg.IntOpt('rabbit_port',
               default=5672,
               help='The RabbitMQ broker port where a single node is used'),
    cfg.ListOpt('rabbit_hosts',
                default=['$rabbit_host:$rabbit_port'],
                help='RabbitMQ HA cluster host:port pairs'),
    cfg.BoolOpt('rabbit_use_ssl',
                default=False,
                help='Connect over SSL for RabbitMQ'),
    cfg.StrOpt('rabbit_userid',
               default='guest',
               help='The RabbitMQ userid'),
    cfg.StrOpt('rabbit_password',
               default='guest',
               help='The RabbitMQ password',
               secret=True),
    cfg.StrOpt('rabbit_virtual_host',
               default='/',
               help='The RabbitMQ virtual host'),
    cfg.IntOpt('rabbit_retry_interval',
               default=1,
               help='How frequently to retry connecting with RabbitMQ'),
    cfg.IntOpt('rabbit_retry_backoff',
               default=2,
               help='How long to backoff for between retries when connecting '
                    'to RabbitMQ'),
    cfg.IntOpt('rabbit_max_retries',
               default=0,
               help='Maximum number of RabbitMQ connection retries. '
                    'Default is 0 (infinite retry count)'),
    cfg.BoolOpt('rabbit_ha_queues',
                default=False,
                help='Use HA queues in RabbitMQ (x-ha-policy: all). '
                     'If you change this option, you must wipe the '
                     'RabbitMQ database.'),
]

cfg.CONF.register_opts(kombu_opts)

# Shared logger from the common RPC layer.
LOG = rpc_common.LOG
def _get_queue_arguments(conf):
"""Construct the arguments for declaring a queue.
If the rabbit_ha_queues option is set, we declare a mirrored queue
as described here:
http://www.rabbitmq.com/ha.html
Setting x-ha-policy to all means that the queue will be mirrored
to all nodes in the cluster.
"""
return {'x-ha-policy': 'all'} if conf.rabbit_ha_queues else {}
class ConsumerBase(object):
    """Consumer base class.

    Wraps a kombu Queue: declares it on a channel, re-declares it after a
    broker reconnect, and dispatches received messages to a callback with
    ack/requeue handling.
    """

    def __init__(self, channel, callback, tag, **kwargs):
        """Declare a queue on an amqp channel.

        'channel' is the amqp channel to use
        'callback' is the callback to call when messages are received
        'tag' is a unique ID for the consumer on the channel

        queue name, exchange name, and other kombu options are
        passed in here as a dictionary.
        """
        self.callback = callback
        self.tag = str(tag)
        self.kwargs = kwargs
        self.queue = None
        # When True (default) a message whose callback raises is still
        # ack'ed (i.e. dropped); when False it is requeued for redelivery.
        # NOTE(review): 'ack_on_error' remains in self.kwargs and is thus
        # also forwarded to kombu.entity.Queue() in reconnect() -- confirm
        # kombu tolerates the extra keyword.
        self.ack_on_error = kwargs.get('ack_on_error', True)
        self.reconnect(channel)

    def reconnect(self, channel):
        """Re-declare the queue after a rabbit reconnect."""
        self.channel = channel
        self.kwargs['channel'] = channel
        self.queue = kombu.entity.Queue(**self.kwargs)
        self.queue.declare()

    def _callback_handler(self, message, callback):
        """Call callback with deserialized message.

        Messages that are processed without exception are ack'ed.

        If the message processing generates an exception, it will be
        ack'ed if ack_on_error=True. Otherwise it will be .requeue()'ed.
        """
        try:
            msg = rpc_common.deserialize_msg(message.payload)
            callback(msg)
        except Exception:
            if self.ack_on_error:
                LOG.exception(_LE("Failed to process message"
                                  " ... skipping it."))
                message.ack()
            else:
                LOG.exception(_LE("Failed to process message"
                                  " ... will requeue."))
                message.requeue()
        else:
            # Success path: acknowledge so the broker can discard it.
            message.ack()

    def consume(self, *args, **kwargs):
        """Actually declare the consumer on the amqp channel.  This will
        start the flow of messages from the queue.  Using the
        Connection.iterconsume() iterator will process the messages,
        calling the appropriate callback.

        If a callback is specified in kwargs, use that.  Otherwise,
        use the callback passed during __init__()

        kwargs['nowait'] (default False) is forwarded to kombu's
        Queue.consume().  NOTE(review): the original docstring claimed
        nowait=True blocks until a message is read -- that reading looks
        inverted; verify against kombu's Queue.consume semantics.
        """
        options = {'consumer_tag': self.tag}
        options['nowait'] = kwargs.get('nowait', False)
        callback = kwargs.get('callback', self.callback)
        if not callback:
            raise ValueError("No callback defined")

        def _callback(raw_message):
            # Convert the raw transport message before dispatching.
            message = self.channel.message_to_python(raw_message)
            self._callback_handler(message, callback)

        self.queue.consume(*args, callback=_callback, **options)

    def cancel(self):
        """Cancel the consuming from the queue, if it has started."""
        try:
            self.queue.cancel(self.tag)
        except KeyError as e:
            # NOTE(comstud): Kludge to get around a amqplib bug
            if str(e) != "u'%s'" % self.tag:
                raise
        self.queue = None
class DirectConsumer(ConsumerBase):
    """Consumer bound to a 'direct' exchange.

    Used for point-to-point delivery: the exchange, queue and routing key
    all share the msg_id, so only this consumer receives the reply.
    """

    def __init__(self, conf, channel, msg_id, callback, tag, **kwargs):
        """Declare the direct queue/exchange pair named after msg_id.

        'channel' is the amqp channel to use
        'msg_id' is the msg_id to listen on
        'callback' is invoked for each received message
        'tag' is a unique ID for the consumer on the channel
        Any additional kombu options may be supplied via kwargs.
        """
        opts = dict(durable=False,
                    queue_arguments=_get_queue_arguments(conf),
                    auto_delete=True,
                    exclusive=False)
        opts.update(kwargs)
        exchange = kombu.entity.Exchange(name=msg_id,
                                         type='direct',
                                         durable=opts['durable'],
                                         auto_delete=opts['auto_delete'])
        super(DirectConsumer, self).__init__(channel, callback, tag,
                                             name=msg_id,
                                             exchange=exchange,
                                             routing_key=msg_id,
                                             **opts)
class TopicConsumer(ConsumerBase):
    """Consumer bound to a 'topic' exchange."""

    def __init__(self, conf, channel, topic, callback, tag, name=None,
                 exchange_name=None, **kwargs):
        """Declare a topic queue on the control exchange.

        :param channel: the amqp channel to use
        :param topic: the topic to listen on
        :paramtype topic: str
        :param callback: invoked for each received message
        :param tag: a unique ID for the consumer on the channel
        :param name: optional queue name; falls back to the topic
        :paramtype name: str
        :param exchange_name: optional exchange; falls back to the
            configured control exchange
        Any additional kombu options may be supplied via kwargs.
        """
        opts = dict(durable=conf.amqp_durable_queues,
                    queue_arguments=_get_queue_arguments(conf),
                    auto_delete=conf.amqp_auto_delete,
                    exclusive=False)
        opts.update(kwargs)
        target_exchange = exchange_name or rpc_amqp.get_control_exchange(conf)
        exchange = kombu.entity.Exchange(name=target_exchange,
                                         type='topic',
                                         durable=opts['durable'],
                                         auto_delete=opts['auto_delete'])
        super(TopicConsumer, self).__init__(channel, callback, tag,
                                            name=name or topic,
                                            exchange=exchange,
                                            routing_key=topic,
                                            **opts)
class FanoutConsumer(ConsumerBase):
    """Consumer bound to a 'fanout' exchange.

    Each instance gets its own uniquely-named queue so every consumer on
    the '<topic>_fanout' exchange receives a copy of each message.
    """

    def __init__(self, conf, channel, topic, callback, tag, **kwargs):
        """Declare a per-instance fanout queue for the given topic.

        'channel' is the amqp channel to use
        'topic' is the topic to listen on
        'callback' is invoked for each received message
        'tag' is a unique ID for the consumer on the channel
        Any additional kombu options may be supplied via kwargs.
        """
        exchange_name = '%s_fanout' % topic
        queue_name = '%s_fanout_%s' % (topic, uuid.uuid4().hex)
        opts = dict(durable=False,
                    queue_arguments=_get_queue_arguments(conf),
                    auto_delete=True,
                    exclusive=False)
        opts.update(kwargs)
        exchange = kombu.entity.Exchange(name=exchange_name,
                                         type='fanout',
                                         durable=opts['durable'],
                                         auto_delete=opts['auto_delete'])
        super(FanoutConsumer, self).__init__(channel, callback, tag,
                                             name=queue_name,
                                             exchange=exchange,
                                             routing_key=topic,
                                             **opts)
class Publisher(object):
    """Base Publisher class.

    Holds the exchange name, routing key and kombu exchange options, and
    rebuilds its kombu Producer whenever the channel is (re)established.
    """

    def __init__(self, channel, exchange_name, routing_key, **kwargs):
        """Remember the exchange/routing settings and build the producer."""
        self.exchange_name = exchange_name
        self.routing_key = routing_key
        self.kwargs = kwargs
        self.reconnect(channel)

    def reconnect(self, channel):
        """Re-establish the Producer after a rabbit reconnection."""
        self.exchange = kombu.entity.Exchange(name=self.exchange_name,
                                              **self.kwargs)
        self.producer = kombu.messaging.Producer(channel=channel,
                                                 exchange=self.exchange,
                                                 routing_key=self.routing_key)

    def send(self, msg, timeout=None):
        """Publish a message, optionally with a per-message TTL.

        'timeout' is given in seconds; AMQP expects the TTL header in
        milliseconds, hence the conversion.
        """
        if not timeout:
            self.producer.publish(msg)
        else:
            self.producer.publish(msg, headers={'ttl': (timeout * 1000)})
class DirectPublisher(Publisher):
    """Publisher for a 'direct' exchange (point-to-point replies)."""

    def __init__(self, conf, channel, msg_id, **kwargs):
        """Set up a direct publisher named after msg_id.

        Kombu options may be passed as keyword args to override defaults.
        """
        opts = dict(durable=False, auto_delete=True, exclusive=False)
        opts.update(kwargs)
        super(DirectPublisher, self).__init__(channel, msg_id, msg_id,
                                              type='direct', **opts)
class TopicPublisher(Publisher):
    """Publisher for the control exchange using 'topic' routing."""

    def __init__(self, conf, channel, topic, **kwargs):
        """Set up a topic publisher on the configured control exchange.

        Kombu options may be passed as keyword args to override defaults.
        """
        opts = dict(durable=conf.amqp_durable_queues,
                    auto_delete=conf.amqp_auto_delete,
                    exclusive=False)
        opts.update(kwargs)
        super(TopicPublisher, self).__init__(
            channel,
            rpc_amqp.get_control_exchange(conf),
            topic,
            type='topic',
            **opts)
class FanoutPublisher(Publisher):
    """Publisher for a '<topic>_fanout' exchange (broadcast)."""

    def __init__(self, conf, channel, topic, **kwargs):
        """Set up a fanout publisher for the given topic.

        Kombu options may be passed as keyword args to override defaults.
        """
        opts = dict(durable=False, auto_delete=True, exclusive=False)
        opts.update(kwargs)
        super(FanoutPublisher, self).__init__(channel, '%s_fanout' % topic,
                                              None, type='fanout', **opts)
class NotifyPublisher(TopicPublisher):
    """Topic publisher for notifications that pre-declares its queue."""

    def __init__(self, conf, channel, topic, **kwargs):
        # Durability may be overridden per publisher; default follows config.
        self.durable = kwargs.pop('durable', conf.amqp_durable_queues)
        self.queue_arguments = _get_queue_arguments(conf)
        super(NotifyPublisher, self).__init__(conf, channel, topic, **kwargs)

    def reconnect(self, channel):
        super(NotifyPublisher, self).reconnect(channel)

        # NOTE(jerdfelt): Normally the consumer would create the queue, but
        # we do this to ensure that messages don't get dropped if the
        # consumer is started after we do
        kombu.entity.Queue(channel=channel,
                           exchange=self.exchange,
                           durable=self.durable,
                           name=self.routing_key,
                           routing_key=self.routing_key,
                           queue_arguments=self.queue_arguments).declare()
class Connection(object):
    """Connection object.

    Owns a single kombu broker connection plus every consumer and
    publisher declared on it.  Handles reconnect-with-backoff across the
    configured list of broker hosts, re-declaring all consumers after a
    successful reconnect.
    """

    pool = None

    def __init__(self, conf, server_params=None):
        self.consumers = []
        self.consumer_thread = None
        self.proxy_callbacks = []
        self.conf = conf
        self.max_retries = self.conf.rabbit_max_retries
        # Try forever?
        if self.max_retries <= 0:
            self.max_retries = None
        self.interval_start = self.conf.rabbit_retry_interval
        self.interval_stepping = self.conf.rabbit_retry_backoff
        # max retry-interval = 30 seconds
        self.interval_max = 30
        self.memory_transport = False

        if server_params is None:
            server_params = {}
        # Keys to translate from server_params to kombu params
        server_params_to_kombu_params = {'username': 'userid'}

        ssl_params = self._fetch_ssl_params()
        params_list = []
        for adr in self.conf.rabbit_hosts:
            hostname, port = network_utils.parse_host_port(
                adr, default_port=self.conf.rabbit_port)

            params = {
                'hostname': hostname,
                'port': port,
                'userid': self.conf.rabbit_userid,
                'password': self.conf.rabbit_password,
                'virtual_host': self.conf.rabbit_virtual_host,
            }

            for sp_key, value in six.iteritems(server_params):
                p_key = server_params_to_kombu_params.get(sp_key, sp_key)
                params[p_key] = value

            if self.conf.fake_rabbit:
                params['transport'] = 'memory'
            if self.conf.rabbit_use_ssl:
                params['ssl'] = ssl_params

            params_list.append(params)

        self.params_list = params_list

        # Round-robin over the configured brokers on each connect attempt.
        brokers_count = len(self.params_list)
        self.next_broker_indices = itertools.cycle(range(brokers_count))

        self.memory_transport = self.conf.fake_rabbit

        self.connection = None
        self.reconnect()

    def _fetch_ssl_params(self):
        """Handles fetching what ssl params should be used for the connection
        (if any).
        """
        ssl_params = dict()

        # http://docs.python.org/library/ssl.html - ssl.wrap_socket
        if self.conf.kombu_ssl_version:
            ssl_params['ssl_version'] = sslutils.validate_ssl_version(
                self.conf.kombu_ssl_version)
        if self.conf.kombu_ssl_keyfile:
            ssl_params['keyfile'] = self.conf.kombu_ssl_keyfile
        if self.conf.kombu_ssl_certfile:
            ssl_params['certfile'] = self.conf.kombu_ssl_certfile
        if self.conf.kombu_ssl_ca_certs:
            ssl_params['ca_certs'] = self.conf.kombu_ssl_ca_certs
            # We might want to allow variations in the
            # future with this?
            ssl_params['cert_reqs'] = ssl.CERT_REQUIRED

        # Return the extended behavior or just have the default behavior
        return ssl_params or True

    def _connect(self, params):
        """Connect to rabbit.  Re-establish any queues that may have
        been declared before if we are reconnecting.  Exceptions should
        be handled by the caller.
        """
        if self.connection:
            LOG.info(_LI("Reconnecting to AMQP server on "
                     "%(hostname)s:%(port)d") % params)
            try:
                self.connection.release()
            except self.connection_errors:
                pass
            # Setting this in case the next statement fails, though
            # it shouldn't be doing any network operations, yet.
            self.connection = None
        self.connection = kombu.connection.BrokerConnection(**params)
        self.connection_errors = self.connection.connection_errors
        if self.memory_transport:
            # Kludge to speed up tests.
            self.connection.transport.polling_interval = 0.0
        self.consumer_num = itertools.count(1)
        self.connection.connect()
        self.channel = self.connection.channel()
        # work around 'memory' transport bug in 1.1.3
        if self.memory_transport:
            self.channel._new_queue('ae.undeliver')
        # Re-declare every consumer that existed before the reconnect.
        for consumer in self.consumers:
            consumer.reconnect(self.channel)
        LOG.info(_LI('Connected to AMQP server on %(hostname)s:%(port)d') %
                 params)

    def reconnect(self):
        """Handles reconnecting and re-establishing queues.
        Will retry up to self.max_retries number of times.
        self.max_retries = 0 means to retry forever.
        Sleep between tries, starting at self.interval_start
        seconds, backing off self.interval_stepping number of seconds
        each attempt.
        """

        attempt = 0
        while True:
            params = self.params_list[next(self.next_broker_indices)]
            attempt += 1
            try:
                self._connect(params)
                return
            except (IOError, self.connection_errors) as e:
                # Bind to a name that outlives the handler: in Python 3 the
                # 'as e' target is unbound once the except block exits, so
                # the logging code below could not reference it (NameError).
                err = e
            except Exception as e:
                # NOTE(comstud): Unfortunately it's possible for amqplib
                # to return an error not covered by its transport
                # connection_errors in the case of a timeout waiting for
                # a protocol response.  (See paste link in LP888621)
                # So, we check all exceptions for 'timeout' in them
                # and try to reconnect in this case.
                if 'timeout' not in str(e):
                    raise
                err = e

            log_info = {}
            log_info['err_str'] = str(err)
            log_info['max_retries'] = self.max_retries
            log_info.update(params)

            if self.max_retries and attempt == self.max_retries:
                msg = _('Unable to connect to AMQP server on '
                        '%(hostname)s:%(port)d after %(max_retries)d '
                        'tries: %(err_str)s') % log_info
                LOG.error(msg)
                raise rpc_common.RPCException(msg)

            # Linear backoff: interval_start, then +interval_stepping per
            # attempt, capped at interval_max.
            if attempt == 1:
                sleep_time = self.interval_start or 1
            elif attempt > 1:
                sleep_time += self.interval_stepping
            if self.interval_max:
                sleep_time = min(sleep_time, self.interval_max)

            log_info['sleep_time'] = sleep_time
            LOG.error(_LE('AMQP server on %(hostname)s:%(port)d is '
                          'unreachable: %(err_str)s. Trying again in '
                          '%(sleep_time)d seconds.') % log_info)
            time.sleep(sleep_time)

    def ensure(self, error_callback, method, *args, **kwargs):
        """Run method(), reconnecting and retrying on connection errors.

        error_callback (may be None) is invoked with the exception before
        each reconnect attempt.
        """
        while True:
            try:
                return method(*args, **kwargs)
            except (self.connection_errors, socket.timeout, IOError) as e:
                if error_callback:
                    error_callback(e)
            except Exception as e:
                # NOTE(comstud): Unfortunately it's possible for amqplib
                # to return an error not covered by its transport
                # connection_errors in the case of a timeout waiting for
                # a protocol response.  (See paste link in LP888621)
                # So, we check all exceptions for 'timeout' in them
                # and try to reconnect in this case.
                if 'timeout' not in str(e):
                    raise
                if error_callback:
                    error_callback(e)
            self.reconnect()

    def get_channel(self):
        """Convenience call for bin/clear_rabbit_queues."""
        return self.channel

    def close(self):
        """Close/release this connection."""
        self.cancel_consumer_thread()
        self.wait_on_proxy_callbacks()
        self.connection.release()
        self.connection = None

    def reset(self):
        """Reset a connection so it can be used again."""
        self.cancel_consumer_thread()
        self.wait_on_proxy_callbacks()
        self.channel.close()
        self.channel = self.connection.channel()
        # work around 'memory' transport bug in 1.1.3
        if self.memory_transport:
            self.channel._new_queue('ae.undeliver')
        self.consumers = []

    def declare_consumer(self, consumer_cls, topic, callback):
        """Create a Consumer using the class that was passed in and
        add it to our list of consumers
        """

        def _connect_error(exc):
            log_info = {'topic': topic, 'err_str': str(exc)}
            LOG.error(_LE("Failed to declare consumer for topic '%(topic)s': "
                      "%(err_str)s") % log_info)

        def _declare_consumer():
            consumer = consumer_cls(self.conf, self.channel, topic, callback,
                                    six.next(self.consumer_num))
            self.consumers.append(consumer)
            return consumer

        return self.ensure(_connect_error, _declare_consumer)

    def iterconsume(self, limit=None, timeout=None):
        """Return an iterator that will consume from all queues/consumers."""

        info = {'do_consume': True}

        def _error_callback(exc):
            if isinstance(exc, socket.timeout):
                LOG.debug('Timed out waiting for RPC response: %s' %
                          str(exc))
                raise rpc_common.Timeout()
            else:
                LOG.exception(_LE('Failed to consume message from queue: %s') %
                              str(exc))
                info['do_consume'] = True

        def _consume():
            if info['do_consume']:
                queues_head = self.consumers[:-1]  # not fanout.
                queues_tail = self.consumers[-1]  # fanout
                for queue in queues_head:
                    queue.consume(nowait=True)
                queues_tail.consume(nowait=False)
                info['do_consume'] = False
            return self.connection.drain_events(timeout=timeout)

        for iteration in itertools.count(0):
            if limit and iteration >= limit:
                # NOTE: 'return' instead of 'raise StopIteration' -- under
                # PEP 479 (Python 3.7+) raising StopIteration inside a
                # generator is converted into a RuntimeError.
                return
            yield self.ensure(_error_callback, _consume)

    def cancel_consumer_thread(self):
        """Cancel a consumer thread."""
        if self.consumer_thread is not None:
            self.consumer_thread.kill()
            try:
                self.consumer_thread.wait()
            except greenlet.GreenletExit:
                pass
            self.consumer_thread = None

    def wait_on_proxy_callbacks(self):
        """Wait for all proxy callback threads to exit."""
        for proxy_cb in self.proxy_callbacks:
            proxy_cb.wait()

    def publisher_send(self, cls, topic, msg, timeout=None, **kwargs):
        """Send to a publisher based on the publisher class."""

        def _error_callback(exc):
            log_info = {'topic': topic, 'err_str': str(exc)}
            LOG.exception(_LE("Failed to publish message to topic "
                          "'%(topic)s': %(err_str)s") % log_info)

        def _publish():
            publisher = cls(self.conf, self.channel, topic, **kwargs)
            publisher.send(msg, timeout)

        self.ensure(_error_callback, _publish)

    def declare_direct_consumer(self, topic, callback):
        """Create a 'direct' queue.
        In nova's use, this is generally a msg_id queue used for
        responses for call/multicall
        """
        self.declare_consumer(DirectConsumer, topic, callback)

    def declare_topic_consumer(self, topic, callback=None, queue_name=None,
                               exchange_name=None, ack_on_error=True):
        """Create a 'topic' consumer."""
        self.declare_consumer(functools.partial(TopicConsumer,
                                                name=queue_name,
                                                exchange_name=exchange_name,
                                                ack_on_error=ack_on_error,
                                                ),
                              topic, callback)

    def declare_fanout_consumer(self, topic, callback):
        """Create a 'fanout' consumer."""
        self.declare_consumer(FanoutConsumer, topic, callback)

    def direct_send(self, msg_id, msg):
        """Send a 'direct' message."""
        self.publisher_send(DirectPublisher, msg_id, msg)

    def topic_send(self, topic, msg, timeout=None):
        """Send a 'topic' message."""
        self.publisher_send(TopicPublisher, topic, msg, timeout)

    def fanout_send(self, topic, msg):
        """Send a 'fanout' message."""
        self.publisher_send(FanoutPublisher, topic, msg)

    def notify_send(self, topic, msg, **kwargs):
        """Send a notify message on a topic."""
        self.publisher_send(NotifyPublisher, topic, msg, None, **kwargs)

    def consume(self, limit=None):
        """Consume from all queues/consumers."""
        it = self.iterconsume(limit=limit)
        while True:
            try:
                six.next(it)
            except StopIteration:
                return

    def consume_in_thread(self):
        """Consumer from all queues/consumers in a greenthread."""
        @excutils.forever_retry_uncaught_exceptions
        def _consumer_thread():
            try:
                self.consume()
            except greenlet.GreenletExit:
                return
        if self.consumer_thread is None:
            self.consumer_thread = eventlet.spawn(_consumer_thread)
        return self.consumer_thread

    def create_consumer(self, topic, proxy, fanout=False):
        """Create a consumer that calls a method in a proxy object."""
        proxy_cb = rpc_amqp.ProxyCallback(
            self.conf, proxy,
            rpc_amqp.get_connection_pool(self.conf, Connection))
        self.proxy_callbacks.append(proxy_cb)

        if fanout:
            self.declare_fanout_consumer(topic, proxy_cb)
        else:
            self.declare_topic_consumer(topic, proxy_cb)

    def create_worker(self, topic, proxy, pool_name):
        """Create a worker that calls a method in a proxy object."""
        proxy_cb = rpc_amqp.ProxyCallback(
            self.conf, proxy,
            rpc_amqp.get_connection_pool(self.conf, Connection))
        self.proxy_callbacks.append(proxy_cb)
        # pool_name is used as the shared queue name for the worker pool.
        self.declare_topic_consumer(topic, proxy_cb, pool_name)

    def join_consumer_pool(self, callback, pool_name, topic,
                           exchange_name=None, ack_on_error=True):
        """Register as a member of a group of consumers for a given topic from
        the specified exchange.

        Exactly one member of a given pool will receive each message.

        A message will be delivered to multiple pools, if more than
        one is created.
        """
        callback_wrapper = rpc_amqp.CallbackWrapper(
            conf=self.conf,
            callback=callback,
            connection_pool=rpc_amqp.get_connection_pool(self.conf,
                                                         Connection),
            wait_for_consumers=not ack_on_error
        )
        self.proxy_callbacks.append(callback_wrapper)
        self.declare_topic_consumer(
            queue_name=pool_name,
            topic=topic,
            exchange_name=exchange_name,
            callback=callback_wrapper,
            ack_on_error=ack_on_error,
        )
def create_connection(conf, new=True):
    """Create a connection (fresh or from the shared pool)."""
    pool = rpc_amqp.get_connection_pool(conf, Connection)
    return rpc_amqp.create_connection(conf, new, pool)
def multicall(conf, context, topic, msg, timeout=None):
    """Make a call that returns multiple times."""
    pool = rpc_amqp.get_connection_pool(conf, Connection)
    return rpc_amqp.multicall(conf, context, topic, msg, timeout, pool)
def call(conf, context, topic, msg, timeout=None):
    """Sends a message on a topic and wait for a response."""
    pool = rpc_amqp.get_connection_pool(conf, Connection)
    return rpc_amqp.call(conf, context, topic, msg, timeout, pool)
def cast(conf, context, topic, msg):
    """Sends a message on a topic without waiting for a response."""
    pool = rpc_amqp.get_connection_pool(conf, Connection)
    return rpc_amqp.cast(conf, context, topic, msg, pool)
def fanout_cast(conf, context, topic, msg):
    """Sends a message on a fanout exchange without waiting for a response."""
    pool = rpc_amqp.get_connection_pool(conf, Connection)
    return rpc_amqp.fanout_cast(conf, context, topic, msg, pool)
def cast_to_server(conf, context, server_params, topic, msg):
    """Sends a message on a topic to a specific server."""
    pool = rpc_amqp.get_connection_pool(conf, Connection)
    return rpc_amqp.cast_to_server(conf, context, server_params, topic, msg,
                                   pool)
def fanout_cast_to_server(conf, context, server_params, topic, msg):
    """Sends a message on a fanout exchange to a specific server."""
    pool = rpc_amqp.get_connection_pool(conf, Connection)
    return rpc_amqp.fanout_cast_to_server(conf, context, server_params,
                                          topic, msg, pool)
def notify(conf, context, topic, msg, envelope):
    """Sends a notification event on a topic."""
    pool = rpc_amqp.get_connection_pool(conf, Connection)
    return rpc_amqp.notify(conf, context, topic, msg, pool, envelope)
def cleanup():
    """Tear down the shared connection pool."""
    pool = Connection.pool
    return rpc_amqp.cleanup(pool)
| |
#! /usr/bin/env python
import sys
import numpy
from numpy.linalg import inv
# Feature flags: DEBUGMODE enables matplotlib plotting; WINDOWS_MODE reads
# input from 'raw101.out' instead of stdin.
DEBUGMODE = 1
WINDOWS_MODE = 1
if DEBUGMODE == 1:
    #from mpl_toolkits.mplot3d import Axes3D
    import matplotlib.pyplot as plt
    import matplotlib.cm as cm
    from mpl_toolkits.mplot3d import Axes3D

# Parameters Make sure these values correspond with the input locations otherwise we will get weird results
# xres/yres: output grid resolution; weight: blend factor between the
# pass-2 map and the saved pass-1 map.
xres = 300
yres = 500
weight = 0.5

# Lists to maintain: raw x/y coordinates and RSSI samples read from input.
xCoords = []
yCoords = []
rssi = []
def max_ij(x):
    """Return the (row, col) index of the largest entry of 2-D array x.

    Uses argmax over the flattened array and divmod by the row width,
    which avoids scanning the array twice.
    """
    flat_index = x.argmax()
    return divmod(flat_index, x.shape[1])
def pass1(i, j, xmean, ymean, coefmat):
    """Evaluate the pass-1 quadratic surface at grid point (i, j).

    Coordinates are centred on (xmean, ymean) before forming the feature
    vector [dx^2, dy^2, dx*dy, dx, dy, 1], which is dotted with the
    fitted coefficient vector.
    """
    dx = i - xmean
    dy = j - ymean
    features = numpy.array([dx ** 2, dy ** 2, dx * dy, dx, dy, 1])
    return numpy.dot(features, coefmat)
def pass2(i, j, xmean, ymean, coefmat):
    """Evaluate the pass-2 Gaussian-style surface at grid point (i, j).

    Centres the coordinates on (xmean, ymean), forms the quadratic-form
    basis [dx^2, dy^2, 2*dx*dy], dots it with the fitted coefficients and
    returns exp(-q) of the result.
    """
    dx = i - xmean
    dy = j - ymean
    basis = numpy.array([dx ** 2, dy ** 2, 2 * dx * dy])
    quad_form = numpy.dot(basis.T, coefmat)
    return numpy.exp(-quad_form)
# --- Input stage: read "x,y,rssi" CSV triples from a file (WINDOWS_MODE)
# --- or from stdin.
#for line in sys.stdin:
i = 0
if WINDOWS_MODE == 1:
    fptr = open('raw101.out', 'r')
    for line in fptr:
        line = line.strip()
        (xCoordIn, yCoordIn, rssiIn) = line.split(',')
        # print xCoordIn, yCoordIn, rssiIn
        xCoords.append(xCoordIn)
        yCoords.append(yCoordIn)
        rssi.append(rssiIn)
    fptr.close()
# (Commented-out header-parsing variant retained from the original.)
# if i == 0:
#     line = line.strip()
#     (xres,yres) = line.split(',')
#     xres = int(xres)
#     yres = int(yres)
#     i += 1
# else:
else:
    for line in sys.stdin:
        line = line.strip()
        (xCoordIn, yCoordIn, rssiIn) = line.split(',')
        # print xCoordIn, yCoordIn, rssiIn
        xCoords.append(xCoordIn)
        yCoords.append(yCoordIn)
        rssi.append(rssiIn)
#fptr.close()

# Convert the string lists to integer arrays and record the data extents.
xloc = numpy.asarray(xCoords, dtype='int32')
yloc = numpy.asarray(yCoords, dtype='int32')
rssi = numpy.asarray(rssi, dtype = 'int32')
latmin = numpy.min(xloc)
latmax = numpy.max(xloc)
lonmin = numpy.min(yloc)
lonmax = numpy.max(yloc)
rssiMax = numpy.max(rssi)

# 3-D scatter of the raw samples; the axis limits (which matplotlib pads
# slightly beyond the data extents) define the interpolation grid.
fig = plt.figure()
ax = fig.add_subplot(111,projection='3d')
ax.scatter(xloc,yloc,rssi)
#plt.show()
(latmin, latmax) = ax.get_xlim()
(lonmin, lonmax) = ax.get_ylim()
xlin = numpy.linspace(latmin,latmax,xres)
ylin = numpy.linspace(lonmin,lonmax,yres)
# print xCoords

# --- Pass 1: least-squares fit of a general quadratic surface to the
# --- RSSI samples, centred on the mean coordinate.
xmean = numpy.mean(xloc)
ymean = numpy.mean(yloc)
# print xmean, ymean
xs = xloc - xmean
ys = yloc - ymean
# print xs
# Form the A Matrix
amat = numpy.ones((len(xs),6))
amat[: , 0] = xs**2
amat[: , 1] = ys**2;
amat[: , 2] = xs*ys;
amat[: , 3] = xs;
amat[: , 4] = ys;
# NOTE(review): pass1() uses 1 for the constant-term feature, but the fit
# uses 1e-9 here -- confirm this mismatch is intentional.
amat[: , 5] = 1e-9;
#print amat
# Note in numpy we have to use dot(A,B) to do matrix multiplication of arrays
# Normal-equation solve: coef = (A^T A)^-1 A^T b.
amatPI = numpy.dot(amat.T,amat)
coefmat = numpy.dot(numpy.dot(inv(amatPI),amat.T),rssi.T)
#print coefmat

# Evaluate the fitted surface over the full grid and normalize it.
gamemap = numpy.zeros((xres, yres))
for i in range(xres):
    for j in range(yres):
        gamemap[i,j] = pass1(xlin[i], ylin[j], xmean, ymean, coefmat)
#gamemap = -gamemap/numpy.sum(numpy.sum(numpy.abs(gamemap)))
gamemap = gamemap/numpy.max(numpy.max(numpy.abs(gamemap)))

# Display Gamemap
if DEBUGMODE == 1:
    plt.figure()
    CS = plt.contourf(gamemap, origin='image')
    plt.colorbar()
    #plt.figure()
    #im = plt.imshow(gamemap, interpolation='bicubic',origin='image', cmap=cm.jet)
    #plt.colorbar()
    #plt.show()

# Locate the pass-1 peak; it becomes the centre for pass 2.
(xe,ye) = numpy.nonzero(gamemap == numpy.max(numpy.max(gamemap)))
#Save the original game map, we might be able to exploit it
# (gamemap is rebound -- not mutated -- below, so this alias keeps the
# pass-1 surface intact.)
gamemapFat = gamemap
# print xe, ye

## Begin Pass 2_
# --- Pass 2: fit exp(-quadratic-form) centred on the pass-1 peak.
xs = xloc - xlin[xe]
ys = yloc - ylin[ye]
# print xs
# Form the A Matrix
amat = numpy.ones((len(xs),3))
amat[: , 0] = xs**2
amat[: , 1] = ys**2;
amat[: , 2] = 2*xs*ys;
#print amat
# Note in numpy we have to use dot(A,B) to do matrix multiplication of arrays
amatPI = numpy.dot(amat.T,amat)
coefmat = numpy.dot(numpy.dot(inv(amatPI),amat.T),rssi.T)
# print coefmat

gamemap = numpy.zeros((xres, yres))
# There must be a faster way to do this
for i in range(xres):
    for j in range(yres):
        gamemap[i,j] = pass2(xlin[i], ylin[j], xlin[xe], ylin[ye], coefmat)

# Display Gamemap
# NOTE(review): the shift/normalise of gamemapFat and the blending of the
# two passes below run only when DEBUGMODE == 1, so the saved output
# differs between debug and non-debug runs -- confirm this is intended.
if DEBUGMODE == 1:
    # plt.figure()
    # CS = plt.contourf(gamemap, origin='image')
    # plt.colorbar()
    #X,Y = numpy.mgrid[range(xres), range(yres)]
    #ax = fig.gca(projection='3d')
    #CS = ax.plot_wireframe(gamemap, rstride=1, cstride=1)
    plt.figure()
    im = plt.imshow(gamemap, interpolation='spline36',origin='image', cmap=cm.jet)
    plt.colorbar()
    #plt.figure()
    # Shift the pass-1 map to be non-negative, then rescale to [0, 1].
    gamemapFat += numpy.max(numpy.max(abs(gamemapFat)))
    gamemapFat *= 1.0/numpy.max(numpy.max(numpy.abs(gamemapFat)))
    #im = plt.imshow(gamemapFat, interpolation='spline36',origin='image', cmap=cm.jet)
    #plt.colorbar()
    plt.figure()
    # Weighted blend of pass-2 and pass-1 maps, renormalised to peak 1.
    gamemap = (weight*gamemap+gamemapFat)/(weight+1)
    gamemap = gamemap/numpy.max(numpy.max(gamemap))
    im = plt.imshow(gamemap,interpolation='bicubic', cmap=cm.jet)
    plt.colorbar()
    plt.show()
print xe, ye

# de-normalize the rssi information
gamemap = gamemap*rssiMax
numpy.savetxt("forJennyIns.out", gamemap, delimiter=" ")
numpy.savetxt("forJennyIns.csv", gamemap, delimiter=",")
# First entry corresponds to index 1, second entry corresponds to index 2 etc
numpy.savetxt("xSpace.out", xlin, delimiter = " ")
numpy.savetxt("ySpace.out", ylin, delimiter = " ")
#for i in range(xres):
#    for j in range(yres):
#        print gamemap[i,j]
#numpy.savetxt("xSpace.out", xlin, delimiter=" ")
#numpy.savetxt("yypace.out", ylin, delimiter=" ")
| |
#!/usr/bin/env python3
import datetime
import json
import logging
import time
import requests
from lenderbot import Loan
class Investor:
"""A simple class to interact with your LendingClub account."""
    def __init__(self, iid, auth_key, invest_amt=25, production_mode=False):
        """Set up API credentials, rate limiting and cached note state.

        :param iid: LendingClub investor/account id (used in endpoint URLs)
        :param auth_key: value for the API 'Authorization' header
        :param invest_amt: dollars requested per note order (default 25)
        :param production_mode: when False, submit_order() is a dry run
        """
        self.iid = iid
        self.headers = {'Authorization': auth_key, 'Accept': 'application/json', 'Content-type': 'application/json'}
        self.endpoint_root = 'https://api.lendingclub.com/api/investor/v1/'
        self.invest_amt = invest_amt
        self.production_mode = production_mode
        self.logger = logging.getLogger(__name__)
        self.time_delay = datetime.timedelta(seconds=1)  # We must wait one second between requests
        self.last_request_ts = datetime.datetime.min  # No requests have been made yet
        self.max_log_len = 1024  # responses longer than this are not debug-logged
        self.filters = []
        # NOTE: this issues a network request at construction time.
        # NOTE(review): assumes OwnedNote supports dict-style indexing by
        # 'loanId' -- confirm against the Loan module.
        self.my_note_ids = [x['loanId'] for x in self.get_notes_owned()]
def __set_ts(self):
self.last_request_ts = datetime.datetime.now()
return
    def __get_ts(self):
        # Timestamp of the most recent API request
        # (datetime.datetime.min until the first request is made).
        return self.last_request_ts
def __execute_delay(self):
cur_time = datetime.datetime.now()
delta = cur_time - self.__get_ts()
if delta < self.time_delay:
# Round up sleep time to the nearest second
sleep_time = (delta + datetime.timedelta(milliseconds=999)).seconds
time.sleep(sleep_time)
return
def __execute_get(self, url, log=True):
self.__execute_delay()
endpoint = self.endpoint_root + url
response = requests.get(endpoint, headers=self.headers)
self.__set_ts()
if log and len(response.text) < self.max_log_len:
self.logger.debug('-------- GET BEGIN --------')
self.logger.debug('Endpoint: %s', endpoint)
self.logger.debug('Headers: %s', self.headers)
self.logger.debug('Response: %s | %s', response, response.text)
self.logger.debug('--------- GET END ---------')
try:
# We expect a valid JSON response
return json.loads(str(response.text))
except:
# We received a garbage response. Log error and return None
self.logger.warning('Get failed. Response text: \'%s\'', response.text)
return None
def __execute_post(self, url, payload=None, log=True):
self.__execute_delay()
endpoint = self.endpoint_root + url
response = requests.post(endpoint, data=payload, headers=self.headers)
self.__set_ts()
if log and len(response.text) < self.max_log_len:
self.logger.debug('-------- POST BEGIN --------')
self.logger.debug('Endpoint: %s', endpoint)
self.logger.debug('Data: %s', payload)
self.logger.debug('Headers: %s', self.headers)
self.logger.debug('Response: %s | %s', response, response.text)
self.logger.debug('--------- POST END ---------')
try:
# We expect a valid JSON response
return json.loads(response.text)
except:
# We received a garbage response. Log error and return None
self.logger.warning('Post failed. Response text: \'%s\'', response.text)
return None
def get_loans(self, showAll=False):
loans = []
listings = self.__execute_get('loans/listing?showAll=%s' % (showAll))
if listings is not None and 'loans' in listings:
raw_loans = listings['loans']
loans = [Loan.InFundingLoan(raw_loan) for raw_loan in raw_loans]
return loans
def get_cash(self):
"""Retrieve available cash balance."""
cash = self.__execute_get('accounts/%s/availablecash' % (self.iid))
try:
return cash['availableCash']
except (TypeError, KeyError):
return 0
def get_notes_owned(self):
"""Retrieve basic information on currently owned notes."""
mynotes = self.__execute_get('accounts/%s/notes' % (self.iid))
try:
return [Loan.OwnedNote(raw_loan) for raw_loan in mynotes['myNotes']]
except (TypeError, KeyError):
return []
def get_detailed_notes_owned(self):
"""Retrieve detailed information on currently owned notes."""
mynotes = self.__execute_get('accounts/%s/detailednotes' % (self.iid))
try:
return [Loan.DetailedOwnedNote(raw_loan) for raw_loan in mynotes['myNotes']]
except (TypeError, KeyError):
return []
def submit_order(self, loans, portfolio=None, return_all=False):
"""Place a note order. Default behavior will return the execution status for successfully ordered notes."""
if self.production_mode:
# Portfolio parameter can either be a dictionary or portfolio ID
portfolio_id = None
if isinstance(portfolio, dict):
portfolio_id = portfolio['portfolioId']
elif isinstance(portfolio, str):
portfolio_id = portfolio
elif portfolio is not None:
self.logger.error('Invalid portfolio type passed to submit_order()')
# Construction order payload
if not isinstance(loans, list):
loans = [loans]
loan_dict = [{'loanId': loan['id'], 'requestedAmount': self.invest_amt} for loan in loans]
if portfolio_id:
for loan in loan_dict:
loan.update({'portfolioId': portfolio_id})
order = json.dumps({"aid": self.iid, "orders": loan_dict})
# Place order and return the order execution status
order_status = self.__execute_post('accounts/%s/orders' % (self.iid), payload=order)
try:
# An execution status for each note is listed under the 'orderConfirmations' key.
# Each execution status contains a list of attributes about how the order was (or
# wasn't) fulfilled. Return the set of execution status' that were successful.
success_status = [
'ORDER_FULFILLED',
'LOAN_AMNT_EXCEEDED',
'REQUESTED_AMNT_ROUNDED',
'AUGMENTED_BY_MERGE',
'NOTE_ADDED_TO_PORTFOLIO',
'NOT_A_VALID_PORTFOLIO',
'ERROR_ADDING_NOTE_TO_PORTFOLIO'
]
c = order_status['orderConfirmations']
if return_all:
return c
else:
return [es for es in c if set(es['executionStatus']).intersection(success_status)]
except (TypeError, KeyError):
return []
else:
self.logger.info('Running in test mode. Skipping loan order')
return []
def add_funds(self, amount):
"""Initiate bank transfer to fund account."""
if self.production_mode:
payload = json.dumps({'amount': amount, 'transferFrequency': 'LOAD_NOW'})
return self.__execute_post('accounts/%s/funds/add' % (self.iid), payload=payload)
else:
self.logger.info('Running in test mode. Skipping money transfer.')
return None
def get_pending_transfers(self):
"""Retrieve information on current pending bank transfers."""
xfers = self.__execute_get('accounts/%s/funds/pending' % (self.iid))
try:
return xfers['transfers']
except (TypeError, KeyError):
return []
def get_portfolios(self):
"""Retrieve information on all portfolios."""
portfolios = self.__execute_get('accounts/%s/portfolios' % (self.iid))
try:
return portfolios['myPortfolios']
except (TypeError, KeyError):
return []
def get_portfolio(self, name, create=False):
"""
Retrieve information on a specific portfolio.
Optionally create the portfolio if it does not exist.
"""
# Return requested portfolio, if it exists
portfolios = self.get_portfolios()
for p in portfolios:
if p['portfolioName'] == name:
return p
# Portfolio doesn't exist.
if create:
return self.create_portfolio(name)
return None
def create_portfolio(self, portfolio_name, portfolio_description=None):
"""Create a portfolio."""
if self.production_mode:
payload = json.dumps({'aid': self.iid, 'portfolioName': portfolio_name, 'portfolioDescription': portfolio_description})
return self.__execute_post('accounts/%d/portfolios' % (self.iid), payload=payload)
else:
self.logger.info('Running in test mode. Skipping portfolio creation.')
return None
| |
# -*- coding: utf-8 -*-
from __future__ import print_function
import time
import numpy as np
from acq4.util import Qt
from ..Stage import Stage, MoveFuture, StageInterface
from acq4.drivers.Scientifica import Scientifica as ScientificaDriver
from acq4.util.Mutex import Mutex
from acq4.util.Thread import Thread
from acq4.pyqtgraph import debug, ptime, SpinBox
class Scientifica(Stage):
    """
    A Scientifica motorized device.

    This class supports PatchStar, MicroStar, SliceScope, objective changers, etc.
    The device may be identified either by its serial port or by its description
    string:

        port: <serial port>   # eg. 'COM1' or '/dev/ttyACM0'
        name: <string>        # eg. 'SliceScope' or 'MicroStar 2'
        baudrate: <int>       # may be 9600 or 38400

    The optional 'baudrate' parameter is used to set the baudrate of the device.
    Both valid rates will be attempted when initially connecting.
    """
    def __init__(self, man, config, name):
        # Device identification may be given by serial port or by the
        # controller's self-reported description string; both are popped from
        # the config so they are not forwarded to the Stage base class.
        port = config.pop('port', None)
        name = config.pop('name', None)
        # Conversion from controller units to meters, per axis.
        self.scale = config.pop('scale', (1e-6, 1e-6, 1e-6))
        baudrate = config.pop('baudrate', None)
        ctrl_version = config.pop('version', 2)
        try:
            self.dev = ScientificaDriver(port=port, name=name, baudrate=baudrate, ctrl_version=ctrl_version)
        except RuntimeError as err:
            # The driver attaches dev_version when the controller firmware
            # does not match the requested ctrl_version; surface a hint about
            # the config key to set.
            # NOTE(review): err.message is Python-2 only — confirm this module
            # still targets py2 (the __future__ import suggests 2/3 support).
            if hasattr(err, 'dev_version'):
                raise RuntimeError(err.message + " You must add `version=%d` to the configuration for this device and double-check any speed/acceleration parameters." % int(err.dev_version))
            else:
                raise
        # Controllers reset their baud to 9600 after power cycle
        if baudrate is not None and self.dev.getBaudrate() != baudrate:
            self.dev.setBaudrate(baudrate)
        self._lastMove = None
        man.sigAbortAll.connect(self.abort)
        Stage.__init__(self, man, config, name)
        # clear cached position for this device and re-read to generate an initial position update
        self._lastPos = None
        self.getPosition(refresh=True)
        # Set approach angle
        # Disabled--this toggles the approach bit and we can't reconfigure it from here :(
        # approach = self.dev.send('APPROACH')
        # self.dev.send('ANGLE %f' % self.pitch)
        # self.dev.send('APPROACH %s' % approach) # reset approach bit; setting angle enables it
        # set any extra parameters specified in the config
        params = config.get('params', {})
        for param, val in params.items():
            if param == 'currents':
                # Motor run/standby currents; the driver expects exactly two values.
                assert len(val) == 2
                self.dev.setCurrents(*val)
            elif param == 'axisScale':
                # Per-axis scale factors applied inside the controller.
                assert len(val) == 3
                for i, x in enumerate(val):
                    self.dev.setAxisScale(i, x)
            else:
                self.dev.setParam(param, val)
        # Default manual-control speed: the controller's current speed
        # converted to m/sec via the first axis scale.
        self.setUserSpeed(config.get('userSpeed', self.dev.getSpeed() * abs(self.scale[0])))
        # whether to monitor for changes to a MOC (motorized objective changer)
        self.monitorObj = config.get('monitorObjective', False)
        if self.monitorObj is True:
            self.objectiveState = None
            self._checkObjective()
        # thread for polling position changes
        self.monitor = MonitorThread(self, self.monitorObj)
        self.monitor.start()
    def capabilities(self):
        """Return a structure describing the capabilities of this device"""
        if 'capabilities' in self.config:
            # Allow the config to override the hardware defaults.
            return self.config['capabilities']
        else:
            return {
                'getPos': (True, True, True),
                'setPos': (True, True, True),
                'limits': (False, False, False),
            }
    def stop(self):
        """Stop the manipulator immediately.

        Takes the device lock; any in-progress move future is marked stopped.
        """
        with self.lock:
            self.dev.stop()
            if self._lastMove is not None:
                self._lastMove._stopped()
            self._lastMove = None
    def abort(self):
        """Stop the manipulator immediately.

        Unlike stop(), this does not take the device lock — presumably so a
        global abort signal cannot deadlock on a held lock (TODO confirm).
        """
        self.dev.stop()
        if self._lastMove is not None:
            self._lastMove._stopped()
        self._lastMove = None
    def setUserSpeed(self, v):
        """Set the maximum speed of the stage (m/sec) when under manual control.

        The stage's maximum speed is reset to this value when it is not under
        programmed control.
        """
        self.userSpeed = v
        # Convert m/sec back into controller units using the first axis scale.
        self.dev.setSpeed(v / abs(self.scale[0]))
    def _getPosition(self):
        # Called by superclass when user requests position refresh
        with self.lock:
            pos = self.dev.getPos()
            # Convert controller units to meters per axis.
            pos = [pos[i] * self.scale[i] for i in (0, 1, 2)]
            if pos != self._lastPos:
                self._lastPos = pos
                emit = True
            else:
                emit = False
        if emit:
            # don't emit signal while locked
            self.posChanged(pos)
        return pos
    def targetPosition(self):
        """Return the current move target, or the present position if idle."""
        with self.lock:
            if self._lastMove is None or self._lastMove.isDone():
                return self.getPosition()
            else:
                return self._lastMove.targetPos
    def quit(self):
        # Stop the polling thread before tearing down the base class.
        self.monitor.stop()
        Stage.quit(self)
    def _move(self, abs, rel, speed, linear):
        # Begin a programmed move; any unfinished previous move is stopped first.
        with self.lock:
            if self._lastMove is not None and not self._lastMove.isDone():
                self.stop()
            pos = self._toAbsolutePosition(abs, rel)
            speed = self._interpretSpeed(speed)
            self._lastMove = ScientificaMoveFuture(self, pos, speed, self.userSpeed)
            return self._lastMove
    def deviceInterface(self, win):
        """Return the GUI widget used to control this device."""
        return ScientificaGUI(self, win)
    def startMoving(self, vel):
        """Begin moving the stage at a continuous velocity.
        """
        # s = [int(-v * 1000. / 67. / self.scale[i]) for i,v in enumerate(vel)]
        # print(s)
        # self.dev.send('VJ %d %d %d C' % tuple(s))
        # NOTE(review): uses a fixed 1e8 gain and negates only the first axis
        # (see the '-%d' below) — confirm against the device's axis
        # orientation; the scale-aware version above is commented out.
        s = [int(1e8 * v) for i,v in enumerate(vel)]
        self.dev.send('VJ -%d %d %d' % tuple(s))
    def _checkObjective(self):
        # Poll the objective-changer position and emit a switch-change signal
        # when it differs from the cached state.
        with self.lock:
            obj = int(self.dev.send('obj'))
            if obj != self.objectiveState:
                self.objectiveState = obj
                self.sigSwitchChanged.emit(self, {'objective': obj})
    def getSwitch(self, name):
        """Return the cached objective state when monitored; defer to Stage otherwise."""
        if name == 'objective' and self.monitorObj:
            return self.objectiveState
        else:
            return Stage.getSwitch(self, name)
class MonitorThread(Thread):
    """Thread to poll for manipulator position changes.

    Poll rate adapts: it tightens to ~100 ms while the stage is moving and
    backs off exponentially (capped at ``self.interval``) once it settles.
    """
    def __init__(self, dev, monitorObj):
        self.dev = dev
        self.lock = Mutex(recursive=True)
        # Whether to also poll the motorized objective changer each cycle.
        self.monitorObj = monitorObj
        self.stopped = False
        # Maximum (idle) poll interval in seconds; adjustable via setInterval().
        self.interval = 0.3
        Thread.__init__(self)
    def start(self):
        """Start (or restart) polling; clears any previous stop request."""
        self.stopped = False
        Thread.start(self)
    def stop(self):
        """Request the polling loop to exit at its next iteration."""
        with self.lock:
            self.stopped = True
    def setInterval(self, i):
        """Set the maximum poll interval (seconds)."""
        with self.lock:
            self.interval = i
    def run(self):
        minInterval = 100e-3
        interval = minInterval
        lastPos = None
        while True:
            try:
                with self.lock:
                    if self.stopped:
                        break
                    # Snapshot the configured max interval while holding the lock.
                    maxInterval = self.interval
                pos = self.dev._getPosition()  # this causes sigPositionChanged to be emitted
                if pos != lastPos:
                    # if there was a change, then loop more rapidly for a short time.
                    interval = minInterval
                    lastPos = pos
                else:
                    # No movement: back off exponentially up to maxInterval.
                    interval = min(maxInterval, interval*2)
                if self.monitorObj is True:
                    self.dev._checkObjective()
                time.sleep(interval)
            except:
                # Keep the monitor alive on any error; log and retry after
                # the maximum interval.
                debug.printExc('Error in Scientifica monitor thread:')
                time.sleep(maxInterval)
class ScientificaMoveFuture(MoveFuture):
    """Provides access to a move-in-progress on a Scientifica manipulator.

    The move is started in __init__; status is polled lazily via _getStatus().
    """
    def __init__(self, dev, pos, speed, userSpeed):
        MoveFuture.__init__(self, dev, pos, speed)
        self._interrupted = False
        # Bug fix: this attribute was misspelled `_errorMSg`, so
        # errorMessage() raised AttributeError for any move that completed
        # without ever assigning an error.
        self._errorMsg = None
        self._finished = False
        # Convert target from meters to controller units.
        pos = np.array(pos) / np.array(self.dev.scale)
        with self.dev.dev.lock:
            self.dev.dev.moveTo(pos, speed / abs(self.dev.scale[0]))
            # reset to user speed immediately after starting move
            # (the move itself will run with the previous speed)
            self.dev.dev.setSpeed(userSpeed / abs(self.dev.scale[0]))
    def wasInterrupted(self):
        """Return True if the move was interrupted before completing.
        """
        return self._interrupted
    def isDone(self):
        """Return True if the move is complete.
        """
        return self._getStatus() != 0
    def _getStatus(self):
        # check status of move unless we already know it is complete.
        # 0: still moving; 1: finished successfully; -1: finished unsuccessfully
        if self._finished:
            if self._interrupted:
                return -1
            else:
                return 1
        if self.dev.dev.isMoving():
            # Still moving
            return 0
        # did we reach target?
        pos = self.dev._getPosition()
        dif = ((np.array(pos) - np.array(self.targetPos))**2).sum()**0.5
        if dif < 2.5e-6:
            # reached target (within 2.5 um tolerance)
            self._finished = True
            return 1
        else:
            # missed
            self._finished = True
            self._interrupted = True
            self._errorMsg = "Move did not complete (target=%s, position=%s, dif=%s)." % (self.targetPos, pos, dif)
            return -1
    def _stopped(self):
        # Called when the manipulator is stopped, possibly interrupting this move.
        status = self._getStatus()
        if status == 1:
            # finished; ignore stop
            return
        elif status == -1:
            self._errorMsg = "Move was interrupted before completion."
        elif status == 0:
            # not actually stopped! This should not happen.
            raise RuntimeError("Interrupted move but manipulator is still running!")
        else:
            raise Exception("Unknown status: %s" % status)
    def errorMessage(self):
        """Return the error message for a failed move, or None on success."""
        return self._errorMsg
class ScientificaGUI(StageInterface):
    """Stage GUI extended with Scientifica-specific controls.

    Adds a 'Zero position' button and a rotary-controller speed spinner below
    the widgets created by the StageInterface base class.
    """
    def __init__(self, dev, win):
        StageInterface.__init__(self, dev, win)
        # Insert Scientifica-specific controls into GUI
        self.zeroBtn = Qt.QPushButton('Zero position')
        self.layout.addWidget(self.zeroBtn, self.nextRow, 0, 1, 2)
        self.nextRow += 1
        self.psGroup = Qt.QGroupBox('Rotary Controller')
        self.layout.addWidget(self.psGroup, self.nextRow, 0, 1, 2)
        self.nextRow += 1
        self.psLayout = Qt.QGridLayout()
        self.psGroup.setLayout(self.psLayout)
        self.speedLabel = Qt.QLabel('Speed')
        # Speed per knob turn, bounded to 1 um .. 10 mm.
        self.speedSpin = SpinBox(value=self.dev.userSpeed, suffix='m/turn', siPrefix=True, dec=True, bounds=[1e-6, 10e-3])
        self.psLayout.addWidget(self.speedLabel, 0, 0)
        self.psLayout.addWidget(self.speedSpin, 0, 1)
        self.zeroBtn.clicked.connect(self.dev.dev.zeroPosition)
        # NOTE(review): connects to dev.setDefaultSpeed, but the Scientifica
        # class above only defines setUserSpeed — confirm setDefaultSpeed is
        # provided by the Stage base class.
        self.speedSpin.valueChanged.connect(lambda v: self.dev.setDefaultSpeed(v))
| |
#!/usr/bin/env python2
# Copyright (c) 2014-2015 The Aureus Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test addressindex generation and fetching
#
import time
from test_framework.test_framework import AureusTestFramework
from test_framework.util import *
from test_framework.script import *
from test_framework.mininode import *
import binascii
class AddressIndexTest(AureusTestFramework):
    """Functional test for -addressindex: txid/balance/delta/utxo/mempool
    queries by address, including reorg handling. (Python 2 test script.)
    """
    def setup_chain(self):
        # Start from a completely clean 4-node chain.
        print("Initializing test directory "+self.options.tmpdir)
        initialize_chain_clean(self.options.tmpdir, 4)
    def setup_network(self):
        self.nodes = []
        # Nodes 0/1 are "wallet" nodes
        self.nodes.append(start_node(0, self.options.tmpdir, ["-debug", "-relaypriority=0"]))
        self.nodes.append(start_node(1, self.options.tmpdir, ["-debug", "-addressindex"]))
        # Nodes 2/3 are used for testing
        self.nodes.append(start_node(2, self.options.tmpdir, ["-debug", "-addressindex", "-relaypriority=0"]))
        self.nodes.append(start_node(3, self.options.tmpdir, ["-debug", "-addressindex"]))
        # Star topology centered on node 0.
        connect_nodes(self.nodes[0], 1)
        connect_nodes(self.nodes[0], 2)
        connect_nodes(self.nodes[0], 3)
        self.is_network_split = False
        self.sync_all()
    def run_test(self):
        # Mature some coinbase outputs so node 0 has spendable funds.
        print "Mining blocks..."
        self.nodes[0].generate(105)
        self.sync_all()
        chain_height = self.nodes[1].getblockcount()
        assert_equal(chain_height, 105)
        assert_equal(self.nodes[1].getbalance(), 0)
        assert_equal(self.nodes[2].getbalance(), 0)
        # Check that balances are correct
        balance0 = self.nodes[1].getaddressbalance("2N2JD6wb56AfK4tfmM6PwdVmoYk2dCKf4Br")
        assert_equal(balance0["balance"], 0)
        # Check p2pkh and p2sh address indexes
        print "Testing p2pkh and p2sh address index..."
        # Alternate sends to a p2pkh address (txid*) and a p2sh address
        # (txidb*), mining a block after each so heights are deterministic.
        txid0 = self.nodes[0].sendtoaddress("mo9ncXisMeAoXwqcV5EWuyncbmCcQN4rVs", 10)
        self.nodes[0].generate(1)
        txidb0 = self.nodes[0].sendtoaddress("2N2JD6wb56AfK4tfmM6PwdVmoYk2dCKf4Br", 10)
        self.nodes[0].generate(1)
        txid1 = self.nodes[0].sendtoaddress("mo9ncXisMeAoXwqcV5EWuyncbmCcQN4rVs", 15)
        self.nodes[0].generate(1)
        txidb1 = self.nodes[0].sendtoaddress("2N2JD6wb56AfK4tfmM6PwdVmoYk2dCKf4Br", 15)
        self.nodes[0].generate(1)
        txid2 = self.nodes[0].sendtoaddress("mo9ncXisMeAoXwqcV5EWuyncbmCcQN4rVs", 20)
        self.nodes[0].generate(1)
        txidb2 = self.nodes[0].sendtoaddress("2N2JD6wb56AfK4tfmM6PwdVmoYk2dCKf4Br", 20)
        self.nodes[0].generate(1)
        self.sync_all()
        # The index should return txids in block order.
        txids = self.nodes[1].getaddresstxids("mo9ncXisMeAoXwqcV5EWuyncbmCcQN4rVs")
        assert_equal(len(txids), 3)
        assert_equal(txids[0], txid0)
        assert_equal(txids[1], txid1)
        assert_equal(txids[2], txid2)
        txidsb = self.nodes[1].getaddresstxids("2N2JD6wb56AfK4tfmM6PwdVmoYk2dCKf4Br")
        assert_equal(len(txidsb), 3)
        assert_equal(txidsb[0], txidb0)
        assert_equal(txidsb[1], txidb1)
        assert_equal(txidsb[2], txidb2)
        # Check that limiting by height works
        print "Testing querying txids by range of block heights.."
        height_txids = self.nodes[1].getaddresstxids({
            "addresses": ["2N2JD6wb56AfK4tfmM6PwdVmoYk2dCKf4Br"],
            "start": 105,
            "end": 110
        })
        assert_equal(len(height_txids), 2)
        assert_equal(height_txids[0], txidb0)
        assert_equal(height_txids[1], txidb1)
        # Check that multiple addresses works
        multitxids = self.nodes[1].getaddresstxids({"addresses": ["2N2JD6wb56AfK4tfmM6PwdVmoYk2dCKf4Br", "mo9ncXisMeAoXwqcV5EWuyncbmCcQN4rVs"]})
        assert_equal(len(multitxids), 6)
        assert_equal(multitxids[0], txid0)
        assert_equal(multitxids[1], txidb0)
        assert_equal(multitxids[2], txid1)
        assert_equal(multitxids[3], txidb1)
        assert_equal(multitxids[4], txid2)
        assert_equal(multitxids[5], txidb2)
        # Check that balances are correct
        balance0 = self.nodes[1].getaddressbalance("2N2JD6wb56AfK4tfmM6PwdVmoYk2dCKf4Br")
        assert_equal(balance0["balance"], 45 * 100000000)
        # Check that outputs with the same address will only return one txid
        print "Testing for txid uniqueness..."
        # Build a raw tx with two outputs paying the same p2sh script.
        addressHash = "6349a418fc4578d10a372b54b45c280cc8c4382f".decode("hex")
        scriptPubKey = CScript([OP_HASH160, addressHash, OP_EQUAL])
        unspent = self.nodes[0].listunspent()
        tx = CTransaction()
        tx.vin = [CTxIn(COutPoint(int(unspent[0]["txid"], 16), unspent[0]["vout"]))]
        tx.vout = [CTxOut(10, scriptPubKey), CTxOut(11, scriptPubKey)]
        tx.rehash()
        signed_tx = self.nodes[0].signrawtransaction(binascii.hexlify(tx.serialize()).decode("utf-8"))
        sent_txid = self.nodes[0].sendrawtransaction(signed_tx["hex"], True)
        self.nodes[0].generate(1)
        self.sync_all()
        # Only one new txid should appear despite the two matching outputs.
        txidsmany = self.nodes[1].getaddresstxids("2N2JD6wb56AfK4tfmM6PwdVmoYk2dCKf4Br")
        assert_equal(len(txidsmany), 4)
        assert_equal(txidsmany[3], sent_txid)
        # Check that balances are correct
        print "Testing balances..."
        balance0 = self.nodes[1].getaddressbalance("2N2JD6wb56AfK4tfmM6PwdVmoYk2dCKf4Br")
        assert_equal(balance0["balance"], 45 * 100000000 + 21)
        # Check that balances are correct after spending
        print "Testing balances after spending..."
        privkey2 = "cSdkPxkAjA4HDr5VHgsebAPDEh9Gyub4HK8UJr2DFGGqKKy4K5sG"
        address2 = "mgY65WSfEmsyYaYPQaXhmXMeBhwp4EcsQW"
        addressHash2 = "0b2f0a0c31bfe0406b0ccc1381fdbe311946dadc".decode("hex")
        scriptPubKey2 = CScript([OP_DUP, OP_HASH160, addressHash2, OP_EQUALVERIFY, OP_CHECKSIG])
        self.nodes[0].importprivkey(privkey2)
        # Fund address2 with an entire unspent output (amount in satoshis).
        unspent = self.nodes[0].listunspent()
        tx = CTransaction()
        tx.vin = [CTxIn(COutPoint(int(unspent[0]["txid"], 16), unspent[0]["vout"]))]
        amount = unspent[0]["amount"] * 100000000
        tx.vout = [CTxOut(amount, scriptPubKey2)]
        tx.rehash()
        signed_tx = self.nodes[0].signrawtransaction(binascii.hexlify(tx.serialize()).decode("utf-8"))
        spending_txid = self.nodes[0].sendrawtransaction(signed_tx["hex"], True)
        self.nodes[0].generate(1)
        self.sync_all()
        balance1 = self.nodes[1].getaddressbalance(address2)
        assert_equal(balance1["balance"], amount)
        # Spend from address2: part back to itself as change, part elsewhere.
        tx = CTransaction()
        tx.vin = [CTxIn(COutPoint(int(spending_txid, 16), 0))]
        send_amount = 1 * 100000000 + 12840
        change_amount = amount - send_amount - 10000
        tx.vout = [CTxOut(change_amount, scriptPubKey2), CTxOut(send_amount, scriptPubKey)]
        tx.rehash()
        signed_tx = self.nodes[0].signrawtransaction(binascii.hexlify(tx.serialize()).decode("utf-8"))
        sent_txid = self.nodes[0].sendrawtransaction(signed_tx["hex"], True)
        self.nodes[0].generate(1)
        self.sync_all()
        balance2 = self.nodes[1].getaddressbalance(address2)
        assert_equal(balance2["balance"], change_amount)
        # Check that deltas are returned correctly
        deltas = self.nodes[1].getaddressdeltas({"addresses": [address2], "start": 1, "end": 200})
        balance3 = 0
        for delta in deltas:
            balance3 += delta["satoshis"]
        assert_equal(balance3, change_amount)
        assert_equal(deltas[0]["address"], address2)
        assert_equal(deltas[0]["blockindex"], 1)
        # Check that entire range will be queried
        deltasAll = self.nodes[1].getaddressdeltas({"addresses": [address2]})
        assert_equal(len(deltasAll), len(deltas))
        # Check that deltas can be returned from range of block heights
        deltas = self.nodes[1].getaddressdeltas({"addresses": [address2], "start": 113, "end": 113})
        assert_equal(len(deltas), 1)
        # Check that unspent outputs can be queried
        print "Testing utxos..."
        utxos = self.nodes[1].getaddressutxos({"addresses": [address2]})
        assert_equal(len(utxos), 1)
        assert_equal(utxos[0]["satoshis"], change_amount)
        # Check that indexes will be updated with a reorg
        print "Testing reorg..."
        # Invalidate the tip on every node so the spend above is undone.
        best_hash = self.nodes[0].getbestblockhash()
        self.nodes[0].invalidateblock(best_hash)
        self.nodes[1].invalidateblock(best_hash)
        self.nodes[2].invalidateblock(best_hash)
        self.nodes[3].invalidateblock(best_hash)
        self.sync_all()
        balance4 = self.nodes[1].getaddressbalance(address2)
        assert_equal(balance4, balance1)
        utxos2 = self.nodes[1].getaddressutxos({"addresses": [address2]})
        assert_equal(len(utxos2), 1)
        assert_equal(utxos2[0]["satoshis"], amount)
        # Check sorting of utxos
        self.nodes[2].generate(150)
        txidsort1 = self.nodes[2].sendtoaddress(address2, 50)
        self.nodes[2].generate(1)
        txidsort2 = self.nodes[2].sendtoaddress(address2, 50)
        self.nodes[2].generate(1)
        self.sync_all()
        # Utxos should come back sorted by block height, ascending.
        utxos3 = self.nodes[1].getaddressutxos({"addresses": [address2]})
        assert_equal(len(utxos3), 3)
        assert_equal(utxos3[0]["height"], 114)
        assert_equal(utxos3[1]["height"], 264)
        assert_equal(utxos3[2]["height"], 265)
        # Check mempool indexing
        print "Testing mempool indexing..."
        privKey3 = "cVfUn53hAbRrDEuMexyfgDpZPhF7KqXpS8UZevsyTDaugB7HZ3CD"
        address3 = "mw4ynwhS7MmrQ27hr82kgqu7zryNDK26JB"
        addressHash3 = "aa9872b5bbcdb511d89e0e11aa27da73fd2c3f50".decode("hex")
        scriptPubKey3 = CScript([OP_DUP, OP_HASH160, addressHash3, OP_EQUALVERIFY, OP_CHECKSIG])
        address4 = "2N8oFVB2vThAKury4vnLquW2zVjsYjjAkYQ"
        scriptPubKey4 = CScript([OP_HASH160, addressHash3, OP_EQUAL])
        unspent = self.nodes[2].listunspent()
        # First unconfirmed tx: one output to address3.
        tx = CTransaction()
        tx.vin = [CTxIn(COutPoint(int(unspent[0]["txid"], 16), unspent[0]["vout"]))]
        amount = unspent[0]["amount"] * 100000000
        tx.vout = [CTxOut(amount, scriptPubKey3)]
        tx.rehash()
        signed_tx = self.nodes[2].signrawtransaction(binascii.hexlify(tx.serialize()).decode("utf-8"))
        memtxid1 = self.nodes[2].sendrawtransaction(signed_tx["hex"], True)
        # Sleep so the two mempool entries get distinct timestamps.
        time.sleep(2)
        # Second unconfirmed tx: two outputs to address3, two to address4.
        tx2 = CTransaction()
        tx2.vin = [CTxIn(COutPoint(int(unspent[1]["txid"], 16), unspent[1]["vout"]))]
        amount = unspent[1]["amount"] * 100000000
        tx2.vout = [
            CTxOut(amount / 4, scriptPubKey3),
            CTxOut(amount / 4, scriptPubKey3),
            CTxOut(amount / 4, scriptPubKey4),
            CTxOut(amount / 4, scriptPubKey4)
        ]
        tx2.rehash()
        signed_tx2 = self.nodes[2].signrawtransaction(binascii.hexlify(tx2.serialize()).decode("utf-8"))
        memtxid2 = self.nodes[2].sendrawtransaction(signed_tx2["hex"], True)
        time.sleep(2)
        # address3 should see three mempool entries: one from memtxid1 and
        # two (output indexes 0 and 1) from memtxid2.
        mempool = self.nodes[2].getaddressmempool({"addresses": [address3]})
        assert_equal(len(mempool), 3)
        assert_equal(mempool[0]["txid"], memtxid1)
        assert_equal(mempool[0]["address"], address3)
        assert_equal(mempool[0]["index"], 0)
        assert_equal(mempool[1]["txid"], memtxid2)
        assert_equal(mempool[1]["index"], 0)
        assert_equal(mempool[2]["txid"], memtxid2)
        assert_equal(mempool[2]["index"], 1)
        # Mining the txs should clear them from the mempool index.
        self.nodes[2].generate(1);
        self.sync_all();
        mempool2 = self.nodes[2].getaddressmempool({"addresses": [address3]})
        assert_equal(len(mempool2), 0)
        # Spend the two address3 outputs; the mempool index should report
        # the spends with prevtxid/prevout set.
        tx = CTransaction()
        tx.vin = [
            CTxIn(COutPoint(int(memtxid2, 16), 0)),
            CTxIn(COutPoint(int(memtxid2, 16), 1))
        ]
        tx.vout = [CTxOut(amount / 2 - 10000, scriptPubKey2)]
        tx.rehash()
        self.nodes[2].importprivkey(privKey3)
        signed_tx3 = self.nodes[2].signrawtransaction(binascii.hexlify(tx.serialize()).decode("utf-8"))
        memtxid3 = self.nodes[2].sendrawtransaction(signed_tx3["hex"], True)
        time.sleep(2)
        mempool3 = self.nodes[2].getaddressmempool({"addresses": [address3]})
        assert_equal(len(mempool3), 2)
        assert_equal(mempool3[0]["prevtxid"], memtxid2)
        assert_equal(mempool3[0]["prevout"], 0)
        assert_equal(mempool3[1]["prevtxid"], memtxid2)
        assert_equal(mempool3[1]["prevout"], 1)
        # sending and receiving to the same address
        privkey1 = "cQY2s58LhzUCmEXN8jtAp1Etnijx78YRZ466w4ikX1V4UpTpbsf8"
        address1 = "myAUWSHnwsQrhuMWv4Br6QsCnpB41vFwHn"
        address1hash = "c192bff751af8efec15135d42bfeedf91a6f3e34".decode("hex")
        address1script = CScript([OP_DUP, OP_HASH160, address1hash, OP_EQUALVERIFY, OP_CHECKSIG])
        self.nodes[0].sendtoaddress(address1, 10)
        self.nodes[0].generate(1)
        self.sync_all()
        utxos = self.nodes[1].getaddressutxos({"addresses": [address1]})
        assert_equal(len(utxos), 1)
        tx = CTransaction()
        tx.vin = [
            CTxIn(COutPoint(int(utxos[0]["txid"], 16), utxos[0]["outputIndex"]))
        ]
        amount = utxos[0]["satoshis"] - 1000
        tx.vout = [CTxOut(amount, address1script)]
        tx.rehash()
        self.nodes[0].importprivkey(privkey1)
        signed_tx = self.nodes[0].signrawtransaction(binascii.hexlify(tx.serialize()).decode("utf-8"))
        mem_txid = self.nodes[0].sendrawtransaction(signed_tx["hex"], True)
        self.sync_all()
        # A self-spend shows up as two mempool deltas: the spend and the receive.
        mempool_deltas = self.nodes[2].getaddressmempool({"addresses": [address1]})
        assert_equal(len(mempool_deltas), 2)
        # Include chaininfo in results
        print "Testing results with chain info..."
        deltas_with_info = self.nodes[1].getaddressdeltas({
            "addresses": [address2],
            "start": 1,
            "end": 200,
            "chainInfo": True
        })
        start_block_hash = self.nodes[1].getblockhash(1);
        end_block_hash = self.nodes[1].getblockhash(200);
        assert_equal(deltas_with_info["start"]["height"], 1)
        assert_equal(deltas_with_info["start"]["hash"], start_block_hash)
        assert_equal(deltas_with_info["end"]["height"], 200)
        assert_equal(deltas_with_info["end"]["hash"], end_block_hash)
        utxos_with_info = self.nodes[1].getaddressutxos({"addresses": [address2], "chainInfo": True})
        expected_tip_block_hash = self.nodes[1].getblockhash(267);
        assert_equal(utxos_with_info["height"], 267)
        assert_equal(utxos_with_info["hash"], expected_tip_block_hash)
        print "Passed\n"
# Script entry point: run the test through the framework's main() harness.
if __name__ == '__main__':
    AddressIndexTest().main()
| |
"""Test different accessory types: Covers."""
from collections import namedtuple
import pytest
from homeassistant.components.cover import (
ATTR_CURRENT_POSITION,
ATTR_CURRENT_TILT_POSITION,
ATTR_POSITION,
ATTR_TILT_POSITION,
DOMAIN,
SUPPORT_SET_POSITION,
SUPPORT_SET_TILT_POSITION,
SUPPORT_STOP,
)
from homeassistant.components.homekit.const import (
ATTR_VALUE,
HK_DOOR_CLOSED,
HK_DOOR_CLOSING,
HK_DOOR_OPEN,
HK_DOOR_OPENING,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_SUPPORTED_FEATURES,
EVENT_HOMEASSISTANT_START,
SERVICE_SET_COVER_TILT_POSITION,
STATE_CLOSED,
STATE_CLOSING,
STATE_OPEN,
STATE_OPENING,
STATE_UNAVAILABLE,
STATE_UNKNOWN,
)
from homeassistant.core import CoreState
from homeassistant.helpers import entity_registry
from tests.common import async_mock_service
from tests.components.homekit.common import patch_debounce
@pytest.fixture(scope="module")
def cls():
    """Patch debounce decorator during import of type_covers."""
    # The debounce patch must be active while the module is imported so the
    # accessory classes are defined without the real debouncing behavior.
    patcher = patch_debounce()
    patcher.start()
    type_covers = __import__(
        "homeassistant.components.homekit.type_covers",
        fromlist=["GarageDoorOpener", "WindowCovering", "WindowCoveringBasic"],
    )
    holder = namedtuple("Cls", ["window", "window_basic", "garage"])
    yield holder(
        window=type_covers.WindowCovering,
        window_basic=type_covers.WindowCoveringBasic,
        garage=type_covers.GarageDoorOpener,
    )
    patcher.stop()
async def test_garage_door_open_close(hass, hk_driver, cls, events):
    """Test if accessory and HA are updated accordingly."""
    entity_id = "cover.garage_door"
    hass.states.async_set(entity_id, None)
    await hass.async_block_till_done()
    acc = cls.garage(hass, hk_driver, "Garage Door", entity_id, 2, None)
    await acc.run_handler()
    await hass.async_block_till_done()
    assert acc.aid == 2
    assert acc.category == 4  # GarageDoorOpener
    # With no usable HA state, the accessory defaults to "open".
    assert acc.char_current_state.value == HK_DOOR_OPEN
    assert acc.char_target_state.value == HK_DOOR_OPEN
    # HA state changes propagate to both current and target characteristics.
    hass.states.async_set(entity_id, STATE_CLOSED)
    await hass.async_block_till_done()
    assert acc.char_current_state.value == HK_DOOR_CLOSED
    assert acc.char_target_state.value == HK_DOOR_CLOSED
    hass.states.async_set(entity_id, STATE_OPEN)
    await hass.async_block_till_done()
    assert acc.char_current_state.value == HK_DOOR_OPEN
    assert acc.char_target_state.value == HK_DOOR_OPEN
    # Unavailable/unknown states must leave the characteristics untouched.
    hass.states.async_set(entity_id, STATE_UNAVAILABLE)
    await hass.async_block_till_done()
    assert acc.char_current_state.value == HK_DOOR_OPEN
    assert acc.char_target_state.value == HK_DOOR_OPEN
    hass.states.async_set(entity_id, STATE_UNKNOWN)
    await hass.async_block_till_done()
    assert acc.char_current_state.value == HK_DOOR_OPEN
    assert acc.char_target_state.value == HK_DOOR_OPEN
    # Set from HomeKit
    call_close_cover = async_mock_service(hass, DOMAIN, "close_cover")
    call_open_cover = async_mock_service(hass, DOMAIN, "open_cover")
    # HomeKit target value 1 requests a close; the accessory should call the
    # close_cover service and report a "closing" transition.
    await hass.async_add_executor_job(acc.char_target_state.client_update_value, 1)
    await hass.async_block_till_done()
    assert call_close_cover
    assert call_close_cover[0].data[ATTR_ENTITY_ID] == entity_id
    assert acc.char_current_state.value == HK_DOOR_CLOSING
    assert acc.char_target_state.value == HK_DOOR_CLOSED
    assert len(events) == 1
    assert events[-1].data[ATTR_VALUE] is None
    # Re-requesting close while already closed must not re-enter "closing".
    hass.states.async_set(entity_id, STATE_CLOSED)
    await hass.async_block_till_done()
    await hass.async_add_executor_job(acc.char_target_state.client_update_value, 1)
    await hass.async_block_till_done()
    assert acc.char_current_state.value == HK_DOOR_CLOSED
    assert acc.char_target_state.value == HK_DOOR_CLOSED
    assert len(events) == 2
    assert events[-1].data[ATTR_VALUE] is None
    # HomeKit target value 0 requests an open.
    await hass.async_add_executor_job(acc.char_target_state.client_update_value, 0)
    await hass.async_block_till_done()
    assert call_open_cover
    assert call_open_cover[0].data[ATTR_ENTITY_ID] == entity_id
    assert acc.char_current_state.value == HK_DOOR_OPENING
    assert acc.char_target_state.value == HK_DOOR_OPEN
    assert len(events) == 3
    assert events[-1].data[ATTR_VALUE] is None
    # Re-requesting open while already open must not re-enter "opening".
    hass.states.async_set(entity_id, STATE_OPEN)
    await hass.async_block_till_done()
    await hass.async_add_executor_job(acc.char_target_state.client_update_value, 0)
    await hass.async_block_till_done()
    assert acc.char_current_state.value == HK_DOOR_OPEN
    assert acc.char_target_state.value == HK_DOOR_OPEN
    assert len(events) == 4
    assert events[-1].data[ATTR_VALUE] is None
async def test_window_set_cover_position(hass, hk_driver, cls, events):
    """Test if accessory and HA are updated accordingly."""
    entity_id = "cover.window"
    hass.states.async_set(entity_id, None)
    await hass.async_block_till_done()
    acc = cls.window(hass, hk_driver, "Cover", entity_id, 2, None)
    await acc.run_handler()
    await hass.async_block_till_done()
    assert acc.aid == 2
    assert acc.category == 14  # WindowCovering
    assert acc.char_current_position.value == 0
    assert acc.char_target_position.value == 0
    # A missing current position keeps the characteristics at 0 and the
    # position state at 2 (stopped).
    hass.states.async_set(entity_id, STATE_UNKNOWN, {ATTR_CURRENT_POSITION: None})
    await hass.async_block_till_done()
    assert acc.char_current_position.value == 0
    assert acc.char_target_position.value == 0
    assert acc.char_position_state.value == 2
    # Opening: position state 1.
    hass.states.async_set(entity_id, STATE_OPENING, {ATTR_CURRENT_POSITION: 60})
    await hass.async_block_till_done()
    assert acc.char_current_position.value == 60
    assert acc.char_target_position.value == 60
    assert acc.char_position_state.value == 1
    # Float positions must be coerced to int.
    hass.states.async_set(entity_id, STATE_OPENING, {ATTR_CURRENT_POSITION: 70.0})
    await hass.async_block_till_done()
    assert acc.char_current_position.value == 70
    assert acc.char_target_position.value == 70
    assert acc.char_position_state.value == 1
    # Closing: position state 0.
    hass.states.async_set(entity_id, STATE_CLOSING, {ATTR_CURRENT_POSITION: 50})
    await hass.async_block_till_done()
    assert acc.char_current_position.value == 50
    assert acc.char_target_position.value == 50
    assert acc.char_position_state.value == 0
    # Settled open: position state 2 (stopped).
    hass.states.async_set(entity_id, STATE_OPEN, {ATTR_CURRENT_POSITION: 50})
    await hass.async_block_till_done()
    assert acc.char_current_position.value == 50
    assert acc.char_target_position.value == 50
    assert acc.char_position_state.value == 2
    # Set from HomeKit
    call_set_cover_position = async_mock_service(hass, DOMAIN, "set_cover_position")
    # A HomeKit target update should call set_cover_position with the raw
    # percentage and only move the *target* characteristic.
    await hass.async_add_executor_job(acc.char_target_position.client_update_value, 25)
    await hass.async_block_till_done()
    assert call_set_cover_position[0]
    assert call_set_cover_position[0].data[ATTR_ENTITY_ID] == entity_id
    assert call_set_cover_position[0].data[ATTR_POSITION] == 25
    assert acc.char_current_position.value == 50
    assert acc.char_target_position.value == 25
    assert len(events) == 1
    assert events[-1].data[ATTR_VALUE] == 25
    await hass.async_add_executor_job(acc.char_target_position.client_update_value, 75)
    await hass.async_block_till_done()
    assert call_set_cover_position[1]
    assert call_set_cover_position[1].data[ATTR_ENTITY_ID] == entity_id
    assert call_set_cover_position[1].data[ATTR_POSITION] == 75
    assert acc.char_current_position.value == 50
    assert acc.char_target_position.value == 75
    assert len(events) == 2
    assert events[-1].data[ATTR_VALUE] == 75
async def test_window_cover_set_tilt(hass, hk_driver, cls, events):
    """Test if accessory and HA update slat tilt accordingly."""
    entity_id = "cover.window"
    hass.states.async_set(
        entity_id, STATE_UNKNOWN, {ATTR_SUPPORTED_FEATURES: SUPPORT_SET_TILT_POSITION}
    )
    await hass.async_block_till_done()
    acc = cls.window(hass, hk_driver, "Cover", entity_id, 2, None)
    await acc.run_handler()
    await hass.async_block_till_done()
    assert acc.aid == 2
    assert acc.category == 14  # CATEGORY_WINDOW_COVERING
    # An unknown/None tilt is reported as the neutral 0 degree position.
    assert acc.char_current_tilt.value == 0
    assert acc.char_target_tilt.value == 0
    hass.states.async_set(entity_id, STATE_UNKNOWN, {ATTR_CURRENT_TILT_POSITION: None})
    await hass.async_block_till_done()
    assert acc.char_current_tilt.value == 0
    assert acc.char_target_tilt.value == 0
    # HA tilt percentages map onto HomeKit's -90..90 degree range:
    # 100% -> 90, 50% -> 0, 0% -> -90 (asserted below).
    hass.states.async_set(entity_id, STATE_UNKNOWN, {ATTR_CURRENT_TILT_POSITION: 100})
    await hass.async_block_till_done()
    assert acc.char_current_tilt.value == 90
    assert acc.char_target_tilt.value == 90
    hass.states.async_set(entity_id, STATE_UNKNOWN, {ATTR_CURRENT_TILT_POSITION: 50})
    await hass.async_block_till_done()
    assert acc.char_current_tilt.value == 0
    assert acc.char_target_tilt.value == 0
    hass.states.async_set(entity_id, STATE_UNKNOWN, {ATTR_CURRENT_TILT_POSITION: 0})
    await hass.async_block_till_done()
    assert acc.char_current_tilt.value == -90
    assert acc.char_target_tilt.value == -90
    # set from HomeKit
    call_set_tilt_position = async_mock_service(
        hass, DOMAIN, SERVICE_SET_COVER_TILT_POSITION
    )
    # HomeKit sets tilts between -90 and 90 (degrees), whereas
    # Homeassistant expects a % between 0 and 100. Keep that in mind
    # when comparing
    await hass.async_add_executor_job(acc.char_target_tilt.client_update_value, 90)
    await hass.async_block_till_done()
    assert call_set_tilt_position[0]
    assert call_set_tilt_position[0].data[ATTR_ENTITY_ID] == entity_id
    assert call_set_tilt_position[0].data[ATTR_TILT_POSITION] == 100
    assert acc.char_current_tilt.value == -90
    assert acc.char_target_tilt.value == 90
    assert len(events) == 1
    assert events[-1].data[ATTR_VALUE] == 100
    await hass.async_add_executor_job(acc.char_target_tilt.client_update_value, 45)
    await hass.async_block_till_done()
    assert call_set_tilt_position[1]
    assert call_set_tilt_position[1].data[ATTR_ENTITY_ID] == entity_id
    assert call_set_tilt_position[1].data[ATTR_TILT_POSITION] == 75
    assert acc.char_current_tilt.value == -90
    assert acc.char_target_tilt.value == 45
    assert len(events) == 2
    assert events[-1].data[ATTR_VALUE] == 75
async def test_window_open_close(hass, hk_driver, cls, events):
    """Test if accessory and HA are updated accordingly."""
    entity_id = "cover.window"
    # No supported features: the basic cover can only fully open or close.
    hass.states.async_set(entity_id, STATE_UNKNOWN, {ATTR_SUPPORTED_FEATURES: 0})
    acc = cls.window_basic(hass, hk_driver, "Cover", entity_id, 2, None)
    await acc.run_handler()
    await hass.async_block_till_done()
    assert acc.aid == 2
    assert acc.category == 14  # WindowCovering
    # HomeKit position state mapping (asserted below):
    # closing -> 0, opening -> 1, stopped -> 2.
    assert acc.char_current_position.value == 0
    assert acc.char_target_position.value == 0
    assert acc.char_position_state.value == 2
    hass.states.async_set(entity_id, STATE_UNKNOWN)
    await hass.async_block_till_done()
    assert acc.char_current_position.value == 0
    assert acc.char_target_position.value == 0
    assert acc.char_position_state.value == 2
    hass.states.async_set(entity_id, STATE_OPENING)
    await hass.async_block_till_done()
    assert acc.char_current_position.value == 0
    assert acc.char_target_position.value == 0
    assert acc.char_position_state.value == 1
    hass.states.async_set(entity_id, STATE_OPEN)
    await hass.async_block_till_done()
    assert acc.char_current_position.value == 100
    assert acc.char_target_position.value == 100
    assert acc.char_position_state.value == 2
    hass.states.async_set(entity_id, STATE_CLOSING)
    await hass.async_block_till_done()
    assert acc.char_current_position.value == 100
    assert acc.char_target_position.value == 100
    assert acc.char_position_state.value == 0
    hass.states.async_set(entity_id, STATE_CLOSED)
    await hass.async_block_till_done()
    assert acc.char_current_position.value == 0
    assert acc.char_target_position.value == 0
    assert acc.char_position_state.value == 2
    # Set from HomeKit
    call_close_cover = async_mock_service(hass, DOMAIN, "close_cover")
    call_open_cover = async_mock_service(hass, DOMAIN, "open_cover")
    # Without SUPPORT_SET_POSITION a target below 50 closes, above 50 opens.
    await hass.async_add_executor_job(acc.char_target_position.client_update_value, 25)
    await hass.async_block_till_done()
    assert call_close_cover
    assert call_close_cover[0].data[ATTR_ENTITY_ID] == entity_id
    assert acc.char_current_position.value == 0
    assert acc.char_target_position.value == 0
    assert acc.char_position_state.value == 2
    assert len(events) == 1
    assert events[-1].data[ATTR_VALUE] is None
    await hass.async_add_executor_job(acc.char_target_position.client_update_value, 90)
    await hass.async_block_till_done()
    assert call_open_cover[0]
    assert call_open_cover[0].data[ATTR_ENTITY_ID] == entity_id
    assert acc.char_current_position.value == 100
    assert acc.char_target_position.value == 100
    assert acc.char_position_state.value == 2
    assert len(events) == 2
    assert events[-1].data[ATTR_VALUE] is None
    await hass.async_add_executor_job(acc.char_target_position.client_update_value, 55)
    await hass.async_block_till_done()
    assert call_open_cover[1]
    assert call_open_cover[1].data[ATTR_ENTITY_ID] == entity_id
    assert acc.char_current_position.value == 100
    assert acc.char_target_position.value == 100
    assert acc.char_position_state.value == 2
    assert len(events) == 3
    assert events[-1].data[ATTR_VALUE] is None
async def test_window_open_close_stop(hass, hk_driver, cls, events):
    """Test if accessory and HA are updated accordingly."""
    entity_id = "cover.window"
    hass.states.async_set(
        entity_id, STATE_UNKNOWN, {ATTR_SUPPORTED_FEATURES: SUPPORT_STOP}
    )
    acc = cls.window_basic(hass, hk_driver, "Cover", entity_id, 2, None)
    await acc.run_handler()
    await hass.async_block_till_done()
    # Set from HomeKit
    call_close_cover = async_mock_service(hass, DOMAIN, "close_cover")
    call_open_cover = async_mock_service(hass, DOMAIN, "open_cover")
    call_stop_cover = async_mock_service(hass, DOMAIN, "stop_cover")
    # Target below 50 -> close.
    await hass.async_add_executor_job(acc.char_target_position.client_update_value, 25)
    await hass.async_block_till_done()
    assert call_close_cover
    assert call_close_cover[0].data[ATTR_ENTITY_ID] == entity_id
    assert acc.char_current_position.value == 0
    assert acc.char_target_position.value == 0
    assert acc.char_position_state.value == 2
    assert len(events) == 1
    assert events[-1].data[ATTR_VALUE] is None
    # Target above 50 -> open.
    await hass.async_add_executor_job(acc.char_target_position.client_update_value, 90)
    await hass.async_block_till_done()
    assert call_open_cover
    assert call_open_cover[0].data[ATTR_ENTITY_ID] == entity_id
    assert acc.char_current_position.value == 100
    assert acc.char_target_position.value == 100
    assert acc.char_position_state.value == 2
    assert len(events) == 2
    assert events[-1].data[ATTR_VALUE] is None
    # With SUPPORT_STOP a mid-range target (here 55) stops at 50 instead.
    await hass.async_add_executor_job(acc.char_target_position.client_update_value, 55)
    await hass.async_block_till_done()
    assert call_stop_cover
    assert call_stop_cover[0].data[ATTR_ENTITY_ID] == entity_id
    assert acc.char_current_position.value == 50
    assert acc.char_target_position.value == 50
    assert acc.char_position_state.value == 2
    assert len(events) == 3
    assert events[-1].data[ATTR_VALUE] is None
async def test_window_open_close_with_position_and_stop(hass, hk_driver, cls, events):
    """Test if accessory and HA are updated accordingly."""
    entity_id = "cover.stop_window"
    hass.states.async_set(
        entity_id,
        STATE_UNKNOWN,
        {ATTR_SUPPORTED_FEATURES: SUPPORT_STOP | SUPPORT_SET_POSITION},
    )
    acc = cls.window(hass, hk_driver, "Cover", entity_id, 2, None)
    await acc.run_handler()
    await hass.async_block_till_done()
    # Set from HomeKit
    call_stop_cover = async_mock_service(hass, DOMAIN, "stop_cover")
    # Writing 0 to the hold-position characteristic must NOT stop the cover.
    await hass.async_add_executor_job(acc.char_hold_position.client_update_value, 0)
    await hass.async_block_till_done()
    assert not call_stop_cover
    # Writing 1 triggers exactly one stop_cover service call.
    await hass.async_add_executor_job(acc.char_hold_position.client_update_value, 1)
    await hass.async_block_till_done()
    assert call_stop_cover
    assert call_stop_cover[0].data[ATTR_ENTITY_ID] == entity_id
    assert acc.char_hold_position.value == 1
    assert len(events) == 1
    assert events[-1].data[ATTR_VALUE] is None
async def test_window_basic_restore(hass, hk_driver, cls, events):
    """Test setting up an entity from state in the event registry."""
    hass.state = CoreState.not_running
    registry = await entity_registry.async_get_registry(hass)
    # Entity registered with no capability information at all.
    registry.async_get_or_create(
        "cover",
        "generic",
        "1234",
        suggested_object_id="simple",
    )
    # Entity registered with full capability / feature / device-class info.
    registry.async_get_or_create(
        "cover",
        "generic",
        "9012",
        suggested_object_id="all_info_set",
        capabilities={},
        supported_features=SUPPORT_STOP,
        device_class="mock-device-class",
    )
    hass.bus.async_fire(EVENT_HOMEASSISTANT_START, {})
    await hass.async_block_till_done()
    # Both registry entries must yield a working basic window accessory.
    acc = cls.window_basic(hass, hk_driver, "Cover", "cover.simple", 2, None)
    assert acc.category == 14
    assert acc.char_current_position is not None
    assert acc.char_target_position is not None
    assert acc.char_position_state is not None
    acc = cls.window_basic(hass, hk_driver, "Cover", "cover.all_info_set", 2, None)
    assert acc.category == 14
    assert acc.char_current_position is not None
    assert acc.char_target_position is not None
    assert acc.char_position_state is not None
async def test_window_restore(hass, hk_driver, cls, events):
    """Test setting up an entity from state in the event registry."""
    hass.state = CoreState.not_running
    registry = await entity_registry.async_get_registry(hass)
    # Entity registered with no capability information at all.
    registry.async_get_or_create(
        "cover",
        "generic",
        "1234",
        suggested_object_id="simple",
    )
    # Entity registered with full capability / feature / device-class info.
    registry.async_get_or_create(
        "cover",
        "generic",
        "9012",
        suggested_object_id="all_info_set",
        capabilities={},
        supported_features=SUPPORT_STOP,
        device_class="mock-device-class",
    )
    hass.bus.async_fire(EVENT_HOMEASSISTANT_START, {})
    await hass.async_block_till_done()
    # Both registry entries must yield a working positional window accessory.
    acc = cls.window(hass, hk_driver, "Cover", "cover.simple", 2, None)
    assert acc.category == 14
    assert acc.char_current_position is not None
    assert acc.char_target_position is not None
    assert acc.char_position_state is not None
    acc = cls.window(hass, hk_driver, "Cover", "cover.all_info_set", 2, None)
    assert acc.category == 14
    assert acc.char_current_position is not None
    assert acc.char_target_position is not None
    assert acc.char_position_state is not None
| |
#!/usr/bin/env python3
# Copyright (c) 2014-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test mining RPCs
- getmininginfo
- getblocktemplate proposal mode
- submitblock"""
import copy
from decimal import Decimal
from test_framework.blocktools import (
create_coinbase,
TIME_GENESIS_BLOCK,
)
from test_framework.messages import (
CBlock,
CBlockHeader,
BLOCK_HEADER_SIZE
)
from test_framework.mininode import (
P2PDataStore,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
connect_nodes,
)
from test_framework.script import CScriptNum
def assert_template(node, block, expect, rehash=True):
    """Propose *block* via getblocktemplate and check the node's verdict.

    When *rehash* is true the merkle root is recomputed first, so prior
    edits to the transaction list are reflected in the header.
    """
    if rehash:
        block.hashMerkleRoot = block.calc_merkle_root()
    request = {
        'data': block.serialize().hex(),
        'mode': 'proposal',
        'rules': ['segwit'],
    }
    assert_equal(node.getblocktemplate(template_request=request), expect)
class MiningTest(BitcoinTestFramework):
    """Functional test of the mining RPCs.

    Exercises getmininginfo, getblocktemplate in proposal mode,
    submitblock and submitheader on a fresh two-node regtest chain.
    """

    def set_test_params(self):
        self.num_nodes = 2
        self.setup_clean_chain = True
        self.supports_cli = False

    def mine_chain(self):
        """Mine 200 blocks with mocked timestamps spaced 600s apart."""
        self.log.info('Create some old blocks')
        for t in range(TIME_GENESIS_BLOCK, TIME_GENESIS_BLOCK + 200 * 600, 600):
            self.nodes[0].setmocktime(t)
            self.nodes[0].generate(1)
        mining_info = self.nodes[0].getmininginfo()
        assert_equal(mining_info['blocks'], 200)
        assert_equal(mining_info['currentblocktx'], 0)
        assert_equal(mining_info['currentblockweight'], 4000)
        # Restart clears the mocktime; then reconnect the two nodes.
        self.restart_node(0)
        connect_nodes(self.nodes[0], 1)

    def skip_test_if_missing_module(self):
        self.skip_if_no_wallet()

    def run_test(self):
        self.mine_chain()
        node = self.nodes[0]

        def assert_submitblock(block, result_str_1, result_str_2=None):
            # Submit twice: the second attempt must report the cached
            # rejection (defaults to 'duplicate-invalid').
            block.solve()
            result_str_2 = result_str_2 or 'duplicate-invalid'
            assert_equal(result_str_1, node.submitblock(hexdata=block.serialize().hex()))
            assert_equal(result_str_2, node.submitblock(hexdata=block.serialize().hex()))

        self.log.info('getmininginfo')
        mining_info = node.getmininginfo()
        assert_equal(mining_info['blocks'], 200)
        assert_equal(mining_info['chain'], self.chain)
        # These keys are only present while a block template exists.
        assert 'currentblocktx' not in mining_info
        assert 'currentblockweight' not in mining_info
        assert_equal(mining_info['difficulty'], Decimal('4.656542373906925E-10'))
        assert_equal(mining_info['networkhashps'], Decimal('0.003333333333333334'))
        assert_equal(mining_info['pooledtx'], 0)

        # Mine a block to leave initial block download
        node.generatetoaddress(1, node.get_deterministic_priv_key().address)
        tmpl = node.getblocktemplate({'rules': ['segwit']})
        self.log.info("getblocktemplate: Test capability advertised")
        assert 'proposal' in tmpl['capabilities']
        assert 'coinbasetxn' not in tmpl

        next_height = int(tmpl["height"])
        coinbase_tx = create_coinbase(height=next_height)
        # sequence numbers must not be max for nLockTime to have effect
        coinbase_tx.vin[0].nSequence = 2 ** 32 - 2
        coinbase_tx.rehash()

        # round-trip the encoded bip34 block height commitment
        assert_equal(CScriptNum.decode(coinbase_tx.vin[0].scriptSig), next_height)
        # round-trip negative and multi-byte CScriptNums to catch python regression
        assert_equal(CScriptNum.decode(CScriptNum.encode(CScriptNum(1500))), 1500)
        assert_equal(CScriptNum.decode(CScriptNum.encode(CScriptNum(-1500))), -1500)
        assert_equal(CScriptNum.decode(CScriptNum.encode(CScriptNum(-1))), -1)

        # Assemble a candidate block from the template fields.
        block = CBlock()
        block.nVersion = tmpl["version"]
        block.hashPrevBlock = int(tmpl["previousblockhash"], 16)
        block.nTime = tmpl["curtime"]
        block.nBits = int(tmpl["bits"], 16)
        block.nNonce = 0
        block.vtx = [coinbase_tx]

        self.log.info("getblocktemplate: segwit rule must be set")
        assert_raises_rpc_error(-8, "getblocktemplate must be called with the segwit rule set", node.getblocktemplate)

        self.log.info("getblocktemplate: Test valid block")
        assert_template(node, block, None)

        self.log.info("submitblock: Test block decode failure")
        assert_raises_rpc_error(-22, "Block decode failed", node.submitblock, block.serialize()[:-15].hex())

        self.log.info("getblocktemplate: Test bad input hash for coinbase transaction")
        bad_block = copy.deepcopy(block)
        bad_block.vtx[0].vin[0].prevout.hash += 1
        bad_block.vtx[0].rehash()
        assert_template(node, bad_block, 'bad-cb-missing')

        self.log.info("submitblock: Test invalid coinbase transaction")
        assert_raises_rpc_error(-22, "Block does not start with a coinbase", node.submitblock, bad_block.serialize().hex())

        self.log.info("getblocktemplate: Test truncated final transaction")
        assert_raises_rpc_error(-22, "Block decode failed", node.getblocktemplate, {'data': block.serialize()[:-1].hex(), 'mode': 'proposal', 'rules': ['segwit']})

        self.log.info("getblocktemplate: Test duplicate transaction")
        bad_block = copy.deepcopy(block)
        bad_block.vtx.append(bad_block.vtx[0])
        assert_template(node, bad_block, 'bad-txns-duplicate')
        assert_submitblock(bad_block, 'bad-txns-duplicate', 'bad-txns-duplicate')

        self.log.info("getblocktemplate: Test invalid transaction")
        bad_block = copy.deepcopy(block)
        bad_tx = copy.deepcopy(bad_block.vtx[0])
        bad_tx.vin[0].prevout.hash = 255
        bad_tx.rehash()
        bad_block.vtx.append(bad_tx)
        assert_template(node, bad_block, 'bad-txns-inputs-missingorspent')
        assert_submitblock(bad_block, 'bad-txns-inputs-missingorspent')

        self.log.info("getblocktemplate: Test nonfinal transaction")
        bad_block = copy.deepcopy(block)
        bad_block.vtx[0].nLockTime = 2 ** 32 - 1
        bad_block.vtx[0].rehash()
        assert_template(node, bad_block, 'bad-txns-nonfinal')
        assert_submitblock(bad_block, 'bad-txns-nonfinal')

        self.log.info("getblocktemplate: Test bad tx count")
        # The tx count is immediately after the block header
        bad_block_sn = bytearray(block.serialize())
        assert_equal(bad_block_sn[BLOCK_HEADER_SIZE], 1)
        bad_block_sn[BLOCK_HEADER_SIZE] += 1
        assert_raises_rpc_error(-22, "Block decode failed", node.getblocktemplate, {'data': bad_block_sn.hex(), 'mode': 'proposal', 'rules': ['segwit']})

        self.log.info("getblocktemplate: Test bad bits")
        bad_block = copy.deepcopy(block)
        bad_block.nBits = 469762303  # impossible in the real world
        assert_template(node, bad_block, 'bad-diffbits')

        self.log.info("getblocktemplate: Test bad merkle root")
        bad_block = copy.deepcopy(block)
        bad_block.hashMerkleRoot += 1
        assert_template(node, bad_block, 'bad-txnmrklroot', False)
        assert_submitblock(bad_block, 'bad-txnmrklroot', 'bad-txnmrklroot')

        self.log.info("getblocktemplate: Test bad timestamps")
        bad_block = copy.deepcopy(block)
        bad_block.nTime = 2 ** 31 - 1
        assert_template(node, bad_block, 'time-too-new')
        assert_submitblock(bad_block, 'time-too-new', 'time-too-new')
        bad_block.nTime = 0
        assert_template(node, bad_block, 'time-too-old')
        assert_submitblock(bad_block, 'time-too-old', 'time-too-old')

        self.log.info("getblocktemplate: Test not best block")
        bad_block = copy.deepcopy(block)
        bad_block.hashPrevBlock = 123
        assert_template(node, bad_block, 'inconclusive-not-best-prevblk')
        assert_submitblock(bad_block, 'prev-blk-not-found', 'prev-blk-not-found')

        self.log.info('submitheader tests')
        assert_raises_rpc_error(-22, 'Block header decode failed', lambda: node.submitheader(hexdata='xx' * BLOCK_HEADER_SIZE))
        assert_raises_rpc_error(-22, 'Block header decode failed', lambda: node.submitheader(hexdata='ff' * (BLOCK_HEADER_SIZE-2)))
        assert_raises_rpc_error(-25, 'Must submit previous header', lambda: node.submitheader(hexdata=super(CBlock, bad_block).serialize().hex()))

        block.nTime += 1
        block.solve()

        def chain_tip(b_hash, *, status='headers-only', branchlen=1):
            # Expected getchaintips entry for a tip at height 202.
            return {'hash': b_hash, 'height': 202, 'branchlen': branchlen, 'status': status}

        assert chain_tip(block.hash) not in node.getchaintips()
        node.submitheader(hexdata=block.serialize().hex())
        assert chain_tip(block.hash) in node.getchaintips()
        node.submitheader(hexdata=CBlockHeader(block).serialize().hex())  # Noop
        assert chain_tip(block.hash) in node.getchaintips()

        bad_block_root = copy.deepcopy(block)
        bad_block_root.hashMerkleRoot += 2
        bad_block_root.solve()
        assert chain_tip(bad_block_root.hash) not in node.getchaintips()
        node.submitheader(hexdata=CBlockHeader(bad_block_root).serialize().hex())
        assert chain_tip(bad_block_root.hash) in node.getchaintips()
        # Should still reject invalid blocks, even if we have the header:
        assert_equal(node.submitblock(hexdata=bad_block_root.serialize().hex()), 'bad-txnmrklroot')
        assert_equal(node.submitblock(hexdata=bad_block_root.serialize().hex()), 'bad-txnmrklroot')
        assert chain_tip(bad_block_root.hash) in node.getchaintips()
        # We know the header for this invalid block, so should just return early without error:
        node.submitheader(hexdata=CBlockHeader(bad_block_root).serialize().hex())
        assert chain_tip(bad_block_root.hash) in node.getchaintips()

        bad_block_lock = copy.deepcopy(block)
        bad_block_lock.vtx[0].nLockTime = 2**32 - 1
        bad_block_lock.vtx[0].rehash()
        bad_block_lock.hashMerkleRoot = bad_block_lock.calc_merkle_root()
        bad_block_lock.solve()
        assert_equal(node.submitblock(hexdata=bad_block_lock.serialize().hex()), 'bad-txns-nonfinal')
        assert_equal(node.submitblock(hexdata=bad_block_lock.serialize().hex()), 'duplicate-invalid')
        # Build a "good" block on top of the submitted bad block
        bad_block2 = copy.deepcopy(block)
        bad_block2.hashPrevBlock = bad_block_lock.sha256
        bad_block2.solve()
        assert_raises_rpc_error(-25, 'bad-prevblk', lambda: node.submitheader(hexdata=CBlockHeader(bad_block2).serialize().hex()))

        # Should reject invalid header right away
        bad_block_time = copy.deepcopy(block)
        bad_block_time.nTime = 1
        bad_block_time.solve()
        assert_raises_rpc_error(-25, 'time-too-old', lambda: node.submitheader(hexdata=CBlockHeader(bad_block_time).serialize().hex()))

        # Should ask for the block from a p2p node, if they announce the header as well:
        node.add_p2p_connection(P2PDataStore())
        node.p2p.wait_for_getheaders(timeout=5)  # Drop the first getheaders
        node.p2p.send_blocks_and_test(blocks=[block], node=node)
        # Must be active now:
        assert chain_tip(block.hash, status='active', branchlen=0) in node.getchaintips()

        # Building a few blocks should give the same results
        node.generatetoaddress(10, node.get_deterministic_priv_key().address)
        assert_raises_rpc_error(-25, 'time-too-old', lambda: node.submitheader(hexdata=CBlockHeader(bad_block_time).serialize().hex()))
        assert_raises_rpc_error(-25, 'bad-prevblk', lambda: node.submitheader(hexdata=CBlockHeader(bad_block2).serialize().hex()))
        node.submitheader(hexdata=CBlockHeader(block).serialize().hex())
        node.submitheader(hexdata=CBlockHeader(bad_block_root).serialize().hex())
        assert_equal(node.submitblock(hexdata=block.serialize().hex()), 'duplicate')  # valid
# Script entry point: run the functional test through the framework driver.
if __name__ == '__main__':
    MiningTest().main()
| |
''' Define some utils for all needs '''
import os
import re
import time
import yaml
import logging
from ts3observer import Configuration
from ts3observer.exc import NoConfigFileException, QueryFailedException, IncompletePlugin, KNOWN_TN_EIDS
def path(string):
    """Build an absolute path by appending *string* to the project base path."""
    return ts3o.base_path + string
def plugin_is_disabled(plugin_name):
    """Tell whether *plugin_name* is present in the config with enable off."""
    configured = ts3o.config['plugins']
    return plugin_name in configured and not configured[plugin_name]['enable']
def get_available_plugins():
    """Collect the names of all enabled plugin modules under plugins/."""
    names = []
    for entry in os.listdir(ts3o.base_path + '/plugins'):
        if not entry.endswith('.py') or entry == '__init__.py':
            continue
        name = os.path.splitext(entry)[0]
        if plugin_is_disabled(name):
            continue
        names.append(name)
    return names
def plugin_is_new(plugin_name):
    """A plugin counts as new while it has no config file under conf/."""
    config_file = '{}/conf/{}.yml'.format(ts3o.base_path, plugin_name)
    return not os.path.isfile(config_file)
def create_plugin_config(plugin_name, plugin_module):
    """Write the plugin's default YAML config and register it in ts3observer.yml."""
    plugin_cfg_path = '{}/conf/{}.yml'.format(ts3o.base_path, plugin_name)
    observer_cfg_path = '{}/conf/ts3observer.yml'.format(ts3o.base_path)
    with open(plugin_cfg_path, 'w') as cfg:
        cfg.write(yaml.dump(plugin_module.Config.yaml, default_flow_style=False))
    with open(observer_cfg_path, 'r') as ocfg:
        content = ocfg.read()
    with open(observer_cfg_path, 'w') as ncfg:
        ncfg.write(get_modified_config(content, plugin_name, plugin_module))
def get_modified_config(content, plugin_name, plugin_module):
    """Return *content* with *plugin_name* registered in the plugins section.

    The file is split at the NOCOMMENTS marker: everything above it (the
    commented header) is kept verbatim, the YAML below is parsed, updated
    with the plugin's enable/interval defaults, and dumped again.
    """
    key = '# !-NOCOMMENTS-!'
    # The marker is a literal string, so plain str.split suffices (the
    # original used re.split for no benefit).
    top, bottom = content.split(key, 1)
    top = top + key + '\n\n'
    # safe_load instead of yaml.load: the config is plain data, and
    # yaml.load without an explicit Loader is unsafe and deprecated.
    plugin_cfg = yaml.safe_load(bottom)
    plugin_cfg['plugins'].update({
        plugin_name: {
            'enable': plugin_module.Config.enable,
            'interval': plugin_module.Config.interval,
        }
    })
    return top + yaml.dump(plugin_cfg, default_flow_style=False)
def get_plugin_config(plugin_name):
    """Load and return the parsed YAML config for *plugin_name*."""
    with open('{}/conf/{}.yml'.format(ts3o.base_path, plugin_name), 'r') as cfg:
        # safe_load: config files are plain data; yaml.load without a
        # Loader argument is unsafe and deprecated since PyYAML 5.1.
        return yaml.safe_load(cfg.read())
def check_plugin_data(plugin_name, plugin_module, plugin_object):
    """Validate that a plugin module exposes the required Meta/Config members.

    Raises IncompletePlugin when a class, attribute, or attribute type is
    missing or wrong; returns None when everything checks out.
    """
    required = {
        'Meta': {'author_name': str, 'author_email': str, 'version': str},
        'Config': {'enable': bool, 'interval': int, 'yaml': dict},
    }
    for cls_name, attrs in required.items():
        if not hasattr(plugin_module, cls_name):
            raise IncompletePlugin(plugin_name, '{}class'.format(cls_name))
        cls_obj = getattr(plugin_module, cls_name)
        for attr_name, expected_type in attrs.items():
            if not hasattr(cls_obj, attr_name):
                raise IncompletePlugin(plugin_name, '{}class \'{}\''.format(cls_name, attr_name))
            if not isinstance(getattr(cls_obj, attr_name), expected_type):
                raise IncompletePlugin(plugin_name, '{}class \'{}\' is not an \'{}\' instance'.format(cls_name, attr_name, expected_type))
def get_and_set_global_config():
    """Load the global configuration file and attach it to the ts3o object.

    Only call after logging has been configured.
    """
    config_file = path('/conf/ts3observer.yml')
    try:
        ts3o.config = Configuration(config_file)
    except IOError:
        raise NoConfigFileException()
def get_loglevel():
    """Map the command-line verbosity flags to a logging level."""
    if ts3o.args.verbose:
        return logging.DEBUG
    return logging.CRITICAL if ts3o.args.quiet else logging.INFO
def control_cycles(start_timestamp, end_timestamp):
    """Throttle the main loop to roughly one cycle per second.

    Sleeps away whatever part of the one-second cycle interval the work
    between *start_timestamp* and *end_timestamp* did not use up.
    """
    cycle_interval = 1
    needed_time = end_timestamp - start_timestamp
    # Fix: the original declared cycle_interval but hard-coded 1 twice,
    # leaving the named constant unused.
    if needed_time < cycle_interval:
        time.sleep(cycle_interval - needed_time)
class TelnetUtils(object):
    """Helpers for parsing and validating telnet query traffic."""

    @staticmethod
    def string_to_dict(arg_str):
        """Split a server reply into a key/value property dict.

        Pairs look like ``key=value``; bare tokens are stored with None.
        The leading 'error id' is normalized to 'error_id' first.
        """
        properties = {}
        normalized = arg_str.replace('error id', 'error_id', 1)
        for token in normalized.split(' '):
            key, sep, value = token.partition('=')
            properties[key] = value if sep else None
        return properties

    @staticmethod
    def check_dev_modus(fn):
        """Decorator: in --dev mode, log the command instead of sending it."""
        def wrapper(self, *args, **kwargs):
            if not ts3o.args.dev:
                return fn(self, *args, **kwargs)
            logging.debug(args[0])
            return ''
        return wrapper

    @staticmethod
    def validate_result(command, result):
        """Return *result* unchanged, raising when the server reported an error."""
        if 'msg=ok' in result:
            return result
        response = TelnetUtils.string_to_dict(result)
        error_id = int(response['error_id'])
        error_msg = Escaper.decode(response['msg'])
        if error_id in KNOWN_TN_EIDS:
            raise KNOWN_TN_EIDS[error_id](command)
        raise QueryFailedException(msg='ErrorID: {}, ErrorMsg: \'{}\''.format(error_id, error_msg))

    @staticmethod
    def remove_linebreaks(string):
        """Replace newline and carriage-return characters with spaces."""
        for linebreak in ('\n', '\r'):
            string = string.replace(linebreak, ' ')
        return string
class Escaper(object):
    """Translate between TeamSpeak 3 query escaping and plain text.

    Official documentation found here:
    http://media.teamspeak.com/ts3_literature/TeamSpeak%203%20Server%20Query%20Manual.pdf
    """
    # Insertion order matters for encode(): the backslash entry comes first
    # so the replacements that introduce backslashes are not re-escaped.
    escapetable = {
        r'\\': chr(92),   # backslash
        r'\/': chr(47),   # slash
        r'\s': chr(32),   # space
        r'\p': chr(124),  # pipe
        r'\a': chr(7),    # bell
        r'\b': chr(8),    # backspace
        r'\f': chr(12),   # form feed
        r'\n': chr(10),   # newline
        r'\r': chr(13),   # carriage return
        r'\t': chr(9),    # horizontal tab
        r'\v': chr(11),   # vertical tab
    }

    @classmethod
    def encode(cls, string):
        """Escape every special character in a plain string."""
        for escaped, plain in cls.escapetable.items():
            string = string.replace(plain, escaped)
        return string

    @staticmethod
    def encode_attr(*args):
        """Decorator factory: escape the named keyword arguments before the call."""
        def attr_encoder(fn):
            def wrapper(*func_args, **func_kwargs):
                for name in args:
                    func_kwargs[name] = Escaper.encode(func_kwargs[name])
                return fn(*func_args, **func_kwargs)
            return wrapper
        return attr_encoder

    @classmethod
    def decode(cls, string):
        """Turn an escaped query string back into plain text."""
        for escaped, plain in cls.escapetable.items():
            string = string.replace(escaped, plain)
        return string

    @staticmethod
    def decode_attr(*args):
        """Decorator factory: unescape the named keyword arguments before the call."""
        def attr_decoder(fn):
            def wrapper(*func_args, **func_kwargs):
                for name in args:
                    func_kwargs[name] = Escaper.decode(func_kwargs[name])
                return fn(*func_args, **func_kwargs)
            return wrapper
        return attr_decoder
| |
import feedparser
from datetime import datetime
import time
import socket
from threading import Thread
import json
import os
from cl_email import CL_Email
from cl_post import CL_Post
from log import Logging
def CheckNotityInterval(notification_intervals):
    """Clear the first due notification flag and report whether one was due.

    Scans the interval table; when a flagged interval's time has passed its
    flag is cleared and True is returned. At most one flag changes per call.
    (Name typo kept: callers reference CheckNotityInterval.)
    """
    for notify_time, notify_sent_flag in notification_intervals.items():
        if notify_sent_flag and UnixTime(notify_time) <= int(time.time()):
            notification_intervals[notify_time] = False
            return True
    return False
def NewPost(post, data_config, cl_listings):
    """Decide whether *post* is new and recent enough to be collected.

    A post is rejected when its id already appears in *cl_listings*;
    otherwise its publish time is compared against the sorted notification
    intervals to decide whether it falls into a pending window.
    """
    timestamp = UnixTime(post['published'])
    # Reject exact duplicates of already-stored posts.
    for stored_post in cl_listings:
        if post['id'] == stored_post.id:
            Log.log('Duplicate ' + post['title'])
            return False
    # Intervals sorted by their time-of-day, earliest first.
    notify_pair = [(k, v) for k, v in data_config['notification_intervals'].items()]
    notify_pair = sorted(notify_pair, key=lambda x: (UnixTime(x[0])))
    first_notify = notify_pair[0]
    last_notify = notify_pair[len(notify_pair) - 1]
    for notify_time, notify_sent_flag in notify_pair:
        if notify_sent_flag:
            if notify_time != first_notify[0]:
                # Accept posts published after the previous (already fired)
                # interval boundary.
                prior_interval = notify_pair[notify_pair.index((notify_time, notify_sent_flag)) - 1]
                if not prior_interval[1] and notify_sent_flag:
                    if timestamp > UnixTime(prior_interval[0]):
                        return True
            elif notify_time == first_notify[0]:
                # Earliest interval: accept anything within 24h of the last one.
                if timestamp >= UnixTime(last_notify[0]) - 86400:
                    return True
            # NOTE(review): with indentation lost in the original layout it is
            # ambiguous which `if` this final branch attaches to; as written
            # here it is unreachable because the two branches above cover all
            # cases — confirm the intended nesting against the upstream repo.
            elif timestamp >= UnixTime(notify_time):
                return True
    return False
def UnixTime(time_element):
    """Convert an ISO-8601 timestamp (or bare time of day) to epoch seconds.

    *time_element* is either a full '%Y-%m-%dT%H:%M:%S+HH:MM' string as
    found in the RSS feed, or just a time of day, in which case today's
    date is assumed. The last ':' is stripped because strptime's %z
    canonical form is +-HHMM.
    """
    try:
        ts = datetime.strptime(''.join(time_element.rsplit(':', 1)), '%Y-%m-%dT%H:%M:%S%z')
    except ValueError:
        today = datetime.now().strftime('%Y-%m-%d') + 'T' + time_element
        ts = datetime.strptime(''.join(today.rsplit(':', 1)), '%Y-%m-%dT%H:%M:%S%z')
    # Fix: strftime("%s") is a non-portable glibc extension that ignores the
    # parsed UTC offset; timestamp() converts the aware datetime correctly.
    return int(ts.timestamp())
def ImageFilter(post, data_config):
    """Accept a post unless images are required and the enclosure data is absent."""
    if not data_config['require_image']:
        return True
    if 'enc_enclosure' not in post:
        Log.log('Filtered ' + post['title'] + ' // enc_enclosure missing')
        return False
    enclosure = post['enc_enclosure']
    if 'resource' not in enclosure:
        Log.log('Filtered ' + post['title'] + ' // enc_enclosure/resource missing')
        return False
    if 'images' not in enclosure['resource']:
        Log.log('Filtered ' + post['title'] + ' // enc_enclosure/resource/images missing')
        return False
    return True
def PriceFilter(post, data_config):
    """Keep posts whose trailing ';<price>' falls inside the configured range.

    On success the parsed price is stored on the post under 'price'.
    """
    split_title = post['title'].rsplit(';', 1)
    if len(split_title) <= 1:
        Log.log('Filtered ' + post['title'] + ' // no price in post')
        return False
    price = int(split_title[-1])
    if int(data_config['minimum_price']) > price:
        Log.log('Filtered ' + post['title'] + ' // Price too low, $' + str(price))
        return False
    if int(data_config['maximum_price']) < price:
        Log.log('Filtered ' + post['title'] + ' // Price too high, $' + str(price))
        return False
    post['price'] = price
    return True
def KeywordFilter(post, data_config):
    """Keep posts matching enough configured keywords in title or summary.

    On success the matched (lowercased, de-duplicated) keywords are stored
    on the post under 'keyword_matches'.
    """
    title = post['title'].lower()
    summary = post['summary'].lower()
    keyword_matches = []
    for keyword in data_config['keywords']:
        lowered = keyword.lower()
        if lowered in keyword_matches:
            continue
        if lowered in title or lowered in summary:
            keyword_matches.append(lowered)
    if len(keyword_matches) >= int(data_config['minimum_keyword_match']) or len(keyword_matches) == len(data_config['keywords']):
        post['keyword_matches'] = keyword_matches
        return True
    Log.log('Filtered ' + post['title'] + ', insufficient keyword matches')
    return False
def ParseFeed(feed, data_config, cl_listings):
    """Run every feed item through the filter chain and collect new posts."""
    new_posts = 0
    for post in feed['items']:
        if not ImageFilter(post, data_config):
            continue
        if not PriceFilter(post, data_config):
            continue
        if not NewPost(post, data_config, cl_listings):
            continue
        if not KeywordFilter(post, data_config):
            continue
        # Drop everything from the first '&#x' entity onwards.
        post['title'] = post['title'].split('&#x', 1)[0]
        cl_listings.append(CL_Post(post))
        new_posts += 1
    Log.log(str(new_posts) + ' new posts detected')
def PullFeeds(location, category, result, index):
    """Fetch one craigslist RSS feed into result[index] (thread target)."""
    url = 'http://' + location + '.craigslist.org/search/' + category + '?format=rss'
    result[index] = feedparser.parse(url)
def UpdateIntervals(notify_pair):
    """Rebuild the interval table: past times get False, future times True."""
    return {
        notify_time: UnixTime(notify_time) > time.time()
        for notify_time in notify_pair
    }
def LoadJson(file_path):
    """Parse the JSON file at *file_path*; print and return None on I/O error.

    Fixes: the redundant f.close() inside the with-block is gone, and the
    error path returns None explicitly instead of falling off the end.
    """
    try:
        with open(file_path, 'r') as f:
            return json.load(f)
    except IOError as err:
        print(err)
        return None
def WriteJson(file_path, content):
    """Serialize *content* to *file_path* as pretty-printed, key-sorted JSON.

    Lists are assumed to hold objects whose __dict__ is serializable,
    strings are re-parsed and re-dumped, dicts are dumped directly.
    Fixes: isinstance() instead of type() == comparisons, and the redundant
    f.close() inside the with-block is removed.
    """
    with open(file_path, 'w') as f:
        if isinstance(content, list):
            f.write(json.dumps([item.__dict__ for item in content], indent=4, sort_keys=True))
        elif isinstance(content, str):
            f.write(json.dumps(json.loads(content), indent=4, sort_keys=True))
        elif isinstance(content, dict):
            f.write(json.dumps(content, indent=4, sort_keys=True))
def IsEmpty(file_path):
    """Return True when the file at *file_path* has zero length."""
    return os.stat(file_path).st_size == 0
def MakeEmpty(file_path):
    """Truncate the file at *file_path* to zero bytes (creating it if absent)."""
    # Opening in 'w' mode truncates; the with-block closes the handle, so
    # the original's explicit f.close() inside the block was redundant.
    with open(file_path, 'w'):
        pass
def main():
    """One polling pass: load config, fetch all feeds in parallel, filter,
    notify by email when due, and persist state back to disk."""
    data_config_file = 'data_config.json'
    email_config_file = 'email_config.json'
    stored_posts_file = 'stored_posts.json'
    # One log file per run, timestamped at startup.
    log_file = datetime.now().strftime('%Y-%m-%dT%H:%M:%S%z') + '.log'
    # The logging handle is shared module-wide: ParseFeed and the filter
    # helpers all call Log.log().
    global Log
    Log = Logging(log_file)
    data_config = LoadJson(data_config_file)
    email_config = LoadJson(email_config_file)
    if int(data_config['logging_enabled']):
        Log.start()
    cl_listings = []
    # Re-import previously stored posts so duplicate detection sees them.
    if not IsEmpty(stored_posts_file):
        sp = LoadJson(stored_posts_file)
        [cl_listings.append(CL_Post(stored_post)) for stored_post in sp]
        Log.log('Imported ' + str(len(cl_listings)) + ' saved posts')
    socket.setdefaulttimeout(10)
    # One worker thread per (location, category) feed combination.
    threads_required = 0
    for _ in data_config['locations']:
        for __ in data_config['categories']:
            threads_required += 1
    threads = [None] * threads_required
    results = [None] * threads_required
    index = 0
    for location in data_config['locations']:
        for category in data_config['categories']:
            threads[index] = Thread(target=PullFeeds, args=(location, category, results, index))
            threads[index].start()
            index += 1
    [threads[i].join() for i in range(threads_required)]
    [ParseFeed(feed, data_config, cl_listings) for feed in results]
    if len(cl_listings) > 0:
        # NOTE(review): 'CheckNotityInterval' looks like a typo for
        # 'CheckNotifyInterval' — presumably the definition elsewhere in
        # this file uses the same spelling; confirm before renaming.
        if CheckNotityInterval(data_config['notification_intervals']):
            email = CL_Email(email_config)
            email.write(cl_listings)
            email.send()
            Log.log('Email sent to ' + str(email.recipient))
            # After a successful notification the stored backlog is cleared.
            if not IsEmpty(stored_posts_file):
                MakeEmpty(stored_posts_file)
                Log.log('Emptied contents of ' + str(stored_posts_file))
        else:
            # Not yet time to notify: stash the posts for the next pass.
            Log.log('Storing posts to ' + str(stored_posts_file))
            WriteJson(stored_posts_file, cl_listings)
            Log.log('Successful write to ' + str(stored_posts_file))
    else:
        Log.log('No new posts detected')
    data_config['notification_intervals'] = UpdateIntervals(data_config['notification_intervals'])
    WriteJson(data_config_file, data_config)
    Log.log('Updated contents of ' + str(data_config_file))
# Entry point: poll forever, one pass per hour.
if __name__ == '__main__':
    while True:
        main()
        time.sleep(3600)
| |
from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
import numpy.testing as npt
from scipy import integrate
from scipy import stats
from common_tests import (check_normalization, check_moment, check_mean_expect,
check_var_expect, check_skew_expect, check_kurt_expect,
check_entropy, check_private_entropy, NUMPY_BELOW_1_7,
check_edge_support, check_named_args, check_random_state_property)
from scipy.stats._distr_params import distcont
"""
Test all continuous distributions.
Parameters were chosen for those distributions that pass the
Kolmogorov-Smirnov test. This provides safe parameters for each
distribution so that we can perform further testing of class methods.
These tests currently check only/mostly for serious errors and exceptions,
not for numerically exact results.
"""
# Shared precision for the assert_almost_equal checks below.
DECIMAL = 5 # specify the precision of the tests # increased from 0 to 5
## Last four of these fail all around. Need to be checked
# Extra (distribution, shape-params) cases kept out of the main distcont list.
distcont_extra = [
    ['betaprime', (100, 86)],
    ['fatiguelife', (5,)],
    ['mielke', (4.6420495492121487, 0.59707419545516938)],
    ['invweibull', (0.58847112119264788,)],
    # burr: sample mean test fails still for c<1
    ['burr', (0.94839838075366045, 4.3820284068855795)],
    # genextreme: sample mean test, sf-logsf test fail
    ['genextreme', (3.3184017469423535,)],
]
# for testing only specific functions
# distcont = [
##    ['fatiguelife', (29,)],   #correction numargs = 1
##    ['loggamma', (0.41411931826052117,)]]
# for testing ticket:767
# distcont = [
##    ['genextreme', (3.3184017469423535,)],
##    ['genextreme', (0.01,)],
##    ['genextreme', (0.00001,)],
##    ['genextreme', (0.0,)],
##    ['genextreme', (-0.01,)]
##    ]
#        distcont = [['gumbel_l', ()],
##                ['gumbel_r', ()],
##                ['norm', ()]
##                ]
#        distcont = [['norm', ()]]
# Distributions whose rvs get an additional KS test in test_cont_basic*.
distmissing = ['wald', 'gausshyper', 'genexpon', 'rv_continuous',
               'loglaplace', 'rdist', 'semicircular', 'invweibull', 'ksone',
               'cosine', 'kstwobign', 'truncnorm', 'mielke', 'recipinvgauss', 'levy',
               'johnsonsu', 'levy_l', 'powernorm', 'wrapcauchy',
               'johnsonsb', 'truncexpon', 'rice', 'invgauss', 'invgamma',
               'powerlognorm']
# NOTE(review): distmiss appears unused in this module.
distmiss = [[dist,args] for dist,args in distcont if dist in distmissing]
# Slow distributions are deferred to the @slow test below.
distslow = ['rdist', 'gausshyper', 'recipinvgauss', 'ksone', 'genexpon',
            'vonmises', 'vonmises_line', 'mielke', 'semicircular',
            'cosine', 'invweibull', 'powerlognorm', 'johnsonsu', 'kstwobign']
# distslow are sorted by speed (very slow to slow)
# NB: not needed anymore?
def _silence_fp_errors(func):
# warning: don't apply to test_ functions as is, then those will be skipped
def wrap(*a, **kw):
olderr = np.seterr(all='ignore')
try:
return func(*a, **kw)
finally:
np.seterr(**olderr)
wrap.__name__ = func.__name__
return wrap
def test_cont_basic():
    """Generator test: basic sanity checks for every fast continuous
    distribution (slow ones are covered by test_cont_basic_slow)."""
    # this test skips slow distributions
    with warnings.catch_warnings():
        warnings.filterwarnings('ignore', category=integrate.IntegrationWarning)
        for distname, arg in distcont[:]:
            if distname in distslow:
                continue
            # Fixed: identity comparison (`is`) against a string literal is
            # implementation-dependent; equality is the correct check.
            if distname == 'levy_stable':
                continue
            distfn = getattr(stats, distname)
            np.random.seed(765456)
            sn = 500
            rvs = distfn.rvs(size=sn, *arg)
            sm = rvs.mean()
            sv = rvs.var()
            m, v = distfn.stats(*arg)
            yield check_sample_meanvar_, distfn, arg, m, v, sm, sv, sn, \
                distname + 'sample mean test'
            yield check_cdf_ppf, distfn, arg, distname
            yield check_sf_isf, distfn, arg, distname
            yield check_pdf, distfn, arg, distname
            yield check_pdf_logpdf, distfn, arg, distname
            yield check_cdf_logcdf, distfn, arg, distname
            yield check_sf_logsf, distfn, arg, distname
            if distname in distmissing:
                alpha = 0.01
                yield check_distribution_rvs, distname, arg, alpha, rvs
            locscale_defaults = (0, 1)
            meths = [distfn.pdf, distfn.logpdf, distfn.cdf, distfn.logcdf,
                     distfn.logsf]
            # make sure arguments are within support
            spec_x = {'frechet_l': -0.5, 'weibull_max': -0.5, 'levy_l': -0.5,
                      'pareto': 1.5, 'tukeylambda': 0.3}
            x = spec_x.get(distname, 0.5)
            yield check_named_args, distfn, x, arg, locscale_defaults, meths
            yield check_random_state_property, distfn, arg
            # Entropy
            skp = npt.dec.skipif
            yield check_entropy, distfn, arg, distname
            if distfn.numargs == 0:
                yield skp(NUMPY_BELOW_1_7)(check_vecentropy), distfn, arg
            if distfn.__class__._entropy != stats.rv_continuous._entropy:
                yield check_private_entropy, distfn, arg, stats.rv_continuous
            yield check_edge_support, distfn, arg
            knf = npt.dec.knownfailureif
            yield knf(distname == 'truncnorm')(check_ppf_private), distfn, \
                arg, distname
@npt.dec.slow
def test_cont_basic_slow():
    """Generator test: same sanity checks as test_cont_basic, but for the
    distributions listed in distslow."""
    # same as above for slow distributions
    with warnings.catch_warnings():
        warnings.filterwarnings('ignore', category=integrate.IntegrationWarning)
        for distname, arg in distcont[:]:
            if distname not in distslow:
                continue
            # Fixed: `is` against a string literal relies on CPython
            # interning; use equality.
            if distname == 'levy_stable':
                continue
            distfn = getattr(stats, distname)
            np.random.seed(765456)
            sn = 500
            rvs = distfn.rvs(size=sn,*arg)
            sm = rvs.mean()
            sv = rvs.var()
            m, v = distfn.stats(*arg)
            yield check_sample_meanvar_, distfn, arg, m, v, sm, sv, sn, \
                distname + 'sample mean test'
            yield check_cdf_ppf, distfn, arg, distname
            yield check_sf_isf, distfn, arg, distname
            yield check_pdf, distfn, arg, distname
            yield check_pdf_logpdf, distfn, arg, distname
            yield check_cdf_logcdf, distfn, arg, distname
            yield check_sf_logsf, distfn, arg, distname
            # yield check_oth, distfn, arg # is still missing
            if distname in distmissing:
                alpha = 0.01
                yield check_distribution_rvs, distname, arg, alpha, rvs
            locscale_defaults = (0, 1)
            meths = [distfn.pdf, distfn.logpdf, distfn.cdf, distfn.logcdf,
                     distfn.logsf]
            # make sure arguments are within support
            x = 0.5
            if distname == 'invweibull':
                arg = (1,)
            elif distname == 'ksone':
                arg = (3,)
            yield check_named_args, distfn, x, arg, locscale_defaults, meths
            yield check_random_state_property, distfn, arg
            # Entropy
            skp = npt.dec.skipif
            ks_cond = distname in ['ksone', 'kstwobign']
            yield skp(ks_cond)(check_entropy), distfn, arg, distname
            if distfn.numargs == 0:
                yield skp(NUMPY_BELOW_1_7)(check_vecentropy), distfn, arg
            if distfn.__class__._entropy != stats.rv_continuous._entropy:
                yield check_private_entropy, distfn, arg, stats.rv_continuous
            yield check_edge_support, distfn, arg
@npt.dec.slow
def test_moments():
    """Generator test: compare stats(moments='mvsk') with the corresponding
    expectation integrals for every distribution in distcont."""
    with warnings.catch_warnings():
        warnings.filterwarnings('ignore', category=integrate.IntegrationWarning)
        knf = npt.dec.knownfailureif
        fail_normalization = set(['vonmises', 'ksone'])
        fail_higher = set(['vonmises', 'ksone', 'ncf'])
        for distname, arg in distcont[:]:
            # Fixed: was `distname is 'levy_stable'` — identity on a string
            # literal; equality is the correct check.
            if distname == 'levy_stable':
                continue
            distfn = getattr(stats, distname)
            m, v, s, k = distfn.stats(*arg, moments='mvsk')
            cond1, cond2 = distname in fail_normalization, distname in fail_higher
            msg = distname + ' fails moments'
            yield knf(cond1, msg)(check_normalization), distfn, arg, distname
            yield knf(cond2, msg)(check_mean_expect), distfn, arg, m, distname
            yield knf(cond2, msg)(check_var_expect), distfn, arg, m, v, distname
            yield knf(cond2, msg)(check_skew_expect), distfn, arg, m, v, s, \
                distname
            yield knf(cond2, msg)(check_kurt_expect), distfn, arg, m, v, k, \
                distname
            yield check_loc_scale, distfn, arg, m, v, distname
            yield check_moment, distfn, arg, m, v, distname
def check_sample_meanvar_(distfn, arg, m, v, sm, sv, sn, msg):
    """Run the mean and variance consistency checks when the corresponding
    population moment is not infinite."""
    # this did not work, skipped silently by nose
    # Deliberately `not isinf` rather than `isfinite`: a NaN moment still
    # runs the check, exactly as the original behaved.
    mean_defined = not np.isinf(m)
    var_defined = not np.isinf(v)
    if mean_defined:
        check_sample_mean(sm, sv, sn, m)
    if var_defined:
        check_sample_var(sv, sn, v)
def check_sample_mean(sm,v,n, popmean):
    """One-sample t-test: assert the sample mean sm (with sample variance v,
    sample size n) is statistically consistent with popmean at p > 0.01."""
    # from stats.stats.ttest_1samp(a, popmean):
    # Calculates the t-obtained for the independent samples T-test on ONE group
    # of scores a, given a population mean.
    #
    # Returns: t-value, two-tailed prob
    df = n-1
    svar = ((n-1)*v) / float(df) # looks redundant: svar == v
    t = (sm-popmean) / np.sqrt(svar*(1.0/n))
    # Two-tailed probability via the incomplete beta function.
    # NOTE(review): stats.betai was deprecated/removed in later scipy;
    # scipy.special.betainc is the modern equivalent — confirm the target
    # scipy version before relying on this.
    prob = stats.betai(0.5*df, 0.5, df/(df+t*t))
    # return t,prob
    npt.assert_(prob > 0.01, 'mean fail, t,prob = %f, %f, m, sm=%f,%f' %
                (t, prob, popmean, sm))
def check_sample_var(sv,n, popvar):
    """Two-sided chi-square test that the sample variance sv (sample size n)
    is consistent with the population variance popvar at p > 0.01.

    Bug fix: the statistic previously read (n-1)*popvar/float(popvar),
    which is identically n-1, so the test could never detect a variance
    mismatch.  The sample variance sv belongs in the numerator.
    """
    # two-sided chisquare test for sample variance equal to hypothesized variance
    df = n-1
    chi2 = (n-1)*sv/float(popvar)
    # NOTE(review): stats.chisqprob was deprecated/removed in later scipy
    # (stats.distributions.chi2.sf is the modern spelling).
    pval = stats.chisqprob(chi2,df)*2
    npt.assert_(pval > 0.01, 'var fail, t, pval = %f, %f, v, sv=%f, %f' %
                (chi2,pval,popvar,sv))
def check_cdf_ppf(distfn,arg,msg):
    """cdf(ppf(q)) must round-trip to q at low, mid and high quantiles."""
    quantiles = [0.001, 0.5, 0.999]
    roundtrip = distfn.cdf(distfn.ppf(quantiles, *arg), *arg)
    npt.assert_almost_equal(roundtrip, quantiles, decimal=DECIMAL,
                            err_msg=msg + ' - cdf-ppf roundtrip')
def check_sf_isf(distfn,arg,msg):
    """sf(isf(q)) must round-trip, and cdf + sf must sum to one."""
    probs = [0.1, 0.5, 0.9]
    roundtrip = distfn.sf(distfn.isf(probs, *arg), *arg)
    npt.assert_almost_equal(roundtrip, probs, decimal=DECIMAL,
                            err_msg=msg + ' - sf-isf roundtrip')
    pts = [0.1, 0.9]
    npt.assert_almost_equal(distfn.cdf(pts, *arg),
                            1.0 - distfn.sf(pts, *arg),
                            decimal=DECIMAL,
                            err_msg=msg + ' - cdf-sf relationship')
def check_pdf(distfn, arg, msg):
    """Compare the pdf at the median against a central finite difference
    of the cdf."""
    eps = 1e-6
    x = distfn.ppf(0.5, *arg)
    pdf_at_x = distfn.pdf(x, *arg)
    if (pdf_at_x < 1e-4) or (pdf_at_x > 1e4):
        # avoid a point where the pdf is near zero or blowing up (singularity)
        x = x + 0.1
        pdf_at_x = distfn.pdf(x, *arg)
    slope = (distfn.cdf(x + eps, *arg) -
             distfn.cdf(x - eps, *arg))/eps/2.0
    # a symmetric two-point difference works well enough here
    npt.assert_almost_equal(pdf_at_x, slope,
                            decimal=DECIMAL, err_msg=msg + ' - cdf-pdf relationship')
def check_pdf_logpdf(distfn, args, msg):
    """log(pdf) and logpdf must agree at a spread of interior quantiles."""
    quantiles = np.array([0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8])
    xs = distfn.ppf(quantiles, *args)
    dens = distfn.pdf(xs, *args)
    log_dens = distfn.logpdf(xs, *args)
    # drop zero densities and non-finite logs so np.log is well-defined
    dens = dens[dens != 0]
    log_dens = log_dens[np.isfinite(log_dens)]
    npt.assert_almost_equal(np.log(dens), log_dens, decimal=7,
                            err_msg=msg + " - logpdf-log(pdf) relationship")
def check_sf_logsf(distfn, args, msg):
    """log(sf) and logsf must agree at a spread of interior quantiles."""
    quantiles = np.array([0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8])
    xs = distfn.ppf(quantiles, *args)
    surv = distfn.sf(xs, *args)
    log_surv = distfn.logsf(xs, *args)
    # drop zero survival values and non-finite logs before comparing
    surv = surv[surv != 0]
    log_surv = log_surv[np.isfinite(log_surv)]
    npt.assert_almost_equal(np.log(surv), log_surv, decimal=7,
                            err_msg=msg + " - logsf-log(sf) relationship")
def check_cdf_logcdf(distfn, args, msg):
    """log(cdf) and logcdf must agree at a spread of interior quantiles."""
    quantiles = np.array([0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8])
    xs = distfn.ppf(quantiles, *args)
    cum = distfn.cdf(xs, *args)
    log_cum = distfn.logcdf(xs, *args)
    # drop zero cdf values and non-finite logs before comparing
    cum = cum[cum != 0]
    log_cum = log_cum[np.isfinite(log_cum)]
    npt.assert_almost_equal(np.log(cum), log_cum, decimal=7,
                            err_msg=msg + " - logcdf-log(cdf) relationship")
def check_distribution_rvs(dist, args, alpha, rvs):
    """KS-test the provided sample against the named distribution.

    When the provided sample happens to fail at level alpha, retry once
    with a sample freshly generated by kstest before asserting.
    """
    # test from scipy.stats.tests
    # this version reuses existing random variables
    D, pval = stats.kstest(rvs, dist, args=args, N=1000)
    if pval >= alpha:
        return
    # unlucky first draw: let kstest generate its own sample this time
    D, pval = stats.kstest(dist, '', args=args, N=1000)
    npt.assert_(pval > alpha, "D = " + str(D) + "; pval = " + str(pval) +
                "; alpha = " + str(alpha) + "\nargs = " + str(args))
def check_vecentropy(distfn, args):
    """The vectorized entropy must equal the private _entropy value."""
    expected = distfn._entropy(*args)
    npt.assert_equal(distfn.vecentropy(*args), expected)
@npt.dec.skipif(NUMPY_BELOW_1_7)
def check_loc_scale(distfn, arg, m, v, msg):
    """stats() must transform linearly under loc/scale:
    mean -> m*scale + loc and variance -> v*scale**2."""
    loc, scale = 10.0, 10.0
    shifted_m, shifted_v = distfn.stats(loc=loc, scale=scale, *arg)
    npt.assert_allclose(m * scale + loc, shifted_m)
    npt.assert_allclose(v * scale * scale, shifted_v)
def check_ppf_private(distfn, arg, msg):
    """The private _ppf must not produce NaN at representative quantiles.

    Fails by design for truncnorm (self.nb not defined).
    """
    qs = np.array([0.1, 0.5, 0.9])
    ppfs = distfn._ppf(qs, *arg)
    npt.assert_(not np.any(np.isnan(ppfs)), msg + 'ppf private is nan')
# Allow running this test module directly (nose-era entry point).
if __name__ == "__main__":
    npt.run_module_suite()
| |
# Copyright (c) 2015 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import mock
from neutron import manager
from neutron.plugins.ml2 import managers
from neutron.tests import base
from neutron_lib import context
from neutron_lib.plugins import directory
from networking_l2gw.db.l2gateway import l2gateway_db
from networking_l2gw.db.l2gateway.ovsdb import lib
from networking_l2gw.services.l2gateway.common import constants as n_const
from networking_l2gw.services.l2gateway.common import ovsdb_schema
from networking_l2gw.services.l2gateway.common import tunnel_calls
from networking_l2gw.services.l2gateway.ovsdb import data
from networking_l2gw.services.l2gateway.service_drivers import agent_api
class TestL2GatewayOVSDBCallbacks(object):
    """Tests for the L2 gateway OVSDB callback entry points.

    NOTE(review): this class inherits `object` yet calls super().setUp()
    and uses self.assertEqual and self.l2gw_callbacks, none of which it
    defines — presumably it is used as a mixin combined with a
    unittest.TestCase subclass that provides them; confirm before use.
    """
    def setUp(self):
        super(TestL2GatewayOVSDBCallbacks, self).setUp()
        self.context = context.get_admin_context()
    def test_update_ovsdb_changes(self):
        # update_ovsdb_changes must delegate to a fresh OVSDBData instance.
        fake_activity = 1
        fake_ovsdb_data = {n_const.OVSDB_IDENTIFIER: 'fake_id'}
        with mock.patch.object(data, 'OVSDBData') as ovs_data:
            self.l2gw_callbacks.update_ovsdb_changes(self.context,
                                                     fake_activity,
                                                     fake_ovsdb_data)
            ovsdb_return_value = ovs_data.return_value
            ovsdb_return_value.update_ovsdb_changes.assert_called_with(
                self.context, fake_activity, fake_ovsdb_data)
    def test_notify_ovsdb_states(self):
        # notify_ovsdb_states must forward the state map to OVSDBData.
        fake_ovsdb_states = {'ovsdb1': 'connected'}
        with mock.patch.object(data, 'OVSDBData') as ovs_data:
            self.l2gw_callbacks.notify_ovsdb_states(self.context,
                                                    fake_ovsdb_states)
            ovsdb_return_value = ovs_data.return_value
            ovsdb_return_value.notify_ovsdb_states.assert_called_with(
                self.context, fake_ovsdb_states)
    def test_get_ovsdbdata_object(self):
        fake_ovsdb_id = 'fake_ovsdb_id'
        with mock.patch.object(data, 'OVSDBData') as ovs_data:
            ret_value = self.l2gw_callbacks.get_ovsdbdata_object(
                fake_ovsdb_id)
            # NOTE(review): assert_called_with returns None, so ret_value1
            # is always None — this assertEqual only passes if the method
            # under test also returns None; likely a latent test bug, confirm.
            ret_value1 = ovs_data.assert_called_with(fake_ovsdb_id)
            self.assertEqual(ret_value, ret_value1)
class TestOVSDBData(base.BaseTestCase):
    def setUp(self):
        """Build the OVSDBData under test for identifier 'fake_ovsdb_id'
        with the core plugin and ML2 type manager patched out."""
        super(TestOVSDBData, self).setUp()
        self.context = context.get_admin_context()
        self.ovsdb_identifier = 'fake_ovsdb_id'
        # Patches are started before OVSDBData() is constructed — presumably
        # its __init__ touches the plugin directory / type manager (confirm);
        # BaseTestCase stops started patches automatically on cleanup.
        mock.patch.object(directory, 'get_plugin').start()
        mock.patch.object(managers, 'TypeManager').start()
        self.ovsdb_data = data.OVSDBData(self.ovsdb_identifier)
def test_init(self):
with mock.patch.object(data.OVSDBData,
'_setup_entry_table') as setup_entry_table:
self.ovsdb_data.__init__(self.ovsdb_identifier)
self.assertEqual('fake_ovsdb_id',
self.ovsdb_data.ovsdb_identifier)
self.assertTrue(setup_entry_table.called)
    def test_update_ovsdb_changes(self):
        """update_ovsdb_changes must dispatch each key of the ovsdb payload
        to the matching entry_table handler and then run the l2pop hook.

        NOTE(review): the payload contains 'modified_local_macs' and all
        'deleted_*' keys, but the entry_table injected below omits
        'modified_local_macs' — presumably update_ovsdb_changes skips keys
        without a handler; confirm against the implementation.
        """
        fake_dict = {}
        fake_activity = 1
        fake_remote_mac = {'uuid': '123456',
                           'mac': 'mac123',
                           'ovsdb_identifier': 'host1',
                           'logical_switch_id': 'ls123'}
        fake_new_logical_switches = [fake_dict]
        fake_new_physical_port = [fake_dict]
        fake_new_physical_switches = [fake_dict]
        fake_new_physical_locators = [fake_dict]
        fake_new_local_macs = [fake_dict]
        fake_new_remote_macs = [fake_remote_mac]
        fake_modified_remote_macs = [fake_dict]
        fake_modified_physical_ports = [fake_dict]
        fake_modified_local_macs = [fake_dict]
        fake_deleted_logical_switches = [fake_dict]
        fake_deleted_physical_ports = [fake_dict]
        fake_deleted_physical_switches = [fake_dict]
        fake_deleted_physical_locators = [fake_dict]
        fake_deleted_local_macs = [fake_dict]
        fake_deleted_remote_macs = [fake_dict]
        fake_ovsdb_data = {
            n_const.OVSDB_IDENTIFIER: 'fake_ovsdb_id',
            'new_logical_switches': fake_new_logical_switches,
            'new_physical_ports': fake_new_physical_port,
            'new_physical_switches': fake_new_physical_switches,
            'new_physical_locators': fake_new_physical_locators,
            'new_local_macs': fake_new_local_macs,
            'new_remote_macs': fake_new_remote_macs,
            'modified_remote_macs': fake_modified_remote_macs,
            'modified_physical_ports': fake_modified_physical_ports,
            'modified_local_macs': fake_modified_local_macs,
            'deleted_logical_switches': fake_deleted_logical_switches,
            'deleted_physical_switches': fake_deleted_physical_switches,
            'deleted_physical_ports': fake_deleted_physical_ports,
            'deleted_physical_locators': fake_deleted_physical_locators,
            'deleted_local_macs': fake_deleted_local_macs,
            'deleted_remote_macs': fake_deleted_remote_macs}
        # One mock per private processing method; the order of the patches
        # determines nothing here (each is bound to its own name).
        with mock.patch.object(
            self.ovsdb_data,
            '_process_new_logical_switches'
        ) as process_new_logical_switches, \
            mock.patch.object(
                self.ovsdb_data,
                '_process_new_physical_ports'
            ) as process_new_physical_ports, \
            mock.patch.object(
                self.ovsdb_data,
                '_process_new_physical_switches'
            ) as process_new_physical_switches, \
            mock.patch.object(
                self.ovsdb_data,
                '_process_new_physical_locators'
            ) as process_new_physical_locators, \
            mock.patch.object(
                self.ovsdb_data,
                '_process_new_local_macs'
            ) as process_new_local_macs, \
            mock.patch.object(
                self.ovsdb_data,
                '_process_new_remote_macs'
            ) as process_new_remote_macs, \
            mock.patch.object(
                self.ovsdb_data,
                '_process_modified_remote_macs'
            ) as process_modified_remote_macs, \
            mock.patch.object(
                self.ovsdb_data,
                '_process_modified_physical_ports'
            ) as process_modified_physical_ports, \
            mock.patch.object(
                self.ovsdb_data,
                '_process_deleted_logical_switches'
            ) as process_deleted_logical_switches, \
            mock.patch.object(
                self.ovsdb_data,
                '_process_deleted_physical_switches'
            ) as process_deleted_physical_switches, \
            mock.patch.object(
                self.ovsdb_data,
                '_process_deleted_physical_ports'
            ) as process_deleted_physical_ports, \
            mock.patch.object(
                self.ovsdb_data,
                '_process_deleted_physical_locators'
            ) as process_deleted_physical_locators, \
            mock.patch.object(
                self.ovsdb_data,
                '_process_deleted_local_macs'
            ) as process_deleted_local_macs, \
            mock.patch.object(
                self.ovsdb_data,
                '_process_deleted_remote_macs'
            ) as process_deleted_remote_macs, \
            mock.patch.object(
                self.ovsdb_data,
                '_handle_l2pop') as mock_handle_l2pop:
            # Inject a synthetic dispatch table that maps straight to mocks.
            self.ovsdb_data.entry_table = {
                'new_logical_switches': process_new_logical_switches,
                'new_physical_ports': process_new_physical_ports,
                'new_physical_switches': process_new_physical_switches,
                'new_physical_locators': process_new_physical_locators,
                'new_local_macs': process_new_local_macs,
                'new_remote_macs': process_new_remote_macs,
                'modified_remote_macs': process_modified_remote_macs,
                'modified_physical_ports': process_modified_physical_ports,
                'deleted_logical_switches': process_deleted_logical_switches,
                'deleted_physical_switches': process_deleted_physical_switches,
                'deleted_physical_ports': process_deleted_physical_ports,
                'deleted_physical_locators': process_deleted_physical_locators,
                'deleted_local_macs': process_deleted_local_macs,
                'deleted_remote_macs': process_deleted_remote_macs}
            self.ovsdb_data.update_ovsdb_changes(
                self.context, fake_activity, fake_ovsdb_data)
            # Each handler must have received its slice of the payload.
            process_new_logical_switches.assert_called_with(
                self.context, fake_new_logical_switches)
            process_new_physical_ports.assert_called_with(
                self.context, fake_new_physical_port)
            process_new_physical_switches.assert_called_with(
                self.context, fake_new_physical_switches)
            process_new_physical_locators.assert_called_with(
                self.context, fake_new_physical_locators)
            process_new_local_macs.assert_called_with(
                self.context, fake_new_local_macs)
            process_new_remote_macs.assert_called_with(
                self.context, fake_new_remote_macs)
            process_modified_remote_macs.assert_called_with(
                self.context, fake_modified_remote_macs)
            process_modified_physical_ports.assert_called_with(
                self.context, fake_modified_physical_ports)
            process_deleted_logical_switches.assert_called_with(
                self.context, fake_deleted_logical_switches)
            process_deleted_physical_switches.assert_called_with(
                self.context, fake_deleted_physical_switches)
            process_deleted_physical_ports.assert_called_with(
                self.context, fake_deleted_physical_ports)
            process_deleted_physical_locators.assert_called_with(
                self.context, fake_deleted_physical_locators)
            process_deleted_local_macs.assert_called_with(
                self.context, fake_deleted_local_macs)
            process_deleted_remote_macs.assert_called_with(
                self.context, fake_deleted_remote_macs)
            self.assertTrue(mock_handle_l2pop.called)
    @mock.patch.object(lib, 'get_all_pending_remote_macs_in_asc_order')
    @mock.patch.object(lib, 'delete_pending_ucast_mac_remote')
    @mock.patch.object(ovsdb_schema, 'LogicalSwitch')
    @mock.patch.object(ovsdb_schema, 'PhysicalLocator')
    @mock.patch.object(ovsdb_schema, 'UcastMacsRemote')
    @mock.patch.object(agent_api.L2gatewayAgentApi, 'add_vif_to_gateway')
    @mock.patch.object(agent_api.L2gatewayAgentApi, 'update_vif_to_gateway')
    @mock.patch.object(agent_api.L2gatewayAgentApi, 'delete_vif_from_gateway')
    def test_notify_ovsdb_states(self, mock_del_vif, mock_upd_vif,
                                 mock_add_vif, mock_ucmr, mock_pl,
                                 mock_ls, mock_del_pend_recs,
                                 mock_get_pend_recs):
        """For each pending record, a connected ovsdb must trigger the agent
        call that matches the record's 'operation': insert -> add vif,
        update -> update vif, delete -> delete vif.

        Note: decorators apply bottom-up, so the parameters arrive in
        reverse decorator order (delete_vif first, pending-macs last).
        """
        fake_ovsdb_states = {'ovsdb1': 'connected'}
        fake_dict = {'logical_switch_uuid': 'fake_ls_id',
                     'mac': 'fake_mac',
                     'locator_uuid': 'fake_loc_id',
                     'dst_ip': 'fake_dst_ip',
                     'vm_ip': 'fake_vm_ip'}
        fake_insert_dict = {'operation': 'insert'}
        fake_insert_dict.update(fake_dict)
        fake_update_dict = {'operation': 'update'}
        fake_update_dict.update(fake_dict)
        fake_delete_dict = {'operation': 'delete'}
        fake_delete_dict.update(fake_dict)
        mock_get_pend_recs.return_value = [fake_insert_dict]
        self.ovsdb_data.notify_ovsdb_states(
            self.context, fake_ovsdb_states)
        self.assertTrue(mock_add_vif.called)
        mock_get_pend_recs.return_value = [fake_update_dict]
        self.ovsdb_data.notify_ovsdb_states(
            self.context, fake_ovsdb_states)
        self.assertTrue(mock_upd_vif.called)
        mock_get_pend_recs.return_value = [fake_delete_dict]
        self.ovsdb_data.notify_ovsdb_states(
            self.context, fake_ovsdb_states)
        self.assertTrue(mock_del_vif.called)
def test_process_new_logical_switches(self):
fake_dict = {}
fake_new_logical_switches = [fake_dict]
with mock.patch.object(lib, 'get_logical_switch',
return_value=None) as get_ls:
with mock.patch.object(lib,
'add_logical_switch') as add_ls:
self.ovsdb_data._process_new_logical_switches(
self.context, fake_new_logical_switches)
self.assertIn(n_const.OVSDB_IDENTIFIER, fake_dict)
self.assertEqual('fake_ovsdb_id',
fake_dict[n_const.OVSDB_IDENTIFIER])
get_ls.assert_called_with(self.context, fake_dict)
add_ls.assert_called_with(self.context, fake_dict)
def test_process_new_physical_switches(self):
fake_dict = {'tunnel_ip': ['set']}
fake_new_physical_switches = [fake_dict]
with mock.patch.object(lib, 'get_physical_switch',
return_value=None) as get_ps:
with mock.patch.object(lib,
'add_physical_switch') as add_ps:
self.ovsdb_data._process_new_physical_switches(
self.context, fake_new_physical_switches)
self.assertIn(n_const.OVSDB_IDENTIFIER, fake_dict)
self.assertIsNone(fake_dict['tunnel_ip'])
self.assertEqual('fake_ovsdb_id',
fake_dict[n_const.OVSDB_IDENTIFIER])
get_ps.assert_called_with(self.context, fake_dict)
add_ps.assert_called_with(self.context, fake_dict)
@mock.patch.object(lib, 'get_physical_port', return_value=None)
@mock.patch.object(lib, 'add_physical_port')
@mock.patch.object(lib, 'get_vlan_binding', return_value=None)
@mock.patch.object(lib, 'add_vlan_binding')
def test_process_new_physical_ports(self, add_vlan, get_vlan,
add_pp, get_pp):
fake_dict1 = {}
fake_dict2 = {'vlan_bindings': [fake_dict1]}
fake_new_physical_ports = [fake_dict2]
self.ovsdb_data._process_new_physical_ports(
self.context, fake_new_physical_ports)
self.assertIn(n_const.OVSDB_IDENTIFIER, fake_dict2)
self.assertEqual('fake_ovsdb_id',
fake_dict2[n_const.OVSDB_IDENTIFIER])
get_pp.assert_called_with(self.context, fake_dict2)
add_pp.assert_called_with(self.context, fake_dict2)
self.assertIn(n_const.OVSDB_IDENTIFIER, fake_dict1)
self.assertIn('port_uuid', fake_dict1)
get_vlan.assert_called_with(self.context, fake_dict1)
add_vlan.assert_called_with(self.context, fake_dict1)
def test_process_new_physical_locators(self):
fake_dict = {}
fake_new_physical_locators = [fake_dict]
with mock.patch.object(lib, 'get_physical_locator',
return_value=None) as get_pl:
with mock.patch.object(lib,
'add_physical_locator') as add_pl:
self.ovsdb_data._process_new_physical_locators(
self.context, fake_new_physical_locators)
self.assertIn(n_const.OVSDB_IDENTIFIER, fake_dict)
self.assertEqual('fake_ovsdb_id',
fake_dict[n_const.OVSDB_IDENTIFIER])
get_pl.assert_called_with(self.context, fake_dict)
add_pl.assert_called_with(self.context, fake_dict)
@mock.patch.object(lib, 'get_ucast_mac_local', return_value=None)
@mock.patch.object(lib, 'add_ucast_mac_local')
def test_process_new_local_macs(self, add_lm, get_lm):
fake_dict = {'uuid': '123456',
'mac': 'mac123',
'ovsdb_identifier': 'host1',
'logical_switch_id': 'ls123'}
fake_new_local_macs = [fake_dict]
self.ovsdb_data._process_new_local_macs(
self.context, fake_new_local_macs)
self.assertIn(n_const.OVSDB_IDENTIFIER, fake_dict)
self.assertEqual('fake_ovsdb_id',
fake_dict[n_const.OVSDB_IDENTIFIER])
get_lm.assert_called_with(self.context, fake_dict)
add_lm.assert_called_with(self.context, fake_dict)
def test_process_new_remote_macs(self):
fake_dict = {'logical_switch_id': 'ls123'}
fake_new_remote_macs = [fake_dict]
with mock.patch.object(lib, 'get_ucast_mac_remote',
return_value=None) as get_mr:
with mock.patch.object(lib,
'add_ucast_mac_remote') as add_mr:
self.ovsdb_data._process_new_remote_macs(
self.context, fake_new_remote_macs)
self.assertIn(n_const.OVSDB_IDENTIFIER, fake_dict)
self.assertEqual('fake_ovsdb_id',
fake_dict[n_const.OVSDB_IDENTIFIER])
get_mr.assert_called_with(self.context, fake_dict)
add_mr.assert_called_with(self.context, fake_dict)
def test_process_modified_remote_macs(self):
fake_dict = {'logical_switch_id': 'ls123'}
fake_modified_remote_macs = [fake_dict]
with mock.patch.object(lib,
'update_ucast_mac_remote') as update_mr:
self.ovsdb_data._process_modified_remote_macs(
self.context, fake_modified_remote_macs)
self.assertIn(n_const.OVSDB_IDENTIFIER, fake_dict)
self.assertEqual('fake_ovsdb_id',
fake_dict[n_const.OVSDB_IDENTIFIER])
update_mr.assert_called_with(self.context, fake_dict)
def test_process_deleted_logical_switches(self):
fake_dict = {}
fake_deleted_logical_switches = [fake_dict]
with mock.patch.object(lib, 'delete_logical_switch') as delete_ls:
self.ovsdb_data._process_deleted_logical_switches(
self.context, fake_deleted_logical_switches)
self.assertIn(n_const.OVSDB_IDENTIFIER, fake_dict)
self.assertEqual('fake_ovsdb_id',
fake_dict[n_const.OVSDB_IDENTIFIER])
delete_ls.assert_called_with(self.context, fake_dict)
def test_get_agent_by_mac(self):
fake_mac = {'mac': 'fake_mac_1'}
fake_port = [{'binding:host_id': 'fake_host'}]
with mock.patch.object(self.ovsdb_data, '_get_port_by_mac',
return_value=fake_port) as mock_get_port_mac, \
mock.patch.object(
self.ovsdb_data,
'_get_agent_details_by_host') as mock_get_agent_detail:
self.ovsdb_data._get_agent_by_mac(self.context, fake_mac)
mock_get_port_mac.assert_called_with(self.context, 'fake_mac_1')
mock_get_agent_detail.assert_called_with(self.context, 'fake_host')
def test_get_agent_details_by_host(self):
fake_agent = {'configurations': {'tunnel_types': ["vxlan"],
'l2_population': True}}
fake_agents = [fake_agent]
with mock.patch.object(self.ovsdb_data.core_plugin,
'get_agents',
return_value=fake_agents):
l2pop_enabled = self.ovsdb_data._get_agent_details_by_host(
self.context, 'fake_host')
self.assertTrue(l2pop_enabled)
def test_process_deleted_physical_switches(self):
fake_dict = {}
fake_deleted_physical_switches = [fake_dict]
fake_ls_dict = {'uuid': 'ls-uuid'}
fake_ls_list = [fake_ls_dict]
with mock.patch.object(lib, 'delete_physical_switch') as delete_ps, \
mock.patch.object(lib, 'get_all_physical_switches_by_ovsdb_id',
return_value=False) as get_ps, \
mock.patch.object(lib, 'get_all_logical_switches_by_ovsdb_id',
return_value=fake_ls_list) as get_ls, \
mock.patch.object(agent_api.L2gatewayAgentApi,
'delete_network') as del_network:
self.ovsdb_data._process_deleted_physical_switches(
self.context, fake_deleted_physical_switches)
self.assertIn(n_const.OVSDB_IDENTIFIER, fake_dict)
self.assertEqual('fake_ovsdb_id',
fake_dict[n_const.OVSDB_IDENTIFIER])
delete_ps.assert_called_with(self.context, fake_dict)
get_ps.assert_called_with(self.context, 'fake_ovsdb_id')
get_ls.assert_called_with(self.context, 'fake_ovsdb_id')
del_network.assert_called_with(self.context, 'fake_ovsdb_id',
'ls-uuid')
    def test_process_deleted_physical_ports(self):
        """Deleting a port removes it and tears down the l2gw connections
        of the gateways bound to its switch interface.

        NOTE(review): two mock.patch.object calls below misspell
        `return_value` as `return_vaue`, so those patched functions return
        a default MagicMock instead of the fake dicts — the fakes are dead
        fixtures.  Also `fake_physical_switch` ends with a trailing comma,
        making it a one-element tuple, not a dict.  Fixing either would
        change which code paths the mocks drive, so both are flagged here
        rather than silently changed; re-run the suite after correcting.
        """
        fake_dict = {'name': 'fake_uuid', 'uuid': 'fake_name'}
        fake_deleted_physical_ports = [fake_dict]
        fake_physical_port = {'uuid': 'fake_uuid',
                              'name': 'fake_name'}
        fake_physical_switch = {'uuid': 'fake_uuid',
                                'ovsdb_identifier': 'fake_ovsdb_id',
                                'name': 'fake_switch'},
        fake_vlan_binding = {'port_uuid:': 'fake_port_uuid',
                             'vlan': 'fake_vlan',
                             'logical_switch_uuid': 'fake_switch_uuid',
                             'ovsdb_identifier': 'fake_ovsdb_id'}
        with mock.patch.object(lib,
                               'delete_physical_port'), \
                mock.patch.object(lib,
                                  'get_physical_port',
                                  return_value=fake_physical_port), \
                mock.patch.object(lib, 'get_physical_switch',
                                  return_vaue=fake_physical_switch), \
                mock.patch.object(lib,
                                  'get_all_vlan_bindings_by_physical_port',
                                  return_vaue=fake_vlan_binding), \
                mock.patch.object(l2gateway_db.L2GatewayMixin,
                                  '_get_l2gw_ids_by_interface_switch',
                                  return_value=['fake_uuid']), \
                mock.patch.object(
                    l2gateway_db.L2GatewayMixin,
                    '_delete_connection_by_l2gw_id') as l2gw_conn_del:
            self.ovsdb_data._process_deleted_physical_ports(
                self.context, fake_deleted_physical_ports)
            self.assertIn(n_const.OVSDB_IDENTIFIER, fake_dict)
            self.assertEqual('fake_ovsdb_id',
                             fake_dict[n_const.OVSDB_IDENTIFIER])
            l2gw_conn_del.assert_called_with(self.context, 'fake_uuid')
    @mock.patch.object(lib, 'delete_physical_port')
    @mock.patch.object(lib, 'get_physical_port')
    @mock.patch.object(lib, 'get_physical_switch')
    @mock.patch.object(l2gateway_db.L2GatewayMixin,
                       '_get_l2gw_ids_by_interface_switch',
                       return_value=['fake_uuid'])
    @mock.patch.object(l2gateway_db.L2GatewayMixin,
                       '_delete_connection_by_l2gw_id')
    @mock.patch.object(lib,
                       'get_all_vlan_bindings_by_physical_port')
    @mock.patch.object(lib,
                       'get_all_vlan_bindings_by_logical_switch')
    @mock.patch.object(data.OVSDBData, '_delete_macs_from_ovsdb')
    @mock.patch.object(lib, 'delete_vlan_binding')
    def test_process_deleted_physical_ports_with_delete_macs(
            self, del_vlan, del_macs, get_vlan_by_ls, get_vlan_by_pp,
            l2gw_conn_del, get_l2gw, get_ps, get_pp, delete_pp):
        """When the port's logical switch has no remaining bindings, its
        MACs are purged from the ovsdb as well.

        Decorators apply bottom-up, so parameters arrive in reverse
        decorator order (del_vlan first, delete_pp last).
        """
        fake_dict = {'uuid': 'fake_uuid', 'name': 'fake_name',
                     'logical_switch_id': 'fake_ls_id',
                     'ovsdb_identifier': 'fake_ovsdb_id'}
        fake_deleted_physical_ports = [fake_dict]
        fake_physical_port = {'uuid': 'fake_uuid',
                              'name': 'fake_name',
                              'ovsdb_identifier': 'fake_ovsdb_id'}
        fake_physical_switch = {'uuid': 'fake_uuid',
                                'ovsdb_identifier': 'fake_ovsdb_id',
                                'name': 'fake_switch'}
        vlan_binding_dict = {'logical_switch_uuid': 'fake_ls_id',
                             'ovsdb_identifier': 'fake_ovsdb_id',
                             'port_uuid': 'fake_uuid',
                             'vlan': 'fake_vlan',
                             'logical_switch_id': 'fake_ls_id'}
        fake_vlan_binding_list = [vlan_binding_dict]
        fake_binding_list = [vlan_binding_dict]
        get_pp.return_value = fake_physical_port
        # NOTE(review): `return_vaue` is a typo for `return_value` — it only
        # sets a junk attribute on the mock, so get_physical_switch still
        # returns a default MagicMock and fake_physical_switch is unused.
        # Fixing it may alter which paths the code under test takes; re-run
        # the suite after correcting.
        get_ps.return_vaue = fake_physical_switch
        get_vlan_by_pp.return_value = fake_vlan_binding_list
        get_vlan_by_ls.return_value = fake_binding_list
        self.ovsdb_data._process_deleted_physical_ports(
            self.context, fake_deleted_physical_ports)
        self.assertIn(n_const.OVSDB_IDENTIFIER, fake_dict)
        self.assertEqual('fake_ovsdb_id',
                         fake_dict[n_const.OVSDB_IDENTIFIER])
        l2gw_conn_del.assert_called_with(self.context, 'fake_uuid')
        get_vlan_by_pp.assert_called_with(self.context, fake_dict)
        del_vlan.assert_called_with(self.context, vlan_binding_dict)
        get_vlan_by_ls.assert_called_with(self.context, vlan_binding_dict)
        del_macs.assert_called_with(self.context,
                                    'fake_ls_id', 'fake_ovsdb_id')
        del_vlan.assert_called_with(self.context, vlan_binding_dict)
        delete_pp.assert_called_with(self.context, fake_dict)
    @mock.patch.object(data.OVSDBData,
                       '_get_logical_switch_ids',
                       return_value=['1'])
    @mock.patch.object(lib,
                       'get_all_physical_switches_by_ovsdb_id',
                       return_value=[{'tunnel_ip': '3.3.3.3'}])
    @mock.patch.object(data.OVSDBData,
                       '_get_fdb_entries')
    @mock.patch.object(lib,
                       'delete_physical_locator')
    @mock.patch.object(data.OVSDBData, '_get_agent_ips',
                       return_value={'1.1.1.1': 'hostname'})
    @mock.patch.object(tunnel_calls.Tunnel_Calls,
                       'trigger_l2pop_delete')
    def test_process_deleted_physical_locators(
            self, trig_l2pop, get_agent_ips, delete_pl, get_fdb, get_all_ps,
            get_ls):
        """Test case to test _process_deleted_physical_locators.
        for unicast rpc to the L2 agent
        """
        # '1.1.1.1' is a known agent IP (see _get_agent_ips above), so its
        # locator results in a host-targeted (unicast) l2pop delete;
        # '2.2.2.2' is not known to any agent.
        fake_dict1 = {'dst_ip': '1.1.1.1'}
        fake_dict2 = {'dst_ip': '2.2.2.2'}
        fake_deleted_physical_locators = [fake_dict2, fake_dict1]
        # NOTE(review): this patch is started but never stopped; presumably a
        # global patch cleanup runs between tests -- confirm.
        mock.patch.object(manager, 'NeutronManager').start()
        self.ovsdb_data._process_deleted_physical_locators(
            self.context, fake_deleted_physical_locators)
        # Each locator record is stamped with the OVSDB identifier
        # (presumably configured on self.ovsdb_data in the fixture).
        self.assertIn(n_const.OVSDB_IDENTIFIER, fake_dict1)
        self.assertTrue(get_ls.called)
        self.assertTrue(get_all_ps.called)
        self.assertTrue(get_fdb.called)
        self.assertEqual('fake_ovsdb_id',
                         fake_dict1[n_const.OVSDB_IDENTIFIER])
        delete_pl.assert_called_with(self.context, fake_dict1)
        self.assertTrue(get_agent_ips.called)
        # Unicast: the rpc carries the hostname of the agent owning the IP.
        trig_l2pop.assert_called_with(self.context,
                                      mock.ANY,
                                      'hostname')
    @mock.patch.object(data.OVSDBData,
                       '_get_logical_switch_ids',
                       return_value=['1'])
    @mock.patch.object(lib,
                       'get_all_physical_switches_by_ovsdb_id',
                       return_value=[{'tunnel_ip': '3.3.3.3'}])
    @mock.patch.object(data.OVSDBData,
                       '_get_fdb_entries')
    @mock.patch.object(lib,
                       'delete_physical_locator')
    @mock.patch.object(data.OVSDBData, '_get_agent_ips',
                       return_value={'2.2.2.2': 'hostname'})
    @mock.patch.object(tunnel_calls.Tunnel_Calls,
                       'trigger_l2pop_delete')
    def test_process_deleted_physical_locators1(
            self, trig_l2pop, get_agent_ips, delete_pl, get_fdb,
            get_all_ps, get_ls):
        """Test case to test _process_deleted_physical_locators.
        for broadcast rpc to the L2 agents
        """
        # '1.1.1.1' is NOT a known agent IP (only '2.2.2.2' is mapped above),
        # so the l2pop delete goes out without a target host (broadcast).
        fake_dict1 = {'dst_ip': '1.1.1.1'}
        fake_deleted_physical_locators = [fake_dict1]
        # NOTE(review): this patch is started but never stopped; presumably a
        # global patch cleanup runs between tests -- confirm.
        mock.patch.object(manager, 'NeutronManager').start()
        self.ovsdb_data._process_deleted_physical_locators(
            self.context, fake_deleted_physical_locators)
        self.assertIn(n_const.OVSDB_IDENTIFIER, fake_dict1)
        self.assertTrue(get_ls.called)
        self.assertTrue(get_all_ps.called)
        self.assertTrue(get_fdb.called)
        self.assertEqual('fake_ovsdb_id',
                         fake_dict1[n_const.OVSDB_IDENTIFIER])
        delete_pl.assert_called_once_with(self.context, fake_dict1)
        self.assertTrue(get_agent_ips.called)
        # Broadcast: no hostname argument, unlike the unicast case.
        trig_l2pop.assert_called_with(self.context,
                                      mock.ANY)
def test_process_deleted_local_macs(self):
fake_dict = {'uuid': '123456',
'mac': 'mac123',
'ovsdb_identifier': 'host1',
'logical_switch_id': 'ls123'}
fake_deleted_local_macs = [fake_dict]
with mock.patch.object(lib, 'delete_ucast_mac_local') as delete_ml:
with mock.patch.object(lib,
'get_ucast_mac_remote_by_mac_and_ls',
return_value=True):
self.ovsdb_data._process_deleted_local_macs(
self.context, fake_deleted_local_macs)
self.assertIn(n_const.OVSDB_IDENTIFIER, fake_dict)
self.assertEqual('fake_ovsdb_id',
fake_dict[n_const.OVSDB_IDENTIFIER])
delete_ml.assert_called_with(self.context, fake_dict)
def test_process_deleted_remote_macs(self):
fake_dict = {}
fake_deleted_remote_macs = [fake_dict]
with mock.patch.object(lib, 'delete_ucast_mac_remote') as delete_mr:
self.ovsdb_data._process_deleted_remote_macs(
self.context, fake_deleted_remote_macs)
self.assertIn(n_const.OVSDB_IDENTIFIER, fake_dict)
self.assertEqual('fake_ovsdb_id',
fake_dict[n_const.OVSDB_IDENTIFIER])
delete_mr.assert_called_with(self.context, fake_dict)
@mock.patch.object(lib, 'get_physical_port')
@mock.patch.object(lib, 'add_physical_port')
@mock.patch.object(lib, 'get_all_vlan_bindings_by_physical_port')
@mock.patch.object(lib, 'add_vlan_binding')
@mock.patch.object(lib, 'update_physical_ports_status')
def test_process_modified_physical_ports(self, update_pp_status, add_vlan,
get_vlan, add_pp, get_pp):
fake_dict1 = {}
fake_dict2 = {'vlan_bindings': [fake_dict1],
'uuid': 'fake_uuid'}
fake_modified_physical_ports = [fake_dict2]
self.ovsdb_data._process_modified_physical_ports(
self.context, fake_modified_physical_ports)
self.assertIn(n_const.OVSDB_IDENTIFIER, fake_dict2)
self.assertEqual('fake_ovsdb_id',
fake_dict2[n_const.OVSDB_IDENTIFIER])
get_pp.assert_called_with(self.context, fake_dict2)
update_pp_status.assert_called_with(self.context, fake_dict2)
self.assertFalse(add_pp.called)
get_vlan.assert_called_with(self.context, fake_dict2)
self.assertIn(n_const.OVSDB_IDENTIFIER, fake_dict1)
self.assertIn('port_uuid', fake_dict1)
add_vlan.assert_called_with(self.context, fake_dict1)
| |
# Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABCMeta
from numbers import Integral
from operator import itemgetter
import warnings
from logbook import Logger
import numpy as np
import pandas as pd
from pandas.tseries.tools import normalize_date
from six import with_metaclass, string_types, viewkeys
import sqlalchemy as sa
from toolz import compose
from zipline.errors import (
MultipleSymbolsFound,
RootSymbolNotFound,
SidNotFound,
SymbolNotFound,
MapAssetIdentifierIndexError,
)
from zipline.assets import (
Asset, Equity, Future,
)
from zipline.assets.asset_writer import (
split_delimited_symbol,
check_version_info,
ASSET_DB_VERSION,
asset_db_table_names,
)
log = Logger('assets.py')
# A set of fields that need to be converted to strings before building an
# Asset to avoid unicode fields
# NOTE(review): _asset_str_fields is not referenced anywhere in this module's
# visible code -- presumably consumed elsewhere or vestigial; confirm before
# removing.
_asset_str_fields = frozenset({
    'symbol',
    'asset_name',
    'exchange',
})
# A set of fields that need to be converted to timestamps in UTC
# (consumed by _convert_asset_timestamp_fields below).
_asset_timestamp_fields = frozenset({
    'start_date',
    'end_date',
    'first_traded',
    'notice_date',
    'expiration_date',
    'auto_close_date',
})
def _convert_asset_timestamp_fields(asset_dict):
    """
    Takes in a dict of Asset init args and converts dates to pd.Timestamps.

    Mutates ``asset_dict`` in place: every key listed in
    ``_asset_timestamp_fields`` that is present is replaced with a tz-aware
    (UTC) ``pd.Timestamp``, or ``None`` if the value is missing/NaT.
    """
    # BUG FIX: the parameter was named ``dict``, shadowing the builtin.
    # ``frozenset.intersection`` accepts the dict directly (iterating its
    # keys), which also drops the dependency on six.viewkeys.
    for key in _asset_timestamp_fields.intersection(asset_dict):
        value = pd.Timestamp(asset_dict[key], tz='UTC')
        asset_dict[key] = None if pd.isnull(value) else value
class AssetFinder(object):
    """
    Maps sids and symbols to Asset objects (Equities and Futures) stored in
    an asset database accessed through a SQLAlchemy engine.  Lookups are
    memoized in in-process, read-through caches.
    """
    # Token used as a substitute for pickling objects that contain a
    # reference to an AssetFinder
    PERSISTENT_TOKEN = "<AssetFinder>"
    def __init__(self, engine):
        """
        Parameters
        ----------
        engine : sqlalchemy.engine.Engine
            Engine bound to an asset database whose schema version matches
            ASSET_DB_VERSION.
        """
        self.engine = engine
        metadata = sa.MetaData(bind=engine)
        metadata.reflect(only=asset_db_table_names)
        # Expose each reflected asset-db table (e.g. self.equities,
        # self.futures_contracts, self.asset_router, self.version_info)
        # as an attribute of this finder.
        for table_name in asset_db_table_names:
            setattr(self, table_name, metadata.tables[table_name])
        # Check the version info of the db for compatibility
        check_version_info(self.version_info, ASSET_DB_VERSION)
        # Cache for lookup of assets by sid, the objects in the asset lookup
        # may be shared with the results from equity and future lookup caches.
        #
        # The top level cache exists to minimize lookups on the asset type
        # routing.
        #
        # The caches are read through, i.e. accessing an asset through
        # retrieve_asset, _retrieve_equity etc. will populate the cache on
        # first retrieval.
        self._asset_cache = {}
        self._equity_cache = {}
        self._future_cache = {}
        self._asset_type_cache = {}
        # Populated on first call to `lifetimes`.
        self._asset_lifetimes = None
    def asset_type_by_sid(self, sid):
        """
        Retrieve the asset type of a given sid ('equity' or 'future'), or
        None if the sid is not present in the asset router table.
        """
        try:
            return self._asset_type_cache[sid]
        except KeyError:
            pass
        asset_type = sa.select((self.asset_router.c.asset_type,)).where(
            self.asset_router.c.sid == int(sid),
        ).scalar()
        # Negative results are deliberately not cached.
        if asset_type is not None:
            self._asset_type_cache[sid] = asset_type
        return asset_type
    def retrieve_asset(self, sid, default_none=False):
        """
        Retrieve the Asset object of a given sid.

        An Asset passed as `sid` is returned unchanged.  Unknown sids raise
        SidNotFound unless `default_none` is True, in which case None is
        returned.
        """
        if isinstance(sid, Asset):
            return sid
        try:
            asset = self._asset_cache[sid]
        except KeyError:
            # Route on asset type, then delegate to the type-specific
            # read-through lookup.
            asset_type = self.asset_type_by_sid(sid)
            if asset_type == 'equity':
                asset = self._retrieve_equity(sid)
            elif asset_type == 'future':
                asset = self._retrieve_futures_contract(sid)
            else:
                asset = None
            # Cache the asset if it has been retrieved
            if asset is not None:
                self._asset_cache[sid] = asset
        if asset is not None:
            return asset
        elif default_none:
            return None
        else:
            raise SidNotFound(sid=sid)
    def retrieve_all(self, sids, default_none=False):
        """Retrieve Assets for every sid in `sids`; see `retrieve_asset`."""
        return [self.retrieve_asset(sid, default_none) for sid in sids]
    def _retrieve_equity(self, sid):
        """
        Retrieve the Equity object of a given sid.
        """
        return self._retrieve_asset(
            sid, self._equity_cache, self.equities, Equity,
        )
    def _retrieve_futures_contract(self, sid):
        """
        Retrieve the Future object of a given sid.
        """
        return self._retrieve_asset(
            sid, self._future_cache, self.futures_contracts, Future,
        )
    @staticmethod
    def _select_asset_by_sid(asset_tbl, sid):
        """Build a SELECT for the row of `asset_tbl` with the given sid."""
        return sa.select([asset_tbl]).where(asset_tbl.c.sid == int(sid))
    @staticmethod
    def _select_asset_by_symbol(asset_tbl, symbol):
        """Build a SELECT for rows of `asset_tbl` with the given symbol."""
        return sa.select([asset_tbl]).where(asset_tbl.c.symbol == symbol)
    def _retrieve_asset(self, sid, cache, asset_tbl, asset_type):
        """
        Read-through lookup of `sid` in `cache`, falling back to a query
        against `asset_tbl` and constructing an `asset_type` instance from
        the resulting row.
        """
        try:
            return cache[sid]
        except KeyError:
            pass
        data = self._select_asset_by_sid(asset_tbl, sid).execute().fetchone()
        # Convert 'data' from a RowProxy object to a dict, to allow assignment
        # NOTE(review): if the query matched no row, `data` is None here and
        # `.items()` raises AttributeError before the `if data:` guard below
        # ever runs -- presumably callers only pass sids already routed by
        # asset_type_by_sid; confirm.
        data = dict(data.items())
        if data:
            _convert_asset_timestamp_fields(data)
            asset = asset_type(**data)
        else:
            asset = None
        cache[sid] = asset
        return asset
    def _get_fuzzy_candidates(self, fuzzy_symbol):
        """All equity sids whose fuzzy_symbol matches, newest listing
        (latest start_date, then end_date) first."""
        candidates = sa.select(
            (self.equities.c.sid,)
        ).where(self.equities.c.fuzzy_symbol == fuzzy_symbol).order_by(
            self.equities.c.start_date.desc(),
            self.equities.c.end_date.desc()
        ).execute().fetchall()
        return candidates
    def _get_fuzzy_candidates_in_range(self, fuzzy_symbol, ad_value):
        """Like `_get_fuzzy_candidates`, restricted to equities whose
        [start_date, end_date] range covers `ad_value` (an as-of date as an
        integer timestamp value)."""
        candidates = sa.select(
            (self.equities.c.sid,)
        ).where(
            sa.and_(
                self.equities.c.fuzzy_symbol == fuzzy_symbol,
                self.equities.c.start_date <= ad_value,
                self.equities.c.end_date >= ad_value
            )
        ).order_by(
            self.equities.c.start_date.desc(),
            self.equities.c.end_date.desc(),
        ).execute().fetchall()
        return candidates
    def _get_split_candidates_in_range(self,
                                       company_symbol,
                                       share_class_symbol,
                                       ad_value):
        """Equity sids matching the company/share-class symbol pair that
        were alive at `ad_value`, newest listing first."""
        candidates = sa.select(
            (self.equities.c.sid,)
        ).where(
            sa.and_(
                self.equities.c.company_symbol == company_symbol,
                self.equities.c.share_class_symbol == share_class_symbol,
                self.equities.c.start_date <= ad_value,
                self.equities.c.end_date >= ad_value
            )
        ).order_by(
            self.equities.c.start_date.desc(),
            self.equities.c.end_date.desc(),
        ).execute().fetchall()
        return candidates
    def _get_split_candidates(self, company_symbol, share_class_symbol):
        """All equity sids matching the company/share-class symbol pair,
        newest listing first."""
        candidates = sa.select(
            (self.equities.c.sid,)
        ).where(
            sa.and_(
                self.equities.c.company_symbol == company_symbol,
                self.equities.c.share_class_symbol == share_class_symbol
            )
        ).order_by(
            self.equities.c.start_date.desc(),
            self.equities.c.end_date.desc(),
        ).execute().fetchall()
        return candidates
    def _resolve_no_matching_candidates(self,
                                        company_symbol,
                                        share_class_symbol,
                                        ad_value):
        """Fallback when no equity was alive at the as-of date: sids that
        started on or before `ad_value`, ordered by latest end_date."""
        candidates = sa.select((self.equities.c.sid,)).where(
            sa.and_(
                self.equities.c.company_symbol == company_symbol,
                self.equities.c.share_class_symbol ==
                share_class_symbol,
                self.equities.c.start_date <= ad_value),
        ).order_by(
            self.equities.c.end_date.desc(),
        ).execute().fetchall()
        return candidates
    def _get_best_candidate(self, candidates):
        """Return the Equity for the first (highest-ranked) candidate row."""
        return self._retrieve_equity(candidates[0]['sid'])
    def _get_equities_from_candidates(self, candidates):
        """Return Equity objects for every candidate row, in order."""
        return list(map(
            compose(self._retrieve_equity, itemgetter('sid')),
            candidates,
        ))
    def lookup_symbol(self, symbol, as_of_date, fuzzy=False):
        """
        Return matching Equity of name symbol in database.
        If multiple Equities are found and as_of_date is not set,
        raises MultipleSymbolsFound.
        If no Equity was active at as_of_date raises SymbolNotFound.
        """
        company_symbol, share_class_symbol, fuzzy_symbol = \
            split_delimited_symbol(symbol)
        if as_of_date:
            # Format inputs
            as_of_date = pd.Timestamp(normalize_date(as_of_date))
            ad_value = as_of_date.value
            if fuzzy:
                # Search for a single exact match on the fuzzy column
                candidates = self._get_fuzzy_candidates_in_range(fuzzy_symbol,
                                                                 ad_value)
                # If exactly one SID exists for fuzzy_symbol, return that sid
                if len(candidates) == 1:
                    return self._get_best_candidate(candidates)
            # Search for exact matches of the split-up company_symbol and
            # share_class_symbol
            candidates = self._get_split_candidates_in_range(
                company_symbol,
                share_class_symbol,
                ad_value
            )
            # If exactly one SID exists for symbol, return that symbol
            # If multiple SIDs exist for symbol, return latest start_date with
            # end_date as a tie-breaker
            if candidates:
                return self._get_best_candidate(candidates)
            # If no SID exists for symbol, return SID with the
            # highest-but-not-over end_date
            elif not candidates:
                candidates = self._resolve_no_matching_candidates(
                    company_symbol,
                    share_class_symbol,
                    ad_value
                )
                if candidates:
                    return self._get_best_candidate(candidates)
            raise SymbolNotFound(symbol=symbol)
        else:
            # If this is a fuzzy look-up, check if there is exactly one match
            # for the fuzzy symbol
            if fuzzy:
                candidates = self._get_fuzzy_candidates(fuzzy_symbol)
                if len(candidates) == 1:
                    return self._get_best_candidate(candidates)
            candidates = self._get_split_candidates(company_symbol,
                                                    share_class_symbol)
            if len(candidates) == 1:
                return self._get_best_candidate(candidates)
            elif not candidates:
                raise SymbolNotFound(symbol=symbol)
            else:
                raise MultipleSymbolsFound(
                    symbol=symbol,
                    options=self._get_equities_from_candidates(candidates)
                )
    def lookup_future_symbol(self, symbol):
        """ Return the Future object for a given symbol.
        Parameters
        ----------
        symbol : str
            The symbol of the desired contract.
        Returns
        -------
        Future
            A Future object.
        Raises
        ------
        SymbolNotFound
            Raised when no contract named 'symbol' is found.
        """
        data = self._select_asset_by_symbol(self.futures_contracts, symbol)\
            .execute().fetchone()
        # If no data found, raise an exception
        if not data:
            raise SymbolNotFound(symbol=symbol)
        # If we find a contract, check whether it's been cached
        try:
            return self._future_cache[data['sid']]
        except KeyError:
            pass
        # Build the Future object from its parameters
        data = dict(data.items())
        _convert_asset_timestamp_fields(data)
        future = Future(**data)
        # Cache the Future object.
        self._future_cache[data['sid']] = future
        return future
    def lookup_future_chain(self, root_symbol, as_of_date):
        """ Return the futures chain for a given root symbol.
        Parameters
        ----------
        root_symbol : str
            Root symbol of the desired future.
        as_of_date : pd.Timestamp or pd.NaT
            Date at which the chain determination is rooted. I.e. the
            existing contract whose notice date/expiration date is first
            after this date is the primary contract, etc. If NaT is
            given, the chain is unbounded, and all contracts for this
            root symbol are returned.
        Returns
        -------
        list
            A list of Future objects, the chain for the given
            parameters.
        Raises
        ------
        RootSymbolNotFound
            Raised when a future chain could not be found for the given
            root symbol.
        """
        fc_cols = self.futures_contracts.c
        if as_of_date is pd.NaT:
            # If the as_of_date is NaT, get all contracts for this
            # root symbol.
            sids = list(map(
                itemgetter('sid'),
                sa.select((fc_cols.sid,)).where(
                    (fc_cols.root_symbol == root_symbol),
                ).order_by(
                    fc_cols.notice_date.asc(),
                ).execute().fetchall()))
        else:
            # Compare raw integer nanosecond values against the date columns.
            as_of_date = as_of_date.value
            sids = list(map(
                itemgetter('sid'),
                sa.select((fc_cols.sid,)).where(
                    (fc_cols.root_symbol == root_symbol) &
                    # Filter to contracts that are still valid. If both
                    # exist, use the one that comes first in time (i.e.
                    # the lower value). If either notice_date or
                    # expiration_date is NaT, use the other. If both are
                    # NaT, the contract cannot be included in any chain.
                    # NOTE(review): sa.func.min here is the two-argument
                    # *scalar* MIN -- presumably the backing db is SQLite;
                    # confirm before targeting other backends.
                    sa.case(
                        [
                            (
                                fc_cols.notice_date == pd.NaT.value,
                                fc_cols.expiration_date >= as_of_date
                            ),
                            (
                                fc_cols.expiration_date == pd.NaT.value,
                                fc_cols.notice_date >= as_of_date
                            )
                        ],
                        else_=(
                            sa.func.min(
                                fc_cols.notice_date,
                                fc_cols.expiration_date
                            ) >= as_of_date
                        )
                    )
                ).order_by(
                    # Sort using expiration_date if valid. If it's NaT,
                    # use notice_date instead.
                    sa.case(
                        [
                            (
                                fc_cols.expiration_date == pd.NaT.value,
                                fc_cols.notice_date
                            )
                        ],
                        else_=fc_cols.expiration_date
                    ).asc()
                ).execute().fetchall()
            ))
        if not sids:
            # Check if root symbol exists.
            count = sa.select((sa.func.count(fc_cols.sid),)).where(
                fc_cols.root_symbol == root_symbol,
            ).scalar()
            if count == 0:
                raise RootSymbolNotFound(root_symbol=root_symbol)
        return list(map(self._retrieve_futures_contract, sids))
    @property
    def sids(self):
        """Tuple of every sid known to the asset router table."""
        return tuple(map(
            itemgetter('sid'),
            sa.select((self.asset_router.c.sid,)).execute().fetchall(),
        ))
    def _lookup_generic_scalar(self,
                               asset_convertible,
                               as_of_date,
                               matches,
                               missing):
        """
        Convert asset_convertible to an asset.
        On success, append to matches.
        On failure, append to missing.
        """
        if isinstance(asset_convertible, Asset):
            matches.append(asset_convertible)
        elif isinstance(asset_convertible, Integral):
            try:
                result = self.retrieve_asset(int(asset_convertible))
            except SidNotFound:
                missing.append(asset_convertible)
                return None
            matches.append(result)
        elif isinstance(asset_convertible, string_types):
            try:
                matches.append(
                    self.lookup_symbol(asset_convertible, as_of_date)
                )
            except SymbolNotFound:
                missing.append(asset_convertible)
                return None
        else:
            raise NotAssetConvertible(
                "Input was %s, not AssetConvertible."
                % asset_convertible
            )
    def lookup_generic(self,
                       asset_convertible_or_iterable,
                       as_of_date):
        """
        Convert a AssetConvertible or iterable of AssetConvertibles into
        a list of Asset objects.
        This method exists primarily as a convenience for implementing
        user-facing APIs that can handle multiple kinds of input. It should
        not be used for internal code where we already know the expected types
        of our inputs.
        Returns a pair of objects, the first of which is the result of the
        conversion, and the second of which is a list containing any values
        that couldn't be resolved.
        """
        matches = []
        missing = []
        # Interpret input as scalar.
        if isinstance(asset_convertible_or_iterable, AssetConvertible):
            self._lookup_generic_scalar(
                asset_convertible=asset_convertible_or_iterable,
                as_of_date=as_of_date,
                matches=matches,
                missing=missing,
            )
            try:
                return matches[0], missing
            except IndexError:
                if hasattr(asset_convertible_or_iterable, '__int__'):
                    raise SidNotFound(sid=asset_convertible_or_iterable)
                else:
                    raise SymbolNotFound(symbol=asset_convertible_or_iterable)
        # Interpret input as iterable.
        try:
            iterator = iter(asset_convertible_or_iterable)
        except TypeError:
            raise NotAssetConvertible(
                "Input was not a AssetConvertible "
                "or iterable of AssetConvertible."
            )
        for obj in iterator:
            self._lookup_generic_scalar(obj, as_of_date, matches, missing)
        return matches, missing
    def map_identifier_index_to_sids(self, index, as_of_date):
        """
        This method is for use in sanitizing a user's DataFrame or Panel
        inputs.
        Takes the given index of identifiers, checks their types, builds assets
        if necessary, and returns a list of the sids that correspond to the
        input index.
        Parameters
        ----------
        index : Iterable
            An iterable containing ints, strings, or Assets
        as_of_date : pandas.Timestamp
            A date to be used to resolve any dual-mapped symbols
        Returns
        -------
        List
            A list of integer sids corresponding to the input index
        """
        # This method assumes that the type of the objects in the index is
        # consistent and can, therefore, be taken from the first identifier
        first_identifier = index[0]
        # Ensure that input is AssetConvertible (integer, string, or Asset)
        if not isinstance(first_identifier, AssetConvertible):
            raise MapAssetIdentifierIndexError(obj=first_identifier)
        # If sids are provided, no mapping is necessary
        if isinstance(first_identifier, Integral):
            return index
        # Look up all Assets for mapping
        matches = []
        missing = []
        for identifier in index:
            self._lookup_generic_scalar(identifier, as_of_date,
                                        matches, missing)
        # Handle missing assets
        if len(missing) > 0:
            warnings.warn("Missing assets for identifiers: %s" % missing)
        # Return a list of the sids of the found assets
        return [asset.sid for asset in matches]
    def _compute_asset_lifetimes(self):
        """
        Compute and cache a recarray of asset lifetimes.
        """
        equities_cols = self.equities.c
        buf = np.array(
            tuple(
                sa.select((
                    equities_cols.sid,
                    equities_cols.start_date,
                    equities_cols.end_date,
                )).execute(),
            ), dtype='<f8',  # use doubles so we get NaNs
        )
        lifetimes = np.recarray(
            buf=buf,
            shape=(len(buf),),
            dtype=[
                ('sid', '<f8'),
                ('start', '<f8'),
                ('end', '<f8')
            ],
        )
        start = lifetimes.start
        end = lifetimes.end
        start[np.isnan(start)] = 0  # convert missing starts to 0
        end[np.isnan(end)] = np.iinfo(int).max  # convert missing end to INTMAX
        # Cast the results back down to int.
        return lifetimes.astype([
            ('sid', '<i8'),
            ('start', '<i8'),
            ('end', '<i8'),
        ])
    def lifetimes(self, dates, include_start_date):
        """
        Compute a DataFrame representing asset lifetimes for the specified date
        range.
        Parameters
        ----------
        dates : pd.DatetimeIndex
            The dates for which to compute lifetimes.
        include_start_date : bool
            Whether or not to count the asset as alive on its start_date.
            This is useful in a backtesting context where `lifetimes` is being
            used to signify "do I have data for this asset as of the morning of
            this date?" For many financial metrics, (e.g. daily close), data
            isn't available for an asset until the end of the asset's first
            day.
        Returns
        -------
        lifetimes : pd.DataFrame
            A frame of dtype bool with `dates` as index and an Int64Index of
            assets as columns. The value at `lifetimes.loc[date, asset]` will
            be True iff `asset` existed on `date`. If `include_start_date` is
            False, then lifetimes.loc[date, asset] will be false when date ==
            asset.start_date.
        See Also
        --------
        numpy.putmask
        zipline.pipeline.engine.SimplePipelineEngine._compute_root_mask
        """
        # This is a less than ideal place to do this, because if someone adds
        # assets to the finder after we've touched lifetimes we won't have
        # those new assets available. Mutability is not my favorite
        # programming feature.
        if self._asset_lifetimes is None:
            self._asset_lifetimes = self._compute_asset_lifetimes()
        lifetimes = self._asset_lifetimes
        # Broadcast dates (as int64 ns) against the per-asset ranges.
        raw_dates = dates.asi8[:, None]
        if include_start_date:
            mask = lifetimes.start <= raw_dates
        else:
            mask = lifetimes.start < raw_dates
        mask &= (raw_dates <= lifetimes.end)
        return pd.DataFrame(mask, index=dates, columns=lifetimes.sid)
class AssetConvertible(with_metaclass(ABCMeta)):
    """
    ABC for types that are convertible to integer-representations of
    Assets.
    Includes Asset, six.string_types, and Integral
    """
    pass
# Register the concrete convertible types so isinstance checks accept them.
AssetConvertible.register(Integral)
AssetConvertible.register(Asset)
# Use six.string_types for Python2/3 compatibility
for _type in string_types:
    AssetConvertible.register(_type)
class NotAssetConvertible(ValueError):
    """Raised when a value cannot be interpreted as an Asset."""
    pass
class AssetFinderCachedEquities(AssetFinder):
    """
    An extension to AssetFinder that loads all equities from equities table
    into memory and overrides the methods that lookup_symbol uses to look up
    those equities.
    """
    def __init__(self, engine):
        super(AssetFinderCachedEquities, self).__init__(engine)
        # fuzzy_symbol -> [Equity, ...]
        self.fuzzy_symbol_hashed_equities = {}
        # (company_symbol, share_class_symbol) -> [Equity, ...]
        self.company_share_class_hashed_equities = {}
        self.hashed_equities = sa.select(self.equities.c).execute().fetchall()
        self._load_hashed_equities()
    def _load_hashed_equities(self):
        """
        Populates two maps - fuzzy symbol to list of equities having that
        fuzzy symbol and company symbol/share class symbol to list of
        equities having that combination of company symbol/share class symbol.
        """
        for equity in self.hashed_equities:
            company_symbol = equity['company_symbol']
            share_class_symbol = equity['share_class_symbol']
            fuzzy_symbol = equity['fuzzy_symbol']
            asset = self._convert_row_to_equity(equity)
            self.company_share_class_hashed_equities.setdefault(
                (company_symbol, share_class_symbol),
                []
            ).append(asset)
            self.fuzzy_symbol_hashed_equities.setdefault(
                fuzzy_symbol, []
            ).append(asset)
    def _convert_row_to_equity(self, equity):
        """
        Converts a SQLAlchemy equity row to an Equity object.
        """
        data = dict(equity.items())
        _convert_asset_timestamp_fields(data)
        return Equity(**data)
    def _get_fuzzy_candidates(self, fuzzy_symbol):
        """Return the cached equities matching `fuzzy_symbol` exactly."""
        # Idiom: dict.get with a default replaces the membership check.
        return self.fuzzy_symbol_hashed_equities.get(fuzzy_symbol, [])
    def _get_fuzzy_candidates_in_range(self, fuzzy_symbol, ad_value):
        """Cached fuzzy matches whose lifetime covers `ad_value`."""
        return [
            equity
            for equity in self._get_fuzzy_candidates(fuzzy_symbol)
            if equity.start_date.value <= ad_value <= equity.end_date.value
        ]
    def _get_split_candidates(self, company_symbol, share_class_symbol):
        """Return the cached equities for the company/share-class pair."""
        return self.company_share_class_hashed_equities.get(
            (company_symbol, share_class_symbol), [])
    def _get_split_candidates_in_range(self,
                                       company_symbol,
                                       share_class_symbol,
                                       ad_value):
        """Cached company/share-class matches alive at `ad_value`,
        ordered by latest start_date with end_date as tie-breaker."""
        candidates = [
            equity
            for equity in self._get_split_candidates(company_symbol,
                                                     share_class_symbol)
            if equity.start_date.value <= ad_value <= equity.end_date.value
        ]
        return sorted(
            candidates,
            key=lambda x: (x.start_date, x.end_date),
            reverse=True
        )
    def _resolve_no_matching_candidates(self,
                                        company_symbol,
                                        share_class_symbol,
                                        ad_value):
        """Fallback when nothing was alive at the as-of date: cached
        equities that started on or before `ad_value`, latest end_date
        first."""
        candidates = [
            equity
            for equity in self._get_split_candidates(company_symbol,
                                                     share_class_symbol)
            if equity.start_date.value <= ad_value
        ]
        return sorted(
            candidates,
            key=lambda x: x.end_date,
            reverse=True
        )
    def _get_best_candidate(self, candidates):
        """Candidates are already Equity objects; the first is the best."""
        return candidates[0]
    def _get_equities_from_candidates(self, candidates):
        """Candidates are already Equity objects; return them as-is."""
        return candidates
| |
#!/usr/bin/env python
# encoding: utf-8
"""
The :class:`~IPython.core.application.Application` object for the command
line :command:`ipython` program.
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import absolute_import
from __future__ import print_function
import logging
import os
import sys
import warnings
from traitlets.config.loader import Config
from traitlets.config.application import boolean_flag, catch_config_error, Application
from IPython.core import release
from IPython.core import usage
from IPython.core.completer import IPCompleter
from IPython.core.crashhandler import CrashHandler
from IPython.core.formatters import PlainTextFormatter
from IPython.core.history import HistoryManager
from IPython.core.application import (
ProfileDir, BaseIPythonApplication, base_flags, base_aliases
)
from IPython.core.magics import ScriptMagics
from IPython.core.shellapp import (
InteractiveShellApp, shell_flags, shell_aliases
)
from IPython.extensions.storemagic import StoreMagics
from .interactiveshell import TerminalInteractiveShell
from IPython.paths import get_ipython_dir
from traitlets import (
Bool, List, Dict, default, observe,
)
#-----------------------------------------------------------------------------
# Globals, utilities and helpers
#-----------------------------------------------------------------------------
# Usage examples surfaced through the application's `examples` attribute.
_examples = """
ipython --matplotlib # enable matplotlib integration
ipython --matplotlib=qt # enable matplotlib integration with qt4 backend
ipython --log-level=DEBUG # set logging to DEBUG
ipython --profile=foo # start with profile foo
ipython profile create foo # create profile foo w/ default config files
ipython help profile # show the help for the profile subcmd
ipython locate # print the path to the IPython directory
ipython locate profile foo # print the path to the directory for profile `foo`
"""
#-----------------------------------------------------------------------------
# Crash handler for this application
#-----------------------------------------------------------------------------
class IPAppCrashHandler(CrashHandler):
    """sys.excepthook for IPython itself, leaves a detailed report on disk."""
    def __init__(self, app):
        contact_name = release.author
        contact_email = release.author_email
        bug_tracker = 'https://github.com/ipython/ipython/issues'
        super(IPAppCrashHandler,self).__init__(
            app, contact_name, contact_email, bug_tracker
        )
    def make_report(self,traceback):
        """Return a string containing a crash report."""
        sec_sep = self.section_sep
        # Start with parent report
        report = [super(IPAppCrashHandler, self).make_report(traceback)]
        # Add interactive-specific info we may have
        rpt_add = report.append
        try:
            rpt_add(sec_sep+"History of session input:")
            for line in self.app.shell.user_ns['_ih']:
                rpt_add(line)
            rpt_add('\n*** Last line of input (may not be in above history):\n')
            rpt_add(self.app.shell._last_input_line+'\n')
        except Exception:
            # BUG FIX: narrowed from a bare ``except`` so KeyboardInterrupt/
            # SystemExit propagate. Otherwise best-effort: shell state may be
            # missing or broken during crash reporting; fall back to the
            # parent report alone.
            pass
        return ''.join(report)
#-----------------------------------------------------------------------------
# Aliases and Flags
#-----------------------------------------------------------------------------
flags = dict(base_flags)
flags.update(shell_flags)
frontend_flags = {}
addflag = lambda *args: frontend_flags.update(boolean_flag(*args))
addflag('autoedit-syntax', 'TerminalInteractiveShell.autoedit_syntax',
'Turn on auto editing of files with syntax errors.',
'Turn off auto editing of files with syntax errors.'
)
addflag('simple-prompt', 'TerminalInteractiveShell.simple_prompt',
"Force simple minimal prompt using `raw_input`",
"Use a rich interactive prompt with prompt_toolkit",
)
addflag('banner', 'TerminalIPythonApp.display_banner',
"Display a banner upon starting IPython.",
"Don't display a banner upon starting IPython."
)
addflag('confirm-exit', 'TerminalInteractiveShell.confirm_exit',
"""Set to confirm when you try to exit IPython with an EOF (Control-D
in Unix, Control-Z/Enter in Windows). By typing 'exit' or 'quit',
you can force a direct exit without any confirmation.""",
"Don't prompt the user when exiting."
)
addflag('term-title', 'TerminalInteractiveShell.term_title',
"Enable auto setting the terminal title.",
"Disable auto setting the terminal title."
)
classic_config = Config()
classic_config.InteractiveShell.cache_size = 0
classic_config.PlainTextFormatter.pprint = False
classic_config.TerminalInteractiveShell.prompts_class='IPython.terminal.prompts.ClassicPrompts'
classic_config.InteractiveShell.separate_in = ''
classic_config.InteractiveShell.separate_out = ''
classic_config.InteractiveShell.separate_out2 = ''
classic_config.InteractiveShell.colors = 'NoColor'
classic_config.InteractiveShell.xmode = 'Plain'
frontend_flags['classic']=(
classic_config,
"Gives IPython a similar feel to the classic Python prompt."
)
# # log doesn't make so much sense this way anymore
# paa('--log','-l',
# action='store_true', dest='InteractiveShell.logstart',
# help="Start logging to the default log file (./ipython_log.py).")
#
# # quick is harder to implement
frontend_flags['quick']=(
{'TerminalIPythonApp' : {'quick' : True}},
"Enable quick startup with no config files."
)
frontend_flags['i'] = (
{'TerminalIPythonApp' : {'force_interact' : True}},
"""If running code from the command line, become interactive afterwards.
It is often useful to follow this with `--` to treat remaining flags as
script arguments.
"""
)
flags.update(frontend_flags)
aliases = dict(base_aliases)
aliases.update(shell_aliases)
#-----------------------------------------------------------------------------
# Main classes and functions
#-----------------------------------------------------------------------------
class LocateIPythonApp(BaseIPythonApplication):
    """Subcommand application that prints the path to the IPython dir."""
    description = """print the path to the IPython dir"""
    # `ipython locate profile` delegates to ProfileLocate to print a
    # specific profile directory instead of the top-level dir.
    subcommands = Dict(dict(
        profile=('IPython.core.profileapp.ProfileLocate',
            "print the path to an IPython profile directory",
        ),
    ))

    def start(self):
        # Hand control to a requested subcommand if there is one;
        # otherwise just print the top-level IPython directory.
        if self.subapp is not None:
            return self.subapp.start()
        else:
            print(self.ipython_dir)
class TerminalIPythonApp(BaseIPythonApplication, InteractiveShellApp):
    """The application object behind the ``ipython`` terminal entry point.

    Combines the generic IPython application machinery with the shell
    mixin that handles code/file/module execution at startup.
    """
    name = u'ipython'
    description = usage.cl_usage
    crash_handler_class = IPAppCrashHandler
    examples = _examples

    flags = Dict(flags)
    aliases = Dict(aliases)
    classes = List()

    @default('classes')
    def _classes_default(self):
        """This has to be in a method, for TerminalIPythonApp to be available."""
        return [
            InteractiveShellApp, # ShellApp comes before TerminalApp, because
            self.__class__, # it will also affect subclasses (e.g. QtConsole)
            TerminalInteractiveShell,
            HistoryManager,
            ProfileDir,
            PlainTextFormatter,
            IPCompleter,
            ScriptMagics,
            StoreMagics,
        ]

    # Subcommands that moved out to separate Jupyter projects; kept here
    # so that e.g. `ipython notebook` still resolves, with a deprecation
    # notice in the help text.
    deprecated_subcommands = dict(
        qtconsole=('qtconsole.qtconsoleapp.JupyterQtConsoleApp',
            """DEPRECATED, Will be removed in IPython 6.0 : Launch the Jupyter Qt Console."""
        ),
        notebook=('notebook.notebookapp.NotebookApp',
            """DEPRECATED, Will be removed in IPython 6.0 : Launch the Jupyter HTML Notebook Server."""
        ),
        console=('jupyter_console.app.ZMQTerminalIPythonApp',
            """DEPRECATED, Will be removed in IPython 6.0 : Launch the Jupyter terminal-based Console."""
        ),
        nbconvert=('nbconvert.nbconvertapp.NbConvertApp',
            "DEPRECATED, Will be removed in IPython 6.0 : Convert notebooks to/from other formats."
        ),
        trust=('nbformat.sign.TrustNotebookApp',
            "DEPRECATED, Will be removed in IPython 6.0 : Sign notebooks to trust their potentially unsafe contents at load."
        ),
        kernelspec=('jupyter_client.kernelspecapp.KernelSpecApp',
            "DEPRECATED, Will be removed in IPython 6.0 : Manage Jupyter kernel specifications."
        ),
    )
    subcommands = dict(
        profile = ("IPython.core.profileapp.ProfileApp",
            "Create and manage IPython profiles."
        ),
        kernel = ("ipykernel.kernelapp.IPKernelApp",
            "Start a kernel without an attached frontend."
        ),
        locate=('IPython.terminal.ipapp.LocateIPythonApp',
            LocateIPythonApp.description
        ),
        history=('IPython.core.historyapp.HistoryApp',
            "Manage the IPython history database."
        ),
    )
    deprecated_subcommands['install-nbextension'] = (
        "notebook.nbextensions.InstallNBExtensionApp",
        "DEPRECATED, Will be removed in IPython 6.0 : Install Jupyter notebook extension files"
    )
    subcommands.update(deprecated_subcommands)

    # *do* autocreate requested profile, but don't create the config file.
    auto_create=Bool(True)
    # configurables
    quick = Bool(False,
        help="""Start IPython quickly by skipping the loading of config files."""
    ).tag(config=True)

    @observe('quick')
    def _quick_changed(self, change):
        # --quick: replace config-file loading with a no-op.
        if change['new']:
            self.load_config_file = lambda *a, **kw: None

    display_banner = Bool(True,
        help="Whether to display a banner upon starting IPython."
    ).tag(config=True)

    # if there is code of files to run from the cmd line, don't interact
    # unless the --i flag (App.force_interact) is true.
    force_interact = Bool(False,
        help="""If a command or file is given via the command-line,
e.g. 'ipython foo.py', start an interactive shell after executing the
file or command."""
    ).tag(config=True)

    @observe('force_interact')
    def _force_interact_changed(self, change):
        if change['new']:
            self.interact = True

    @observe('file_to_run', 'code_to_run', 'module_to_run')
    def _file_to_run_changed(self, change):
        # Running a script/module/snippet implies non-interactive mode
        # unless --i (force_interact) was given.
        new = change['new']
        if new:
            self.something_to_run = True
        if new and not self.force_interact:
            self.interact = False

    # internal, not-configurable
    something_to_run=Bool(False)

    def parse_command_line(self, argv=None):
        """override to allow old '-pylab' flag with deprecation warning"""
        argv = sys.argv[1:] if argv is None else argv

        if '-pylab' in argv:
            # deprecated `-pylab` given,
            # warn and transform into current syntax
            argv = argv[:] # copy, don't clobber
            idx = argv.index('-pylab')
            warnings.warn("`-pylab` flag has been deprecated.\n"
            " Use `--matplotlib <backend>` and import pylab manually.")
            argv[idx] = '--pylab'

        return super(TerminalIPythonApp, self).parse_command_line(argv)

    @catch_config_error
    def initialize(self, argv=None):
        """Do actions after construct, but before starting the app."""
        super(TerminalIPythonApp, self).initialize(argv)
        if self.subapp is not None:
            # don't bother initializing further, starting subapp
            return
        # print self.extra_args
        if self.extra_args and not self.something_to_run:
            self.file_to_run = self.extra_args[0]
        self.init_path()
        # create the shell
        self.init_shell()
        # and draw the banner
        self.init_banner()
        # Now a variety of things that happen after the banner is printed.
        self.init_gui_pylab()
        self.init_extensions()
        self.init_code()

    def init_shell(self):
        """initialize the InteractiveShell instance"""
        # Create an InteractiveShell instance.
        # shell.display_banner should always be False for the terminal
        # based app, because we call shell.show_banner() by hand below
        # so the banner shows *before* all extension loading stuff.
        self.shell = TerminalInteractiveShell.instance(parent=self,
                        profile_dir=self.profile_dir,
                        ipython_dir=self.ipython_dir, user_ns=self.user_ns)
        self.shell.configurables.append(self)

    def init_banner(self):
        """optionally display the banner"""
        if self.display_banner and self.interact:
            self.shell.show_banner()
        # Make sure there is a space below the banner.
        if self.log_level <= logging.INFO: print()

    def _pylab_changed(self, name, old, new):
        """Replace --pylab='inline' with --pylab='auto'"""
        if new == 'inline':
            warnings.warn("'inline' not available as pylab backend, "
                "using 'auto' instead.")
            self.pylab = 'auto'

    def start(self):
        """Run the requested subapp, or the shell's interactive mainloop."""
        if self.subapp is not None:
            return self.subapp.start()
        # perform any prexec steps:
        if self.interact:
            self.log.debug("Starting IPython's mainloop...")
            self.shell.mainloop()
        else:
            self.log.debug("IPython not interactive...")
def load_default_config(ipython_dir=None):
    """Load the default config file from the default ipython_dir.

    This is useful for embedded shells.
    """
    base_dir = get_ipython_dir() if ipython_dir is None else ipython_dir
    profile_path = os.path.join(base_dir, 'profile_default')

    merged = Config()
    for partial in Application._load_config_files("ipython_config",
                                                  path=profile_path):
        merged.update(partial)
    return merged
# Alias used by setuptools entry points and by code embedding IPython.
launch_new_instance = TerminalIPythonApp.launch_instance

if __name__ == '__main__':
    launch_new_instance()
| |
# Copyright 2013 Big Switch Networks, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Sumit Naiksatam, sumitnaiksatam@gmail.com, Big Switch Networks, Inc.
from oslo.config import cfg
import sqlalchemy as sa
from sqlalchemy.ext.orderinglist import ordering_list
from sqlalchemy import orm
from sqlalchemy.orm import exc
from neutron.db import common_db_mixin as base_db
from neutron.db import model_base
from neutron.db import models_v2
from neutron.extensions import firewall
from neutron import manager
from neutron.openstack.common import log as logging
from neutron.openstack.common import uuidutils
from neutron.plugins.common import constants as const
LOG = logging.getLogger(__name__)
class FirewallRule(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant):
    """Represents a Firewall rule."""
    __tablename__ = 'firewall_rules'
    name = sa.Column(sa.String(255))
    description = sa.Column(sa.String(1024))
    # Nullable: a rule may exist without being attached to any policy.
    firewall_policy_id = sa.Column(sa.String(36),
                                   sa.ForeignKey('firewall_policies.id'),
                                   nullable=True)
    shared = sa.Column(sa.Boolean)
    protocol = sa.Column(sa.String(40))
    ip_version = sa.Column(sa.Integer, nullable=False)
    # 46 chars fits an IPv6 address/CIDR in text form.
    source_ip_address = sa.Column(sa.String(46))
    destination_ip_address = sa.Column(sa.String(46))
    # Port ranges are stored as separate min/max columns; the API-level
    # 'min:max' string form is rebuilt by the DB mixin.
    source_port_range_min = sa.Column(sa.Integer)
    source_port_range_max = sa.Column(sa.Integer)
    destination_port_range_min = sa.Column(sa.Integer)
    destination_port_range_max = sa.Column(sa.Integer)
    action = sa.Column(sa.Enum('allow', 'deny', name='firewallrules_action'))
    enabled = sa.Column(sa.Boolean)
    # 1-based position within the owning policy (managed by the
    # ordering_list collection on FirewallPolicy.firewall_rules).
    position = sa.Column(sa.Integer)
class Firewall(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant):
    """Represents a Firewall resource."""
    __tablename__ = 'firewalls'
    name = sa.Column(sa.String(255))
    description = sa.Column(sa.String(1024))
    shared = sa.Column(sa.Boolean)
    admin_state_up = sa.Column(sa.Boolean)
    status = sa.Column(sa.String(16))
    firewall_policy_id = sa.Column(sa.String(36),
                                   sa.ForeignKey('firewall_policies.id'),
                                   nullable=True)
    # At most one firewall per router (unique=True); removed along with
    # the router via ON DELETE CASCADE.
    router_id = sa.Column(sa.String(36),
                          sa.ForeignKey('routers.id',
                                        ondelete='CASCADE'),
                          nullable=True,
                          unique=True)
class FirewallPolicy(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant):
    """Represents a Firewall Policy resource."""
    __tablename__ = 'firewall_policies'
    name = sa.Column(sa.String(255))
    description = sa.Column(sa.String(1024))
    shared = sa.Column(sa.Boolean)
    # Rules are kept ordered by their 'position' column (1-based);
    # ordering_list renumbers positions automatically on insert/remove.
    firewall_rules = orm.relationship(
        FirewallRule,
        backref=orm.backref('firewall_policies', cascade='all, delete'),
        order_by='FirewallRule.position',
        collection_class=ordering_list('position', count_from=1))
    audited = sa.Column(sa.Boolean)
    firewalls = orm.relationship(Firewall, backref='firewall_policies')
class Firewall_db_mixin(firewall.FirewallPluginBase, base_db.CommonDbMixin):
    """Mixin class for Firewall DB implementation."""

    @property
    def _core_plugin(self):
        # Convenience accessor for the core Neutron plugin instance.
        return manager.NeutronManager.get_plugin()
def _get_firewall(self, context, id):
    """Fetch a Firewall DB object by id or raise FirewallNotFound."""
    try:
        return self._get_by_id(context, Firewall, id)
    except exc.NoResultFound:
        raise firewall.FirewallNotFound(firewall_id=id)
def _get_firewall_policy(self, context, id):
    """Fetch a FirewallPolicy DB object or raise FirewallPolicyNotFound."""
    try:
        return self._get_by_id(context, FirewallPolicy, id)
    except exc.NoResultFound:
        raise firewall.FirewallPolicyNotFound(firewall_policy_id=id)
def _get_firewall_rule(self, context, id):
    """Fetch a FirewallRule DB object or raise FirewallRuleNotFound."""
    try:
        return self._get_by_id(context, FirewallRule, id)
    except exc.NoResultFound:
        raise firewall.FirewallRuleNotFound(firewall_rule_id=id)
def _make_firewall_dict(self, fw, fields=None):
    """Build an API response dict for a Firewall DB object."""
    keys = ('id', 'tenant_id', 'name', 'description', 'shared',
            'admin_state_up', 'status', 'firewall_policy_id')
    res = dict((key, fw[key]) for key in keys)
    return self._fields(res, fields)
def _make_firewall_policy_dict(self, firewall_policy, fields=None):
    """Build an API response dict for a FirewallPolicy DB object."""
    keys = ('id', 'tenant_id', 'name', 'description', 'shared', 'audited')
    res = dict((key, firewall_policy[key]) for key in keys)
    # Flatten the related objects down to lists of ids.
    res['firewall_rules'] = [rule['id']
                             for rule in firewall_policy['firewall_rules']]
    res['firewall_list'] = [fw['id']
                            for fw in firewall_policy['firewalls']]
    return self._fields(res, fields)
def _make_firewall_rule_dict(self, firewall_rule, fields=None):
    """Build an API response dict for a FirewallRule DB object.

    The min/max port columns are collapsed back into 'min:max' range
    strings, and 'position' is reported only for rules that are bound
    to a policy.
    """
    keys = ('id', 'tenant_id', 'name', 'description',
            'firewall_policy_id', 'shared', 'protocol', 'ip_version',
            'source_ip_address', 'destination_ip_address', 'action',
            'enabled')
    res = dict((key, firewall_rule[key]) for key in keys)
    res['source_port'] = self._get_port_range_from_min_max_ports(
        firewall_rule['source_port_range_min'],
        firewall_rule['source_port_range_max'])
    res['destination_port'] = self._get_port_range_from_min_max_ports(
        firewall_rule['destination_port_range_min'],
        firewall_rule['destination_port_range_max'])
    # Position is only meaningful when the rule belongs to a policy.
    if firewall_rule['firewall_policy_id']:
        res['position'] = firewall_rule['position']
    else:
        res['position'] = None
    return self._fields(res, fields)
def _set_rules_for_policy(self, context, firewall_policy_db, rule_id_list):
    """Replace the ordered rule list of a policy with *rule_id_list*.

    The whole list is validated before anything changes: every id must
    exist and a rule may not already belong to a different policy.  An
    empty list clears the policy.  Any change resets 'audited'.
    """
    fwp_db = firewall_policy_db
    with context.session.begin(subtransactions=True):
        if not rule_id_list:
            fwp_db.firewall_rules = []
            fwp_db.audited = False
            return
        # We will first check if the new list of rules is valid
        filters = {'id': [r_id for r_id in rule_id_list]}
        rules_in_db = self._get_collection_query(context, FirewallRule,
                                                 filters=filters)
        rules_dict = dict((fwr_db['id'], fwr_db) for fwr_db in rules_in_db)
        for fwrule_id in rule_id_list:
            if fwrule_id not in rules_dict:
                # If we find an invalid rule in the list we
                # do not perform the update since this breaks
                # the integrity of this list.
                raise firewall.FirewallRuleNotFound(firewall_rule_id=
                                                    fwrule_id)
            elif rules_dict[fwrule_id]['firewall_policy_id']:
                if (rules_dict[fwrule_id]['firewall_policy_id'] !=
                        fwp_db['id']):
                    raise firewall.FirewallRuleInUse(
                        firewall_rule_id=fwrule_id)
        # New list of rules is valid so we will first reset the existing
        # list and then add each rule in order.
        # Note that the list could be empty in which case we interpret
        # it as clearing existing rules.
        fwp_db.firewall_rules = []
        for fwrule_id in rule_id_list:
            fwp_db.firewall_rules.append(rules_dict[fwrule_id])
        # ordering_list renumbers the 'position' column to match the
        # new list order.
        fwp_db.firewall_rules.reorder()
        fwp_db.audited = False
def _process_rule_for_policy(self, context, firewall_policy_id,
                             firewall_rule_db, position):
    """Insert or remove a single rule in a policy's ordered rule list.

    A truthy *position* inserts the rule at that (1-based) position; a
    falsy one removes it.  The policy row is locked for update so
    concurrent reorderings cannot interleave.  Returns the updated
    policy as an API dict.
    """
    with context.session.begin(subtransactions=True):
        fwp_query = context.session.query(
            FirewallPolicy).with_lockmode('update')
        fwp_db = fwp_query.filter_by(id=firewall_policy_id).one()
        if position:
            # Note that although position numbering starts at 1,
            # internal ordering of the list starts at 0, so we compensate.
            fwp_db.firewall_rules.insert(position - 1, firewall_rule_db)
        else:
            fwp_db.firewall_rules.remove(firewall_rule_db)
        fwp_db.firewall_rules.reorder()
        # Any rule change invalidates a previous audit.
        fwp_db.audited = False
    return self._make_firewall_policy_dict(fwp_db)
def _get_min_max_ports_from_range(self, port_range):
    """Split a 'min:max' port-range string into [min, max] ints.

    A bare 'port' string yields [port, port]; a falsy input yields
    [None, None].
    """
    if not port_range:
        return [None, None]
    lo, _sep, hi = port_range.partition(":")
    if not hi:
        hi = lo
    return [int(lo), int(hi)]
def _get_port_range_from_min_max_ports(self, min_port, max_port):
    """Collapse min/max port ints back into a 'min:max' API string.

    Returns None for a falsy min_port, and a bare 'port' string when
    both ends of the range coincide.
    """
    if not min_port:
        return None
    if min_port == max_port:
        return str(min_port)
    return '%d:%d' % (min_port, max_port)
def _validate_fwr_protocol_parameters(self, fwr):
    """Reject port specifications for protocols that carry no ports.

    Only TCP and UDP rules may set source/destination ports; for any
    other protocol (e.g. ICMP) a port value is invalid.
    """
    protocol = fwr['protocol']
    if protocol not in (const.TCP, const.UDP):
        if fwr['source_port'] or fwr['destination_port']:
            raise firewall.FirewallRuleInvalidICMPParameter(
                param="Source, destination port")
def create_firewall(self, context, firewall):
    """Create a firewall row from an API request body.

    The initial status depends on the deployment mode: CREATED for
    distributed routers, PENDING_CREATE otherwise.
    """
    LOG.debug(_("create_firewall() called"))
    fw = firewall['firewall']
    tenant_id = self._get_tenant_id_for_create(context, fw)
    # distributed routers may required a more complex state machine;
    # the introduction of a new 'CREATED' state allows this, whilst
    # keeping a backward compatible behavior of the logical resource.
    status = (const.CREATED
              if cfg.CONF.router_distributed else const.PENDING_CREATE)
    with context.session.begin(subtransactions=True):
        firewall_db = Firewall(id=uuidutils.generate_uuid(),
                               tenant_id=tenant_id,
                               name=fw['name'],
                               description=fw['description'],
                               firewall_policy_id=
                               fw['firewall_policy_id'],
                               admin_state_up=fw['admin_state_up'],
                               status=status)
        context.session.add(firewall_db)
    return self._make_firewall_dict(firewall_db)
def update_firewall(self, context, id, firewall):
    """Update a firewall row with the fields from an API request body.

    Raises FirewallNotFound if no row with *id* exists.
    """
    # BUG FIX: the ``firewall`` request-body argument shadows the
    # ``neutron.extensions.firewall`` module imported at the top of this
    # file, so ``firewall.FirewallNotFound`` would be an attribute
    # lookup on the request dict (AttributeError) instead of raising the
    # intended exception.  Re-import the module under a distinct name.
    from neutron.extensions import firewall as fw_ext
    LOG.debug(_("update_firewall() called"))
    fw = firewall['firewall']
    with context.session.begin(subtransactions=True):
        count = context.session.query(Firewall).filter_by(id=id).update(fw)
        if not count:
            raise fw_ext.FirewallNotFound(firewall_id=id)
    return self.get_firewall(context, id)
def delete_firewall(self, context, id):
    """Delete a firewall row (no status checking at this layer)."""
    LOG.debug(_("delete_firewall() called"))
    with context.session.begin(subtransactions=True):
        # Lock the row so a concurrent update cannot race the delete.
        fw_query = context.session.query(
            Firewall).with_lockmode('update')
        firewall_db = fw_query.filter_by(id=id).one()
        # Note: Plugin should ensure that it's okay to delete if the
        # firewall is active
        context.session.delete(firewall_db)
def get_firewall(self, context, id, fields=None):
    """Return the firewall with the given id as an API dict."""
    LOG.debug(_("get_firewall() called"))
    firewall_db = self._get_firewall(context, id)
    return self._make_firewall_dict(firewall_db, fields)
def get_firewalls(self, context, filters=None, fields=None):
    """List firewalls visible in *context*, optionally filtered."""
    LOG.debug(_("get_firewalls() called"))
    return self._get_collection(
        context, Firewall, self._make_firewall_dict,
        filters=filters, fields=fields)
def get_firewalls_count(self, context, filters=None):
    """Count firewalls matching *filters*."""
    LOG.debug(_("get_firewalls_count() called"))
    return self._get_collection_count(
        context, Firewall, filters=filters)
def create_firewall_policy(self, context, firewall_policy):
    """Create a firewall policy together with its ordered rule list."""
    LOG.debug(_("create_firewall_policy() called"))
    fwp = firewall_policy['firewall_policy']
    tenant_id = self._get_tenant_id_for_create(context, fwp)
    with context.session.begin(subtransactions=True):
        fwp_db = FirewallPolicy(id=uuidutils.generate_uuid(),
                                tenant_id=tenant_id,
                                name=fwp['name'],
                                description=fwp['description'],
                                shared=fwp['shared'])
        context.session.add(fwp_db)
        self._set_rules_for_policy(context, fwp_db,
                                   fwp['firewall_rules'])
        # _set_rules_for_policy clears 'audited'; restore the value
        # requested by the caller afterwards.
        fwp_db.audited = fwp['audited']
    return self._make_firewall_policy_dict(fwp_db)
def update_firewall_policy(self, context, id, firewall_policy):
    """Update a firewall policy, replacing its rule list if requested."""
    LOG.debug(_("update_firewall_policy() called"))
    fwp = firewall_policy['firewall_policy']
    with context.session.begin(subtransactions=True):
        fwp_db = self._get_firewall_policy(context, id)
        if 'firewall_rules' in fwp:
            self._set_rules_for_policy(context, fwp_db,
                                       fwp['firewall_rules'])
            del fwp['firewall_rules']
        # NOTE(review): this forces 'audited' to False both when the
        # caller omitted it AND when the caller passed a truthy value,
        # so an update can apparently never set audited=True -- confirm
        # that this is the intended audit semantics.
        if 'audited' not in fwp or fwp['audited']:
            fwp['audited'] = False
        fwp_db.update(fwp)
    return self._make_firewall_policy_dict(fwp_db)
def delete_firewall_policy(self, context, id):
    """Delete a policy unless some firewall still references it."""
    LOG.debug(_("delete_firewall_policy() called"))
    with context.session.begin(subtransactions=True):
        fwp = self._get_firewall_policy(context, id)
        # Ensure that the firewall_policy is not
        # being used
        qry = context.session.query(Firewall)
        if qry.filter_by(firewall_policy_id=id).first():
            raise firewall.FirewallPolicyInUse(firewall_policy_id=id)
        else:
            context.session.delete(fwp)
def get_firewall_policy(self, context, id, fields=None):
    """Return the firewall policy with the given id as an API dict."""
    LOG.debug(_("get_firewall_policy() called"))
    policy_db = self._get_firewall_policy(context, id)
    return self._make_firewall_policy_dict(policy_db, fields)
def get_firewall_policies(self, context, filters=None, fields=None):
    """List firewall policies visible in *context*, optionally filtered."""
    LOG.debug(_("get_firewall_policies() called"))
    return self._get_collection(
        context, FirewallPolicy, self._make_firewall_policy_dict,
        filters=filters, fields=fields)
def get_firewalls_policies_count(self, context, filters=None):
    """Count firewall policies matching *filters*.

    NOTE(review): the method name ('firewalls_policies') does not match
    the log message's 'firewall_policies' spelling; confirm which name
    callers actually use before considering a rename.
    """
    LOG.debug(_("get_firewall_policies_count() called"))
    return self._get_collection_count(context, FirewallPolicy,
                                      filters=filters)
def create_firewall_rule(self, context, firewall_rule):
    """Create a firewall rule, splitting API port ranges into columns."""
    LOG.debug(_("create_firewall_rule() called"))
    fwr = firewall_rule['firewall_rule']
    # Ports are only valid for TCP/UDP protocols.
    self._validate_fwr_protocol_parameters(fwr)
    tenant_id = self._get_tenant_id_for_create(context, fwr)
    src_port_min, src_port_max = self._get_min_max_ports_from_range(
        fwr['source_port'])
    dst_port_min, dst_port_max = self._get_min_max_ports_from_range(
        fwr['destination_port'])
    with context.session.begin(subtransactions=True):
        fwr_db = FirewallRule(id=uuidutils.generate_uuid(),
                              tenant_id=tenant_id,
                              name=fwr['name'],
                              description=fwr['description'],
                              shared=fwr['shared'],
                              protocol=fwr['protocol'],
                              ip_version=fwr['ip_version'],
                              source_ip_address=fwr['source_ip_address'],
                              destination_ip_address=
                              fwr['destination_ip_address'],
                              source_port_range_min=src_port_min,
                              source_port_range_max=src_port_max,
                              destination_port_range_min=dst_port_min,
                              destination_port_range_max=dst_port_max,
                              action=fwr['action'],
                              enabled=fwr['enabled'])
        context.session.add(fwr_db)
    return self._make_firewall_rule_dict(fwr_db)
def update_firewall_rule(self, context, id, firewall_rule):
    """Update a rule, translating API port ranges to min/max columns.

    If the rule belongs to a policy, that policy's 'audited' flag is
    reset because its effective contents changed.
    """
    LOG.debug(_("update_firewall_rule() called"))
    fwr = firewall_rule['firewall_rule']
    if 'source_port' in fwr:
        src_port_min, src_port_max = self._get_min_max_ports_from_range(
            fwr['source_port'])
        fwr['source_port_range_min'] = src_port_min
        fwr['source_port_range_max'] = src_port_max
        del fwr['source_port']
    if 'destination_port' in fwr:
        dst_port_min, dst_port_max = self._get_min_max_ports_from_range(
            fwr['destination_port'])
        fwr['destination_port_range_min'] = dst_port_min
        fwr['destination_port_range_max'] = dst_port_max
        del fwr['destination_port']
    with context.session.begin(subtransactions=True):
        fwr_db = self._get_firewall_rule(context, id)
        fwr_db.update(fwr)
        if fwr_db.firewall_policy_id:
            fwp_db = self._get_firewall_policy(context,
                                               fwr_db.firewall_policy_id)
            fwp_db.audited = False
    return self._make_firewall_rule_dict(fwr_db)
def delete_firewall_rule(self, context, id):
    """Delete a rule; rules still attached to a policy cannot be removed."""
    LOG.debug(_("delete_firewall_rule() called"))
    with context.session.begin(subtransactions=True):
        fwr = self._get_firewall_rule(context, id)
        if fwr.firewall_policy_id:
            raise firewall.FirewallRuleInUse(firewall_rule_id=id)
        context.session.delete(fwr)
def get_firewall_rule(self, context, id, fields=None):
    """Return the firewall rule with the given id as an API dict."""
    LOG.debug(_("get_firewall_rule() called"))
    rule_db = self._get_firewall_rule(context, id)
    return self._make_firewall_rule_dict(rule_db, fields)
def get_firewall_rules(self, context, filters=None, fields=None):
    """List firewall rules visible in *context*, optionally filtered."""
    LOG.debug(_("get_firewall_rules() called"))
    return self._get_collection(
        context, FirewallRule, self._make_firewall_rule_dict,
        filters=filters, fields=fields)
def get_firewalls_rules_count(self, context, filters=None):
    """Count firewall rules matching *filters*.

    NOTE(review): the method name ('firewalls_rules') does not match
    the log message's 'firewall_rules' spelling; confirm which name
    callers actually use before considering a rename.
    """
    LOG.debug(_("get_firewall_rules_count() called"))
    return self._get_collection_count(context, FirewallRule,
                                      filters=filters)
def _validate_insert_remove_rule_request(self, id, rule_info):
    """Ensure an insert/remove request body names a firewall_rule_id."""
    if not rule_info or 'firewall_rule_id' not in rule_info:
        raise firewall.FirewallRuleInfoMissing()
def insert_rule(self, context, id, rule_info):
    """Insert an existing, unattached rule into policy *id*.

    *rule_info* must name a 'firewall_rule_id'; optional
    'insert_before'/'insert_after' identify a reference rule already in
    the policy ('insert_before' wins when both are given).  Without a
    reference the rule goes to the top (position 1).
    """
    LOG.debug(_("insert_rule() called"))
    self._validate_insert_remove_rule_request(id, rule_info)
    firewall_rule_id = rule_info['firewall_rule_id']
    insert_before = True
    ref_firewall_rule_id = None
    if not firewall_rule_id:
        raise firewall.FirewallRuleNotFound(firewall_rule_id=None)
    if 'insert_before' in rule_info:
        ref_firewall_rule_id = rule_info['insert_before']
    if not ref_firewall_rule_id and 'insert_after' in rule_info:
        # If insert_before is set, we will ignore insert_after.
        ref_firewall_rule_id = rule_info['insert_after']
        insert_before = False
    with context.session.begin(subtransactions=True):
        fwr_db = self._get_firewall_rule(context, firewall_rule_id)
        # A rule can belong to at most one policy at a time.
        if fwr_db.firewall_policy_id:
            raise firewall.FirewallRuleInUse(firewall_rule_id=fwr_db['id'])
        if ref_firewall_rule_id:
            # If reference_firewall_rule_id is set, the new rule
            # is inserted depending on the value of insert_before.
            # If insert_before is set, the new rule is inserted before
            # reference_firewall_rule_id, and if it is not set the new
            # rule is inserted after reference_firewall_rule_id.
            ref_fwr_db = self._get_firewall_rule(
                context, ref_firewall_rule_id)
            if ref_fwr_db.firewall_policy_id != id:
                raise firewall.FirewallRuleNotAssociatedWithPolicy(
                    firewall_rule_id=ref_fwr_db['id'],
                    firewall_policy_id=id)
            if insert_before:
                position = ref_fwr_db.position
            else:
                position = ref_fwr_db.position + 1
        else:
            # If reference_firewall_rule_id is not set, it is assumed
            # that the new rule needs to be inserted at the top.
            # insert_before field is ignored.
            # So default insertion is always at the top.
            # Also note that position numbering starts at 1.
            position = 1
        return self._process_rule_for_policy(context, id, fwr_db,
                                             position)
def remove_rule(self, context, id, rule_info):
    """Remove the rule named in *rule_info* from policy *id*.

    Raises if the rule does not exist or is not bound to this policy.
    """
    LOG.debug(_("remove_rule() called"))
    self._validate_insert_remove_rule_request(id, rule_info)
    firewall_rule_id = rule_info['firewall_rule_id']
    if not firewall_rule_id:
        raise firewall.FirewallRuleNotFound(firewall_rule_id=None)
    with context.session.begin(subtransactions=True):
        fwr_db = self._get_firewall_rule(context, firewall_rule_id)
        if fwr_db.firewall_policy_id != id:
            raise firewall.FirewallRuleNotAssociatedWithPolicy(
                firewall_rule_id=fwr_db['id'],
                firewall_policy_id=id)
        # A falsy position means "remove" to _process_rule_for_policy.
        return self._process_rule_for_policy(context, id, fwr_db, None)
| |
from __future__ import unicode_literals
import logging
import os
import shutil
import sys
import tempfile
from rbtools.utils.process import die
CONFIG_FILE = '.reviewboardrc'
tempfiles = []
tempdirs = []
builtin = {}
def is_exe_in_path(name):
    """Check whether an executable is in the user's search path.

    This expects a name without any system-specific executable
    extension: it will append the proper extension as necessary (use
    "myapp", not "myapp.exe").  Returns True if the app is found in
    $PATH, False otherwise.

    Taken from djblets.util.filesystem to avoid an extra dependency.
    """
    if sys.platform == 'win32' and not name.endswith('.exe'):
        name += '.exe'

    return any(os.path.exists(os.path.join(directory, name))
               for directory in os.environ['PATH'].split(os.pathsep))
def cleanup_tempfiles():
    """Remove all temporary files and directories registered by
    make_tempfile()/make_tempdir().

    Failures are ignored: cleanup is best-effort and the file may
    already be gone.
    """
    for tmpfile in tempfiles:
        try:
            os.unlink(tmpfile)
        # BUG FIX: the original bare ``except:`` also swallowed
        # KeyboardInterrupt/SystemExit; only filesystem errors should
        # be ignored here.
        except OSError:
            pass

    for tmpdir in tempdirs:
        shutil.rmtree(tmpdir, ignore_errors=True)
def _load_python_file(filename, config):
    """Execute *filename* as Python code, populating and returning *config*.

    NOTE: .reviewboardrc files are executed as arbitrary Python; only
    files controlled by the user should ever be loaded this way.
    """
    with open(filename) as f:
        exec(compile(f.read(), filename, 'exec'), config)
        return config
def make_tempfile(content=None):
    """Create a temporary file and return its path.

    If *content* is given it is written to the file; text content is
    encoded as UTF-8 first, since os.write() only accepts bytes on
    Python 3.  The path is recorded in ``tempfiles`` for later cleanup
    by cleanup_tempfiles().
    """
    fd, tmpfile = tempfile.mkstemp()

    if content:
        if isinstance(content, str):
            # BUG FIX: os.write() requires bytes on Python 3; a str
            # payload previously raised TypeError.
            content = content.encode('utf-8')
        os.write(fd, content)

    os.close(fd)
    tempfiles.append(tmpfile)
    return tmpfile
def make_tempdir(parent=None):
    """Create a temporary directory and return its path.

    The path is recorded in ``tempdirs`` for later cleanup by
    cleanup_tempfiles().
    """
    path = tempfile.mkdtemp(dir=parent)
    tempdirs.append(path)
    return path
def make_empty_files(files):
    """Create each file in the given list and any intermediate directories."""
    for filename in files:
        parent = os.path.dirname(filename)
        if parent and not os.path.exists(parent):
            try:
                os.makedirs(parent)
            except OSError as e:
                logging.error('Unable to create directory %s: %s', parent, e)
                continue

        try:
            with open(filename, 'w'):
                # Touch the file so its access/modified times are "now".
                os.utime(filename, None)
        except IOError as e:
            logging.error('Unable to create empty file %s: %s', filename, e)
def walk_parents(path):
    """Yield *path* and each of its ancestors, stopping before the root."""
    current = path
    while os.path.splitdrive(current)[1] != os.sep:
        yield current
        current = os.path.dirname(current)
def get_home_path():
    """Retrieve the user's home path.

    Prefers $HOME, falls back to $APPDATA (Windows), and finally
    returns '' when neither is set.
    """
    for var in ('HOME', 'APPDATA'):
        if var in os.environ:
            return os.environ[var]
    return ''
def get_config_paths():
    """Return the paths to each :file:`.reviewboardrc` influencing the cwd.

    A list of paths to :file:`.reviewboardrc` files will be returned,
    where each subsequent list entry should have lower precedence than
    the previous, i.e. configuration found in files further up the list
    takes precedence.

    Configuration in the paths set in :envvar:`$RBTOOLS_CONFIG_PATH`
    takes precedence over files found in the current working directory
    or its parents.
    """
    found = []

    def _record(directory):
        # Resolve symlinks so duplicate files are detected reliably.
        candidate = os.path.realpath(os.path.join(directory, CONFIG_FILE))
        if os.path.exists(candidate) and candidate not in found:
            found.append(candidate)

    # Highest precedence: entries from $RBTOOLS_CONFIG_PATH (skipping
    # empty segments, which also covers the unset/empty case).
    for path in os.environ.get('RBTOOLS_CONFIG_PATH', '').split(os.pathsep):
        if path:
            _record(path)

    # Then the current directory and each of its parents.
    for path in walk_parents(os.getcwd()):
        _record(path)

    # Lowest precedence: the user's own config file.
    _record(get_home_path())

    return found
def parse_config_file(filename):
    """Parse a .reviewboardrc file.

    Returns a dictionary containing the configuration from the file.

    The ``filename`` argument should contain a full path to a
    .reviewboardrc file.
    """
    # Seed the namespace with the keys every caller expects to be able
    # to pop (see load_config()).
    config = {
        'TREES': {},
        'ALIASES': {},
    }

    try:
        config = _load_python_file(filename, config)
    except SyntaxError as e:
        # NOTE(review): e.offset can be None for some syntax errors,
        # which would make the %i conversion itself fail -- confirm.
        die('Syntax error in config file: %s\n'
            'Line %i offset %i\n' % (filename, e.lineno, e.offset))

    # Strip the interpreter builtins that exec() injected so only the
    # user's own settings remain.
    return dict((k, config[k])
                for k in set(config.keys()) - set(builtin.keys()))
def load_config():
    """Load configuration from .reviewboardrc files.

    This will read all of the .reviewboardrc files influencing the
    cwd and return a dictionary containing the configuration.
    """
    merged = {}
    tree_settings = {}
    alias_settings = {}

    # Lower-precedence files are parsed first so later updates win.
    for config_file in reversed(get_config_paths()):
        file_config = parse_config_file(config_file)
        tree_settings.update(file_config.pop('TREES'))
        alias_settings.update(file_config.pop('ALIASES'))
        merged.update(file_config)

    merged['TREES'] = tree_settings
    merged['ALIASES'] = alias_settings
    return merged
# This extracts a dictionary of the built-in globals in order to have a clean
# dictionary of settings, consisting of only what has been specified in the
# config file.
# (Executing a trivial statement makes Python populate '__builtins__' in
# the ``builtin`` dict, which parse_config_file() then subtracts.)
exec('True', builtin)
| |
##############################################################################
#
# Copyright (c) 2004 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Utility functions
"""
import calendar
import errno
import logging
import os
import re
import stat
import time
logger = logging.getLogger('waitress')
queue_logger = logging.getLogger('waitress.queue')
def find_double_newline(s):
    """Return the position just after the first blank-line separator in *s*.

    Both CRLF-style (b'\\n\\r\\n') and bare-LF-style (b'\\n\\n')
    separators are recognized; whichever ends earliest wins.  Returns
    -1 when neither occurs.
    """
    crlf_end = s.find(b'\n\r\n')
    if crlf_end >= 0:
        crlf_end += 3

    lf_end = s.find(b'\n\n')
    if lf_end >= 0:
        lf_end += 2

    if crlf_end < 0:
        return lf_end
    if lf_end < 0:
        return crlf_end
    return min(crlf_end, lf_end)
def concat(*pieces):
    """Concatenate all positional string arguments into one string."""
    return ''.join(pieces)
def join(seq, field=' '):
    """Join the elements of *seq* with *field* (a single space by default)."""
    separator = field
    return separator.join(seq)
def group(s):
    """Wrap *s* in parentheses, forming a regex capture group."""
    return ''.join(('(', s, ')'))
# Day-name vocabularies (lowercase: inputs are lowercased before
# matching) and a name -> weekday-number map (0 = Sunday, per RFC
# weekday ordering below).
short_days = ['sun', 'mon', 'tue', 'wed', 'thu', 'fri', 'sat']
long_days = ['sunday', 'monday', 'tuesday', 'wednesday',
             'thursday', 'friday', 'saturday']

short_day_reg = group(join(short_days, '|'))
long_day_reg = group(join(long_days, '|'))

daymap = {}
for i in range(7):
    daymap[short_days[i]] = i
    daymap[long_days[i]] = i

# HH:MM:SS
hms_reg = join(3 * [group('[0-9][0-9]')], ':')

months = ['jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul',
          'aug', 'sep', 'oct', 'nov', 'dec']

# month name -> 1-based month number
monmap = {}
for i in range(12):
    monmap[months[i]] = i + 1

months_reg = group(join(months, '|'))

# From draft-ietf-http-v11-spec-07.txt/3.3.1
#       Sun, 06 Nov 1994 08:49:37 GMT  ; RFC 822, updated by RFC 1123
#       Sunday, 06-Nov-94 08:49:37 GMT ; RFC 850, obsoleted by RFC 1036
#       Sun Nov  6 08:49:37 1994       ; ANSI C's asctime() format

# rfc822 format
rfc822_date = join(
    [concat(short_day_reg, ','),  # day
     group('[0-9][0-9]?'),  # date
     months_reg,  # month
     group('[0-9]+'),  # year
     hms_reg,  # hour minute second
     'gmt'
     ],
    ' '
)

rfc822_reg = re.compile(rfc822_date)
def unpack_rfc822(m):
    """Convert an RFC 822 date regex match into a struct_time-style tuple."""
    fields = m.group
    year = int(fields(4))
    month = monmap[fields(3)]
    day = int(fields(2))
    hour = int(fields(5))
    minute = int(fields(6))
    second = int(fields(7))
    # Trailing zeros: weekday/yearday/dst are unused by timegm().
    return (year, month, day, hour, minute, second, 0, 0, 0)
# rfc850 format
rfc850_date = join(
[
concat(long_day_reg, ','),
join(
[
group('[0-9][0-9]?'),
months_reg,
group('[0-9]+')
],
'-'
),
hms_reg,
'gmt'
],
' '
)
rfc850_reg = re.compile(rfc850_date)
# they actually unpack the same way
def unpack_rfc850(m):
    """Convert an RFC 850 date regex match into a 9-tuple for timegm().

    Two-digit years are assumed to lie in the 1900s.
    """
    fields = m.group
    year = fields(4)
    if len(year) == 2:
        year = '19' + year
    # Trailing zeros fill the weekday/yearday/dst slots timegm() ignores.
    return (
        int(year),
        monmap[fields(3)],
        int(fields(2)),
        int(fields(5)),
        int(fields(6)),
        int(fields(7)),
        0,
        0,
        0,
    )
# parsdate.parsedate    - ~700/sec.
# parse_http_date       - ~1333/sec.

# Fixed English names used when *formatting* dates (RFC 1123 output).
# monthname is 1-based, hence the leading None.
weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
monthname = [None, 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
             'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
def build_http_date(when):
    """Format the POSIX timestamp *when* as an RFC 1123 HTTP date string."""
    year, month, day, hh, mm, ss, wd, _yday, _dst = time.gmtime(when)
    return "%s, %02d %3s %4d %02d:%02d:%02d GMT" % (
        weekdayname[wd],
        day,
        monthname[month],
        year,
        hh,
        mm,
        ss,
    )
def parse_http_date(d):
    """Parse an HTTP date string into an integer POSIX timestamp.

    Accepts RFC 850 and RFC 822/1123 formats (matching is done on the
    lower-cased string); returns 0 when the whole string matches neither.
    """
    d = d.lower()
    match = rfc850_reg.match(d)
    if match and match.end() == len(d):
        return int(calendar.timegm(unpack_rfc850(match)))
    match = rfc822_reg.match(d)
    if match and match.end() == len(d):
        return int(calendar.timegm(unpack_rfc822(match)))
    return 0
# RFC 5234 Appendix B.1 "Core Rules":
# VCHAR = %x21-7E
#       ; visible (printing) characters
vchar_re = '\x21-\x7e'

# RFC 7230 Section 3.2.6 "Field Value Components":
# quoted-string = DQUOTE *( qdtext / quoted-pair ) DQUOTE
# qdtext = HTAB / SP /%x21 / %x23-5B / %x5D-7E / obs-text
# obs-text = %x80-FF
# quoted-pair = "\" ( HTAB / SP / VCHAR / obs-text )
obs_text_re = '\x80-\xff'

# The '\\' between \x5b and \x5d is needed to escape \x5d (']')
qdtext_re = '[\t \x21\x23-\x5b\\\x5d-\x7e' + obs_text_re + ']'

# A quoted-pair captures the escaped character so undquote() can keep it.
quoted_pair_re = r'\\' + '([\t ' + vchar_re + obs_text_re + '])'
quoted_string_re = \
    '"(?:(?:' + qdtext_re + ')|(?:' + quoted_pair_re + '))*"'

quoted_string = re.compile(quoted_string_re)
quoted_pair = re.compile(quoted_pair_re)
def undquote(value):
    """Strip RFC 7230 double-quoting from *value* and undo quoted-pairs.

    A value that is not quoted at all is returned unchanged.  A value that
    is only half-quoted, or whose quoting fails validation against the
    quoted-string grammar, raises ValueError.
    """
    has_open = value.startswith('"')
    has_close = value.endswith('"')
    if has_open and has_close:
        # Claims to be a quoted-string: validate before unwrapping.
        match = quoted_string.match(value)
        if match and match.end() == len(value):
            inner = value[1:-1]
            # Collapse each backslash escape to the character it protects.
            return quoted_pair.sub(r'\1', inner)
    elif not has_open and not has_close:
        return value
    raise ValueError('Invalid quoting in value')
def cleanup_unix_socket(path):
    """Remove a stale unix socket at *path*, if one exists.

    A missing file is silently ignored; any other stat() failure
    propagates.  The file is removed only when it really is a socket.
    """
    try:
        info = os.stat(path)
    except OSError as exc:
        if exc.errno != errno.ENOENT:
            raise  # pragma: no cover
        return
    if stat.S_ISSOCK(info.st_mode):
        try:
            os.remove(path)
        except OSError:  # pragma: no cover
            # avoid race condition error during tests
            pass
class Error(object):
    """Base class for HTTP error responses.

    Subclasses provide the ``code`` and ``reason`` class attributes; the
    instance carries the detailed message in ``body``.
    """

    def __init__(self, body):
        self.body = body

    def to_response(self):
        """Return a ``(status, headers, body)`` triple for this error."""
        status = '%s %s' % (self.code, self.reason)
        text = '%s\r\n\r\n%s' % (self.reason, self.body)
        text += '\r\n\r\n(generated by waitress)'
        headers = [('Content-Type', 'text/plain')]
        return status, headers, text

    def wsgi_response(self, environ, start_response):
        """Minimal WSGI application that emits this error."""
        status, headers, body = self.to_response()
        start_response(status, headers)
        yield body
class BadRequest(Error):
    # 400: the request was malformed or could not be parsed.
    code = 400
    reason = 'Bad Request'
class RequestHeaderFieldsTooLarge(BadRequest):
    # 431 (RFC 6585): request headers exceeded the configured size limit.
    code = 431
    reason = 'Request Header Fields Too Large'
class RequestEntityTooLarge(BadRequest):
    # 413: request body exceeded the configured size limit.
    code = 413
    reason = 'Request Entity Too Large'
class InternalServerError(Error):
    # 500: unexpected failure inside the server while handling the request.
    code = 500
    reason = 'Internal Server Error'
| |
from bson import DBRef, SON
from base import (BaseDict, BaseList, TopLevelDocumentMetaclass, get_document)
from fields import (ReferenceField, ListField, DictField, MapField)
from connection import get_db
from queryset import QuerySet
from document import Document, EmbeddedDocument
class DeReference(object):
    """Callable that replaces DBRefs inside a data structure with the
    documents they point to.

    It first collects every reference found in the input (down to
    ``max_depth``), then fetches them from the database in bulk, and
    finally splices the fetched documents back into the structure.
    Python 2 code: relies on ``basestring``, ``unicode`` and
    ``dict.iteritems``.
    """

    def __call__(self, items, max_depth=1, instance=None, name=None):
        """
        Cheaply dereferences the items to a set depth.
        Also handles the conversion of complex data types.

        :param items: The iterable (dict, list, queryset) to be dereferenced.
        :param max_depth: The maximum depth to recurse to
        :param instance: The owning instance used for tracking changes by
            :class:`~mongoengine.base.ComplexBaseField`
        :param name: The name of the field, used for tracking changes by
            :class:`~mongoengine.base.ComplexBaseField`
        :param get: A boolean determining if being called by __get__
        """
        if items is None or isinstance(items, basestring):
            return items

        # cheapest way to convert a queryset to a list
        # list(queryset) uses a count() query to determine length
        if isinstance(items, QuerySet):
            items = [i for i in items]

        self.max_depth = max_depth
        doc_type = None

        if instance and isinstance(instance, (Document, EmbeddedDocument,
                                              TopLevelDocumentMetaclass)):
            doc_type = instance._fields.get(name)
            # Unwrap container fields (e.g. ListField) down to the element
            # field they hold.
            while hasattr(doc_type, 'field'):
                doc_type = doc_type.field

            if isinstance(doc_type, ReferenceField):
                field = doc_type
                doc_type = doc_type.document_type
                is_list = not hasattr(items, 'items')

                # Fast path: every element is already the target document
                # class, so nothing needs fetching.
                if is_list and all([i.__class__ == doc_type for i in items]):
                    return items
                elif not is_list and all([i.__class__ == doc_type
                                          for i in items.values()]):
                    return items
                elif not field.dbref:
                    # References stored as bare ids rather than DBRefs:
                    # coerce raw values through the field so the scan below
                    # sees proper DBRef objects.
                    if not hasattr(items, 'items'):

                        def _get_items(items):
                            # Recursively convert raw id values in nested lists.
                            new_items = []
                            for v in items:
                                if isinstance(v, list):
                                    new_items.append(_get_items(v))
                                elif not isinstance(v, (DBRef, Document)):
                                    new_items.append(field.to_python(v))
                                else:
                                    new_items.append(v)
                            return new_items

                        items = _get_items(items)
                    else:
                        items = dict([
                            (k, field.to_python(v))
                            if not isinstance(v, (DBRef, Document)) else (k, v)
                            for k, v in items.iteritems()]
                        )

        # Phase 1: scan. Phase 2: bulk fetch. Phase 3: splice back in.
        self.reference_map = self._find_references(items)
        self.object_map = self._fetch_objects(doc_type=doc_type)
        return self._attach_objects(items, 0, instance, name)

    def _find_references(self, items, depth=0):
        """
        Recursively finds all db references to be dereferenced

        :param items: The iterable (dict, list, queryset)
        :param depth: The current depth of recursion
        """
        reference_map = {}
        if not items or depth >= self.max_depth:
            return reference_map

        # Determine the iterator to use
        if not hasattr(items, 'items'):
            iterator = enumerate(items)
        else:
            iterator = items.iteritems()

        # Recursively find dbreferences
        depth += 1
        for k, item in iterator:
            if isinstance(item, (Document, EmbeddedDocument)):
                # Scan each field of an embedded/top-level document.
                for field_name, field in item._fields.iteritems():
                    v = item._data.get(field_name, None)
                    if isinstance(v, (DBRef)):
                        reference_map.setdefault(field.document_type, []).append(v.id)
                    elif isinstance(v, (dict, SON)) and '_ref' in v:
                        # Generic reference stored as {'_ref': DBRef, '_cls': name}.
                        reference_map.setdefault(get_document(v['_cls']), []).append(v['_ref'].id)
                    elif isinstance(v, (dict, list, tuple)) and depth <= self.max_depth:
                        field_cls = getattr(getattr(field, 'field', None), 'document_type', None)
                        references = self._find_references(v, depth)
                        for key, refs in references.iteritems():
                            if isinstance(field_cls, (Document, TopLevelDocumentMetaclass)):
                                key = field_cls
                            reference_map.setdefault(key, []).extend(refs)
            elif isinstance(item, (DBRef)):
                reference_map.setdefault(item.collection, []).append(item.id)
            elif isinstance(item, (dict, SON)) and '_ref' in item:
                reference_map.setdefault(get_document(item['_cls']), []).append(item['_ref'].id)
            elif isinstance(item, (dict, list, tuple)) and depth - 1 <= self.max_depth:
                # NOTE(review): plain containers recurse with depth - 1
                # (unlike the document branch above, which passes depth) —
                # presumably so bare nesting doesn't consume depth budget;
                # confirm intent before changing.
                references = self._find_references(item, depth - 1)
                for key, refs in references.iteritems():
                    reference_map.setdefault(key, []).extend(refs)
        return reference_map

    def _fetch_objects(self, doc_type=None):
        """Fetch all references and convert to their document objects
        """
        object_map = {}
        for collection, dbrefs in self.reference_map.iteritems():
            keys = object_map.keys()
            # Skip ids already fetched for a previous collection key.
            refs = list(set([dbref for dbref in dbrefs if unicode(dbref).encode('utf-8') not in keys]))
            if hasattr(collection, 'objects'):  # We have a document class for the refs
                references = collection.objects.in_bulk(refs)
                for key, doc in references.iteritems():
                    object_map[key] = doc
            else:  # Generic reference: use the refs data to convert to document
                if isinstance(doc_type, (ListField, DictField, MapField,)):
                    continue
                if doc_type:
                    references = doc_type._get_db()[collection].find({'_id': {'$in': refs}})
                    for ref in references:
                        doc = doc_type._from_son(ref)
                        object_map[doc.id] = doc
                else:
                    references = get_db()[collection].find({'_id': {'$in': refs}})
                    for ref in references:
                        if '_cls' in ref:
                            doc = get_document(ref["_cls"])._from_son(ref)
                        elif doc_type is None:
                            # Fall back to deriving the class name from the
                            # snake_case collection name.
                            doc = get_document(
                                ''.join(x.capitalize()
                                        for x in collection.split('_')))._from_son(ref)
                        else:
                            doc = doc_type._from_son(ref)
                        object_map[doc.id] = doc
        return object_map

    def _attach_objects(self, items, depth=0, instance=None, name=None):
        """
        Recursively finds all db references to be dereferenced

        :param items: The iterable (dict, list, queryset)
        :param depth: The current depth of recursion
        :param instance: The owning instance used for tracking changes by
            :class:`~mongoengine.base.ComplexBaseField`
        :param name: The name of the field, used for tracking changes by
            :class:`~mongoengine.base.ComplexBaseField`
        """
        if not items:
            # Empty input: wrap it for change tracking when possible.
            if isinstance(items, (BaseDict, BaseList)):
                return items

            if instance:
                if isinstance(items, dict):
                    return BaseDict(items, instance, name)
                else:
                    return BaseList(items, instance, name)

        if isinstance(items, (dict, SON)):
            if '_ref' in items:
                # Whole mapping is a generic reference: swap in the document.
                return self.object_map.get(items['_ref'].id, items)
            elif '_cls' in items:
                # Inline embedded document stored as SON with a class marker.
                doc = get_document(items['_cls'])._from_son(items)
                doc._data = self._attach_objects(doc._data, depth, doc, None)
                return doc

        if not hasattr(items, 'items'):
            is_list = True
            as_tuple = isinstance(items, tuple)
            iterator = enumerate(items)
            data = []
        else:
            is_list = False
            iterator = items.iteritems()
            data = {}

        depth += 1
        for k, v in iterator:
            # Copy the element first; for lists, data[k] is then valid
            # because k is the index of the element just appended.
            if is_list:
                data.append(v)
            else:
                data[k] = v

            if k in self.object_map and not is_list:
                data[k] = self.object_map[k]
            elif isinstance(v, (Document, EmbeddedDocument)):
                for field_name, field in v._fields.iteritems():
                    # NOTE(review): `v` is rebound to the field value here,
                    # shadowing the loop variable — later iterations read
                    # data[k]._data, so this works, but confirm before
                    # refactoring.
                    v = data[k]._data.get(field_name, None)
                    if isinstance(v, (DBRef)):
                        data[k]._data[field_name] = self.object_map.get(v.id, v)
                    elif isinstance(v, (dict, SON)) and '_ref' in v:
                        data[k]._data[field_name] = self.object_map.get(v['_ref'].id, v)
                    elif isinstance(v, dict) and depth <= self.max_depth:
                        data[k]._data[field_name] = self._attach_objects(v, depth, instance=instance, name=name)
                    elif isinstance(v, (list, tuple)) and depth <= self.max_depth:
                        data[k]._data[field_name] = self._attach_objects(v, depth, instance=instance, name=name)
            elif isinstance(v, (dict, list, tuple)) and depth <= self.max_depth:
                item_name = '%s.%s' % (name, k) if name else name
                data[k] = self._attach_objects(v, depth - 1, instance=instance, name=item_name)
            elif hasattr(v, 'id'):
                data[k] = self.object_map.get(v.id, v)

        if instance and name:
            if is_list:
                return tuple(data) if as_tuple else BaseList(data, instance, name)
            return BaseDict(data, instance, name)
        # NOTE(review): this second increment of depth has no effect on the
        # value returned below — confirm whether it is vestigial.
        depth += 1
        return data
| |
"""
Functions to generate Theano update dictionaries for training.
The update functions implement different methods to control the learning
rate for use with stochastic gradient descent.
Update functions take a loss expression or a list of gradient expressions and
a list of parameters as input and return an ordered dictionary of updates:
.. autosummary::
:nosignatures:
sgd
momentum
nesterov_momentum
adagrad
rmsprop
adadelta
adam
adamax
Two functions can be used to further modify the updates to include momentum:
.. autosummary::
:nosignatures:
apply_momentum
apply_nesterov_momentum
Finally, we provide two helper functions to constrain the norm of tensors:
.. autosummary::
:nosignatures:
norm_constraint
total_norm_constraint
:func:`norm_constraint()` can be used to constrain the norm of parameters
(as an alternative to weight decay), or for a form of gradient clipping.
:func:`total_norm_constraint()` constrains the total norm of a list of tensors.
This is often used when training recurrent neural networks.
Examples
--------
>>> import lasagne
>>> import theano.tensor as T
>>> import theano
>>> from lasagne.nonlinearities import softmax
>>> from lasagne.layers import InputLayer, DenseLayer, get_output
>>> from lasagne.updates import sgd, apply_momentum
>>> l_in = InputLayer((100, 20))
>>> l1 = DenseLayer(l_in, num_units=3, nonlinearity=softmax)
>>> x = T.matrix('x') # shp: num_batch x num_features
>>> y = T.ivector('y') # shp: num_batch
>>> l_out = get_output(l1, x)
>>> params = lasagne.layers.get_all_params(l1)
>>> loss = T.mean(T.nnet.categorical_crossentropy(l_out, y))
>>> updates_sgd = sgd(loss, params, learning_rate=0.0001)
>>> updates = apply_momentum(updates_sgd, params, momentum=0.9)
>>> train_function = theano.function([x, y], updates=updates)
"""
from collections import OrderedDict
import numpy as np
import theano
import theano.tensor as T
from . import utils
# Public API re-exported by ``from lasagne.updates import *``.
__all__ = [
    "sgd",
    "apply_momentum",
    "momentum",
    "apply_nesterov_momentum",
    "nesterov_momentum",
    "adagrad",
    "rmsprop",
    "adadelta",
    "adam",
    "adamax",
    "norm_constraint",
    "total_norm_constraint"
]
def get_or_compute_grads(loss_or_grads, params):
    """Helper returning one gradient expression per parameter.

    Parameters
    ----------
    loss_or_grads : symbolic expression or list of expressions
        A scalar loss expression, or a list of gradient expressions
    params : list of shared variables
        The variables to return the gradients for

    Returns
    -------
    list of expressions
        `loss_or_grads` itself when it is already a list of gradients
        (one per parameter); otherwise the result of
        ``theano.grad(loss_or_grads, params)``.

    Raises
    ------
    ValueError
        If `loss_or_grads` is a list of a different length than `params`,
        or if any element of `params` is not a shared variable (while we
        could still compute its gradient, we can never update it and want
        to fail early).
    """
    non_shared = [p for p in params
                  if not isinstance(p, theano.compile.SharedVariable)]
    if non_shared:
        raise ValueError("params must contain shared variables only. If it "
                         "contains arbitrary parameter expressions, then "
                         "lasagne.utils.collect_shared_vars() may help you.")
    if not isinstance(loss_or_grads, list):
        return theano.grad(loss_or_grads, params)
    if len(loss_or_grads) != len(params):
        raise ValueError("Got %d gradient expressions for %d parameters" %
                         (len(loss_or_grads), len(params)))
    return loss_or_grads
def sgd(loss_or_grads, params, learning_rate):
    """Stochastic Gradient Descent (SGD) updates

    Generates ``param := param - learning_rate * gradient`` for every
    parameter.

    Parameters
    ----------
    loss_or_grads : symbolic expression or list of expressions
        A scalar loss expression, or a list of gradient expressions
    params : list of shared variables
        The variables to generate update expressions for
    learning_rate : float or symbolic scalar
        The learning rate controlling the size of update steps

    Returns
    -------
    OrderedDict
        A dictionary mapping each parameter to its update expression
    """
    gradients = get_or_compute_grads(loss_or_grads, params)
    return OrderedDict(
        (param, param - learning_rate * gradient)
        for param, gradient in zip(params, gradients)
    )
def apply_momentum(updates, params=None, momentum=0.9):
    """Returns a modified update dictionary including momentum

    For each parameter, this generates:

    * ``velocity := momentum * velocity + updates[param] - param``
    * ``param := param + velocity``

    Parameters
    ----------
    updates : OrderedDict
        A dictionary mapping parameters to update expressions
    params : iterable of shared variables, optional
        The variables to apply momentum to; defaults to all
        `updates.keys()`.
    momentum : float or symbolic scalar, optional
        The amount of momentum to apply; higher values smooth over more
        update steps. Defaults to 0.9.

    Returns
    -------
    OrderedDict
        A copy of `updates` with momentum updates for all `params`.

    Notes
    -----
    Higher momentum also results in larger update steps. To counter that,
    you can optionally scale your learning rate by `1 - momentum`.

    See Also
    --------
    momentum : Shortcut applying momentum to SGD updates
    """
    if params is None:
        params = updates.keys()
    result = OrderedDict(updates)

    for param in params:
        template = param.get_value(borrow=True)
        velocity = theano.shared(np.zeros(template.shape, dtype=template.dtype),
                                 broadcastable=param.broadcastable)
        new_position = momentum * velocity + result[param]
        result[velocity] = new_position - param
        result[param] = new_position

    return result
def momentum(loss_or_grads, params, learning_rate, momentum=0.9):
    """Stochastic Gradient Descent (SGD) updates with momentum

    Generates update expressions of the form:

    * ``velocity := momentum * velocity - learning_rate * gradient``
    * ``param := param + velocity``

    Parameters
    ----------
    loss_or_grads : symbolic expression or list of expressions
        A scalar loss expression, or a list of gradient expressions
    params : list of shared variables
        The variables to generate update expressions for
    learning_rate : float or symbolic scalar
        The learning rate controlling the size of update steps
    momentum : float or symbolic scalar, optional
        The amount of momentum to apply; higher values smooth over more
        update steps. Defaults to 0.9.

    Returns
    -------
    OrderedDict
        A dictionary mapping each parameter to its update expression

    Notes
    -----
    Higher momentum also results in larger update steps. To counter that,
    you can optionally scale your learning rate by `1 - momentum`.

    See Also
    --------
    apply_momentum : Generic function applying momentum to updates
    nesterov_momentum : Nesterov's variant of SGD with momentum
    """
    plain_updates = sgd(loss_or_grads, params, learning_rate)
    return apply_momentum(plain_updates, momentum=momentum)
def apply_nesterov_momentum(updates, params=None, momentum=0.9):
    """Returns a modified update dictionary including Nesterov momentum

    For each parameter, this generates:

    * ``velocity := momentum * velocity + updates[param] - param``
    * ``param := param + momentum * velocity + updates[param] - param``

    Parameters
    ----------
    updates : OrderedDict
        A dictionary mapping parameters to update expressions
    params : iterable of shared variables, optional
        The variables to apply momentum to; defaults to all
        `updates.keys()`.
    momentum : float or symbolic scalar, optional
        The amount of momentum to apply; higher values smooth over more
        update steps. Defaults to 0.9.

    Returns
    -------
    OrderedDict
        A copy of `updates` with momentum updates for all `params`.

    Notes
    -----
    Higher momentum also results in larger update steps. To counter that,
    you can optionally scale your learning rate by `1 - momentum`.

    The classic formulation of Nesterov momentum (or Nesterov accelerated
    gradient) requires the gradient to be evaluated at the predicted next
    position in parameter space. Here, we use the formulation described at
    https://github.com/lisa-lab/pylearn2/pull/136#issuecomment-10381617,
    which allows the gradient to be evaluated at the current parameters.

    See Also
    --------
    nesterov_momentum : Shortcut applying Nesterov momentum to SGD updates
    """
    if params is None:
        params = updates.keys()
    result = OrderedDict(updates)

    for param in params:
        template = param.get_value(borrow=True)
        velocity = theano.shared(np.zeros(template.shape, dtype=template.dtype),
                                 broadcastable=param.broadcastable)
        delta = momentum * velocity + result[param] - param
        result[velocity] = delta
        result[param] = momentum * delta + result[param]

    return result
def nesterov_momentum(loss_or_grads, params, learning_rate, momentum=0.9):
    """Stochastic Gradient Descent (SGD) updates with Nesterov momentum

    Generates update expressions of the form:

    * ``velocity := momentum * velocity - learning_rate * gradient``
    * ``param := param + momentum * velocity - learning_rate * gradient``

    Parameters
    ----------
    loss_or_grads : symbolic expression or list of expressions
        A scalar loss expression, or a list of gradient expressions
    params : list of shared variables
        The variables to generate update expressions for
    learning_rate : float or symbolic scalar
        The learning rate controlling the size of update steps
    momentum : float or symbolic scalar, optional
        The amount of momentum to apply; higher values smooth over more
        update steps. Defaults to 0.9.

    Returns
    -------
    OrderedDict
        A dictionary mapping each parameter to its update expression

    Notes
    -----
    Higher momentum also results in larger update steps. To counter that,
    you can optionally scale your learning rate by `1 - momentum`.

    The classic formulation of Nesterov momentum (or Nesterov accelerated
    gradient) requires the gradient to be evaluated at the predicted next
    position in parameter space. Here, we use the formulation described at
    https://github.com/lisa-lab/pylearn2/pull/136#issuecomment-10381617,
    which allows the gradient to be evaluated at the current parameters.

    See Also
    --------
    apply_nesterov_momentum : Function applying momentum to updates
    """
    plain_updates = sgd(loss_or_grads, params, learning_rate)
    return apply_nesterov_momentum(plain_updates, momentum=momentum)
def adagrad(loss_or_grads, params, learning_rate=1.0, epsilon=1e-6):
    """Adagrad updates

    Scales learning rates by dividing with the square root of accumulated
    squared gradients. See [1]_ for further description.

    Parameters
    ----------
    loss_or_grads : symbolic expression or list of expressions
        A scalar loss expression, or a list of gradient expressions
    params : list of shared variables
        The variables to generate update expressions for
    learning_rate : float or symbolic scalar
        The learning rate controlling the size of update steps
    epsilon : float or symbolic scalar
        Small value added for numerical stability

    Returns
    -------
    OrderedDict
        A dictionary mapping each parameter to its update expression

    Notes
    -----
    Using step size eta, Adagrad calculates the learning rate for
    feature i at time step t as:

    .. math:: \\eta_{t,i} = \\frac{\\eta}
       {\\sqrt{\\sum^t_{t^\\prime} g^2_{t^\\prime,i}+\\epsilon}} g_{t,i}

    so the learning rate is monotonically decreasing. Epsilon is not
    included in the typical formula, see [2]_.

    References
    ----------
    .. [1] Duchi, J., Hazan, E., & Singer, Y. (2011):
           Adaptive subgradient methods for online learning and stochastic
           optimization. JMLR, 12:2121-2159.

    .. [2] Chris Dyer:
           Notes on AdaGrad. http://www.ark.cs.cmu.edu/cdyer/adagrad.pdf
    """
    gradients = get_or_compute_grads(loss_or_grads, params)
    updates = OrderedDict()

    for param, gradient in zip(params, gradients):
        template = param.get_value(borrow=True)
        # Running sum of squared gradients, one accumulator per parameter.
        accumulator = theano.shared(np.zeros(template.shape, dtype=template.dtype),
                                    broadcastable=param.broadcastable)
        accumulator_new = accumulator + gradient ** 2
        updates[accumulator] = accumulator_new
        updates[param] = param - (learning_rate * gradient /
                                  T.sqrt(accumulator_new + epsilon))

    return updates
def rmsprop(loss_or_grads, params, learning_rate=1.0, rho=0.9, epsilon=1e-6):
    """RMSProp updates

    Scales learning rates by dividing with the moving average of the root
    mean squared (RMS) gradients. See [1]_ for further description.

    Parameters
    ----------
    loss_or_grads : symbolic expression or list of expressions
        A scalar loss expression, or a list of gradient expressions
    params : list of shared variables
        The variables to generate update expressions for
    learning_rate : float or symbolic scalar
        The learning rate controlling the size of update steps
    rho : float or symbolic scalar
        Gradient moving average decay factor
    epsilon : float or symbolic scalar
        Small value added for numerical stability

    Returns
    -------
    OrderedDict
        A dictionary mapping each parameter to its update expression

    Notes
    -----
    `rho` should be between 0 and 1: values close to 1 decay the moving
    average slowly, values close to 0 decay it fast.

    Using the step size :math:`\\eta` and a decay factor :math:`\\rho`, the
    learning rate :math:`\\eta_t` is calculated as:

    .. math::
       r_t &= \\rho r_{t-1} + (1-\\rho)*g^2\\\\
       \\eta_t &= \\frac{\\eta}{\\sqrt{r_t + \\epsilon}}

    References
    ----------
    .. [1] Tieleman, T. and Hinton, G. (2012):
           Neural Networks for Machine Learning, Lecture 6.5 - rmsprop.
           Coursera. http://www.youtube.com/watch?v=O3sxAc4hxZU (formula @5:20)
    """
    gradients = get_or_compute_grads(loss_or_grads, params)
    updates = OrderedDict()

    for param, gradient in zip(params, gradients):
        template = param.get_value(borrow=True)
        # Exponential moving average of squared gradients.
        accumulator = theano.shared(np.zeros(template.shape, dtype=template.dtype),
                                    broadcastable=param.broadcastable)
        accumulator_new = rho * accumulator + (1 - rho) * gradient ** 2
        updates[accumulator] = accumulator_new
        updates[param] = param - (learning_rate * gradient /
                                  T.sqrt(accumulator_new + epsilon))

    return updates
def adadelta(loss_or_grads, params, learning_rate=1.0, rho=0.95, epsilon=1e-6):
    """Adadelta updates

    Scales learning rates by the ratio of accumulated gradient magnitudes
    to accumulated update step magnitudes; see [1]_ and the notes below.

    Parameters
    ----------
    loss_or_grads : symbolic expression or list of expressions
        A scalar loss expression, or a list of gradient expressions
    params : list of shared variables
        The variables to generate update expressions for
    learning_rate : float or symbolic scalar
        The learning rate controlling the size of update steps
    rho : float or symbolic scalar
        Squared gradient moving average decay factor
    epsilon : float or symbolic scalar
        Small value added for numerical stability

    Returns
    -------
    OrderedDict
        A dictionary mapping each parameter to its update expression

    Notes
    -----
    `rho` should be between 0 and 1: values close to 1 decay the moving
    average slowly, values close to 0 decay it fast.

    rho = 0.95 and epsilon = 1e-6 are suggested in the paper and reported
    to work for multiple datasets (MNIST, speech). The paper uses no
    learning rate (i.e. learning_rate = 1.0), which is probably best kept.
    epsilon is important for the very first update (so the numerator does
    not become 0).

    Using the step size eta and a decay factor rho, the learning rate is
    calculated as:

    .. math::
       r_t &= \\rho r_{t-1} + (1-\\rho)*g^2\\\\
       \\eta_t &= \\eta \\frac{\\sqrt{s_{t-1} + \\epsilon}}
                             {\\sqrt{r_t + \\epsilon}}\\\\
       s_t &= \\rho s_{t-1} + (1-\\rho)*(\\eta_t*g)^2

    References
    ----------
    .. [1] Zeiler, M. D. (2012):
           ADADELTA: An Adaptive Learning Rate Method.
           arXiv Preprint arXiv:1212.5701.
    """
    gradients = get_or_compute_grads(loss_or_grads, params)
    updates = OrderedDict()

    for param, gradient in zip(params, gradients):
        template = param.get_value(borrow=True)
        # Running average of squared gradients.
        grad_accu = theano.shared(np.zeros(template.shape, dtype=template.dtype),
                                  broadcastable=param.broadcastable)
        # Running average of squared update steps (accumulated recursively).
        step_accu = theano.shared(np.zeros(template.shape, dtype=template.dtype),
                                  broadcastable=param.broadcastable)

        grad_accu_new = rho * grad_accu + (1 - rho) * gradient ** 2
        updates[grad_accu] = grad_accu_new

        # The step uses the *previous* accumulated step magnitudes.
        step = (gradient * T.sqrt(step_accu + epsilon) /
                T.sqrt(grad_accu_new + epsilon))
        updates[param] = param - learning_rate * step

        step_accu_new = rho * step_accu + (1 - rho) * step ** 2
        updates[step_accu] = step_accu_new

    return updates
def adam(loss_or_grads, params, learning_rate=0.001, beta1=0.9,
         beta2=0.999, epsilon=1e-8):
    """Adam updates

    Adam updates implemented as in [1]_.

    Parameters
    ----------
    loss_or_grads : symbolic expression or list of expressions
        A scalar loss expression, or a list of gradient expressions
    params : list of shared variables
        The variables to generate update expressions for
    learning_rate : float
        Learning rate
    beta1 : float
        Exponential decay rate for the first moment estimates.
    beta2 : float
        Exponential decay rate for the second moment estimates.
    epsilon : float
        Constant for numerical stability.

    Returns
    -------
    OrderedDict
        A dictionary mapping each parameter to its update expression

    Notes
    -----
    The paper [1]_ includes an additional hyperparameter lambda. This is
    only needed to prove convergence of the algorithm and has no practical
    use (personal communication with the authors); it is therefore omitted
    here.

    References
    ----------
    .. [1] Kingma, Diederik, and Jimmy Ba (2014):
           Adam: A Method for Stochastic Optimization.
           arXiv preprint arXiv:1412.6980.
    """
    gradients = get_or_compute_grads(loss_or_grads, params)
    step_count = theano.shared(utils.floatX(0.))
    updates = OrderedDict()

    t = step_count + 1
    # Bias-corrected step size; identical for all parameters.
    alpha_t = learning_rate * T.sqrt(1 - beta2 ** t) / (1 - beta1 ** t)

    for param, gradient in zip(params, gradients):
        template = param.get_value(borrow=True)
        m_prev = theano.shared(np.zeros(template.shape, dtype=template.dtype),
                               broadcastable=param.broadcastable)
        v_prev = theano.shared(np.zeros(template.shape, dtype=template.dtype),
                               broadcastable=param.broadcastable)

        m_t = beta1 * m_prev + (1 - beta1) * gradient
        v_t = beta2 * v_prev + (1 - beta2) * gradient ** 2

        updates[m_prev] = m_t
        updates[v_prev] = v_t
        updates[param] = param - alpha_t * m_t / (T.sqrt(v_t) + epsilon)

    updates[step_count] = t
    return updates
def adamax(loss_or_grads, params, learning_rate=0.002, beta1=0.9,
           beta2=0.999, epsilon=1e-8):
    """Adamax updates

    Adamax updates implemented as in [1]_. This is a variant of the Adam
    algorithm based on the infinity norm.

    Parameters
    ----------
    loss_or_grads : symbolic expression or list of expressions
        A scalar loss expression, or a list of gradient expressions
    params : list of shared variables
        The variables to generate update expressions for
    learning_rate : float
        Learning rate
    beta1 : float
        Exponential decay rate for the first moment estimates.
    beta2 : float
        Exponential decay rate for the weighted infinity norm estimates.
    epsilon : float
        Constant for numerical stability.

    Returns
    -------
    OrderedDict
        A dictionary mapping each parameter to its update expression

    References
    ----------
    .. [1] Kingma, Diederik, and Jimmy Ba (2014):
           Adam: A Method for Stochastic Optimization.
           arXiv preprint arXiv:1412.6980.
    """
    gradients = get_or_compute_grads(loss_or_grads, params)
    step_count = theano.shared(utils.floatX(0.))
    updates = OrderedDict()

    t = step_count + 1
    # Only the first moment needs bias correction here.
    alpha_t = learning_rate / (1 - beta1 ** t)

    for param, gradient in zip(params, gradients):
        template = param.get_value(borrow=True)
        m_prev = theano.shared(np.zeros(template.shape, dtype=template.dtype),
                               broadcastable=param.broadcastable)
        u_prev = theano.shared(np.zeros(template.shape, dtype=template.dtype),
                               broadcastable=param.broadcastable)

        m_t = beta1 * m_prev + (1 - beta1) * gradient
        # Exponentially weighted infinity norm of the gradients.
        u_t = T.maximum(beta2 * u_prev, abs(gradient))

        updates[m_prev] = m_t
        updates[u_prev] = u_t
        updates[param] = param - alpha_t * m_t / (u_t + epsilon)

    updates[step_count] = t
    return updates
def norm_constraint(tensor_var, max_norm, norm_axes=None, epsilon=1e-7):
    """Max weight norm constraints and gradient clipping

    Takes a TensorVariable and rescales it so that incoming weight norms
    stay below `max_norm`; vectors violating the constraint are rescaled
    into the allowed range, all others pass through unchanged.

    Parameters
    ----------
    tensor_var : TensorVariable
        Theano expression for update, gradient, or other quantity.
    max_norm : scalar
        This value sets the maximum allowed value of any norm in
        `tensor_var`.
    norm_axes : sequence (list or tuple), optional
        The axes over which to compute the norm. Overrides the defaults
        described in the notes below.
    epsilon : scalar, optional
        Value used to prevent numerical instability when dividing by
        very small or zero norms.

    Returns
    -------
    TensorVariable
        Input `tensor_var` with rescaling applied to weight vectors
        that violate the specified constraints.

    Notes
    -----
    When `norm_axes` is not specified, the norm axes depend on the
    dimensionality of the input: a 2D variable is assumed to come from a
    dense layer and the norm is computed over axis 0; a 3D, 4D or 5D
    variable is assumed to come from a convolutional layer and the norm is
    computed over all trailing axes beyond axis 0. For other uses, pass
    `norm_axes` explicitly.

    Raises
    ------
    ValueError
        If the variable is not 2-5 dimensional and `norm_axes` is omitted.
    """
    if norm_axes is not None:
        sum_over = tuple(norm_axes)
    else:
        ndim = tensor_var.ndim
        if ndim == 2:  # DenseLayer
            sum_over = (0,)
        elif ndim in (3, 4, 5):  # Conv{1,2,3}DLayer
            sum_over = tuple(range(1, ndim))
        else:
            raise ValueError(
                "Unsupported tensor dimensionality {}."
                "Must specify `norm_axes`".format(ndim)
            )

    dtype = np.dtype(theano.config.floatX).type
    norms = T.sqrt(T.sum(T.sqr(tensor_var), axis=sum_over, keepdims=True))
    target_norms = T.clip(norms, 0, dtype(max_norm))
    # Norms above max_norm shrink toward the target; epsilon guards the
    # division for (near-)zero norms.
    return tensor_var * (target_norms / (dtype(epsilon) + norms))
def total_norm_constraint(tensor_vars, max_norm, epsilon=1e-7,
                          return_norm=False):
    """Rescale a list of tensors so their combined norm stays below a bound.

    If the joint L2 norm of all input tensors exceeds ``max_norm``, every
    tensor is scaled down by the same factor so the joint norm equals
    ``max_norm``; otherwise the tensors are (numerically) unchanged.
    Commonly used for gradient clipping when training recurrent networks
    (Sutskever et al., 2014).

    Parameters
    ----------
    tensor_vars : list of TensorVariables
        Tensors to be rescaled.
    max_norm : float
        Threshold value for the total norm.
    epsilon : scalar, optional
        Small constant preventing division by very small or zero norms.
    return_norm : bool
        If true, the total norm is returned as well.

    Returns
    -------
    tensor_vars_scaled : list of TensorVariables
        The rescaled tensor variables.
    norm : Theano scalar
        The combined norm of the inputs prior to rescaling; only returned
        if ``return_norm=True``.

    Notes
    -----
    The returned norm can be used to monitor training.
    """
    # Joint L2 norm over every element of every tensor.
    squared_sums = [T.sum(var ** 2) for var in tensor_vars]
    norm = T.sqrt(sum(squared_sums))
    # Cast scalars to the configured float dtype to avoid upcasting.
    floatX_cast = np.dtype(theano.config.floatX).type
    target_norm = T.clip(norm, 0, floatX_cast(max_norm))
    # Common multiplier applied to all tensors; epsilon guards against
    # division by (near-)zero norms.
    scale = target_norm / (floatX_cast(epsilon) + norm)
    tensor_vars_scaled = [var * scale for var in tensor_vars]
    if return_norm:
        return tensor_vars_scaled, norm
    return tensor_vars_scaled
| |
# test_cmake.py - Unit tests for swift_build_support.cmake -*- python -*-
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2016 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See http://swift.org/LICENSE.txt for license information
# See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
import os
import unittest
from argparse import Namespace
from swift_build_support.cmake import CMake, CMakeOptions
from swift_build_support.toolchain import host_toolchain
class CMakeTestCase(unittest.TestCase):
    """Tests for CMake: the generated configure-time options
    (``common_options``) and build-time arguments (``build_args``) for the
    supported generators (Ninja, Unix Makefiles, Xcode, Eclipse).
    """

    def mock_distcc_path(self):
        """Return a path string of mock distcc executable
        """
        return os.path.join(os.path.dirname(__file__),
                            'mock-distcc')

    def default_args(self):
        """Return new args object with default values
        """
        return Namespace(host_cc="/path/to/clang",
                         host_cxx="/path/to/clang++",
                         enable_asan=False,
                         enable_ubsan=False,
                         export_compile_commands=False,
                         distcc=False,
                         cmake_generator="Ninja",
                         clang_compiler_version=None,
                         build_jobs=8,
                         build_args=[],
                         verbose_build=False)

    def cmake(self, args):
        """Return new CMake object initialized with given args
        """
        toolchain = host_toolchain()
        toolchain.cc = args.host_cc
        toolchain.cxx = args.host_cxx
        if args.distcc:
            # Route compilation through the mock distcc wrapper.
            toolchain.distcc = self.mock_distcc_path()
        return CMake(args=args, toolchain=toolchain)

    def test_common_options_defaults(self):
        """Defaults produce only the generator and compiler paths."""
        args = self.default_args()
        cmake = self.cmake(args)
        self.assertEqual(
            list(cmake.common_options()),
            ["-G", "Ninja",
             "-DCMAKE_C_COMPILER:PATH=/path/to/clang",
             "-DCMAKE_CXX_COMPILER:PATH=/path/to/clang++"])

    def test_common_options_asan(self):
        args = self.default_args()
        args.enable_asan = True
        cmake = self.cmake(args)
        self.assertEqual(
            list(cmake.common_options()),
            ["-G", "Ninja",
             "-DLLVM_USE_SANITIZER=Address",
             "-DCMAKE_C_COMPILER:PATH=/path/to/clang",
             "-DCMAKE_CXX_COMPILER:PATH=/path/to/clang++"])

    def test_common_options_ubsan(self):
        args = self.default_args()
        args.enable_ubsan = True
        cmake = self.cmake(args)
        self.assertEqual(
            list(cmake.common_options()),
            ["-G", "Ninja",
             "-DLLVM_USE_SANITIZER=Undefined",
             "-DCMAKE_C_COMPILER:PATH=/path/to/clang",
             "-DCMAKE_CXX_COMPILER:PATH=/path/to/clang++"])

    def test_common_options_asan_ubsan(self):
        """Both sanitizers combine into one semicolon-joined value."""
        args = self.default_args()
        args.enable_asan = True
        args.enable_ubsan = True
        cmake = self.cmake(args)
        self.assertEqual(
            list(cmake.common_options()),
            ["-G", "Ninja",
             "-DLLVM_USE_SANITIZER=Address;Undefined",
             "-DCMAKE_C_COMPILER:PATH=/path/to/clang",
             "-DCMAKE_CXX_COMPILER:PATH=/path/to/clang++"])

    def test_common_options_export_compile_commands(self):
        args = self.default_args()
        args.export_compile_commands = True
        cmake = self.cmake(args)
        self.assertEqual(
            list(cmake.common_options()),
            ["-G", "Ninja",
             "-DCMAKE_EXPORT_COMPILE_COMMANDS=ON",
             "-DCMAKE_C_COMPILER:PATH=/path/to/clang",
             "-DCMAKE_CXX_COMPILER:PATH=/path/to/clang++"])

    def test_common_options_distcc(self):
        """With distcc, the wrapper becomes the compiler and the real
        compiler is passed via CMAKE_*_COMPILER_ARG1."""
        args = self.default_args()
        args.distcc = True
        cmake = self.cmake(args)
        self.assertEqual(
            list(cmake.common_options()),
            ["-G", "Ninja",
             "-DCMAKE_C_COMPILER:PATH=" + self.mock_distcc_path(),
             "-DCMAKE_C_COMPILER_ARG1=/path/to/clang",
             "-DCMAKE_CXX_COMPILER:PATH=" + self.mock_distcc_path(),
             "-DCMAKE_CXX_COMPILER_ARG1=/path/to/clang++"])

    def test_common_options_xcode(self):
        """The Xcode generator additionally fixes the configuration types."""
        args = self.default_args()
        args.cmake_generator = 'Xcode'
        cmake = self.cmake(args)
        self.assertEqual(
            list(cmake.common_options()),
            ["-G", "Xcode",
             "-DCMAKE_C_COMPILER:PATH=/path/to/clang",
             "-DCMAKE_CXX_COMPILER:PATH=/path/to/clang++",
             "-DCMAKE_CONFIGURATION_TYPES=" +
             "Debug;Release;MinSizeRel;RelWithDebInfo"])

    def test_common_options_clang_compiler_version(self):
        args = self.default_args()
        args.clang_compiler_version = ("3", "8", "0")
        cmake = self.cmake(args)
        self.assertEqual(
            list(cmake.common_options()),
            ["-G", "Ninja",
             "-DCMAKE_C_COMPILER:PATH=/path/to/clang",
             "-DCMAKE_CXX_COMPILER:PATH=/path/to/clang++",
             "-DLLVM_VERSION_MAJOR:STRING=3",
             "-DLLVM_VERSION_MINOR:STRING=8",
             "-DLLVM_VERSION_PATCH:STRING=0"])

    def test_common_options_full(self):
        """All features enabled at once; checks the relative ordering of
        the emitted options."""
        args = self.default_args()
        args.enable_asan = True
        args.enable_ubsan = True
        args.export_compile_commands = True
        args.distcc = True
        args.cmake_generator = 'Xcode'
        args.clang_compiler_version = ("3", "8", "0")
        cmake = self.cmake(args)
        self.assertEqual(
            list(cmake.common_options()),
            ["-G", "Xcode",
             "-DLLVM_USE_SANITIZER=Address;Undefined",
             "-DCMAKE_EXPORT_COMPILE_COMMANDS=ON",
             "-DCMAKE_C_COMPILER:PATH=" + self.mock_distcc_path(),
             "-DCMAKE_C_COMPILER_ARG1=/path/to/clang",
             "-DCMAKE_CXX_COMPILER:PATH=" + self.mock_distcc_path(),
             "-DCMAKE_CXX_COMPILER_ARG1=/path/to/clang++",
             "-DCMAKE_CONFIGURATION_TYPES=" +
             "Debug;Release;MinSizeRel;RelWithDebInfo",
             "-DLLVM_VERSION_MAJOR:STRING=3",
             "-DLLVM_VERSION_MINOR:STRING=8",
             "-DLLVM_VERSION_PATCH:STRING=0"])

    def test_build_args_ninja(self):
        args = self.default_args()
        cmake = self.cmake(args)
        self.assertEqual(
            list(cmake.build_args()),
            ["-j8"])
        args.verbose_build = True
        cmake = self.cmake(args)
        self.assertEqual(
            list(cmake.build_args()),
            ["-j8", "-v"])

    def test_build_args_makefile(self):
        args = self.default_args()
        args.cmake_generator = "Unix Makefiles"
        cmake = self.cmake(args)
        self.assertEqual(
            list(cmake.build_args()),
            ["-j8"])
        args.verbose_build = True
        cmake = self.cmake(args)
        self.assertEqual(
            list(cmake.build_args()),
            ["-j8", "VERBOSE=1"])

    def test_build_args_xcode(self):
        args = self.default_args()
        args.cmake_generator = "Xcode"
        cmake = self.cmake(args)
        self.assertEqual(
            list(cmake.build_args()),
            ["-parallelizeTargets", "-jobs", "8"])
        # NOTE: Xcode generator DOES NOT take 'verbose-build' into account.
        args.verbose_build = True
        cmake = self.cmake(args)
        self.assertEqual(
            list(cmake.build_args()),
            ["-parallelizeTargets", "-jobs", "8"])

    def test_build_args_eclipse_ninja(self):
        # NOTE: Eclipse generator DOES NOT take 'build-jobs' into account,
        # nor 'verbose-build'.
        args = self.default_args()
        args.cmake_generator = "Eclipse CDT4 - Ninja"
        args.verbose_build = True
        cmake = self.cmake(args)
        self.assertEqual(
            list(cmake.build_args()), [])

    def test_build_args_custom_build_args(self):
        """User-supplied build args come before the generated job count."""
        args = self.default_args()
        args.build_args = ["-foo", "bar baz"]
        cmake = self.cmake(args)
        self.assertEqual(
            list(cmake.build_args()),
            ["-foo", "bar baz", "-j8"])

    def test_build_args_distcc(self):
        # NOTE(review): expected "-j6" presumably comes from the mock
        # distcc reporting its host count — confirm against mock-distcc.
        args = self.default_args()
        args.distcc = True
        cmake = self.cmake(args)
        self.assertEqual(
            list(cmake.build_args()),
            ["-j6"])
class CMakeOptionsTestCase(unittest.TestCase):
    """Tests for the CMakeOptions container: value formatting of
    ``define`` and list-like concatenation semantics."""

    def test_define(self):
        """``define`` normalizes BOOL-typed values to TRUE/FALSE, rejects
        containers and non-boolean BOOL values, and stringifies the rest."""
        options = CMakeOptions()
        options.define('OPT1:STRING', 'foo')
        # Accepted truthy spellings for a :BOOL option.
        options.define('OPT2:BOOL', True)
        options.define('OPT3:BOOL', 1)
        options.define('OPT4:BOOL', 'True')
        options.define('OPT5:BOOL', 'true')
        options.define('OPT6:BOOL', 'YES')
        options.define('OPT7:BOOL', '1')
        # Accepted falsy spellings for a :BOOL option.
        options.define('OPT8:BOOL', False)
        options.define('OPT9:BOOL', 0)
        options.define('OPT10:BOOL', 'false')
        options.define('OPT11:BOOL', 'False')
        options.define('OPT12:BOOL', 'No')
        options.define('OPT13:BOOL', '0')
        # Untyped and PATH-typed values pass through as strings.
        options.define('OPT14', 12)
        options.define('OPT15', '')
        options.define('OPT16', None)
        options.define('OPT17:PATH', 'foo')
        # Invalid values raise ValueError.
        self.assertRaises(ValueError, options.define, 'ERR', ["FOO"])
        self.assertRaises(ValueError, options.define, 'ERR', {"FOO": 1})
        self.assertRaises(ValueError, options.define, 'ERR:BOOL', None)
        self.assertRaises(ValueError, options.define, 'ERR:BOOL', 3)
        self.assertRaises(ValueError, options.define, 'ERR:BOOL', 'foo')
        self.assertRaises(ValueError, options.define, 'ERR:BOOL', [1])
        self.assertEqual(list(options), [
            '-DOPT1:STRING=foo',
            '-DOPT2:BOOL=TRUE',
            '-DOPT3:BOOL=TRUE',
            '-DOPT4:BOOL=TRUE',
            '-DOPT5:BOOL=TRUE',
            '-DOPT6:BOOL=TRUE',
            '-DOPT7:BOOL=TRUE',
            '-DOPT8:BOOL=FALSE',
            '-DOPT9:BOOL=FALSE',
            '-DOPT10:BOOL=FALSE',
            '-DOPT11:BOOL=FALSE',
            '-DOPT12:BOOL=FALSE',
            '-DOPT13:BOOL=FALSE',
            '-DOPT14=12',
            '-DOPT15=',
            '-DOPT16=',
            '-DOPT17:PATH=foo'])

    def test_operations(self):
        """`+` and `+=` work with both CMakeOptions and plain lists, and a
        plain list can absorb a CMakeOptions via `+=`."""
        options1 = CMakeOptions()
        options1.define("OPT1_1", 'VAL1')
        options1.define("OPT1_2", 'VAL2')
        options2 = CMakeOptions()
        options2.define("OPT2_1", 'VAL3')
        options = options1 + options2
        self.assertIsInstance(options, CMakeOptions)
        self.assertEqual(list(options), [
            "-DOPT1_1=VAL1",
            "-DOPT1_2=VAL2",
            "-DOPT2_1=VAL3"])
        options_added = options + ["-CUSTOM", "12"]
        self.assertIsInstance(options_added, CMakeOptions)
        self.assertEqual(list(options_added), [
            "-DOPT1_1=VAL1",
            "-DOPT1_2=VAL2",
            "-DOPT2_1=VAL3",
            "-CUSTOM", "12"])
        # `+=` appends; duplicates are kept, not deduplicated.
        options += options2
        self.assertIsInstance(options, CMakeOptions)
        self.assertEqual(list(options), [
            "-DOPT1_1=VAL1",
            "-DOPT1_2=VAL2",
            "-DOPT2_1=VAL3",
            "-DOPT2_1=VAL3"])
        options += ["-G", "Ninja"]
        self.assertIsInstance(options, CMakeOptions)
        self.assertEqual(list(options), [
            "-DOPT1_1=VAL1",
            "-DOPT1_2=VAL2",
            "-DOPT2_1=VAL3",
            "-DOPT2_1=VAL3",
            "-G", "Ninja"])
        list_options = ["-G", "Ninja"]
        list_options += options1
        self.assertIsInstance(list_options, list)
        self.assertEqual(list_options, [
            "-G", "Ninja",
            "-DOPT1_1=VAL1",
            "-DOPT1_2=VAL2"])
# Run the tests when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| |
'''
PDF Filters
This file is part of the phoneyPDF Framework
Kiran Bandla <kbandla@intovoid.com>
Copyright (c) 2013, VERISIGN, Inc
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of VERISIGN nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
import cStringIO
# Implementation of various PDF Filters
#
# Arbitrary Decoders:
# 1. FlateDecode - Yes
# 2. LZWDecode - Yes
# 3. RunLengthDecode - Yes
#
# Image Decoders:
# 1. JBIG2-Decode - No
# 2. CCITTFaxDecode - No
# 3. DCTDecode - No
# 4. JPXDecode - No
#
# 8-bit ASCII Decoders:
# 1. ASCIIHexDecode - Yes
# 2. ASCII85Decode - Yes
#
# Others
# 1. Crypt - No
# http://code.google.com/p/pdfminerr/source/browse/trunk/pdfminer/pdfminer/ascii85.py
def ASCII85Decode(data):
    """Decode an ASCII85 (base-85) encoded string (Python 2 str semantics).

    Five characters in the range '!'..'u' encode four bytes as a
    big-endian base-85 number offset by 33. 'z' is shorthand for four
    zero bytes; '~' marks the end of data. Characters outside these
    ranges (e.g. whitespace) are silently skipped.
    """
    import struct
    n = b = 0
    out = ''
    for c in data:
        if '!' <= c and c <= 'u':
            n += 1
            b = b*85+(ord(c)-33)
            if n == 5:
                # A complete group of 5 digits yields one 32-bit word.
                out += struct.pack('>L',b)
                n = b = 0
        elif c == 'z':
            # 'z' is only legal between groups, never inside one.
            assert n == 0
            out += '\0\0\0\0'
        elif c == '~':
            # End-of-data marker: flush a partial group. Padding with 84
            # ('u') and keeping only n-1 bytes implements the truncated
            # final-group rule of the ASCII85 format.
            if n:
                for _ in range(5-n):
                    b = b*85+84
                out += struct.pack('>L',b)[:n-1]
            break
    return out
def ASCIIHexDecode(data):
    """Decode an ASCIIHexDecode filter stream.

    Per the PDF specification: whitespace between hex digits is ignored,
    '>' is the end-of-data marker (everything after it is discarded), and
    a final odd hex digit is treated as if it were followed by '0'.

    The original implementation only stripped *trailing* '>' characters
    and raised on an odd number of digits; both are fixed here.
    """
    import binascii
    # Truncate at the EOD marker instead of rstrip'ing it, so garbage
    # after '>' cannot break decoding.
    data = data.split('>')[0]
    # Drop PDF whitespace characters between digits.
    hex_digits = ''.join([c for c in data if c not in ' \t\n\r\f\x00'])
    # Spec: an odd final digit implies a trailing '0'.
    if len(hex_digits) % 2:
        hex_digits += '0'
    return binascii.unhexlify(hex_digits)
def FlateDecode(data):
    """Decompress a zlib/deflate (FlateDecode) encoded stream."""
    from zlib import decompress
    return decompress(data)
def RunLengthDecode(data):
    """Decode a RunLengthDecode filter stream.

    Each run starts with a length byte L:
      * L < 128  -- copy the next L+1 bytes literally,
      * L > 128  -- repeat the next single byte 257-L times,
      * L == 128 -- end of data.

    Fixes two defects of the previous cStringIO-based version: a length
    byte of 0 (meaning "copy 1 literal byte") wrongly terminated the
    loop (``while runLength:``), and empty input crashed on ``ord('')``.
    """
    decompressed = ''
    i = 0
    while i < len(data):
        length = ord(data[i])
        i += 1
        if length == 128:
            # Explicit end-of-data marker.
            break
        if length < 128:
            # Literal run: copy length+1 bytes verbatim.
            decompressed += data[i:i + length + 1]
            i += length + 1
        else:
            # Replicated run: repeat the next byte 257-length times.
            decompressed += data[i] * (257 - length)
            i += 1
    return decompressed
#### LZW code sourced from pdfminer
# Copyright (c) 2004-2009 Yusuke Shinyama <yusuke at cs dot nyu dot edu>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
class LZWDecoder(object):
    """LZW decoder (sourced from pdfminer) operating on a file-like object.

    Codes start at 9 bits and grow to 12 bits as the string table fills.
    Code 256 resets the table; code 257 is the end-of-data marker.
    Python 2 str semantics throughout.
    """

    def __init__(self, fp):
        # fp: file-like object supporting read(1); bytes are consumed
        # one at a time by readbits().
        self.fp = fp
        # Current input byte and the number of its bits already consumed.
        self.buff = 0
        self.bpos = 8
        # Current code width in bits (9..12).
        self.nbits = 9
        # String table; initialized lazily by the reset code (256).
        self.table = None
        # Previously emitted string, needed to extend the table.
        self.prevbuf = None
        return

    def readbits(self, bits):
        """Read `bits` bits from the stream, MSB first, and return them
        as an integer. Raises EOFError when the input is exhausted."""
        v = 0
        while 1:
            # the number of remaining bits we can get from the current buffer.
            r = 8-self.bpos
            if bits <= r:
                # |-----8-bits-----|
                # |-bpos-|-bits-|  |
                # |      |----r----|
                v = (v<<bits) | ((self.buff>>(r-bits)) & ((1<<bits)-1))
                self.bpos += bits
                break
            else:
                # |-----8-bits-----|
                # |-bpos-|---bits----...
                # |      |----r----|
                v = (v<<r) | (self.buff & ((1<<r)-1))
                bits -= r
                x = self.fp.read(1)
                if not x: raise EOFError
                self.buff = ord(x)
                self.bpos = 0
        return v

    def feed(self, code):
        """Process one LZW code and return the decoded string (may be
        empty for control codes)."""
        x = ''
        if code == 256:
            # Reset: rebuild the initial single-byte table and drop back
            # to 9-bit codes.
            self.table = [ chr(c) for c in xrange(256) ] # 0-255
            self.table.append(None) # 256
            self.table.append(None) # 257
            self.prevbuf = ''
            self.nbits = 9
        elif code == 257:
            # End-of-data marker; nothing to emit.
            pass
        elif not self.prevbuf:
            # First real code after a reset: emit it as-is.
            x = self.prevbuf = self.table[code]
        else:
            if code < len(self.table):
                # Known code: emit it and extend the table with
                # prev + first char of current.
                x = self.table[code]
                self.table.append(self.prevbuf+x[0])
            else:
                # Code not yet in table (the KwKwK special case):
                # the new entry is prev + first char of prev.
                self.table.append(self.prevbuf+self.prevbuf[0])
                x = self.table[code]
            # Grow the code width one entry before the table would
            # overflow the current width.
            l = len(self.table)
            if l == 511:
                self.nbits = 10
            elif l == 1023:
                self.nbits = 11
            elif l == 2047:
                self.nbits = 12
            self.prevbuf = x
        return x

    def run(self):
        """Generator yielding decoded string fragments until EOF."""
        while 1:
            try:
                code = self.readbits(self.nbits)
            except EOFError:
                break
            x = self.feed(code)
            yield x
        return
def LZWDecode(data):
    """Decode an LZW-compressed string via LZWDecoder."""
    decoder = LZWDecoder(cStringIO.StringIO(data))
    return ''.join(decoder.run())
def hex_decode(name):
    '''
    Hex-decode a PDF name: replace each '#xx' escape (two hex digits)
    with the character it encodes.
    Example : '/E#6dbed#64#65#64Fi#6c#65' -> '/EmbeddedFile'

    Replaces the previous hand-rolled state machine with a single regex
    substitution. Also more robust: a malformed escape (e.g. '#z1') is
    left untouched instead of raising ValueError.
    '''
    import re
    return re.sub(r'#([0-9a-fA-F]{2})',
                  lambda match: chr(int(match.group(1), 16)),
                  name)
# Abbreviations
# Standard short filter names (as they may appear in PDF /Filter entries)
# mapped to the decoder implementations above.
AHx = ASCIIHexDecode
A85 = ASCII85Decode
LZW = LZWDecode
Fl = FlateDecode
RL = RunLengthDecode
# TODO
#CCF = CCITTFaxDecode
#DCT = DCTDecode
##########################################################################################
# PDFDocEncoding - Ripped from pyPDF's code
# All releases of pyPdf are distributed under the terms of a modified BSD license.
_pdfDocEncoding = (
u'\u0000', u'\u0000', u'\u0000', u'\u0000', u'\u0000', u'\u0000', u'\u0000', u'\u0000',
u'\u0000', u'\u0000', u'\u0000', u'\u0000', u'\u0000', u'\u0000', u'\u0000', u'\u0000',
u'\u0000', u'\u0000', u'\u0000', u'\u0000', u'\u0000', u'\u0000', u'\u0000', u'\u0000',
u'\u02d8', u'\u02c7', u'\u02c6', u'\u02d9', u'\u02dd', u'\u02db', u'\u02da', u'\u02dc',
u'\u0020', u'\u0021', u'\u0022', u'\u0023', u'\u0024', u'\u0025', u'\u0026', u'\u0027',
u'\u0028', u'\u0029', u'\u002a', u'\u002b', u'\u002c', u'\u002d', u'\u002e', u'\u002f',
u'\u0030', u'\u0031', u'\u0032', u'\u0033', u'\u0034', u'\u0035', u'\u0036', u'\u0037',
u'\u0038', u'\u0039', u'\u003a', u'\u003b', u'\u003c', u'\u003d', u'\u003e', u'\u003f',
u'\u0040', u'\u0041', u'\u0042', u'\u0043', u'\u0044', u'\u0045', u'\u0046', u'\u0047',
u'\u0048', u'\u0049', u'\u004a', u'\u004b', u'\u004c', u'\u004d', u'\u004e', u'\u004f',
u'\u0050', u'\u0051', u'\u0052', u'\u0053', u'\u0054', u'\u0055', u'\u0056', u'\u0057',
u'\u0058', u'\u0059', u'\u005a', u'\u005b', u'\u005c', u'\u005d', u'\u005e', u'\u005f',
u'\u0060', u'\u0061', u'\u0062', u'\u0063', u'\u0064', u'\u0065', u'\u0066', u'\u0067',
u'\u0068', u'\u0069', u'\u006a', u'\u006b', u'\u006c', u'\u006d', u'\u006e', u'\u006f',
u'\u0070', u'\u0071', u'\u0072', u'\u0073', u'\u0074', u'\u0075', u'\u0076', u'\u0077',
u'\u0078', u'\u0079', u'\u007a', u'\u007b', u'\u007c', u'\u007d', u'\u007e', u'\u0000',
u'\u2022', u'\u2020', u'\u2021', u'\u2026', u'\u2014', u'\u2013', u'\u0192', u'\u2044',
u'\u2039', u'\u203a', u'\u2212', u'\u2030', u'\u201e', u'\u201c', u'\u201d', u'\u2018',
u'\u2019', u'\u201a', u'\u2122', u'\ufb01', u'\ufb02', u'\u0141', u'\u0152', u'\u0160',
u'\u0178', u'\u017d', u'\u0131', u'\u0142', u'\u0153', u'\u0161', u'\u017e', u'\u0000',
u'\u20ac', u'\u00a1', u'\u00a2', u'\u00a3', u'\u00a4', u'\u00a5', u'\u00a6', u'\u00a7',
u'\u00a8', u'\u00a9', u'\u00aa', u'\u00ab', u'\u00ac', u'\u0000', u'\u00ae', u'\u00af',
u'\u00b0', u'\u00b1', u'\u00b2', u'\u00b3', u'\u00b4', u'\u00b5', u'\u00b6', u'\u00b7',
u'\u00b8', u'\u00b9', u'\u00ba', u'\u00bb', u'\u00bc', u'\u00bd', u'\u00be', u'\u00bf',
u'\u00c0', u'\u00c1', u'\u00c2', u'\u00c3', u'\u00c4', u'\u00c5', u'\u00c6', u'\u00c7',
u'\u00c8', u'\u00c9', u'\u00ca', u'\u00cb', u'\u00cc', u'\u00cd', u'\u00ce', u'\u00cf',
u'\u00d0', u'\u00d1', u'\u00d2', u'\u00d3', u'\u00d4', u'\u00d5', u'\u00d6', u'\u00d7',
u'\u00d8', u'\u00d9', u'\u00da', u'\u00db', u'\u00dc', u'\u00dd', u'\u00de', u'\u00df',
u'\u00e0', u'\u00e1', u'\u00e2', u'\u00e3', u'\u00e4', u'\u00e5', u'\u00e6', u'\u00e7',
u'\u00e8', u'\u00e9', u'\u00ea', u'\u00eb', u'\u00ec', u'\u00ed', u'\u00ee', u'\u00ef',
u'\u00f0', u'\u00f1', u'\u00f2', u'\u00f3', u'\u00f4', u'\u00f5', u'\u00f6', u'\u00f7',
u'\u00f8', u'\u00f9', u'\u00fa', u'\u00fb', u'\u00fc', u'\u00fd', u'\u00fe', u'\u00ff'
)
# Sanity check: the forward table must cover every byte value.
assert len(_pdfDocEncoding) == 256

# Build the reverse map (unicode char -> byte value). u'\u0000' entries
# mark unmapped code points and are skipped. Uses range() rather than
# the Python-2-only xrange() so the module also loads on Python 3.
_pdfDocEncoding_rev = {}
for i in range(256):
    char = _pdfDocEncoding[i]
    if char == u"\u0000":
        continue
    assert char not in _pdfDocEncoding_rev
    _pdfDocEncoding_rev[char] = i
def encode_pdfdocencoding(unicode_string):
    """Encode a unicode string into PDFDocEncoding.

    Raises UnicodeEncodeError for any character without a mapping in
    the reverse table.
    """
    encoded_chars = []
    for ch in unicode_string:
        if ch not in _pdfDocEncoding_rev:
            raise UnicodeEncodeError("pdfdocencoding", ch, -1, -1,
                    "does not exist in translation table")
        encoded_chars.append(chr(_pdfDocEncoding_rev[ch]))
    return ''.join(encoded_chars)
def decode_pdfdocencoding(byte_array):
    """Decode a PDFDocEncoding byte string into unicode.

    Raises UnicodeDecodeError for bytes mapped to the u'\\u0000'
    placeholder (i.e. bytes with no defined translation).
    """
    decoded_chars = []
    for byte in byte_array:
        mapped = _pdfDocEncoding[ord(byte)]
        if mapped == u'\u0000':
            raise UnicodeDecodeError("pdfdocencoding", byte, -1, -1,
                    "does not exist in translation table")
        decoded_chars.append(mapped)
    return u''.join(decoded_chars)
# END PDFDocEncoding
##########################################################################################
| |
"""
This file is part of the TheLMA (THe Laboratory Management Application) project.
See LICENSE.txt for licensing, CONTRIBUTORS.txt for contributor information.
The rack recyler allows to reuse stock racks.
AAB
"""
from thelma.tools.semiconstants import RACK_SHAPE_NAMES
from thelma.tools.iso.base import StockRackLayout
from thelma.tools.iso.base import StockRackPosition
from thelma.tools.iso.lab.base import LABELS
from thelma.tools.utils.base import VOLUME_CONVERSION_FACTOR
from thelma.tools.iso.lab.stockrack.base \
import _StockRackAssignerIsoJob
from thelma.tools.iso.lab.stockrack.base \
import _StockRackAssignerLabIso
from thelma.tools.iso.lab.stockrack.base import _StockRackAssigner
from thelma.tools.stock.base import STOCK_DEAD_VOLUME
from thelma.tools.stock.tubepicking import TubeCandidate
from thelma.tools.utils.base import CONCENTRATION_CONVERSION_FACTOR
from thelma.tools.utils.base import add_list_map_element
from thelma.tools.utils.base import are_equal_values
from thelma.tools.utils.base import get_nested_dict
from thelma.tools.utils.base import get_trimmed_string
from thelma.tools.utils.base import is_smaller_than
from thelma.tools.utils.racksector import RackSectorTranslator
from thelma.entities.sample import StockSample
__docformat__ = 'reStructuredText en'
__all__ = ['_StockRackRecycler']
class _StockRackRecycler(_StockRackAssigner):
    """
    Assigns existing racks (with the tubes already in place) as stock
    racks for a lab ISO or ISO job entity.

    **Return Value:** The updated ISO job or ISO.
    """

    def __init__(self, entity, rack_barcodes, **kw):
        """
        Constructor:

        :param entity: The ISO or the ISO job to which to assign the racks.
        :type entity: :class:`LabIso` or :class:`IsoJob`
            (see :attr:`_ENTITY_CLS).
        :param rack_barcodes: The barcodes for the stock racks to be used.
        :type rack_barcodes: :class:`list`
        """
        _StockRackAssigner.__init__(self, entity=entity,
                                    rack_barcodes=rack_barcodes, **kw)
        #: The :class:`TubeCandidate` objects for each pool in the specified
        #: racks.
        self.__tube_candidates = None
        #: Stores the barcode for each stock rack marker.
        self._stock_rack_map = None

    def reset(self):
        _StockRackAssigner.reset(self)
        self.__tube_candidates = dict()
        self._stock_rack_map = dict()

    def _find_stock_tubes(self):
        """
        The racks must not contain other tubes, and volume and concentration
        for each tube must match the expected data. The position of the
        tubes is not checked, though.
        """
        self.add_debug('Check suggested stock racks ...')
        self.__create_tube_candidates()
        # Each step only runs if the previous one did not record errors.
        if not self.has_errors(): self.__check_and_assign_tube_candidates()
        if not self.has_errors(): self._check_position_contraints()

    def __create_tube_candidates(self):
        """
        Checks the tubes in the given racks and converts them into
        :class:`TubeCandidate` objects.
        Samples which are not stock samples or do not have a volume are
        reported as errors.
        The number of tubes found must match the number of stock tube
        containers.
        """
        # Maps rack barcode -> list of offending tube descriptions.
        no_stock_sample = dict()
        no_sample = dict()
        num_tubes = 0
        for barcode, rack in self._barcode_map.iteritems():
            for tube in rack.containers:
                stock_sample = tube.sample
                rack_pos = tube.rack_position
                num_tubes += 1
                tube_info = '%s (%s)' % (tube.barcode, rack_pos.label)
                if stock_sample is None:
                    # Tube without any sample -> "empty tube" error below.
                    add_list_map_element(no_sample, barcode, tube_info)
                    continue
                sample_vol = stock_sample.volume * VOLUME_CONVERSION_FACTOR
                if are_equal_values(sample_vol, 0):
                    # Zero volume counts as empty, too.
                    add_list_map_element(no_sample, barcode, tube_info)
                    continue
                if not isinstance(stock_sample, StockSample):
                    add_list_map_element(no_stock_sample, barcode, tube_info)
                    continue
                pool = stock_sample.molecule_design_pool
                tc = TubeCandidate(pool_id=pool.id,
                                   rack_barcode=barcode,
                                   rack_position=rack_pos,
                                   tube_barcode=tube.barcode,
                                   concentration=stock_sample.concentration,
                                   volume=stock_sample.volume)
                self.__tube_candidates[pool] = tc
        if len(no_sample) > 0:
            rack_strs = []
            for barcode in sorted(no_sample.keys()):
                info_str = '%s (%s)' % (barcode, self._get_joined_str(
                                        no_sample[barcode]))
                rack_strs.append(info_str)
            msg = 'In some racks there are empty tubes: %s. Please remove ' \
                  'them and try again.' % (' -- '.join(rack_strs))
            self.add_error(msg)
        if len(no_stock_sample) > 0:
            msg = 'The tubes in some of the racks you have specified contain ' \
                  'normal samples instead of stock samples. Talk to the ' \
                  'IT department, please. Details: %s.' \
                  % (self._get_joined_map_str(no_stock_sample))
            self.add_error(msg)
        exp_tube_num = len(self._stock_tube_containers)
        if not self.has_errors() and not exp_tube_num == num_tubes:
            msg = 'The number of tubes in the racks you have specified (%i) ' \
                  'is different from the expected one (%i). Remove all tubes ' \
                  'that are not required and add the missing ones or try ' \
                  'the generate a new stock rack.' % (num_tubes, exp_tube_num)
            self.add_error(msg)

    def __check_and_assign_tube_candidates(self):
        """
        Checks whether there is a candidate for each requested pool and
        whether volume and concentration match.
        """
        missing_pools = []
        invalid_conc = []
        invalid_vol = []
        for pool, container in self._stock_tube_containers.iteritems():
            if not self.__tube_candidates.has_key(pool):
                missing_pools.append(pool.id)
                continue
            candidate = self.__tube_candidates[pool]
            exp_stock_conc = pool.default_stock_concentration \
                             * CONCENTRATION_CONVERSION_FACTOR
            if not are_equal_values(candidate.concentration,
                                    exp_stock_conc):
                info = '%s (pool: %s, expected: %s nM, found: %s nM)' % (
                        candidate.tube_barcode, pool,
                        get_trimmed_string(exp_stock_conc),
                        get_trimmed_string(candidate.concentration))
                invalid_conc.append(info)
                continue
            # The stock dead volume cannot be pipetted and must be added
            # on top of the transfer requirement.
            required_vol = STOCK_DEAD_VOLUME \
                           + container.get_total_required_volume()
            if is_smaller_than(candidate.volume, required_vol):
                info = '%s (pool: %s, required: %s ul, found: %s ul)' % (
                        candidate.tube_barcode, pool,
                        get_trimmed_string(required_vol),
                        get_trimmed_string(candidate.volume))
                invalid_vol.append(info)
                continue
            container.tube_candidate = candidate
        if len(missing_pools) > 0:
            msg = 'Could not find tubes for the following pools: %s.' % (
                  self._get_joined_str(missing_pools, is_strs=False))
            self.add_error(msg)
        if len(invalid_conc) > 0:
            msg = 'The concentrations in some tubes do not match the ' \
                  'expected ones: %s.' % (self._get_joined_str(invalid_conc))
            self.add_error(msg)
        if len(invalid_vol) > 0:
            msg = 'The volumes in some tubes (dead volume included) are not ' \
                  'sufficient: %s.' % (self._get_joined_str(invalid_vol))
            self.add_error(msg)

    def _check_position_contraints(self):
        # (sic: the 'contraints' spelling is part of the API name that
        # subclasses override — do not rename without updating them.)
        """
        Checks potential position constraints for the tube candidates
        (e.g. rack sector matching).
        Might allocate rack barcodes and rack markers.
        """
        raise NotImplementedError('Abstract method.')

    def _create_stock_rack_layouts(self):
        """
        Creates a stock rack layout for each required rack (potential rack
        sector constraints are expected to be confirmed).
        The rack markers might not match the layout rack markers anymore
        but this does not matter, since further processing does not use the
        layout rack markers anymore.
        """
        # Invert marker -> barcode into barcode -> marker for lookups below.
        marker_map = dict()
        for marker, rack_barcode in self._stock_rack_map.iteritems():
            marker_map[rack_barcode] = marker
        layouts = dict()
        used_rack_markers = set()
        for pool, container in self._stock_tube_containers.iteritems():
            tc = container.tube_candidate
            rack_barcode = tc.rack_barcode
            if marker_map.has_key(rack_barcode):
                # Rack already has a marker (e.g. allocated by the sector
                # check) -- reuse it and its layout.
                rack_marker = marker_map[rack_barcode]
                if layouts.has_key(rack_barcode):
                    layout = layouts[rack_barcode]
                else:
                    layout = StockRackLayout()
                    layouts[rack_barcode] = layout
            else:
                rack_marker = container.stock_rack_marker
                # NOTE(review): nothing ever adds to used_rack_markers, so
                # this collision branch appears unreachable — confirm intent.
                if rack_marker in used_rack_markers:
                    nums = []
                    for marker in used_rack_markers:
                        value_parts = LABELS.parse_rack_marker(marker)
                        nums.append(value_parts[LABELS.MARKER_RACK_NUM])
                    new_num = max(nums) + 1
                    rack_marker = LABELS.create_rack_marker(LABELS.ROLE_STOCK,
                                                            new_num)
                marker_map[rack_barcode] = rack_marker
                layout = StockRackLayout()
                layouts[rack_barcode] = layout
            # Collect the transfer targets for this pool across all target
            # plates of the container.
            tts = []
            for plate_label, positions in container.plate_target_positions.\
                                          iteritems():
                if plate_label == LABELS.ROLE_FINAL:
                    trg_marker = plate_label
                else:
                    trg_marker = self._rack_containers[plate_label].rack_marker
                for plate_pos in positions:
                    tts.append(plate_pos.as_transfer_target(trg_marker))
            sr_pos = StockRackPosition(rack_position=tc.rack_position,
                                       molecule_design_pool=pool,
                                       tube_barcode=tc.tube_barcode,
                                       transfer_targets=tts)
            layout.add_position(sr_pos)
        # Publish the layouts under their markers and register any newly
        # allocated marker -> barcode assignments.
        for rack_barcode, marker in marker_map.iteritems():
            self._stock_rack_layouts[marker] = layouts[rack_barcode]
            if not self._stock_rack_map.has_key(marker):
                self._stock_rack_map[marker] = rack_barcode

    def _get_stock_rack_map(self):
        # Accessor used by the base-class machinery.
        return self._stock_rack_map

    def _create_output(self):
        """
        There is nothing to be generated anymore. We only set the return value.
        """
        self.return_value = self.entity
        self.add_info('Stock racks assigning completed.')
class StockRackRecyclerIsoJob(_StockRackRecycler, _StockRackAssignerIsoJob):
    """
    Assigns existing racks (with the tubes already in place) as stock
    racks for an ISO job entity.

    Most methods below exist only to pin down which base class implements
    each step of the shared workflow (explicit MRO disambiguation between
    :class:`_StockRackRecycler` and :class:`_StockRackAssignerIsoJob`).

    **Return Value:** The updated ISO job.
    """
    NAME = 'Lab ISO Job Stock Rack Recycler'

    #pylint: disable=W0231
    def __init__(self, entity, rack_barcodes, **kw):
        """
        Constructor:

        :param entity: The ISO or the ISO job to which to assign the racks.
        :type entity: :class:`LabIso` or :class:`IsoJob`
            (see :attr:`_ENTITY_CLS).
        :param rack_barcodes: The barcodes for the stock racks to be used.
        :type rack_barcodes: :class:`list`
        """
        _StockRackRecycler.__init__(self, entity=entity,
                                    rack_barcodes=rack_barcodes, **kw)
    #pylint: enable=W0231

    def reset(self):
        _StockRackRecycler.reset(self)

    def _check_input(self):
        _StockRackRecycler._check_input(self)

    def _get_layouts(self):
        _StockRackAssignerIsoJob._get_layouts(self)

    def _find_starting_wells(self):
        _StockRackAssignerIsoJob._find_starting_wells(self)

    def _find_stock_tubes(self):
        _StockRackRecycler._find_stock_tubes(self)

    def _check_position_contraints(self):
        """
        All job samples are transferred with the BioMek, hence, there are
        no position constraints (although the layouts might not be optimized).
        """
        pass

    def _create_stock_rack_layouts(self):
        _StockRackRecycler._create_stock_rack_layouts(self)

    def _get_stock_transfer_pipetting_specs(self):
        return _StockRackAssignerIsoJob._get_stock_transfer_pipetting_specs(self)

    def _clear_entity_stock_racks(self):
        _StockRackAssignerIsoJob._clear_entity_stock_racks(self)

    def _create_stock_rack_entity(self, stock_rack_marker, base_kw):
        return _StockRackAssignerIsoJob._create_stock_rack_entity(self,
                                       stock_rack_marker, base_kw)

    def _create_output(self):
        _StockRackRecycler._create_output(self)
class StockRackRecyclerLabIso(_StockRackRecycler, _StockRackAssignerLabIso):
    """
    Assigns existing racks (with the tubes already in place) as stock
    racks for a lab ISO entity.

    Combines the tube/rack validation of :class:`_StockRackRecycler` with
    the lab-ISO-specific layout handling of :class:`_StockRackAssignerLabIso`;
    most method overrides only pin down which parent implementation is used.

    **Return Value:** The updated lab ISO.
    """
    NAME = 'Lab ISO Stock Rack Recycler'
    #pylint: disable=W0231
    def __init__(self, entity, rack_barcodes, **kw):
        """
        Constructor:

        :param entity: The ISO or the ISO job to which to assign the racks.
        :type entity: :class:`LabIso` or :class:`IsoJob`
            (see :attr:`_ENTITY_CLS).
        :param rack_barcodes: The barcodes for the stock racks to be used.
        :type rack_barcodes: :class:`list`
        """
        _StockRackRecycler.__init__(self, entity=entity,
                                    rack_barcodes=rack_barcodes, **kw)
        self._complete_init()
        #: The :class:`RackSectorTranslator`s translating 384-well target
        #: plate positions into stock rack positions mapped onto source
        #: sector indices (for sector data only).
        # NOTE(review): this attribute is assigned *after* _complete_init()
        # runs -- if _complete_init() (or reset()) already populated it, the
        # value is clobbered back to None here; confirm the intended order.
        self.__ssc_layout_map = None
    #pylint: enable=W0231
    def reset(self):
        _StockRackRecycler.reset(self)
        # reset() re-creates the map that __init__ leaves as None.
        self.__ssc_layout_map = dict()
    def _check_input(self):
        _StockRackRecycler._check_input(self)
    def _get_layouts(self):
        # Layout retrieval is lab-ISO specific, hence the assigner parent.
        _StockRackAssignerLabIso._get_layouts(self)
    def _find_stock_tubes(self):
        _StockRackRecycler._find_stock_tubes(self)
    def _check_position_contraints(self):
        """
        Samples that shall be transferred via the CyBio must be located in
        defined positions.
        If all positions are consistent, the stock rack barcodes and rack
        sectors are allocated.
        """
        # NOTE(review): "contraints" (sic) matches the base-class hook name.
        inconsistent_sectors = []
        inconsistent_positions = []
        self._stock_rack_sectors = dict()
        expected_positions = self.__get_expected_sector_positions()
        if not expected_positions is None:
            for sector_index, sector_pools in expected_positions.iteritems():
                # All pools of one sector must come from one physical rack.
                rack_barcode = None
                stock_rack_marker = None
                sector_positions = []
                for pool, exp_pos in sector_pools.iteritems():
                    container = self._stock_tube_containers[pool]
                    tc = container.tube_candidate
                    sector_positions.append(exp_pos)
                    if stock_rack_marker is None:
                        stock_rack_marker = container.stock_rack_marker
                    if rack_barcode is None:
                        rack_barcode = tc.rack_barcode
                    elif not rack_barcode == tc.rack_barcode:
                        # Pools of this sector are spread over several racks.
                        inconsistent_sectors.append(sector_index)
                        break
                    if not exp_pos == tc.rack_position:
                        # Tube present but in the wrong well of its rack.
                        info = 'tube %s in rack %s (exp: %s, found: %s)' \
                               % (tc.tube_barcode, rack_barcode, exp_pos,
                                  tc.rack_position)
                        inconsistent_positions.append(info)
                        continue
                self._stock_rack_map[stock_rack_marker] = rack_barcode
                self._stock_rack_sectors[sector_index] = sector_positions
        if len(inconsistent_sectors) > 0:
            msg = 'The pools for the following sectors are spread over ' \
                  'several racks: %s!' % (self._get_joined_str(
                  inconsistent_sectors, is_strs=False))
            self.add_error(msg)
        if len(inconsistent_positions) > 0:
            msg = 'The following tubes scheduled for the CyBio are located ' \
                  'in wrong positions: %s.' \
                  % (self._get_joined_str(inconsistent_positions))
            self.add_error(msg)
    def __get_expected_sector_positions(self):
        """
        Returns the expected position for each pool sorted by sector index.

        Returns ``None`` (after recording an error) if the sector data are
        inconsistent across the plate layouts.
        """
        inconsistent = []
        contains_non_sectors = False
        has_sectors = False
        expected_positions = dict()
        for pool, container in self._stock_tube_containers.iteritems():
            stock_rack_marker = container.stock_rack_marker
            for plate_label, positions in container.plate_target_positions.\
                                          iteritems():
                plate_layout = self._plate_layouts[plate_label]
                # Group this pool's target positions by their sector index.
                sector_positions = dict()
                for plate_pos in positions:
                    sector_index = plate_pos.sector_index
                    if sector_index is None:
                        contains_non_sectors = True
                        continue
                    elif not has_sectors:
                        has_sectors = True
                    add_list_map_element(sector_positions, sector_index,
                                         plate_pos)
                if len(sector_positions) < 1: continue
                exp_data = self.__get_expected_stock_rack_position(plate_layout,
                                                           sector_positions)
                if exp_data is None:
                    inconsistent.append(pool.id)
                else:
                    sector_index = exp_data[1]
                    self._stock_rack_sectors[stock_rack_marker] = sector_index
                    sector_pools = get_nested_dict(expected_positions,
                                                   sector_index)
                    exp_pos = exp_data[0]
                    sector_pools[pool] = exp_pos
        # Mixing sector and non-sector samples cannot be resolved.
        if contains_non_sectors and has_sectors:
            msg = 'The sector data for the layouts are inconsistent - some ' \
                  'sector indices for samples are None!'
            self.add_error(msg)
            return None
        if inconsistent:
            msg = 'The sector for the following pools are inconsistent ' \
                  'in the layouts: %s.' % (self._get_joined_str(inconsistent,
                  is_strs=False))
            self.add_error(msg)
            return None
        return expected_positions
    def __get_expected_stock_rack_position(self, plate_layout,
                                           sector_positions):
        """
        There can only be one positions, because otherwise the positions
        do not match the sectors.

        Returns a ``(rack_position, sector_index)`` tuple, or ``None`` if
        translation fails or more than one stock rack position results.
        """
        stock_rack_positions = set()
        ref_sectors = set()
        for sector_index, positions in sector_positions.iteritems():
            ref_sectors.add(sector_index)
            if plate_layout.shape.name == RACK_SHAPE_NAMES.SHAPE_96:
                # 96-well plates map 1:1 onto stock rack positions.
                for plate_pos in positions:
                    stock_rack_positions.add(plate_pos.rack_position)
            else:
                # 384-well plates: translate via a cached sector translator.
                if self.__ssc_layout_map.has_key(sector_index):
                    translator = self.__ssc_layout_map[sector_index]
                else:
                    translator = RackSectorTranslator(number_sectors=4,
                                source_sector_index=sector_index,
                                target_sector_index=0,
                                behaviour=RackSectorTranslator.ONE_TO_MANY)
                    self.__ssc_layout_map[sector_index] = translator
                for plate_pos in positions:
                    base_msg = 'Error when trying to determine stock rack ' \
                               'position for position %s:' \
                               % (plate_pos.rack_position.label)
                    trans_pos = self._run_and_record_error(translator.translate,
                                base_msg, ValueError,
                                **dict(rack_position=plate_pos.rack_position))
                    if trans_pos is None: return None
                    stock_rack_positions.add(trans_pos)
        if len(stock_rack_positions) > 1: return None
        return (list(stock_rack_positions)[0], min(ref_sectors))
    def _create_stock_rack_layouts(self):
        _StockRackRecycler._create_stock_rack_layouts(self)
    def _get_stock_transfer_pipetting_specs(self):
        return _StockRackAssignerLabIso._get_stock_transfer_pipetting_specs(
                                                                        self)
    def _clear_entity_stock_racks(self):
        _StockRackAssignerLabIso._clear_entity_stock_racks(self)
    def _create_stock_rack_entity(self, stock_rack_marker, base_kw):
        return _StockRackAssignerLabIso._create_stock_rack_entity(self,
                                        stock_rack_marker, base_kw)
    def _create_output(self):
        _StockRackRecycler._create_output(self)
| |
'''
This class implements the AIML pattern-matching algorithm described
by Dr. Richard Wallace at the following site:
http://www.alicebot.org/documentation/matching.html
'''
from __future__ import print_function
import marshal
import pprint
import re
import string
import sys
from .constants import *
class PatternMgr:
    """Store AIML patterns in a prefix tree and match user input against them.

    Implements the AIML pattern-matching algorithm described by Dr. Richard
    Wallace at: http://www.alicebot.org/documentation/matching.html

    Each pattern is a (pattern, that, topic) word-sequence triple.  The tree
    is a nested dict whose string keys are words and whose integer keys (the
    _* class constants below) mark wildcards, section boundaries and
    templates; integers can never collide with word strings.
    """
    # special dictionary keys
    _UNDERSCORE = 0
    _STAR = 1
    _TEMPLATE = 2
    _THAT = 3
    _TOPIC = 4
    _BOT_NAME = 5

    def __init__(self):
        self._root = {}
        self._templateCount = 0
        self._botName = u"Nameless"
        # Double backslash -> one literal backslash in the punctuation set.
        # (The original "\|" is an invalid escape: a warning on Python 3,
        # an error in future versions.)
        punctuation = "\"`~!@#$%^&*()-_=+[{]}\\|;:',<.>/?"
        self._puncStripRE = re.compile("[" + re.escape(punctuation) + "]")
        self._whitespaceRE = re.compile(r"\s+", re.UNICODE)

    def numTemplates(self):
        """Return the number of templates currently stored."""
        return self._templateCount

    def setBotName(self, name):
        """Set the name of the bot, used to match <bot name="name"> tags in
        patterns. The name must be a single word!
        """
        # Normalize internal whitespace.  (The original wrapped this call in
        # unicode(), which raises NameError on Python 3; str.join already
        # yields a text string on both major versions.)
        self._botName = ' '.join(name.split())

    def dump(self):
        """Print all learned patterns, for debugging purposes."""
        pprint.pprint(self._root)

    def save(self, filename):
        """Dump the current patterns to the file specified by filename. To
        restore later, use restore().
        """
        try:
            # 'with' guarantees the handle is closed even if marshal raises.
            with open(filename, "wb") as outFile:
                marshal.dump(self._templateCount, outFile)
                marshal.dump(self._botName, outFile)
                marshal.dump(self._root, outFile)
        except Exception:
            print( "Error saving PatternMgr to file %s:" % filename )
            raise

    def restore(self, filename):
        """Restore a previously save()d collection of patterns."""
        try:
            with open(filename, "rb") as inFile:
                # Fields must be read in the same order save() wrote them.
                self._templateCount = marshal.load(inFile)
                self._botName = marshal.load(inFile)
                self._root = marshal.load(inFile)
        except Exception:
            print( "Error restoring PatternMgr from file %s:" % filename )
            raise

    def add(self, data, template):
        """Add a [pattern/that/topic] tuple and its corresponding template
        to the node tree.
        """
        pattern, that, topic = data
        # TODO: make sure words contains only legal characters
        # (alphanumerics,*,_)
        # Navigate through the node tree to the template's location, adding
        # nodes if necessary.
        node = self._root
        for word in pattern.split():
            key = word
            if key == u"_":
                key = self._UNDERSCORE
            elif key == u"*":
                key = self._STAR
            elif key == u"BOT_NAME":
                key = self._BOT_NAME
            if key not in node:
                node[key] = {}
            node = node[key]
        # navigate further down, if a non-empty "that" pattern was included
        if len(that) > 0:
            if self._THAT not in node:
                node[self._THAT] = {}
            node = node[self._THAT]
            for word in that.split():
                key = word
                if key == u"_":
                    key = self._UNDERSCORE
                elif key == u"*":
                    key = self._STAR
                if key not in node:
                    node[key] = {}
                node = node[key]
        # navigate yet further down, if a non-empty "topic" string was included
        if len(topic) > 0:
            if self._TOPIC not in node:
                node[self._TOPIC] = {}
            node = node[self._TOPIC]
            for word in topic.split():
                key = word
                if key == u"_":
                    key = self._UNDERSCORE
                elif key == u"*":
                    key = self._STAR
                if key not in node:
                    node[key] = {}
                node = node[key]
        # add the template; only bump the count for a new storage location.
        if self._TEMPLATE not in node:
            self._templateCount += 1
        node[self._TEMPLATE] = template

    def match(self, pattern, that, topic):
        """Return the template which is the closest match to pattern. The
        'that' parameter contains the bot's previous response. The 'topic'
        parameter contains the current topic of conversation.

        Returns None if no template is found.
        """
        if len(pattern) == 0:
            return None
        # Mutilate the input.  Remove all punctuation and convert the
        # text to all caps.
        input_ = pattern.upper()
        input_ = re.sub(self._puncStripRE, " ", input_)
        if that.strip() == u"": that = u"ULTRABOGUSDUMMYTHAT" # 'that' must never be empty
        thatInput = that.upper()
        thatInput = re.sub(self._puncStripRE, " ", thatInput)
        thatInput = re.sub(self._whitespaceRE, " ", thatInput)
        if topic.strip() == u"": topic = u"ULTRABOGUSDUMMYTOPIC" # 'topic' must never be empty
        topicInput = topic.upper()
        topicInput = re.sub(self._puncStripRE, " ", topicInput)
        # Pass the input off to the recursive call
        patMatch, template = self._match(input_.split(), thatInput.split(), topicInput.split(), self._root)
        return template

    def star(self, starType, pattern, that, topic, index):
        """Returns a string, the portion of pattern that was matched by a *.

        The 'starType' parameter specifies which type of star to find.
        Legal values are:
         - 'star': matches a star in the main pattern.
         - 'thatstar': matches a star in the that pattern.
         - 'topicstar': matches a star in the topic pattern.
        'index' is 1-based: index=1 selects the first star of that type.
        """
        # Mutilate the input.  Remove all punctuation and convert the
        # text to all caps.
        input_ = pattern.upper()
        input_ = re.sub(self._puncStripRE, " ", input_)
        input_ = re.sub(self._whitespaceRE, " ", input_)
        if that.strip() == u"": that = u"ULTRABOGUSDUMMYTHAT" # 'that' must never be empty
        thatInput = that.upper()
        thatInput = re.sub(self._puncStripRE, " ", thatInput)
        thatInput = re.sub(self._whitespaceRE, " ", thatInput)
        if topic.strip() == u"": topic = u"ULTRABOGUSDUMMYTOPIC" # 'topic' must never be empty
        topicInput = topic.upper()
        topicInput = re.sub(self._puncStripRE, " ", topicInput)
        topicInput = re.sub(self._whitespaceRE, " ", topicInput)
        # Pass the input off to the recursive pattern-matcher
        patMatch, template = self._match(input_.split(), thatInput.split(), topicInput.split(), self._root)
        if template is None:
            return ""
        # Extract the appropriate portion of the pattern, based on the
        # starType argument.
        words = None
        if starType == 'star':
            patMatch = patMatch[:patMatch.index(self._THAT)]
            words = input_.split()
        elif starType == 'thatstar':
            patMatch = patMatch[patMatch.index(self._THAT)+1 : patMatch.index(self._TOPIC)]
            words = thatInput.split()
        elif starType == 'topicstar':
            patMatch = patMatch[patMatch.index(self._TOPIC)+1 :]
            words = topicInput.split()
        else:
            # unknown value
            raise ValueError( "starType must be in ['star', 'thatstar', 'topicstar']" )
        # compare the input string to the matched pattern, word by word.
        # At the end of this loop, if foundTheRightStar is true, start and
        # end will contain the start and end indices (in "words") of
        # the substring that the desired star matched.
        foundTheRightStar = False
        start = end = j = numStars = k = 0
        for i in range(len(words)):
            # This condition is true after processing a star
            # that ISN'T the one we're looking for.
            if i < k:
                continue
            # If we're reached the end of the pattern, we're done.
            if j == len(patMatch):
                break
            if not foundTheRightStar:
                if patMatch[j] in [self._STAR, self._UNDERSCORE]: #we got a star
                    numStars += 1
                    if numStars == index:
                        # This is the star we care about.
                        foundTheRightStar = True
                    start = i
                    # Iterate through the rest of the string.
                    for k in range(i, len(words)):
                        # If the star is at the end of the pattern,
                        # we know exactly where it ends.
                        if j+1 == len(patMatch):
                            end = len(words)
                            break
                        # If the words have started matching the
                        # pattern again, the star has ended.
                        if patMatch[j+1] == words[k]:
                            end = k - 1
                            i = k
                            break
                # If we just finished processing the star we cared
                # about, we exit the loop early.
                if foundTheRightStar:
                    break
            # Move to the next element of the pattern.
            j += 1
        # extract the star words from the original, unmutilated input.
        if foundTheRightStar:
            #print( ' '.join(pattern.split()[start:end+1]) )
            if starType == 'star': return ' '.join(pattern.split()[start:end+1])
            elif starType == 'thatstar': return ' '.join(that.split()[start:end+1])
            elif starType == 'topicstar': return ' '.join(topic.split()[start:end+1])
        else: return u""

    def _match(self, words, thatWords, topicWords, root):
        """Return a tuple (pat, tem) where pat is a list of nodes, starting
        at the root and leading to the matching pattern, and tem is the
        matched template.
        """
        # base-case: if the word list is empty, return the current node's
        # template.
        if len(words) == 0:
            # we're out of words.
            pattern = []
            template = None
            if len(thatWords) > 0:
                # If thatWords isn't empty, recursively
                # pattern-match on the _THAT node with thatWords as words.
                try:
                    pattern, template = self._match(thatWords, [], topicWords, root[self._THAT])
                    if pattern is not None:
                        pattern = [self._THAT] + pattern
                except KeyError:
                    pattern = []
                    template = None
            elif len(topicWords) > 0:
                # If thatWords is empty and topicWords isn't, recursively pattern
                # on the _TOPIC node with topicWords as words.
                try:
                    pattern, template = self._match(topicWords, [], [], root[self._TOPIC])
                    if pattern is not None:
                        pattern = [self._TOPIC] + pattern
                except KeyError:
                    pattern = []
                    template = None
            if template is None:
                # we're totally out of input.  Grab the template at this node.
                pattern = []
                try: template = root[self._TEMPLATE]
                except KeyError: template = None
            return (pattern, template)
        first = words[0]
        suffix = words[1:]
        # Check underscore.
        # Note: this is causing problems in the standard AIML set, and is
        # currently disabled.
        if self._UNDERSCORE in root:
            # Must include the case where suf is [] in order to handle the case
            # where a * or _ is at the end of the pattern.
            for j in range(len(suffix)+1):
                suf = suffix[j:]
                pattern, template = self._match(suf, thatWords, topicWords, root[self._UNDERSCORE])
                if template is not None:
                    newPattern = [self._UNDERSCORE] + pattern
                    return (newPattern, template)
        # Check first
        if first in root:
            pattern, template = self._match(suffix, thatWords, topicWords, root[first])
            if template is not None:
                newPattern = [first] + pattern
                return (newPattern, template)
        # check bot name
        if self._BOT_NAME in root and first == self._botName:
            pattern, template = self._match(suffix, thatWords, topicWords, root[self._BOT_NAME])
            if template is not None:
                newPattern = [first] + pattern
                return (newPattern, template)
        # check star
        if self._STAR in root:
            # Must include the case where suf is [] in order to handle the case
            # where a * or _ is at the end of the pattern.
            for j in range(len(suffix)+1):
                suf = suffix[j:]
                pattern, template = self._match(suf, thatWords, topicWords, root[self._STAR])
                if template is not None:
                    newPattern = [self._STAR] + pattern
                    return (newPattern, template)
        # No matches were found.
        return (None, None)
| |
#!/usr/bin/python2
# -*-coding:utf-8 -*
# Copyright (c) 2011-2015, Intel Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Integer parameter type testcases - UINT8
List of tested functions :
--------------------------
- [setParameter] function
- [getParameter] function
Initial Settings :
------------------
UINT8 :
- unsigned
- size = 8
- range : [0, 100]
Test cases :
------------
- UINT8 parameter min value = 0
- UINT8 parameter min value out of bounds = -1
- UINT8 parameter max value = 100
- UINT8 parameter max value out of bounds = 101
- UINT8 parameter in nominal case = 50
"""
import os
from Util.PfwUnitTestLib import PfwTestCase
from Util import ACTLogging
log=ACTLogging.Logger()
# Test of type UINT8 - range [0, 100]
# Test of type UINT8 - range [0, 100]
class TestCases(PfwTestCase):
    """UINT8 integer-parameter testcases (unsigned, size 8, range [0, 100]).

    Each test sets the parameter through the PFW command pipe and verifies
    the value on the blackboard (getParameter) and on the filesystem
    (the file $PFW_RESULT/UINT8, which stores the value in hex).
    """

    def setUp(self):
        # Tuning mode must be on for setParameter to be accepted.
        self.param_name = "/Test/Test/TEST_DIR/UINT8"
        self.pfw.sendCmd("setTuningMode", "on")

    def tearDown(self):
        self.pfw.sendCmd("setTuningMode", "off")

    def _filesystem_value(self):
        """Return the UINT8 value currently stored on the filesystem
        (trailing newline stripped)."""
        with open(os.environ["PFW_RESULT"] + "/UINT8") as param_file:
            return param_file.read()[:-1]

    def _check_set_in_bounds(self, value, hex_value):
        """Set the parameter to `value` (a decimal string) and verify both
        the blackboard value and the filesystem value (`hex_value`)."""
        # Set parameter value
        out, err = self.pfw.sendCmd("setParameter", self.param_name, value)
        assert err is None, log.E("when setting parameter %s : %s"
                                  % (self.param_name, err))
        assert out == "Done", log.F("when setting parameter %s : %s"
                                    % (self.param_name, out))
        # Check parameter value on blackboard
        out, err = self.pfw.sendCmd("getParameter", self.param_name, "")
        # (fixed: these messages previously said "setting" for a get)
        assert err is None, log.E("when getting parameter %s : %s"
                                  % (self.param_name, err))
        assert out == value, log.F("BLACKBOARD : Incorrect value for %s, expected: %s, found: %s"
                                   % (self.param_name, value, out))
        # Check parameter value on filesystem
        assert self._filesystem_value() == hex_value, log.F("FILESYSTEM : parameter update error")

    def _check_set_out_of_bounds(self, value):
        """Try to set an out-of-range `value`; verify the command is
        rejected and the filesystem value is unchanged."""
        param_check = self._filesystem_value()
        # Set parameter value (failure expected)
        out, err = self.pfw.sendCmd("setParameter", self.param_name, value, expectSuccess=False)
        assert err is None, log.E("when setting parameter %s : %s"
                                  % (self.param_name, err))
        assert out != "Done", log.F("PFW : Error not detected when setting parameter %s out of bounds"
                                    % (self.param_name))
        # Check parameter value on filesystem
        assert self._filesystem_value() == param_check, log.F("FILESYSTEM : Forbiden parameter change")

    def test_Nominal_Case(self):
        """
        Testing UINT8 in nominal case = 50
        ----------------------------------
        Test case description :
        ~~~~~~~~~~~~~~~~~~~~~~~
        - set UINT8 parameter in nominal case = 50
        Tested commands :
        ~~~~~~~~~~~~~~~~~
        - [setParameter] function
        Used commands :
        ~~~~~~~~~~~~~~~
        - [getParameter] function
        Expected result :
        ~~~~~~~~~~~~~~~~~
        - UINT8 parameter set to 50
        - Blackboard and filesystem values checked
        """
        log.D(self.test_Nominal_Case.__doc__)
        log.I("UINT8 parameter in nominal case = 50")
        self._check_set_in_bounds("50", "0x32")
        log.I("test OK")

    def test_TypeMin(self):
        """
        Testing UINT8 minimal value = 0
        -------------------------------
        Test case description :
        ~~~~~~~~~~~~~~~~~~~~~~~
        - set UINT8 parameter min value = 0
        Tested commands :
        ~~~~~~~~~~~~~~~~~
        - [setParameter] function
        Used commands :
        ~~~~~~~~~~~~~~~
        - [getParameter] function
        Expected result :
        ~~~~~~~~~~~~~~~~~
        - UINT8 parameter set to 0
        - Blackboard and filesystem values checked
        """
        log.D(self.test_TypeMin.__doc__)
        log.I("UINT8 parameter min value = 0")
        self._check_set_in_bounds("0", "0x0")
        log.I("test OK")

    def test_TypeMin_Overflow(self):
        """
        Testing UINT8 parameter value out of negative range
        ---------------------------------------------------
        Test case description :
        ~~~~~~~~~~~~~~~~~~~~~~~
        - set UINT8 to -1
        Tested commands :
        ~~~~~~~~~~~~~~~~~
        - [setParameter] function
        Used commands :
        ~~~~~~~~~~~~~~~
        - [getParameter] function
        Expected result :
        ~~~~~~~~~~~~~~~~~
        - error detected
        - UINT8 parameter not updated
        - Blackboard and filesystem values checked
        """
        log.D(self.test_TypeMin_Overflow.__doc__)
        log.I("UINT8 parameter min value out of bounds = -1")
        self._check_set_out_of_bounds("-1")
        log.I("test OK")

    def test_TypeMax(self):
        """
        Testing UINT8 parameter maximum value
        -------------------------------------
        Test case description :
        ~~~~~~~~~~~~~~~~~~~~~~~
        - set UINT8 to 100
        Tested commands :
        ~~~~~~~~~~~~~~~~~
        - [setParameter] function
        Used commands :
        ~~~~~~~~~~~~~~~
        - [getParameter] function
        Expected result :
        ~~~~~~~~~~~~~~~~~
        - UINT8 parameter set to 100
        - Blackboard and filesystem values checked
        """
        log.D(self.test_TypeMax.__doc__)
        log.I("UINT8 parameter max value = 100")
        self._check_set_in_bounds("100", "0x64")
        log.I("test OK")

    def test_TypeMax_Overflow(self):
        """
        Testing UINT8 parameter value out of positive range
        ---------------------------------------------------
        Test case description :
        ~~~~~~~~~~~~~~~~~~~~~~~
        - set UINT8 to 101
        Tested commands :
        ~~~~~~~~~~~~~~~~~
        - [setParameter] function
        Used commands :
        ~~~~~~~~~~~~~~~
        - [getParameter] function
        Expected result :
        ~~~~~~~~~~~~~~~~~
        - error detected
        - UINT8 parameter not updated
        - Blackboard and filesystem values checked
        """
        log.D(self.test_TypeMax_Overflow.__doc__)
        log.I("UINT8 parameter max value out of bounds = 101")
        self._check_set_out_of_bounds("101")
        log.I("test OK")
| |
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2016, John McNamara, jmcnamara@cpan.org
#
import unittest
from ...compatibility import StringIO
from ...styles import Styles
from ...format import Format
class TestWriteXf(unittest.TestCase):
"""
Test the Styles _write_xf() method.
"""
    def setUp(self):
        # Capture the XML the Styles writer emits in an in-memory buffer
        # so each test can compare it against an expected string.
        self.fh = StringIO()
        self.styles = Styles()
        self.styles._set_filehandle(self.fh)
def test_write_xf_1(self):
"""Test the _write_xf() method. Default properties."""
properties = {}
xf_format = Format(properties)
self.styles._write_xf(xf_format)
exp = """<xf numFmtId="0" fontId="0" fillId="0" borderId="0" xfId="0"/>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_xf_2(self):
"""Test the _write_xf() method. Has font but is first XF."""
properties = {'has_font': 1}
xf_format = Format(properties)
self.styles._write_xf(xf_format)
exp = """<xf numFmtId="0" fontId="0" fillId="0" borderId="0" xfId="0"/>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_xf_3(self):
"""Test the _write_xf() method. Has font but isn't first XF."""
properties = {'has_font': 1, 'font_index': 1}
xf_format = Format(properties)
self.styles._write_xf(xf_format)
exp = """<xf numFmtId="0" fontId="1" fillId="0" borderId="0" xfId="0" applyFont="1"/>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_xf_4(self):
"""Test the _write_xf() method. Uses built-in number format."""
properties = {'num_format_index': 2}
xf_format = Format(properties)
self.styles._write_xf(xf_format)
exp = """<xf numFmtId="2" fontId="0" fillId="0" borderId="0" xfId="0" applyNumberFormat="1"/>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_xf_5(self):
"""Test the _write_xf() method. Uses built-in number format + font."""
properties = {'num_format_index': 2, 'has_font': 1, 'font_index': 1}
xf_format = Format(properties)
self.styles._write_xf(xf_format)
exp = """<xf numFmtId="2" fontId="1" fillId="0" borderId="0" xfId="0" applyNumberFormat="1" applyFont="1"/>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_xf_6(self):
"""Test the _write_xf() method. Vertical alignment = top."""
properties = {'align': 'top'}
xf_format = Format(properties)
self.styles._write_xf(xf_format)
exp = """<xf numFmtId="0" fontId="0" fillId="0" borderId="0" xfId="0" applyAlignment="1"><alignment vertical="top"/></xf>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_xf_7(self):
"""Test the _write_xf() method. Vertical alignment = centre."""
properties = {'align': 'vcenter'}
xf_format = Format(properties)
self.styles._write_xf(xf_format)
exp = """<xf numFmtId="0" fontId="0" fillId="0" borderId="0" xfId="0" applyAlignment="1"><alignment vertical="center"/></xf>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_xf_8(self):
"""Test the _write_xf() method. Vertical alignment = bottom."""
properties = {'align': 'bottom'}
xf_format = Format(properties)
self.styles._write_xf(xf_format)
exp = """<xf numFmtId="0" fontId="0" fillId="0" borderId="0" xfId="0" applyAlignment="1"/>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_xf_9(self):
"""Test the _write_xf() method. Vertical alignment = justify."""
properties = {'align': 'vjustify'}
xf_format = Format(properties)
self.styles._write_xf(xf_format)
exp = """<xf numFmtId="0" fontId="0" fillId="0" borderId="0" xfId="0" applyAlignment="1"><alignment vertical="justify"/></xf>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_xf_10(self):
"""Test the _write_xf() method. Vertical alignment = distributed."""
properties = {'align': 'vdistributed'}
xf_format = Format(properties)
self.styles._write_xf(xf_format)
exp = """<xf numFmtId="0" fontId="0" fillId="0" borderId="0" xfId="0" applyAlignment="1"><alignment vertical="distributed"/></xf>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_xf_11(self):
"""Test the _write_xf() method. Horizontal alignment = left."""
properties = {'align': 'left'}
xf_format = Format(properties)
self.styles._write_xf(xf_format)
exp = """<xf numFmtId="0" fontId="0" fillId="0" borderId="0" xfId="0" applyAlignment="1"><alignment horizontal="left"/></xf>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_xf_12(self):
"""Test the _write_xf() method. Horizontal alignment = center."""
properties = {'align': 'center'}
xf_format = Format(properties)
self.styles._write_xf(xf_format)
exp = """<xf numFmtId="0" fontId="0" fillId="0" borderId="0" xfId="0" applyAlignment="1"><alignment horizontal="center"/></xf>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_xf_13(self):
"""Test the _write_xf() method. Horizontal alignment = right."""
properties = {'align': 'right'}
xf_format = Format(properties)
self.styles._write_xf(xf_format)
exp = """<xf numFmtId="0" fontId="0" fillId="0" borderId="0" xfId="0" applyAlignment="1"><alignment horizontal="right"/></xf>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_xf_14(self):
"""Test the _write_xf() method. Horizontal alignment = left + indent."""
properties = {'align': 'left', 'indent': 1}
xf_format = Format(properties)
self.styles._write_xf(xf_format)
exp = """<xf numFmtId="0" fontId="0" fillId="0" borderId="0" xfId="0" applyAlignment="1"><alignment horizontal="left" indent="1"/></xf>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_xf_15(self):
"""Test the _write_xf() method. Horizontal alignment = right + indent."""
properties = {'align': 'right', 'indent': 1}
xf_format = Format(properties)
self.styles._write_xf(xf_format)
exp = """<xf numFmtId="0" fontId="0" fillId="0" borderId="0" xfId="0" applyAlignment="1"><alignment horizontal="right" indent="1"/></xf>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_xf_16(self):
"""Test the _write_xf() method. Horizontal alignment = fill."""
properties = {'align': 'fill'}
xf_format = Format(properties)
self.styles._write_xf(xf_format)
exp = """<xf numFmtId="0" fontId="0" fillId="0" borderId="0" xfId="0" applyAlignment="1"><alignment horizontal="fill"/></xf>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_xf_17(self):
"""Test the _write_xf() method. Horizontal alignment = justify."""
properties = {'align': 'justify'}
xf_format = Format(properties)
self.styles._write_xf(xf_format)
exp = """<xf numFmtId="0" fontId="0" fillId="0" borderId="0" xfId="0" applyAlignment="1"><alignment horizontal="justify"/></xf>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_xf_18(self):
"""Test the _write_xf() method. Horizontal alignment = center across."""
properties = {'align': 'center_across'}
xf_format = Format(properties)
self.styles._write_xf(xf_format)
exp = """<xf numFmtId="0" fontId="0" fillId="0" borderId="0" xfId="0" applyAlignment="1"><alignment horizontal="centerContinuous"/></xf>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_xf_19(self):
"""Test the _write_xf() method. Horizontal alignment = distributed."""
properties = {'align': 'distributed'}
xf_format = Format(properties)
self.styles._write_xf(xf_format)
exp = """<xf numFmtId="0" fontId="0" fillId="0" borderId="0" xfId="0" applyAlignment="1"><alignment horizontal="distributed"/></xf>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_xf_20(self):
"""Test the _write_xf() method. Horizontal alignment = distributed + indent."""
properties = {'align': 'distributed', 'indent': 1}
xf_format = Format(properties)
self.styles._write_xf(xf_format)
exp = """<xf numFmtId="0" fontId="0" fillId="0" borderId="0" xfId="0" applyAlignment="1"><alignment horizontal="distributed" indent="1"/></xf>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_xf_21(self):
"""Test the _write_xf() method. Horizontal alignment = justify distributed."""
properties = {'align': 'justify_distributed'}
xf_format = Format(properties)
self.styles._write_xf(xf_format)
exp = """<xf numFmtId="0" fontId="0" fillId="0" borderId="0" xfId="0" applyAlignment="1"><alignment horizontal="distributed" justifyLastLine="1"/></xf>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_xf_22(self):
"""Test the _write_xf() method. Horizontal alignment = indent only."""
properties = {'indent': 1}
xf_format = Format(properties)
self.styles._write_xf(xf_format)
exp = """<xf numFmtId="0" fontId="0" fillId="0" borderId="0" xfId="0" applyAlignment="1"><alignment horizontal="left" indent="1"/></xf>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_xf_23(self):
"""Test the _write_xf() method. Horizontal alignment = distributed + indent."""
properties = {'align': 'justify_distributed', 'indent': 1}
xf_format = Format(properties)
self.styles._write_xf(xf_format)
exp = """<xf numFmtId="0" fontId="0" fillId="0" borderId="0" xfId="0" applyAlignment="1"><alignment horizontal="distributed" indent="1"/></xf>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_xf_24(self):
"""Test the _write_xf() method. Alignment = text wrap"""
properties = {'text_wrap': 1}
xf_format = Format(properties)
self.styles._write_xf(xf_format)
exp = """<xf numFmtId="0" fontId="0" fillId="0" borderId="0" xfId="0" applyAlignment="1"><alignment wrapText="1"/></xf>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_xf_25(self):
"""Test the _write_xf() method. Alignment = shrink to fit"""
properties = {'shrink': 1}
xf_format = Format(properties)
self.styles._write_xf(xf_format)
exp = """<xf numFmtId="0" fontId="0" fillId="0" borderId="0" xfId="0" applyAlignment="1"><alignment shrinkToFit="1"/></xf>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_xf_26(self):
"""Test the _write_xf() method. Alignment = reading order"""
properties = {'reading_order': 1}
xf_format = Format(properties)
self.styles._write_xf(xf_format)
exp = """<xf numFmtId="0" fontId="0" fillId="0" borderId="0" xfId="0" applyAlignment="1"><alignment readingOrder="1"/></xf>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_xf_27(self):
"""Test the _write_xf() method. Alignment = reading order"""
properties = {'reading_order': 2}
xf_format = Format(properties)
self.styles._write_xf(xf_format)
exp = """<xf numFmtId="0" fontId="0" fillId="0" borderId="0" xfId="0" applyAlignment="1"><alignment readingOrder="2"/></xf>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_xf_28(self):
"""Test the _write_xf() method. Alignment = rotation"""
properties = {'rotation': 45}
xf_format = Format(properties)
self.styles._write_xf(xf_format)
exp = """<xf numFmtId="0" fontId="0" fillId="0" borderId="0" xfId="0" applyAlignment="1"><alignment textRotation="45"/></xf>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_xf_29(self):
"""Test the _write_xf() method. Alignment = rotation"""
properties = {'rotation': -45}
xf_format = Format(properties)
self.styles._write_xf(xf_format)
exp = """<xf numFmtId="0" fontId="0" fillId="0" borderId="0" xfId="0" applyAlignment="1"><alignment textRotation="135"/></xf>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_xf_30(self):
"""Test the _write_xf() method. Alignment = rotation"""
properties = {'rotation': 270}
xf_format = Format(properties)
self.styles._write_xf(xf_format)
exp = """<xf numFmtId="0" fontId="0" fillId="0" borderId="0" xfId="0" applyAlignment="1"><alignment textRotation="255"/></xf>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_xf_31(self):
"""Test the _write_xf() method. Alignment = rotation"""
properties = {'rotation': 90}
xf_format = Format(properties)
self.styles._write_xf(xf_format)
exp = """<xf numFmtId="0" fontId="0" fillId="0" borderId="0" xfId="0" applyAlignment="1"><alignment textRotation="90"/></xf>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_xf_32(self):
"""Test the _write_xf() method. Alignment = rotation"""
properties = {'rotation': -90}
xf_format = Format(properties)
self.styles._write_xf(xf_format)
exp = """<xf numFmtId="0" fontId="0" fillId="0" borderId="0" xfId="0" applyAlignment="1"><alignment textRotation="180"/></xf>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_xf_33(self):
"""Test the _write_xf() method. With cell protection."""
properties = {'locked': 0}
xf_format = Format(properties)
self.styles._write_xf(xf_format)
exp = """<xf numFmtId="0" fontId="0" fillId="0" borderId="0" xfId="0" applyProtection="1"><protection locked="0"/></xf>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_xf_34(self):
"""Test the _write_xf() method. With cell protection."""
properties = {'hidden': 1}
xf_format = Format(properties)
self.styles._write_xf(xf_format)
exp = """<xf numFmtId="0" fontId="0" fillId="0" borderId="0" xfId="0" applyProtection="1"><protection hidden="1"/></xf>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_xf_35(self):
"""Test the _write_xf() method. With cell protection."""
properties = {'locked': 0, 'hidden': 1}
xf_format = Format(properties)
self.styles._write_xf(xf_format)
exp = """<xf numFmtId="0" fontId="0" fillId="0" borderId="0" xfId="0" applyProtection="1"><protection locked="0" hidden="1"/></xf>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_xf_36(self):
"""Test the _write_xf() method. With cell protection + align."""
properties = {'align': 'right', 'locked': 0, 'hidden': 1}
xf_format = Format(properties)
self.styles._write_xf(xf_format)
exp = """<xf numFmtId="0" fontId="0" fillId="0" borderId="0" xfId="0" applyAlignment="1" applyProtection="1"><alignment horizontal="right"/><protection locked="0" hidden="1"/></xf>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
| |
"""Frechet derivative of the matrix exponential."""
from __future__ import division, print_function, absolute_import
import numpy as np
import scipy.linalg
__all__ = ['expm_frechet', 'expm_cond']
def expm_frechet(A, E, method=None, compute_expm=True, check_finite=True):
    """
    Frechet derivative of the matrix exponential of A in the direction E.

    Parameters
    ----------
    A : (N, N) array_like
        Matrix of which to take the matrix exponential.
    E : (N, N) array_like
        Matrix direction in which to take the Frechet derivative.
    method : str, optional
        Choice of algorithm.  Should be one of

        - `SPS` (default)
        - `blockEnlarge`

    compute_expm : bool, optional
        Whether to compute also `expm_A` in addition to `expm_frechet_AE`.
        Default is True.
    check_finite : bool, optional
        Whether to check that the input matrix contains only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.

    Returns
    -------
    expm_A : ndarray
        Matrix exponential of A.
    expm_frechet_AE : ndarray
        Frechet derivative of the matrix exponential of A in the direction E.

    For ``compute_expm = False``, only `expm_frechet_AE` is returned.

    See also
    --------
    expm : Compute the exponential of a matrix.

    Notes
    -----
    This section describes the available implementations that can be selected
    by the `method` parameter. The default method is *SPS*.

    Method *blockEnlarge* is a naive algorithm.

    Method *SPS* is Scaling-Pade-Squaring [1]_.
    It is a sophisticated implementation which should take
    only about 3/8 as much time as the naive implementation.
    The asymptotics are the same.

    .. versionadded:: 0.13.0

    References
    ----------
    .. [1] Awad H. Al-Mohy and Nicholas J. Higham (2009)
           Computing the Frechet Derivative of the Matrix Exponential,
           with an application to Condition Number Estimation.
           SIAM Journal On Matrix Analysis and Applications.,
           30 (4). pp. 1639-1657. ISSN 1095-7162

    Examples
    --------
    >>> import scipy.linalg
    >>> A = np.random.randn(3, 3)
    >>> E = np.random.randn(3, 3)
    >>> expm_A, expm_frechet_AE = scipy.linalg.expm_frechet(A, E)
    >>> expm_A.shape, expm_frechet_AE.shape
    ((3, 3), (3, 3))

    >>> M = np.zeros((6, 6))
    >>> M[:3, :3] = A; M[:3, 3:] = E; M[3:, 3:] = A
    >>> expm_M = scipy.linalg.expm(M)
    >>> np.allclose(expm_A, expm_M[:3, :3])
    True
    >>> np.allclose(expm_frechet_AE, expm_M[:3, 3:])
    True
    """
    # Coerce the inputs, optionally rejecting non-finite entries.
    coerce = np.asarray_chkfinite if check_finite else np.asarray
    A = coerce(A)
    E = coerce(E)
    # Both operands must be square and of identical shape.
    for label, mat in (('A', A), ('E', E)):
        if mat.ndim != 2 or mat.shape[0] != mat.shape[1]:
            raise ValueError('expected %s to be a square matrix' % label)
    if A.shape != E.shape:
        raise ValueError('expected A and E to be the same shape')
    if method is None:
        method = 'SPS'
    # Dispatch table over the supported implementations.
    implementations = {
        'SPS': expm_frechet_algo_64,
        'blockEnlarge': expm_frechet_block_enlarge,
    }
    try:
        implementation = implementations[method]
    except KeyError:
        raise ValueError('Unknown implementation %s' % method)
    expm_A, expm_frechet_AE = implementation(A, E)
    if compute_expm:
        return expm_A, expm_frechet_AE
    return expm_frechet_AE
def expm_frechet_block_enlarge(A, E):
    """
    This is a helper function, mostly for testing and profiling.
    Return expm(A), frechet(A, E)

    Uses the block-matrix identity expm([[A, E], [0, A]]) =
    [[expm(A), L(A, E)], [0, expm(A)]].
    """
    n = A.shape[0]
    zero = np.zeros_like(A)
    big = np.block([[A, E], [zero, A]])
    expm_big = scipy.linalg.expm(big)
    return expm_big[:n, :n], expm_big[:n, n:]
"""
Maximal values ell_m of ||2**-s A|| such that the backward error bound
does not exceed 2**-53.
"""
ell_table_61 = (
None,
# 1
2.11e-8,
3.56e-4,
1.08e-2,
6.49e-2,
2.00e-1,
4.37e-1,
7.83e-1,
1.23e0,
1.78e0,
2.42e0,
# 11
3.13e0,
3.90e0,
4.74e0,
5.63e0,
6.56e0,
7.52e0,
8.53e0,
9.56e0,
1.06e1,
1.17e1,
)
# The b vectors and U and V are copypasted
# from scipy.sparse.linalg.matfuncs.py.
# M, Lu, Lv follow (6.11), (6.12), (6.13), (3.3)
def _diff_pade3(A, E, ident):
b = (120., 60., 12., 1.)
A2 = A.dot(A)
M2 = np.dot(A, E) + np.dot(E, A)
U = A.dot(b[3]*A2 + b[1]*ident)
V = b[2]*A2 + b[0]*ident
Lu = A.dot(b[3]*M2) + E.dot(b[3]*A2 + b[1]*ident)
Lv = b[2]*M2
return U, V, Lu, Lv
def _diff_pade5(A, E, ident):
b = (30240., 15120., 3360., 420., 30., 1.)
A2 = A.dot(A)
M2 = np.dot(A, E) + np.dot(E, A)
A4 = np.dot(A2, A2)
M4 = np.dot(A2, M2) + np.dot(M2, A2)
U = A.dot(b[5]*A4 + b[3]*A2 + b[1]*ident)
V = b[4]*A4 + b[2]*A2 + b[0]*ident
Lu = (A.dot(b[5]*M4 + b[3]*M2) +
E.dot(b[5]*A4 + b[3]*A2 + b[1]*ident))
Lv = b[4]*M4 + b[2]*M2
return U, V, Lu, Lv
def _diff_pade7(A, E, ident):
b = (17297280., 8648640., 1995840., 277200., 25200., 1512., 56., 1.)
A2 = A.dot(A)
M2 = np.dot(A, E) + np.dot(E, A)
A4 = np.dot(A2, A2)
M4 = np.dot(A2, M2) + np.dot(M2, A2)
A6 = np.dot(A2, A4)
M6 = np.dot(A4, M2) + np.dot(M4, A2)
U = A.dot(b[7]*A6 + b[5]*A4 + b[3]*A2 + b[1]*ident)
V = b[6]*A6 + b[4]*A4 + b[2]*A2 + b[0]*ident
Lu = (A.dot(b[7]*M6 + b[5]*M4 + b[3]*M2) +
E.dot(b[7]*A6 + b[5]*A4 + b[3]*A2 + b[1]*ident))
Lv = b[6]*M6 + b[4]*M4 + b[2]*M2
return U, V, Lu, Lv
def _diff_pade9(A, E, ident):
b = (17643225600., 8821612800., 2075673600., 302702400., 30270240.,
2162160., 110880., 3960., 90., 1.)
A2 = A.dot(A)
M2 = np.dot(A, E) + np.dot(E, A)
A4 = np.dot(A2, A2)
M4 = np.dot(A2, M2) + np.dot(M2, A2)
A6 = np.dot(A2, A4)
M6 = np.dot(A4, M2) + np.dot(M4, A2)
A8 = np.dot(A4, A4)
M8 = np.dot(A4, M4) + np.dot(M4, A4)
U = A.dot(b[9]*A8 + b[7]*A6 + b[5]*A4 + b[3]*A2 + b[1]*ident)
V = b[8]*A8 + b[6]*A6 + b[4]*A4 + b[2]*A2 + b[0]*ident
Lu = (A.dot(b[9]*M8 + b[7]*M6 + b[5]*M4 + b[3]*M2) +
E.dot(b[9]*A8 + b[7]*A6 + b[5]*A4 + b[3]*A2 + b[1]*ident))
Lv = b[8]*M8 + b[6]*M6 + b[4]*M4 + b[2]*M2
return U, V, Lu, Lv
def expm_frechet_algo_64(A, E):
    # SPS (scaling-Pade-squaring) algorithm of Al-Mohy & Higham (2009),
    # specialised to IEEE double precision (backward error bound 2**-53).
    # Returns (expm(A), Frechet derivative of expm at A in direction E).
    n = A.shape[0]
    s = None  # number of squarings; None until an approximant is chosen
    ident = np.identity(n)
    A_norm_1 = scipy.linalg.norm(A, 1)
    m_pade_pairs = (
            (3, _diff_pade3),
            (5, _diff_pade5),
            (7, _diff_pade7),
            (9, _diff_pade9))
    # Use the cheapest Pade order whose backward-error threshold covers ||A||_1.
    for m, pade in m_pade_pairs:
        if A_norm_1 <= ell_table_61[m]:
            U, V, Lu, Lv = pade(A, E, ident)
            s = 0
            break
    if s is None:
        # scaling: shrink A (and E) by 2**-s until the order-13 bound applies
        s = max(0, int(np.ceil(np.log2(A_norm_1 / ell_table_61[13]))))
        A = A * 2.0**-s
        E = E * 2.0**-s
        # pade order 13
        A2 = np.dot(A, A)
        M2 = np.dot(A, E) + np.dot(E, A)
        A4 = np.dot(A2, A2)
        M4 = np.dot(A2, M2) + np.dot(M2, A2)
        A6 = np.dot(A2, A4)
        M6 = np.dot(A4, M2) + np.dot(M4, A2)
        b = (64764752532480000., 32382376266240000., 7771770303897600.,
             1187353796428800., 129060195264000., 10559470521600.,
             670442572800., 33522128640., 1323241920., 40840800., 960960.,
             16380., 182., 1.)
        # W*, Z* split the order-13 polynomials so only one extra A6 product
        # is needed; L* are the corresponding Frechet-derivative pieces.
        W1 = b[13]*A6 + b[11]*A4 + b[9]*A2
        W2 = b[7]*A6 + b[5]*A4 + b[3]*A2 + b[1]*ident
        Z1 = b[12]*A6 + b[10]*A4 + b[8]*A2
        Z2 = b[6]*A6 + b[4]*A4 + b[2]*A2 + b[0]*ident
        W = np.dot(A6, W1) + W2
        U = np.dot(A, W)
        V = np.dot(A6, Z1) + Z2
        Lw1 = b[13]*M6 + b[11]*M4 + b[9]*M2
        Lw2 = b[7]*M6 + b[5]*M4 + b[3]*M2
        Lz1 = b[12]*M6 + b[10]*M4 + b[8]*M2
        Lz2 = b[6]*M6 + b[4]*M4 + b[2]*M2
        Lw = np.dot(A6, Lw1) + np.dot(M6, W1) + Lw2
        Lu = np.dot(A, Lw) + np.dot(E, W)
        Lv = np.dot(A6, Lz1) + np.dot(M6, Z1) + Lz2
    # factor once and solve twice: (V - U) R = (V + U) gives the Pade value,
    # the second solve gives its Frechet derivative.
    lu_piv = scipy.linalg.lu_factor(-U + V)
    R = scipy.linalg.lu_solve(lu_piv, U + V)
    L = scipy.linalg.lu_solve(lu_piv, Lu + Lv + np.dot((Lu - Lv), R))
    # squaring: undo the 2**-s scaling via the product rule
    # L_{k+1} = R L_k + L_k R, R_{k+1} = R_k^2.
    for k in range(s):
        L = np.dot(R, L) + np.dot(L, R)
        R = np.dot(R, R)
    return R, L
def vec(M):
    """
    Stack columns of M to construct a single vector.

    This is somewhat standard notation in linear algebra.

    Parameters
    ----------
    M : 2d array_like
        Input matrix

    Returns
    -------
    v : 1d ndarray
        Output vector
    """
    # Column-major flattening: ravel the transpose in C order.
    return np.ravel(M.T)
def expm_frechet_kronform(A, method=None, check_finite=True):
    """
    Construct the Kronecker form of the Frechet derivative of expm.

    Parameters
    ----------
    A : array_like with shape (N, N)
        Matrix to be expm'd.
    method : str, optional
        Extra keyword to be passed to expm_frechet.
    check_finite : bool, optional
        Whether to check that the input matrix contains only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.

    Returns
    -------
    K : 2d ndarray with shape (N*N, N*N)
        Kronecker form of the Frechet derivative of the matrix exponential.

    Notes
    -----
    This function is used to help compute the condition number
    of the matrix exponential.

    See also
    --------
    expm : Compute a matrix exponential.
    expm_frechet : Compute the Frechet derivative of the matrix exponential.
    expm_cond : Compute the relative condition number of the matrix exponential
                in the Frobenius norm.
    """
    A = np.asarray_chkfinite(A) if check_finite else np.asarray(A)
    if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
        raise ValueError('expected a square matrix')
    n = A.shape[0]
    ident = np.identity(n)
    # One column per elementary direction E_ij; the derivative is
    # column-vectorised (.T.ravel()) into the Kronecker-form column.
    cols = [
        expm_frechet(A, np.outer(ident[i], ident[j]), method=method,
                     compute_expm=False, check_finite=False).T.ravel()
        for i in range(n) for j in range(n)
    ]
    return np.vstack(cols).T
def expm_cond(A, check_finite=True):
    """
    Relative condition number of the matrix exponential in the Frobenius norm.

    Parameters
    ----------
    A : 2d array_like
        Square input matrix with shape (N, N).
    check_finite : bool, optional
        Whether to check that the input matrix contains only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.

    Returns
    -------
    kappa : float
        The relative condition number of the matrix exponential
        in the Frobenius norm

    Notes
    -----
    A faster estimate for the condition number in the 1-norm
    has been published but is not yet implemented in scipy.

    .. versionadded:: 0.14.0

    See also
    --------
    expm : Compute the exponential of a matrix.
    expm_frechet : Compute the Frechet derivative of the matrix exponential.

    Examples
    --------
    >>> from scipy.linalg import expm_cond
    >>> A = np.array([[-0.3, 0.2, 0.6], [0.6, 0.3, -0.1], [-0.7, 1.2, 0.9]])
    >>> k = expm_cond(A)
    >>> k
    1.7787805864469866
    """
    A = np.asarray_chkfinite(A) if check_finite else np.asarray(A)
    if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
        raise ValueError('expected a square matrix')
    X = scipy.linalg.expm(A)
    K = expm_frechet_kronform(A, check_finite=False)
    # The following norm choices are deliberate.
    # The norms of A and X are Frobenius norms,
    # and the norm of K is the induced 2-norm.
    return (scipy.linalg.norm(K, 2) * scipy.linalg.norm(A, 'fro')
            / scipy.linalg.norm(X, 'fro'))
| |
#!python3
"""
Predict protein pKa based on MCCE method.
http://pka.engr.ccny.cuny.edu/
Require MCCE 3.0 to work: https://anaconda.org/SalahSalah/mcce/files
"""
import asyncio
import glob
import gzip
import locale
import logging
import math
import os
import re
import shutil
import subprocess
import sys
import time
from multiprocessing import Pool
from urllib.request import urlopen
import aioftp
import pandas as pd
import uvloop
# Sapelo Locale is broken, quick fix
locale.setlocale(locale.LC_ALL, "en_US.UTF-8")
# Set working directory
# All relative paths below ("./pdb", "./results", ...) assume this chdir.
ROOTPATH = os.path.dirname(os.path.realpath(sys.argv[0]))
os.chdir(ROOTPATH)
# Log settings
# Module-level file logger; one log file per script filename.
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
handler = logging.FileHandler(f"./pKa_calculation_{__file__}.log")
handler.setLevel(logging.INFO)
formatter = logging.Formatter(
    "%(asctime)s\t%(levelname)s\t"
    "[%(filename)s:%(lineno)s -%(funcName)12s()]\t%(message)s"
)
handler.setFormatter(formatter)
logger.addHandler(handler)
class pdb:
    """Pipeline manager: download PDB structures, preprocess them, and
    compute protein pKa values with MCCE 3.0.

    IDs flow through the stages
    download -> unzip -> preprocess -> calculate,
    tracked by the corresponding ``*_ids`` lists.
    """

    def __init__(self):
        self.all_ids = []
        self.download_ids = []  # Download -> Unzip -> Preprocess -> Calculate
        self.unzip_ids = []  # Unzip -> Preprocess -> Calculate
        self.preprocess_ids = []  # Preprocess -> Calculate
        self.ready_ids = []  # Calculate
        self.finished_ids = []  # Successfully calculated IDs
        self.error_ids = []  # Error in download, unzip, or calculation
        # IDs this script will work on (messy queue implementation)
        self.working_ids = []

    def load_id(self):
        """
        First try to get existing pKa values,
        then get the list of PDB files to download.
        """
        for folder in ["./pdb", "./annotation", "./results"]:
            try:
                os.makedirs(folder)
            except OSError:
                # Folder already exists.
                pass
        self.finished_ids = [id[-8:-4] for id in glob.glob("./results/*.pka")]
        logger.debug(f"{len(self.finished_ids)} finished files.")
        # Create file even at first run so that the results folder doesn't get deleted
        with open("./results/finished_ids.list", "a") as f:
            f.write("\n".join(self.finished_ids))
        # "*.pdb.bak" marks structures that were already preprocessed.
        self.ready_ids = list(set(
            [id[-12:-8].upper() for id in glob.glob("./pdb/*/*.pdb.bak")]) - set(self.finished_ids))
        logger.debug(f"{len(self.ready_ids)} files ready to be calculated.")
        self.preprocess_ids = list(set([id[-8:-4].upper() for id in glob.glob(
            "./pdb/*/*.pdb") if "out" not in id]) - set(self.finished_ids) - set(self.ready_ids))
        logger.debug(
            f"{len(self.preprocess_ids)} files ready to be preprocessed.")
        self.unzip_ids = [id[-11:-7].upper() for id in glob.glob("./*.ent.gz")]
        logger.debug(f"{len(self.unzip_ids)} files ready to be unzipped.")
        if not os.path.exists("./annotation/uniprot_id_mapping.dat"):
            with urlopen("ftp://ftp.uniprot.org/pub/databases/uniprot/current_release/knowledgebase/idmapping/by_organism/HUMAN_9606_idmapping.dat.gz") as remotefile:
                logger.debug(
                    "Saving UniProt ID mapping data since it doesn't exist...")
                with open("./annotation/uniprot_id_mapping.dat.gz", "wb") as f:
                    f.write(remotefile.read())
            with gzip.open(
                    "./annotation/uniprot_id_mapping.dat.gz", "rb") as inFile, open(
                    "./annotation/uniprot_id_mapping.dat", "wb") as outFile:
                shutil.copyfileobj(inFile, outFile)
            os.remove("./annotation/uniprot_id_mapping.dat.gz")
        else:
            logger.debug("UniProt ID mapping data exists.")
        logger.debug("Reading all possible PDB IDs...")
        annot = pd.read_csv("./annotation/uniprot_id_mapping.dat",
                            sep="\t", header=None,
                            names=["uniprot", "id", "value"])
        self.all_ids = annot.loc[annot.id == "PDB", "value"].tolist()
        self.download_ids = list(set(self.all_ids) - set(self.unzip_ids) - set(
            self.preprocess_ids) - set(self.ready_ids) - set(self.finished_ids))
        logger.info(
            f"{len(self.download_ids)} PDB files need to be downloaded.")

    def get_link(self, ids):
        """ Get PDB file links from:
        ftp://ftp.wwpdb.org/pub/pdb/data/structures/divided/pdb/ ,
        and create folders to store the files.

        Parameters
        ----------
        ids: list
            The PDB IDs to download.

        Returns
        -------
        Links to download.
        """
        if isinstance(ids, list):
            ids = [id[:4].lower() for id in ids]  # pdb file IDs
            pdb_names = [f"{id}.ent.gz" for id in ids]  # pdb filenames
            # subdirectory of the pdb files (middle two characters of the ID)
            pdbDirs = [id[1:3].lower() for id in ids]
            remoteaddr = [
                f"ftp://ftp.wwpdb.org/pub/pdb/data/structures/divided/pdb/{pdbDir}/pdb{pdb_name}" for pdbDir, pdb_name in zip(pdbDirs, pdb_names)]
        else:
            # BUGFIX: the message previously interpolated the builtin `id`,
            # not the argument `ids`.
            raise TypeError(f"{ids} is not a string or list.")
        return remoteaddr

    def make_dirs(self, ids):
        """Make sure the download directory exists."""
        for id in ids:
            try:
                os.makedirs(os.path.join(ROOTPATH, "pdb", id.upper()))
            except OSError:
                # Directory already exists.
                pass

    async def download_worker(self, session, url):
        """Download the given url to working directory."""
        # aioftp wants a server-relative path, not the full ftp:// URL.
        url = url[len("ftp://ftp.wwpdb.org"):]
        logger.debug(f"Downloading {url}")
        try:
            await session.download(url)
            self.unzip_ids.append(url[-11:-7].upper())
        except Exception as e:
            self.error_ids.append(url[-11:-7].upper())
            logger.warning(f"Error when downloading {url}: {e}")

    async def download_session(self, sem, work_queue):
        """ Get urls from the queue and pass to worker.

        Parameters
        ----------
        sem: asyncio.Semaphore object
            Limits the number of concurrent FTP sessions.
        work_queue: asyncio.Queue object
        """
        while not work_queue.empty():
            url = await work_queue.get()
            logger.debug(f"Got url from queue: {url}")
            async with sem:
                async with aioftp.ClientSession("ftp.wwpdb.org") as session:
                    await self.download_worker(session, url)

    def download_queue(self, urls):
        """ Create a queue to download all the given urls.

        Parameters
        ----------
        urls: list
            A list of urls to download.

        Returns
        -------
        Downloaded "*.ent.gz" files in working directory.
        """
        logger.debug(f"{len(urls)} urls to download.")
        loop = uvloop.new_event_loop()
        asyncio.set_event_loop(loop)
        q = asyncio.Queue()
        sem = asyncio.Semaphore(10)
        [q.put_nowait(url) for url in urls]
        tasks = [asyncio.ensure_future(self.download_session(sem, q))
                 for _ in range(len(urls))]
        loop.run_until_complete(asyncio.gather(*tasks))
        # Zero-sleep to allow underlying connections to close
        loop.run_until_complete(asyncio.sleep(0))
        loop.close()

    def check_mcce(self):
        """Check if MCCE 3.0 exists; download and unpack it otherwise."""
        if not os.path.exists(os.path.join(ROOTPATH, "mcce3.0")):
            # BUGFIX: check for the same archive name we actually download
            # below; the old check looked for "mcce3.0.tar.bz2" and thus
            # re-downloaded the archive every time.
            if not os.path.exists(os.path.join(ROOTPATH, "mcce-3.0-0.tar.bz2")):
                logger.debug("MCCE isn't downloaded yet. Retrieving...")
                with urlopen("https://anaconda.org/SalahSalah/mcce/3.0/download/linux-64/mcce-3.0-0.tar.bz2") as remotefile:
                    with open("./mcce-3.0-0.tar.bz2", 'wb') as f:
                        f.write(remotefile.read())
            subprocess.run(["tar", "-xjf", "mcce-3.0-0.tar.bz2"])
            shutil.move("./info/recipe/mcce3.0", "./mcce3.0")
            shutil.rmtree(os.path.join(ROOTPATH, "info"), ignore_errors=True)
            shutil.rmtree(os.path.join(ROOTPATH, "bin"), ignore_errors=True)
        else:
            logger.info("MCCE 3.0 exists, proceeding to calculation...")

    def unzip(self, id):
        """Unzip downloaded *.ent.gz file."""
        try:
            saved_pdb = os.path.join(ROOTPATH, "pdb", id, f"{id}.pdb")
            with gzip.open(f"pdb{id.lower()}.ent.gz", "rb") as inFile, open(saved_pdb, "wb") as outFile:
                shutil.copyfileobj(inFile, outFile)
            os.remove(f"pdb{id.lower()}.ent.gz")
            self.preprocess_ids.append(id)
        except Exception as e:
            self.error_ids.append(id)
            logger.warning(f"Unzip of {id} unsuccessful: {e}")

    def preprocess(self, id, backup=True):
        """
        This program will:
        1) strip lines other than ATOM and HETATM records
        2) keep the first model of an NMR structure
        3) delete H and D atoms
        4) MSE to MET residue
        5) keep only one atom alternate position
        6) keep defined chains, if chain ID(s) are given in command
        7) remove some cofactors and salt ions

        Parameters
        ----------
        id: str
            The PDB ID to find the file.
        backup: bool, optional
            Whether to backup the original file or not. Default is True,
            and save to "original.bak".

        Returns
        -------
        Nothing, modify the file in place.
        """
        removable_res = [
            " ZN", "PCA", "XYP", " NA", " CL", " CA", " MG", " MN", "HOH"
        ]
        model_start = False
        newlines = []
        ID = id.upper()
        filepath = os.path.join(ROOTPATH, "pdb", ID, f"{ID}.pdb")
        if backup:
            shutil.copy2(filepath, f"{filepath}.bak")
        with open(filepath) as f:
            for line in f:
                if line[:5] == "MODEL":
                    model_start = True
                if model_start and line[:6] == "ENDMDL":
                    # Keep only the first model of an NMR structure.
                    break
                if line[:6] != "ATOM  " and line[:6] != "HETATM":
                    continue  # discard non ATOM records
                if line[13] == "H" or line[12] == "H":
                    continue
                if line[16] == "A":
                    # Promote alternate position "A" to the primary position.
                    line = f"{line[:16]} {line[17:]}"
                elif line[16] != " ":
                    continue  # delete this line, alternative posion is not A or empty
                if line[:6] == "HETATM" and line[17:20] == "MSE":
                    # Rewrite selenomethionine as methionine.
                    if line[12:15] == "SE ":
                        line = f"ATOM  {line[6:12]} SD{line[15:17]}MET{line[20:]}"
                    else:
                        line = f"ATOM  {line[6:17]}MET{line[20:]}"
                res = line[17:20]
                if res in removable_res:
                    continue
                newlines.append(line.rstrip())
        with open(filepath, "w") as f:
            f.write("\n".join(newlines))
        logger.debug(f"{ID} preprocessing complete.")

    def set_params(self, id, quickrun=True):
        """
        Set the parameters for MCCE.

        Parameters
        ----------
        id: str
            The PDB ID of the file.
        quickrun: bool, optional
            Use "run.prm.quick" or "run.prm.default".

        Returns
        -------
        run.prm: a file describing the parameters that points to the PDB file.
        """
        pkgpath = os.path.join(ROOTPATH, "mcce3.0")
        ID = id.upper()
        filepath = os.path.join(ROOTPATH, "pdb", ID)
        newlines = []
        if quickrun:
            shutil.copy2(
                os.path.join(pkgpath, "run.prm.quick"),
                os.path.join(filepath, "run.prm")
            )
        else:
            # BUGFIX: copy2 takes (src, dst) as two arguments; it was
            # previously called with a single list, which raises TypeError.
            shutil.copy2(
                os.path.join(pkgpath, "run.prm.default"),
                os.path.join(filepath, "run.prm")
            )
        with open(os.path.join(filepath, "run.prm")) as f:
            for line in f:
                line = line.rstrip()
                if line.endswith("(INPDB)"):
                    line = re.sub(r"^[^\s]+", fr"{id}.pdb", line)
                if line.endswith(("(DO_PREMCCE)", "(DO_ROTAMERS)",
                                  "(DO_ENERGY)", "(DO_MONTE)")):
                    # Enable all four calculation steps.
                    line = re.sub(r"^f", r"t", line)
                if line.endswith("(EPSILON_PROT)"):
                    line = re.sub(r"^[\d\.]+", r"8.0", line)
                if line.startswith("/home/mcce/mcce3.0"):
                    # Point the hard-coded install path at our local copy.
                    line = re.sub(r"^/.*3\.0", pkgpath,
                                  line)
                newlines.append(line)
        with open(os.path.join(filepath, "run.prm"), "w") as f:
            f.write("\n".join(newlines))
        self.ready_ids.append(ID)
        logger.debug(f"Parameters set for {ID}.")

    def split_ready_ids(self, num):
        """ A naive queue implementation for multiple scripts.

        Parameters
        ----------
        num: int
            Which part of the IDs to work on.

        Returns
        -------
        A list of the actual IDs to work on, and save the lists of IDs for
        other scripts to work with if this is the first instance.
        """
        if os.path.isfile(os.path.join(ROOTPATH, "results", "working_ids.list")):
            with open(os.path.join(ROOTPATH, "results", f"working_ids.list{num}"), "r") as f:
                self.working_ids = [line.strip() for line in f]
        else:
            # First instance: split ready_ids into 10 chunks and persist them
            # so the other 9 script instances can pick up their share.
            n = math.ceil(len(self.ready_ids) / 10)
            self.working_ids = [self.ready_ids[i:i + n]
                                for i in range(0, len(self.ready_ids), n)]
            metafile = []
            for i, ids in enumerate(self.working_ids):
                metafile.append(os.path.join(
                    ROOTPATH, "results", f"working_ids.list{i}"))
                with open(os.path.join(ROOTPATH, "results", f"working_ids.list{i}"), "w") as f:
                    f.write("\n".join(ids))
                logger.debug(
                    f"Saved {len(ids)} IDs to file working_ids.list{i} .")
            with open(os.path.join(ROOTPATH, "results", "working_ids.list"), "w") as f:
                f.write("\n".join(metafile))
            self.working_ids = self.working_ids[num]

    def calc_pka(self, id, clean=True):
        """ Calculate protein pKa values using MCCE.
        https://sites.google.com/site/mccewiki/home

        Parameters
        ----------
        id: str
            The PDB ID of the protein calculated.
        clean: bool, optional
            Only keep the PDB file, run log and pKa output.

        Returns
        -------
        A set of files in a subdirectory named after the ID.
        See user manual for detail.
        """
        id = id.upper()
        # MCCE reads run.prm from the current directory.
        os.chdir(os.path.realpath(os.path.join(ROOTPATH, "pdb", id)))
        logger.info(f"{id} calculation started.")
        start = time.time()
        with open(f"{id}.run.log", "w") as f:
            subprocess.run(f"{ROOTPATH}/mcce3.0/mcce", stdout=f)
        # The last log line tells whether the run succeeded.
        with open(f"{id}.run.log", "rb") as f:
            last = f.readlines()[-1].decode().lstrip()
        if last.startswith(("Fatal", "FATAL", "WARNING", "STOP")):
            self.error_ids.append(id)
            logger.warning(
                f"{id} calculation aborted after {time.time() - start}s, due to {last}")
        else:
            self.finished_ids.append(id)
            logger.info(
                f"{id} calculation finished, used {time.time() - start}s.")
            shutil.move("pK.out", os.path.join(
                ROOTPATH, "results", f"{id}.pka"))
        if clean:
            del_list = [i for i in os.listdir() if i not in (
                "pK.out", f"{id}.run.log", f"{id}.pdb.bak")]
            [os.remove(item) for item in del_list]
if __name__ == "__main__":
x = pdb()
x.load_id()
urls = x.get_link(x.download_ids)
x.make_dirs(x.all_ids)
x.download_queue(urls)
x.check_mcce()
for id in x.unzip_ids:
x.unzip(id)
for id in x.preprocess_ids:
try:
x.preprocess(id)
x.set_params(id)
except Exception as e:
x.error_ids.append(id)
logger.warning(f"Preprocess of {id}: {e}")
# subprocess.run(["find", ".", "-type", "d", "-empty", "-delete"])
x.split_ready_ids(0) # 0 - 9, run 0 first to generate other lists
with Pool(os.cpu_count()) as p:
p.map(x.calc_pka, x.working_ids)
with open("./results/finished_ids.list", "a") as f:
f.write("\n".join(x.working_ids))
with open("./results/error_ids.list", "a") as f:
f.write("\n".join(x.error_ids))
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
#
import Axon
import zlib
from Axon.Ipc import WaitComplete, producerFinished, shutdownMicroprocess
from Kamaelia.UI.PygameDisplay import PygameDisplay
import pygame
from datetime import datetime
from zipfile import ZipFile
import os
#from FileDialog import *
from Tkinter import *
from tkFileDialog import askopenfilename
from tkSimpleDialog import askstring
from tkMessageBox import *
# PIL is optional: without it, only BMP images can be handled (Python 2 code).
try:
    import Image
except ImportError:
    print "WARNING: Python Imaging Library Not available, defaulting to bmp only mode"
class Canvas(Axon.Component.component):
    """\
    Canvas component - pygame surface that accepts drawing instructions
    """
    # Axon mailbox declarations: name -> human-readable description.
    Inboxes = {"inbox": "Receives drawing instructions",
               "control": "",
               "fromDisplay": "For receiving replies from PygameDisplay service",
               "eventsIn": "For receiving PygameDisplay events",
               }
    Outboxes = {"outbox": "Issues drawing instructions",
                "signal": "",
                "toDisplay": "For sending requests to PygameDisplay service",
                "toApp": "Send requests to app - for calibration",  # MODIFICATION
                "eventsOut": "Events forwarded out of here",
                "surfacechanged": "If the surface gets changed from last load/save a 'dirty' message is emitted here",
                "toTicker": "Send data to text ticker",
                "toHistory": "Move to first slide",
                }
def __init__(self, position=(0,0), size=(1024,768), bgcolour=(255,255,255), notepad="Scribbles"):
"""x.__init__(...) initializes x; see x.__class__.__doc__ for signature"""
super(Canvas,self).__init__()
self.position = position
self.size = size
self.antialias = False
self.bgcolour = bgcolour
self.notepad = notepad
if self.antialias == True:
self.pygame_draw_line = pygame.draw.aaline
else:
self.pygame_draw_line = pygame.draw.line
self.dirty_sent = False
def waitBox(self,boxname):
waiting = True
while waiting:
if self.dataReady(boxname):
return
else:
yield 1
    def requestDisplay(self, **argd):
        """Generator: ask the PygameDisplay service for a surface and wait
        for the reply (use via WaitComplete). Stores it in self.surface."""
        displayservice = PygameDisplay.getDisplayService()
        self.link((self,"toDisplay"), displayservice)
        #argd["transparency"] = self.bgcolour
        self.send(argd, "toDisplay")
        self.send(argd, "toApp") # MODIFICATION
        # Block (cooperatively) until the display service replies.
        for _ in self.waitBox("fromDisplay"):
            yield 1
        self.surface = self.recv("fromDisplay")
def finished(self):
while self.dataReady("control"):
msg = self.recv("control")
if isinstance(msg, producerFinished) or isinstance(msg, shutdownMicroprocess):
self.send(msg, "signal")
return True
return False
def main(self):
"""Main loop"""
yield 1
yield 1
yield 1
yield 1
yield WaitComplete(
self.requestDisplay( DISPLAYREQUEST=True,
callback = (self,"fromDisplay"),
events = (self, "eventsIn"),
size = self.size,
position = self.position,
)
)
self.surface.fill( (self.bgcolour) )
self.send({"REDRAW":True, "surface":self.surface}, "toDisplay")
self.send({"REDRAW":True, "surface":self.surface}, "toApp") # MODIFICATION
self.send( {"ADDLISTENEVENT" : pygame.MOUSEBUTTONDOWN, "surface" : self.surface},
"toDisplay" )
self.send( {"ADDLISTENEVENT" : pygame.MOUSEMOTION, "surface" : self.surface},
"toDisplay" )
self.send( {"ADDLISTENEVENT" : pygame.MOUSEBUTTONUP, "surface" : self.surface},
"toDisplay" )
while not self.finished():
self.redrawNeeded = False
while self.dataReady("inbox"):
msgs = self.recv("inbox")
# \
# print repr(msgs)
for msg in msgs:
cmd = msg[0]
args = msg[1:]
# parse commands here
self.handleCommand(cmd, *args)
yield 1
if self.redrawNeeded:
self.send({"REDRAW":True, "surface":self.surface}, "toDisplay")
self.send({"REDRAW":True, "surface":self.surface}, "toApp") #MODIFICATION
if not self.clean:
if not self.dirty_sent:
self.send("dirty", "surfacechanged")
self.dirty_sent = True
# pass on events received from pygame display
while self.dataReady("eventsIn"):
self.send( self.recv("eventsIn"), "eventsOut" )
self.pause()
yield 1
def handleCommand(self, cmd, *args):
#
# Could really take a dispatch pattern
# Would then be pluggable.
#
cmd = cmd.upper()
if cmd=="CLEAR":
self.clear(args)
self.clean = True
self.dirty_sent = False
elif cmd=="LINE":
self.line(args)
elif cmd=="CIRCLE":
self.circle(args)
self.clean = False
elif cmd=="LOAD":
self.load(args)
self.clean = True
self.dirty_sent = False
elif cmd=="SAVE":
self.save(args)
self.clean = True
self.dirty_sent = False
elif cmd=="LOADDECK":
self.loaddeck(args)
self.clean = True
self.dirty_sent = False
elif cmd=="SAVEDECK":
self.savedeck(args)
self.clean = True
self.dirty_sent = False
elif cmd=="CLEARSCRIBBLES":
self.clearscribbles(args)
self.clean = True
self.dirty_sent = False
elif cmd=="DELETESLIDE":
self.deleteslide(args)
self.clean = True
self.dirty_sent = False
elif cmd=="GETIMG":
self.getimg(args)
self.clean = False
elif cmd=="SETIMG":
self.setimg(args)
self.clean = False
elif cmd=="WRITE":
self.write(args)
self.clean = False
elif cmd=="CAM":
self.webcam(args)
self.clean = True
self.dirty_sent = True
elif cmd== "QUIT":
self.quit(args)
def line(self, args):
(r,g,b,sx,sy,ex,ey) = [int(v) for v in args[0:7]]
self.pygame_draw_line(self.surface, (r,g,b), (sx,sy), (ex,ey))
# pygame.draw.aaline(self.surface, (r,g,b), (sx,sy), (ex,ey))
self.redrawNeeded = True
if not((sy <0) or (ey <0)):
self.clean = False
def clear(self, args):
if len(args) == 3:
self.surface.fill( [int(a) for a in args[0:3]] )
else:
self.surface.fill( (self.bgcolour) )
self.redrawNeeded = True
self.send("dirty", "surfacechanged")
self.dirty_sent = True
self.clean = True
def circle(self, args):
(r,g,b,x,y,radius) = [int(v) for v in args[0:6]]
pygame.draw.circle(self.surface, (r,g,b), (x,y), radius, 0)
self.redrawNeeded = True
def load(self, args):
filename = args[0]
# print "ARGS", args
try:
loadedimage = pygame.image.load(filename)
except:
pass
else:
self.surface.blit(loadedimage, (0,0))
self.redrawNeeded = True
if not ( (len(args) >1) and args[1] == "nopropogate" ):
self.getimg(())
self.clean = True
def save(self, args):
filename = args[0]
try:
imagestring = pygame.image.tostring(self.surface,"RGB")
pilImage = Image.fromstring("RGB", self.surface.get_size(), imagestring)
pilImage.save(filename)
except NameError:
pygame.image.save(self.surface, filename)
self.clean = True
def loaddeck(self, args):
root = Tk()
root.withdraw()
filename = askopenfilename(filetypes=[("Zip Archives",".zip")],initialdir="Decks",title="Load Slide Deck",parent=root)
root.destroy()
root = Tk()
root.withdraw()
password = askstring("Deck Password","Please enter the password for this zip file, or leave blank if there is no password:", parent=root)
root.destroy()
if (filename):
try:
unzipped = ZipFile(filename)
self.clearscribbles("")
unzipped.extractall(path=self.notepad,pwd=password)
files = os.listdir(self.notepad)
files.sort()
loadstring = self.notepad + "/" + files[0]
self.send("first", "toHistory")
self.send(chr(0) + "CLRTKR", "toTicker")
self.send("Deck loaded successfully","toTicker")
except Exception, e:
self.send(chr(0) + "CLRTKR", "toTicker")
self.send("Failed to open the deck specified. You may have entered the password incorrectly","toTicker")
self.clean = True
def savedeck(self, args):
dt = datetime.now()
filename = dt.strftime("%Y%m%d-%H%M%S")
filename = filename + ".zip"
num_pages = len(os.listdir(self.notepad))
root = Tk()
root.withdraw()
password = askstring("Deck Password","Please enter a password for the zip file, or leave blank for no password:", parent=root)
root.destroy()
try:
if (password != ""):
#os.system("zip", "-j", "-q", "-P " + password, "Decks/" + filename, self.notepad + "/*.png")
os.system("zip -j -q -P " + password + " Decks/" + filename + " " + self.notepad + "/*.png")
self.send(chr(0) + "CLRTKR", "toTicker")
self.send("Zip file 'Decks/" + filename + "' created successfully with password","toTicker")
else:
os.system("zip -j -q Decks/" + filename + " " + self.notepad + "/*.png")
"""zipped = ZipFile('Decks/' + filename,'w') # This seems to have broken
for x in range(num_pages + 1):
if (x > 0):
zipped.write(self.notepad + "/" + "slide." + str(x) + ".png", "slide." + str(x) + ".png")
zipped.close()"""
self.send(chr(0) + "CLRTKR", "toTicker")
self.send("Zip file 'Decks/" + filename + "' created successfully without password","toTicker")
except Exception, e:
self.send(chr(0) + "CLRTKR", "toTicker")
self.send("Failed to write to zip file 'Decks/" + filename + "'","toTicker")
self.clean = True
def clearscribbles(self, args):
try:
#for x in os.listdir(self.notepad):
for x in os.listdir(self.notepad):
if (os.path.splitext(x)[1] == ".png"):
os.remove(self.notepad + "/" + x)
self.clear("")
self.send("first", "toHistory")
except Exception, e:
pass
self.clean = True
def deleteslide(self, args):
self.clear("")
self.send("delete", "toHistory")
self.clean = True
def getimg(self, args):
imagestring = pygame.image.tostring(self.surface,"RGB")
imagestring = zlib.compress(imagestring)
w,h = self.surface.get_size()
self.send( [["SETIMG",imagestring,str(w),str(h),"RGB"]], "outbox" )
# print "GETIMG"
def setimg(self, args):
w,h = int(args[1]), int(args[2])
imagestring = zlib.decompress(args[0])
recvsurface = pygame.image.frombuffer(imagestring, (w,h), args[3])
self.surface.blit(recvsurface, (0,0))
self.redrawNeeded = True
def write(self, args):
x,y,size,r,g,b = [int(a) for a in args[0:6]]
text = args[6]
font = pygame.font.Font(None,size)
textimg = font.render(text, self.antialias, (r,g,b))
self.surface.blit(textimg, (x,y))
self.redrawNeeded = True
def webcam(self, args):
snapshot = args[0]
imageorigin = args[1]
location = args[2]
self.surface.blit(snapshot, imageorigin) # temp
if (location == "local"):
imageorigin = (imageorigin[0], imageorigin[1] + 141)
self.surface.blit(snapshot, imageorigin)
self.redrawNeeded = True
self.send({"REDRAW":True, "surface":self.surface}, "toDisplay")
#self.send("dirty", "surfacechanged")
def quit(self, args):
root = Tk()
root.withdraw()
kill = False
if (askyesno("Confirm","Unsaved changes will be lost. Are you sure you want to quit?",parent=root)):
# perform quit
kill = True
#pygame.quit() # This isn't the right way to do it!
# Also, saving won't work as the program exits before it's happened
root.destroy()
if (kill):
print("Exiting")
self.scheduler.stop()
| |
# Copyright 2010-2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Storage backend for SWIFT"""
from __future__ import absolute_import
import hashlib
import httplib
import logging
import math
import urllib
import urlparse
from oslo.config import cfg
from glance.common import auth
from glance.common import exception
from glance.openstack.common import excutils
import glance.store
import glance.store.driver
import glance.store.location
try:
import swiftclient
except ImportError:
pass
LOG = logging.getLogger(__name__)

DEFAULT_CONTAINER = 'glance'
DEFAULT_LARGE_OBJECT_SIZE = 5 * 1024        # in MB -> 5 GB threshold
DEFAULT_LARGE_OBJECT_CHUNK_SIZE = 200       # in MB per uploaded segment
# NOTE(review): 1000 * 1024, not 1024 * 1024 -- confirm this mixed unit is
# intentional before "fixing" it; sizes above are multiplied by this value.
ONE_MB = 1000 * 1024

# Configuration options registered on oslo.config's global CONF below; the
# _() wrapper is assumed to be the gettext builtin installed elsewhere.
swift_opts = [
    cfg.BoolOpt('swift_enable_snet', default=False,
                help=_('Whether to use ServiceNET to communicate with the '
                       'Swift storage servers.')),
    cfg.StrOpt('swift_store_auth_address',
               help=_('The address where the Swift authentication service '
                      'is listening.')),
    cfg.StrOpt('swift_store_user', secret=True,
               help=_('The user to authenticate against the Swift '
                      'authentication service')),
    cfg.StrOpt('swift_store_key', secret=True,
               help=_('Auth key for the user authenticating against the '
                      'Swift authentication service.')),
    cfg.StrOpt('swift_store_auth_version', default='2',
               help=_('Version of the authentication service to use. '
                      'Valid versions are 2 for keystone and 1 for swauth '
                      'and rackspace')),
    cfg.BoolOpt('swift_store_auth_insecure', default=False,
                help=_('If True, swiftclient won\'t check for a valid SSL '
                       'certificate when authenticating.')),
    cfg.StrOpt('swift_store_region',
               help=_('The region of the swift endpoint to be used for '
                      'single tenant. This setting is only necessary if the '
                      'tenant has multiple swift endpoints.')),
    cfg.StrOpt('swift_store_endpoint_type', default='publicURL',
               help=_('A string giving the endpoint type of the swift '
                      'service to use (publicURL, adminURL or internalURL). '
                      'This setting is only used if swift_store_auth_version '
                      'is 2.')),
    cfg.StrOpt('swift_store_service_type', default='object-store',
               help=_('A string giving the service type of the swift service '
                      'to use. This setting is only used if '
                      'swift_store_auth_version is 2.')),
    cfg.StrOpt('swift_store_container',
               default=DEFAULT_CONTAINER,
               help=_('Container within the account that the account should '
                      'use for storing images in Swift.')),
    cfg.IntOpt('swift_store_large_object_size',
               default=DEFAULT_LARGE_OBJECT_SIZE,
               help=_('The size, in MB, that Glance will start chunking image '
                      'files and do a large object manifest in Swift')),
    cfg.IntOpt('swift_store_large_object_chunk_size',
               default=DEFAULT_LARGE_OBJECT_CHUNK_SIZE,
               help=_('The amount of data written to a temporary disk buffer '
                      'during the process of chunking the image file.')),
    cfg.BoolOpt('swift_store_create_container_on_put', default=False,
                help=_('A boolean value that determines if we create the '
                       'container if it does not exist.')),
    cfg.BoolOpt('swift_store_multi_tenant', default=False,
                help=_('If set to True, enables multi-tenant storage '
                       'mode which causes Glance images to be stored in '
                       'tenant specific Swift accounts.')),
    cfg.ListOpt('swift_store_admin_tenants', default=[],
                help=_('A list of tenants that will be granted read/write '
                       'access on all Swift containers created by Glance in '
                       'multi-tenant mode.')),
    cfg.BoolOpt('swift_store_ssl_compression', default=True,
                help=_('If set to False, disables SSL layer compression of '
                       'https swift requests. Setting to False may improve '
                       'performance for images which are already in a '
                       'compressed format, eg qcow2.')),
]

CONF = cfg.CONF
CONF.register_opts(swift_opts)
class StoreLocation(glance.store.location.StoreLocation):
    """
    Class describing a Swift URI. A Swift URI can look like any of
    the following:

        swift://user:pass@authurl.com/container/obj-id
        swift://account:user:pass@authurl.com/container/obj-id
        swift+http://user:pass@authurl.com/container/obj-id
        swift+https://user:pass@authurl.com/container/obj-id

    When using multi-tenant a URI might look like this (a storage URL):

        swift+https://example.com/container/obj-id

    The swift+http:// URIs indicate there is an HTTP authentication URL.
    The default for Swift is an HTTPS authentication URL, so swift:// and
    swift+https:// are the same...
    """

    def process_specs(self):
        """Populate attributes from the ``specs`` dict supplied by a store."""
        self.scheme = self.specs.get('scheme', 'swift+https')
        self.user = self.specs.get('user')
        self.key = self.specs.get('key')
        self.auth_or_store_url = self.specs.get('auth_or_store_url')
        self.container = self.specs.get('container')
        self.obj = self.specs.get('obj')

    def _get_credstring(self):
        """Return 'user:key@' (URL-quoted), or '' when credentials are absent."""
        if self.user and self.key:
            return '%s:%s@' % (urllib.quote(self.user), urllib.quote(self.key))
        return ''

    def get_uri(self):
        """Reassemble the full swift URI from the parsed components."""
        auth_or_store_url = self.auth_or_store_url
        # The scheme prefix of the auth/storage URL is folded into our own
        # swift+http(s) scheme, so strip it before joining.
        if auth_or_store_url.startswith('http://'):
            auth_or_store_url = auth_or_store_url[len('http://'):]
        elif auth_or_store_url.startswith('https://'):
            auth_or_store_url = auth_or_store_url[len('https://'):]

        credstring = self._get_credstring()
        auth_or_store_url = auth_or_store_url.strip('/')
        container = self.container.strip('/')
        obj = self.obj.strip('/')

        return '%s://%s%s/%s/%s' % (self.scheme, credstring, auth_or_store_url,
                                    container, obj)

    def parse_uri(self, uri):
        """
        Parse URLs. This method fixes an issue where credentials specified
        in the URL are interpreted differently in Python 2.6.1+ than prior
        versions of Python. It also deals with the peculiarity that new-style
        Swift URIs have where a username can contain a ':', like so:

            swift://account:user:pass@authurl.com/container/obj

        :raises exception.BadStoreUri: when the URI is malformed
        """
        # Make sure that URIs that contain multiple schemes, such as:
        # swift://user:pass@http://authurl.com/v1/container/obj
        # are immediately rejected.
        if uri.count('://') != 1:
            reason = _("URI cannot contain more than one occurrence "
                       "of a scheme. If you have specified a URI like "
                       "swift://user:pass@http://authurl.com/v1/container/obj"
                       ", you need to change it to use the "
                       "swift+http:// scheme, like so: "
                       "swift+http://user:pass@authurl.com/v1/container/obj")
            LOG.debug(_("Invalid store URI: %(reason)s"), {'reason': reason})
            # Fix: the module imports ``from glance.common import exception``;
            # the previous ``exceptions.BadStoreUri`` was an undefined name.
            raise exception.BadStoreUri(message=reason)

        pieces = urlparse.urlparse(uri)
        # NOTE(review): assert is stripped under -O; consider raising
        # BadStoreUri for unknown schemes instead.
        assert pieces.scheme in ('swift', 'swift+http', 'swift+https')
        self.scheme = pieces.scheme
        netloc = pieces.netloc
        path = pieces.path.lstrip('/')
        if netloc != '':
            # > Python 2.6.1
            if '@' in netloc:
                creds, netloc = netloc.split('@')
            else:
                creds = None
        else:
            # Python 2.6.1 compat
            # see lp659445 and Python issue7904
            if '@' in path:
                creds, path = path.split('@')
            else:
                creds = None
            netloc = path[0:path.find('/')].strip('/')
            path = path[path.find('/'):].strip('/')
        if creds:
            cred_parts = creds.split(':')
            # NOTE(review): a three-part "account:user:pass" credential (see
            # docstring) is rejected here -- confirm whether that form must
            # be supported.
            if len(cred_parts) != 2:
                reason = (_("Badly formed credentials in Swift URI."))
                LOG.debug(reason)
                raise exception.BadStoreUri()
            user, key = cred_parts
            self.user = urllib.unquote(user)
            self.key = urllib.unquote(key)
        else:
            self.user = None
            self.key = None
        path_parts = path.split('/')
        try:
            self.obj = path_parts.pop()
            self.container = path_parts.pop()
            if not netloc.startswith('http'):
                # push hostname back into the remaining to build full authurl
                path_parts.insert(0, netloc)
                self.auth_or_store_url = '/'.join(path_parts)
        except IndexError:
            reason = _("Badly formed Swift URI.")
            LOG.debug(reason)
            raise exception.BadStoreUri()

    @property
    def swift_url(self):
        """
        Creates a fully-qualified auth url that the Swift client library can
        use. The scheme for the auth_url is determined using the scheme
        included in the `location` field.

        HTTPS is assumed, unless 'swift+http' is specified.
        """
        if self.auth_or_store_url.startswith('http'):
            return self.auth_or_store_url
        else:
            if self.scheme in ('swift+https', 'swift'):
                auth_scheme = 'https://'
            else:
                auth_scheme = 'http://'
            return ''.join([auth_scheme, self.auth_or_store_url])
def Store(context=None, loc=None):
    """Factory returning the appropriate Swift store implementation.

    The multi-tenant store is used only when it is enabled in configuration
    AND the location (if any) carries no embedded user credentials;
    otherwise the single-tenant store is returned.
    """
    multi_tenant = CONF.swift_store_multi_tenant
    no_embedded_creds = loc is None or loc.store_location.user is None
    if multi_tenant and no_embedded_creds:
        return MultiTenantStore(context, loc)
    return SingleTenantStore(context, loc)
class BaseStore(glance.store.driver.Store):
    """Common Swift behaviour shared by the single- and multi-tenant stores.

    Subclasses must implement get_connection() and create_location().
    """

    # Bytes streamed back to the caller per chunk on GET.
    CHUNKSIZE = 65536

    def get_schemes(self):
        """Return the URI schemes this store handles."""
        return ('swift+https', 'swift', 'swift+http')

    def configure(self):
        """Cache the swift_* configuration options on the instance."""
        _obj_size = self._option_get('swift_store_large_object_size')
        self.large_object_size = _obj_size * ONE_MB
        _chunk_size = self._option_get('swift_store_large_object_chunk_size')
        self.large_object_chunk_size = _chunk_size * ONE_MB
        self.admin_tenants = CONF.swift_store_admin_tenants
        self.region = CONF.swift_store_region
        self.service_type = CONF.swift_store_service_type
        self.endpoint_type = CONF.swift_store_endpoint_type
        self.snet = CONF.swift_enable_snet
        self.insecure = CONF.swift_store_auth_insecure
        self.ssl_compression = CONF.swift_store_ssl_compression

    def get(self, location, offset=0, chunk_size=None, context=None,
            connection=None):
        """Return (chunk iterator, length) for the image at ``location``.

        :param location: glance.store Location wrapping a StoreLocation
        :param connection: optional pre-built swiftclient connection.
            Fix: this parameter was missing, so the ``if not connection``
            check below raised UnboundLocalError on every call.
        :raises exception.NotFound: if the object is not in Swift

        NOTE(review): ``offset`` and ``chunk_size`` are accepted for
        interface compatibility but are currently ignored.
        """
        location = location.store_location
        if not connection:
            connection = self.get_connection(location)
        try:
            resp_headers, resp_body = connection.get_object(
                container=location.container, obj=location.obj,
                resp_chunk_size=self.CHUNKSIZE)
        except swiftclient.ClientException as e:
            if e.http_status == httplib.NOT_FOUND:
                msg = _("Swift could not find object %s.") % location.obj
                LOG.warn(msg)
                # Fix: the module imports ``exception`` (singular);
                # ``exceptions`` was an undefined name here.
                raise exception.NotFound(msg)
            else:
                raise

        class ResponseIndexable(glance.store.Indexable):
            def another(self):
                try:
                    return self.wrapped.next()
                except StopIteration:
                    return ''

        length = int(resp_headers.get('content-length', 0))
        return (ResponseIndexable(resp_body, length), length)

    def get_size(self, location, connection=None):
        """Return the stored object's size in bytes, or 0 when unknown."""
        location = location.store_location
        if not connection:
            connection = self.get_connection(location)
        try:
            resp_headers = connection.head_object(
                container=location.container, obj=location.obj)
            return int(resp_headers.get('content-length', 0))
        except Exception:
            # Best-effort: any failure (missing object, auth, network)
            # reports an unknown size rather than propagating.
            return 0

    def _option_get(self, param):
        """Return a required config option or raise BadStoreConfiguration."""
        result = getattr(CONF, param)
        if not result:
            reason = (_("Could not find %(param)s in configuration "
                        "options.") % {'param': param})
            LOG.error(reason)
            raise exception.BadStoreConfiguration(store_name="swift",
                                                  reason=reason)
        return result

    def _delete_stale_chunks(self, connection, container, chunk_list):
        """Best-effort removal of segment objects left by a failed upload."""
        for chunk in chunk_list:
            LOG.debug(_("Deleting chunk %s") % chunk)
            try:
                connection.delete_object(container, chunk)
            except Exception:
                msg = _("Failed to delete orphaned chunk %s/%s")
                LOG.exception(msg, container, chunk)

    def add(self, image_id, image_file, image_size, connection=None):
        """Store ``image_file`` in Swift and return
        (uri, size, checksum, metadata dict).

        Small images (< large_object_size) are uploaded with a single PUT;
        larger (or unknown-size) images are uploaded as segments plus a
        zero-byte manifest object.

        :raises exception.Duplicate: if the object already exists
        :raises glance.store.BackendException: on any other Swift failure
        """
        location = self.create_location(image_id)
        if not connection:
            connection = self.get_connection(location)
        self._create_container_if_missing(location.container, connection)
        LOG.debug(_("Adding image object '%(obj_name)s' "
                    "to Swift") % dict(obj_name=location.obj))
        try:
            if image_size > 0 and image_size < self.large_object_size:
                # Image size is known, and is less than large_object_size.
                # Send to Swift with regular PUT.
                obj_etag = connection.put_object(location.container,
                                                 location.obj, image_file,
                                                 content_length=image_size)
            else:
                # Write the image into Swift in chunks.
                chunk_id = 1
                if image_size > 0:
                    total_chunks = str(int(
                        math.ceil(float(image_size) /
                                  float(self.large_object_chunk_size))))
                else:
                    # image_size == 0 is when we don't know the size
                    # of the image. This can occur with older clients
                    # that don't inspect the payload size.
                    LOG.debug(_("Cannot determine image size. Adding as a "
                                "segmented object to Swift."))
                    total_chunks = '?'

                checksum = hashlib.md5()
                written_chunks = []
                combined_chunks_size = 0
                while True:
                    chunk_size = self.large_object_chunk_size
                    if image_size == 0:
                        content_length = None
                    else:
                        left = image_size - combined_chunks_size
                        if left == 0:
                            break
                        if chunk_size > left:
                            chunk_size = left
                        content_length = chunk_size

                    chunk_name = "%s-%05d" % (location.obj, chunk_id)
                    reader = ChunkReader(image_file, checksum, chunk_size)
                    try:
                        chunk_etag = connection.put_object(
                            location.container, chunk_name, reader,
                            content_length=content_length)
                        written_chunks.append(chunk_name)
                    except Exception:
                        # Delete orphaned segments from swift backend
                        with excutils.save_and_reraise_exception():
                            LOG.exception(_("Error during chunked upload to "
                                            "backend, deleting stale chunks"))
                            self._delete_stale_chunks(connection,
                                                      location.container,
                                                      written_chunks)

                    bytes_read = reader.bytes_read
                    msg = (_("Wrote chunk %(chunk_name)s (%(chunk_id)d/"
                             "%(total_chunks)s) of length %(bytes_read)d "
                             "to Swift returning MD5 of content: "
                             "%(chunk_etag)s") %
                           {'chunk_name': chunk_name,
                            'chunk_id': chunk_id,
                            'total_chunks': total_chunks,
                            'bytes_read': bytes_read,
                            'chunk_etag': chunk_etag})
                    LOG.debug(msg)

                    if bytes_read == 0:
                        # Delete the last chunk, because it's of zero size.
                        # This will happen if size == 0.
                        LOG.debug(_("Deleting final zero-length chunk"))
                        connection.delete_object(location.container,
                                                 chunk_name)
                        break

                    chunk_id += 1
                    combined_chunks_size += bytes_read

                # In the case we have been given an unknown image size,
                # set the size to the total size of the combined chunks.
                if image_size == 0:
                    image_size = combined_chunks_size

                # Now we write the object manifest and return the
                # manifest's etag...
                manifest = "%s/%s-" % (location.container, location.obj)
                headers = {'ETag': hashlib.md5("").hexdigest(),
                           'X-Object-Manifest': manifest}

                # The ETag returned for the manifest is actually the
                # MD5 hash of the concatenated checksums of the strings
                # of each chunk...so we ignore this result in favour of
                # the MD5 of the entire image file contents, so that
                # users can verify the image file contents accordingly
                connection.put_object(location.container, location.obj,
                                      None, headers=headers)
                obj_etag = checksum.hexdigest()

            # NOTE: We return the user and key here! Have to because
            # location is used by the API server to return the actual
            # image data. We *really* should consider NOT returning
            # the location attribute from GET /images/<ID> and
            # GET /images/details
            return (location.get_uri(), image_size, obj_etag, {})
        except swiftclient.ClientException as e:
            if e.http_status == httplib.CONFLICT:
                raise exception.Duplicate(_("Swift already has an image at "
                                            "this location"))
            msg = (_("Failed to add object to Swift.\n"
                     "Got error from Swift: %(e)s") % {'e': e})
            LOG.error(msg)
            raise glance.store.BackendException(msg)

    def delete(self, location, connection=None):
        """Delete the object (and, for segmented uploads, every segment).

        :raises exception.NotFound: if the object is not in Swift
        """
        location = location.store_location
        if not connection:
            connection = self.get_connection(location)
        try:
            # We request the manifest for the object. If one exists,
            # that means the object was uploaded in chunks/segments,
            # and we need to delete all the chunks as well as the
            # manifest.
            manifest = None
            try:
                headers = connection.head_object(
                    location.container, location.obj)
                manifest = headers.get('x-object-manifest')
            except swiftclient.ClientException as e:
                if e.http_status != httplib.NOT_FOUND:
                    raise
            if manifest:
                # Delete all the chunks before the object manifest itself
                obj_container, obj_prefix = manifest.split('/', 1)
                segments = connection.get_container(
                    obj_container, prefix=obj_prefix)[1]
                for segment in segments:
                    # TODO(jaypipes): This would be an easy area to parallelize
                    # since we're simply sending off parallelizable requests
                    # to Swift to delete stuff. It's not like we're going to
                    # be hogging up network or file I/O here...
                    connection.delete_object(obj_container,
                                             segment['name'])

            # Delete object (or, in segmented case, the manifest)
            connection.delete_object(location.container, location.obj)

        except swiftclient.ClientException as e:
            if e.http_status == httplib.NOT_FOUND:
                msg = _("Swift could not find image at URI.")
                raise exception.NotFound(msg)
            else:
                raise

    def _create_container_if_missing(self, container, connection):
        """
        Creates a missing container in Swift if the
        ``swift_store_create_container_on_put`` option is set.

        :param container: Name of container to create
        :param connection: Connection to swift service
        """
        try:
            connection.head_container(container)
        except swiftclient.ClientException as e:
            if e.http_status == httplib.NOT_FOUND:
                if CONF.swift_store_create_container_on_put:
                    try:
                        connection.put_container(container)
                    except swiftclient.ClientException as e:
                        msg = (_("Failed to add container to Swift.\n"
                                 "Got error from Swift: %(e)s") % {'e': e})
                        raise glance.store.BackendException(msg)
                else:
                    # Fix: added the missing space that previously produced
                    # "...option""to add..." -> "optionto add".
                    msg = (_("The container %(container)s does not exist in "
                             "Swift. Please set the "
                             "swift_store_create_container_on_put option "
                             "to add container to Swift automatically.") %
                           {'container': container})
                    raise glance.store.BackendException(msg)
            else:
                raise

    def get_connection(self):
        # Fix: ``NotImplemented`` is a constant and is not callable; the
        # intended abstract-method exception is NotImplementedError.
        raise NotImplementedError()

    def create_location(self):
        raise NotImplementedError()
class SingleTenantStore(BaseStore):
    """Swift store that authenticates with one configured service account;
    credentials are embedded in each image's stored URI."""

    EXAMPLE_URL = "swift://<USER>:<KEY>@<AUTH_ADDRESS>/<CONTAINER>/<FILE>"

    def configure(self):
        super(SingleTenantStore, self).configure()
        self.auth_version = self._option_get('swift_store_auth_version')

    def configure_add(self):
        """Read the options needed for writes; infer our URI scheme from the
        configured auth endpoint's scheme."""
        self.auth_address = self._option_get('swift_store_auth_address')
        if self.auth_address.startswith('http://'):
            self.scheme = 'swift+http'
        else:
            self.scheme = 'swift+https'
        self.container = CONF.swift_store_container
        self.user = self._option_get('swift_store_user')
        self.key = self._option_get('swift_store_key')

    def create_location(self, image_id):
        """Build the StoreLocation for a new image object."""
        specs = {'scheme': self.scheme,
                 'container': self.container,
                 'obj': str(image_id),
                 'auth_or_store_url': self.auth_address,
                 'user': self.user,
                 'key': self.key}
        return StoreLocation(specs)

    def get_connection(self, location):
        """Create an authenticated swiftclient Connection for ``location``.

        :raises exception.BadStoreUri: when credentials are missing or, for
            keystone (v2) auth, not in "tenant:user" form
        """
        if not location.user:
            reason = (_("Location is missing user:password information."))
            LOG.debug(reason)
            # Fix: module imports ``exception`` (singular); ``exceptions``
            # was an undefined name here.
            raise exception.BadStoreUri(message=reason)

        auth_url = location.swift_url
        if not auth_url.endswith('/'):
            auth_url += '/'

        if self.auth_version == '2':
            try:
                tenant_name, user = location.user.split(':')
            except ValueError:
                reason = (_("Badly formed tenant:user '%(user)s' in "
                            "Swift URI") % {'user': location.user})
                LOG.debug(reason)
                raise exception.BadStoreUri()
        else:
            # v1 (swauth/rackspace) auth: the whole string is the user.
            tenant_name = None
            user = location.user

        os_options = {}
        if self.region:
            os_options['region_name'] = self.region
        os_options['endpoint_type'] = self.endpoint_type
        os_options['service_type'] = self.service_type

        return swiftclient.Connection(
            auth_url, user, location.key, insecure=self.insecure,
            tenant_name=tenant_name, snet=self.snet,
            auth_version=self.auth_version, os_options=os_options,
            ssl_compression=self.ssl_compression)
class MultiTenantStore(BaseStore):
    """Swift store that writes images into per-tenant accounts using the
    request context's token instead of configured credentials."""

    EXAMPLE_URL = "swift://<SWIFT_URL>/<CONTAINER>/<FILE>"

    def configure_add(self):
        """Resolve the tenant's object-store endpoint from the request
        context's service catalog.

        :raises exception.BadStoreConfiguration: when no context/catalog
        """
        self.container = CONF.swift_store_container
        if self.context is None:
            reason = _("Multi-tenant Swift storage requires a context.")
            # Fix: module imports ``exception`` (singular); ``exceptions``
            # was an undefined name here.
            raise exception.BadStoreConfiguration(store_name="swift",
                                                  reason=reason)
        if self.context.service_catalog is None:
            reason = _("Multi-tenant Swift storage requires "
                       "a service catalog.")
            raise exception.BadStoreConfiguration(store_name="swift",
                                                  reason=reason)
        self.storage_url = auth.get_endpoint(
            self.context.service_catalog, service_type=self.service_type,
            endpoint_region=self.region, endpoint_type=self.endpoint_type)
        if self.storage_url.startswith('http://'):
            self.scheme = 'swift+http'
        else:
            self.scheme = 'swift+https'

    def delete(self, location, connection=None):
        """Delete the image object, then its dedicated per-image container."""
        if not connection:
            connection = self.get_connection(location.store_location)
        super(MultiTenantStore, self).delete(location, connection)
        connection.delete_container(location.store_location.container)

    def set_acls(self, location, public=False, read_tenants=None,
                 write_tenants=None, connection=None):
        """Apply container read/write ACLs; admin tenants always get write.

        :raises exception.NotFound: if the container is not in Swift
        """
        location = location.store_location
        if not connection:
            connection = self.get_connection(location)
        if read_tenants is None:
            read_tenants = []
        if write_tenants is None:
            write_tenants = []

        headers = {}
        if public:
            headers['X-Container-Read'] = ".r:*,.rlistings"
        elif read_tenants:
            headers['X-Container-Read'] = ','.join('%s:*' % i
                                                   for i in read_tenants)
        else:
            headers['X-Container-Read'] = ''

        write_tenants.extend(self.admin_tenants)
        if write_tenants:
            headers['X-Container-Write'] = ','.join('%s:*' % i
                                                    for i in write_tenants)
        else:
            headers['X-Container-Write'] = ''

        try:
            connection.post_container(location.container, headers=headers)
        except swiftclient.ClientException as e:
            if e.http_status == httplib.NOT_FOUND:
                msg = _("Swift could not find image at URI.")
                raise exception.NotFound(msg)
            else:
                raise

    def create_location(self, image_id):
        """Build a StoreLocation with a per-image container name."""
        specs = {'scheme': self.scheme,
                 'container': self.container + '_' + str(image_id),
                 'obj': str(image_id),
                 'auth_or_store_url': self.storage_url}
        return StoreLocation(specs)

    def get_connection(self, location):
        """Create a pre-authenticated Connection from the context's token."""
        return swiftclient.Connection(
            None, self.context.user, None,
            preauthurl=location.swift_url,
            preauthtoken=self.context.auth_tok,
            tenant_name=self.context.tenant,
            auth_version='2', snet=self.snet, insecure=self.insecure,
            ssl_compression=self.ssl_compression)
class ChunkReader(object):
    """File-like wrapper that serves at most ``total`` bytes from ``fd``
    while folding everything it returns into ``checksum``.

    Used to slice one large image stream into fixed-size Swift segments
    whose combined MD5 is accumulated across readers.
    """

    def __init__(self, fd, checksum, total):
        self.fd = fd
        self.checksum = checksum
        self.total = total
        # Number of bytes handed out so far by this reader.
        self.bytes_read = 0

    def read(self, i):
        """Return up to ``i`` bytes, never exceeding the ``total`` budget."""
        remaining = self.total - self.bytes_read
        request = min(i, remaining)
        data = self.fd.read(request)
        self.bytes_read += len(data)
        self.checksum.update(data)
        return data
| |
""" PLC 1 """
from minicps.devices import PLC
from threading import Thread
from utils import *
from random import *
import json
from decimal import Decimal
import select
import socket
import time
# (tag name, offset) tuples passed to minicps get/receive -- assumed to
# match the project's EtherNet/IP server tag layout; TODO confirm.
Q101 = ('Q101', 1)
Q102 = ('Q102', 1)
LIT101 = ('LIT101', 1)
LIT102 = ('LIT102', 1)
LIT103 = ('LIT103', 1)

# Network addresses resolved from the shared IP map star-imported from utils.
SENSOR_ADDR = IP['lit101']
LIT_102_ADDR = IP['lit102']
IDS_ADDR = IP['ids101']
PLC_ADDR = IP['plc101']
#lit103 = Y30
#lit103_prev = Y30
class Lit101Socket(Thread):
    """ Class that receives water level from the water_tank.py """
    # Runs beside the PLC loop, continuously refreshing
    # plc.received_lit101 and the reception timestamp plc.lit_rec_time.

    def __init__(self, plc_object):
        Thread.__init__(self)
        # Shared PLC101 instance whose attributes this thread updates.
        self.plc = plc_object

    def run(self):
        """Poll LIT101 readings until the PLC's sample budget is exhausted."""
        while (self.plc.count <= PLC_SAMPLES):
            try:
                self.plc.received_lit101 = float(self.plc.receive(LIT101, SENSOR_ADDR))
                self.plc.lit_rec_time = time.time()
            except KeyboardInterrupt:
                print "\nCtrl+C was hitten, stopping server"
                # NOTE(review): `client` is undefined in this scope -- this
                # line would raise NameError if the handler ever ran.
                client.close()
                break
        print "Socket closed"
class Lit102Socket(Thread):
    """ Class that receives water level from the water_tank.py """
    # Same pattern as Lit101Socket but for the LIT102 level; currently
    # unused -- its instantiation in main_loop is commented out.

    def __init__(self, plc_object):
        Thread.__init__(self)
        self.plc = plc_object

    def run(self):
        """Poll LIT102 readings until the PLC's sample budget is exhausted."""
        while (self.plc.count <= PLC_SAMPLES):
            try:
                self.plc.received_lit102 = float(self.plc.receive(LIT102, LIT_102_ADDR))
            except KeyboardInterrupt:
                print "\nCtrl+C was hitten, stopping server"
                # NOTE(review): `client` is undefined in this scope -- this
                # line would raise NameError if the handler ever ran.
                client.close()
                break
        print "Socket closed"
class PLC101(PLC):
    def send_message(self, ipaddr, port, message):
        """Open a TCP connection to (ipaddr, port) and push a JSON 'Report'
        payload, waiting up to 5 s for the socket to become writable."""
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.connect((ipaddr, port))
        msg_dict = dict.fromkeys(['Type', 'Variable'])
        msg_dict['Type'] = "Report"
        msg_dict['Variable'] = message
        # NOTE(review): this JSON-encodes the *repr* of the dict (a single
        # string), not the dict itself -- receivers get a quoted string.
        # Confirm the peer expects that; json.dumps(msg_dict) would send an
        # actual JSON object.
        message = json.dumps(str(msg_dict))
        try:
            ready_to_read, ready_to_write, in_error = select.select([sock, ], [sock, ], [], 5)
        except:
            print "Socket error"
            return
        # NOTE(review): ready_to_write is a list; comparing list > int is
        # always True on Python 2, so the 5 s select timeout is effectively
        # ignored -- len(ready_to_write) > 0 was probably intended.
        if(ready_to_write > 0):
            sock.send(message)
        sock.close()
def change_references(self):
if self.count <= 50:
self.ref_y0 = 0.4
if self.count > 50 and self.count <= 350:
self.ref_y0 = 0.450
if self.count > 350:
self.ref_y0 = 0.4
if self.count <= 70:
self.ref_y1 = 0.2
if self.count > 70 and self.count <= 400:
self.ref_y1 = 0.225
if self.count > 400:
self.ref_y1 = 0.2
def saturar_xhat(self, valores):
for i in range(len(valores)):
if valores[i] > self.xmax[i]:
valores[i] = self.xmax[i]
if valores[i] < self.xmin[i]:
valores[i] = self.xmin[i]
return valores
def saturar_inc(self, valores):
for i in range(len(valores)):
if valores[i] > self.xmax[i]:
valores[i] = self.xmax[i]
if valores[i] < self.xmin[i]:
valores[i] = self.xmin[i]
return valores
    def pre_loop(self, sleep=0.1):
        """Initialise controller/observer state before main_loop runs.

        NOTE(review): the ``sleep`` parameter is unused here.  The matrices
        and constants (np, K1, K2, T1, T2, B, F1, F2, Aobsv, Bobsv, Cobsv,
        Gobsv, ...) are assumed to come from the ``utils`` star import --
        confirm.
        """
        # Controller Initial Conditions
        self.count = 0
        self.received_lit101 = 0.4
        self.received_lit102 = 0.2
        self.lit101 = 0.0
        self.lit102 = 0.0
        # NOTE(review): local variable, immediately discarded -- main_loop
        # re-reads lit103 itself; this line looks like dead code.
        lit103 = 0.0
        self.q1 = 0.0
        self.q2 = 0.0
        self.z = np.array([[0.0],[0.0]])
        self.xhat = np.array([[0.0],[0.0],[0.0]])
        # Unknown-input-observer internal states.
        self.w1 = np.array([[0.0],[0.0],[0.0]])
        self.w2 = np.array([[0.0],[0.0],[0.0]])
        self.K1K2 = np.concatenate((K1,K2),axis=1)
        self.prev_inc_i = np.array([[0.0],[0.0]])
        self.ym=np.array([[0.0],[0.0]])
        self.ya=np.array([[0.0],[0.0]])
        self.yr=np.array([[0.0],[0.0]])
        self.prev_ya=np.array([[0.0],[0.0]])
        # State box constraints used by the saturation helpers.
        self.xmin = [-0.4, -0.2, -0.3]
        self.xmax = [0.22, 0.42, 0.32]
        # Input constraints (not used by the saturation helpers).
        self.umin = [-4e-5, -4e-5]
        self.umax = [5e-5, 5e-5]
        # Loop-invariant observer matrix products, precomputed once.
        self.prod_1 = Aobsv-(np.matmul(np.matmul(Gobsv,Cobsv),Aobsv))
        self.prod_2 = Bobsv-(np.matmul(np.matmul(Gobsv,Cobsv),Bobsv))
        self.prod_3 = np.matmul(T1,B)
        self.prod_4 = np.matmul(T2,B)
        # UIO residual alarm flags and detection threshold.
        self.tim_uio_1 = 0
        self.tim_uio_2 = 0
        #self.th_uio_on = 0.003*2
        self.th_uio_on = 0.0015
        self.bad_lit_flag = 0
        self.defense = 0.0
    def main_loop(self):
        """plc1 main loop.
        - reads sensors value
        - drives actuators according to the control strategy
        - updates its enip server

        Each iteration: update references, read levels, run the two
        unknown-input observers, reconstruct the output, run the
        state-feedback + integral controller, and send the flow commands.
        """
        # Background receiver thread for the remote LIT101 level readings.
        lit101socket = Lit101Socket(self)
        lit101socket.start()
        #lit102socket = Lit102Socket(self)
        #lit102socket.start()
        begin = time.time()
        #print " %Begin ", begin
        # Per-cycle timing diagnostics (reported by the commented prints below).
        while_begin = 0.0
        self.lit_rec_time =0.0
        control_time = 0.0
        act_send_time = 0.0
        time_btw_cycles = 0.0
        while(self.count <= PLC_SAMPLES):
            try:
                time_btw_cycles = time.time()
                self.change_references()
                # Shift raw readings to deviations around the linearisation
                # point (Y10/Y20/Y30 offsets).
                self.lit101 = self.received_lit101 - Y10
                #self.lit102 = self.received_lit102 - Y20
                self.lit102 = float(self.get(LIT102)) - Y20
                lit103 = float(self.get(LIT103)) - Y30
                self.ym[0,0]=self.lit101
                self.ym[1,0]=self.lit102
                #self.xhat = np.matmul((Aobsv-(np.matmul(np.matmul(Gobsv,Cobsv),Aobsv))),self.xhat) + np.matmul((Bobsv-(np.matmul(np.matmul(Gobsv,Cobsv),Bobsv))),self.prev_inc_i) + np.matmul(Gobsv,self.ya)
                self.ya[0,0]=self.ym[0,0]
                self.ya[1,0]=self.ym[1,0]
                # Unknown-input observer #1: residual on the measured outputs.
                self.w1 = np.matmul(F1, self.w1) + np.matmul(self.prod_3,self.prev_inc_i) + Ksp1*self.prev_ya[1,0]
                self.zhat_uio1 = self.w1 + Hsp1*self.ya[1,0]
                self.ruio1 = self.ya - np.matmul(Cobsv,self.zhat_uio1 )
                #print self.count, " ", self.ruio1[0]
                #print self.count, " ", self.ruio1.transpose()
                # Unknown-input observer #2 (symmetric to UIO #1).
                self.w2 = np.matmul(F2, self.w2) + np.matmul(self.prod_4,self.prev_inc_i) + Ksp2*self.prev_ya[0,0]
                self.zhat_uio2 = self.w2 + Hsp2*self.ya[0,0]
                self.ruio2 = self.ya - np.matmul(Cobsv,self.zhat_uio2 )
                print self.count, " ", self.ruio2.transpose()
                # Flag each UIO whose residual exceeds the detection threshold.
                if abs(self.ruio1[0]) >= self.th_uio_on:
                    self.tim_uio_1 = 1
                else:
                    self.tim_uio_1 = 0
                if abs(self.ruio2[1]) >= self.th_uio_on:
                    self.tim_uio_2 = 1
                else:
                    self.tim_uio_2 = 0
                #print self.count, " ", self.tim_uio_1
                #print self.count, " ", self.tim_uio_2
                # Cross-observer correction terms, enabled by the UIO flags.
                self.v1 = np.matmul(Cobsv[0],(self.zhat_uio1-self.zhat_uio2))*self.tim_uio_1
                #print self.count, " ", self.v1
                self.v2 = np.matmul(Cobsv[1],(self.zhat_uio2-self.zhat_uio1))*self.tim_uio_2
                #print self.count, " ", self.v2
                self.v_total=np.array([[self.v1[0]],[self.v2[0]]])
                #print self.count, " ", self.v_total.transpose()
                # Reconstructed output; self.defense == 0.0 disables the term.
                self.yr = self.ya + self.defense*self.v_total
                #self.yr = self.ya
                self.xhat = np.matmul(self.prod_1,self.xhat) + np.matmul(self.prod_2,self.prev_inc_i) + np.matmul(Gobsv,self.ya)
                self.xhat=self.saturar_xhat(self.xhat)
                # Augmented state [xhat; z] drives the combined gain K1K2.
                self.xhatz=np.concatenate((self.xhat,self.z), axis=0)
                self.current_inc_i = np.matmul(-self.K1K2,self.xhatz)
                self.current_inc_i = self.saturar_inc(self.current_inc_i)
                self.prev_inc_i = self.current_inc_i
                self.prev_ya = self.ya
                # Compute the tracking error of L1, L2 (self.lit101' and self.lit102')
                self.lit101_error = self.ref_y0 - self.yr[0,0] - Y10
                self.lit102_error = self.ref_y1 - self.yr[1,0] - Y20
                # Z(k+1) = z(k) + error(k)
                self.z[0,0] = self.z[0,0] + self.lit101_error
                self.z[1,0] = self.z[1,0] + self.lit102_error
                # Absolute flow commands = operating point + increment.
                self.q1 = Q1 + self.current_inc_i[0]
                self.q2 = Q2 + self.current_inc_i[1]
                #self.set(Q101, float(self.q1))
                #self.set(Q102, float(self.q2))
                control_time = time.time() - time_btw_cycles
                # Push actuator commands to the pumps over the network.
                self.send_message(IP['q101'], 7842 ,float(self.q1))
                self.send_message(IP['q102'], 7842 ,float(self.q2))
                act_send_time = time.time() - control_time
                self.count = self.count + 1
                #print "% control ", control_time
                #print "% act send ", act_send_time
                #print "% btw ", time_btw_cycles
                #print "% lit rec ", self.lit_rec_time
                time.sleep(PLC_PERIOD_SEC)
            except Exception as e:
                # Any failure aborts the loop; a backup controller takes over.
                print e
                print "Switching to backup"
                break
# Entry point: instantiate the PLC with the shared topology/state configuration
# defined elsewhere in the project (STATE, PLC101_PROTOCOL, GENERIC_DATA).
if __name__ == "__main__":
    plc101 = PLC101(name='plc101',state=STATE,protocol=PLC101_PROTOCOL,memory=GENERIC_DATA,disk=GENERIC_DATA)
| |
# coding=utf-8
# Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import logging
from pants.contrib.node.subsystems.command import command_gen
LOG = logging.getLogger(__name__)
PACKAGE_MANAGER_NPM = 'npm'
PACKAGE_MANAGER_YARNPKG = 'yarnpkg'
PACKAGE_MANAGER_YARNPKG_ALIAS = 'yarn'
VALID_PACKAGE_MANAGERS = [PACKAGE_MANAGER_NPM, PACKAGE_MANAGER_YARNPKG, PACKAGE_MANAGER_YARNPKG_ALIAS]
# TODO: Change to enum type when migrated to Python 3.4+
class PackageInstallationTypeOption(object):
  """Enumerates how an added node package is recorded in package.json."""
  PROD = 'prod'  # regular (production) dependency
  DEV = 'dev'  # devDependencies
  PEER = 'peer'  # peerDependencies
  BUNDLE = 'bundle'  # bundledDependencies
  OPTIONAL = 'optional'  # optionalDependencies
  NO_SAVE = 'not saved'  # install without recording in package.json
class PackageInstallationVersionOption(object):
  """Enumerates how the version of an added package is pinned."""
  EXACT = 'exact'  # pin to the exact version
  TILDE = 'tilde'  # allow patch-level updates (~x.y.z)
class PackageManager(object):
  """Defines node package manager functionalities.

  Concrete subclasses (npm, yarn) translate the generic operations below
  into the flavour-specific command-line arguments.
  """
  def __init__(self, name, tool_installations):
    # name: package manager executable name; tool_installations: callables
    # used by command_gen to locate/install the tool.
    self.name = name
    self.tool_installations = tool_installations
  def _get_installation_args(self, install_optional, production_only, force, frozen_lockfile):
    """Returns command line args for installing package.
    :param install_optional: True to request install optional dependencies.
    :param production_only: True to only install production dependencies, i.e.
      ignore devDependencies.
    :param force: True to force re-download dependencies.
    :param frozen_lockfile: True to disallow automatic update of lock files.
    :rtype: list of strings
    """
    raise NotImplementedError
  def _get_run_script_args(self):
    """Returns command line args to run a package.json script.
    :rtype: list of strings
    """
    raise NotImplementedError
  def _get_add_package_args(self, package, type_option, version_option):
    """Returns command line args to add a node package.
    :rtype: list of strings
    """
    raise NotImplementedError()
  def run_command(self, args=None, node_paths=None):
    """Returns a command that when executed will run an arbitrary command via package manager.
    :param args: package-manager arguments, e.g. ['install'].
    :param node_paths: A list of paths to include in $PATH when running.
    """
    return command_gen(
      self.tool_installations,
      self.name,
      args=args,
      node_paths=node_paths
    )
  def install_module(
    self,
    install_optional=False,
    production_only=False,
    force=False,
    frozen_lockfile=True,
    node_paths=None):
    """Returns a command that when executed will install node package.
    :param install_optional: True to install optional dependencies.
    :param production_only: True to only install production dependencies, i.e.
      ignore devDependencies.
    :param force: True to force re-download dependencies.
    :param frozen_lockfile: True to disallow automatic update of lock files.
    :param node_paths: A list of path that should be included in $PATH when
      running installation.
    """
    args=self._get_installation_args(
      install_optional=install_optional,
      production_only=production_only,
      force=force,
      frozen_lockfile=frozen_lockfile)
    return self.run_command(args=args, node_paths=node_paths)
  def run_script(self, script_name, script_args=None, node_paths=None):
    """Returns a command to execute a package.json script.
    :param script_name: Name of the script to run. Note that script name 'test'
      can be used to run node tests.
    :param script_args: Args to be passed to package.json script.
    :param node_paths: A list of path that should be included in $PATH when
      running the script.
    """
    # TODO: consider add a pants.util function to manipulate command line.
    package_manager_args = self._get_run_script_args()
    package_manager_args.append(script_name)
    if script_args:
      # '--' separates manager options from args forwarded to the script.
      package_manager_args.append('--')
      package_manager_args.extend(script_args)
    return self.run_command(args=package_manager_args, node_paths=node_paths)
  def add_package(
    self,
    package,
    node_paths=None,
    type_option=PackageInstallationTypeOption.PROD,
    version_option=None):
    """Returns a command that when executed will add a node package to current node module.
    :param package: string. A valid npm/yarn package description. The accepted forms are
      package-name, package-name@version, package-name@tag, file:/folder, file:/path/to.tgz
      https://url/to.tgz
    :param node_paths: A list of path that should be included in $PATH when
      running the script.
    :param type_option: A value from PackageInstallationTypeOption that indicates the type
      of package to be installed. Default to 'prod', which is a production dependency.
    :param version_option: A value from PackageInstallationVersionOption that indicates how
      to match version. Default to None, which uses package manager default.
    """
    args=self._get_add_package_args(
      package,
      type_option=type_option,
      version_option=version_option)
    return self.run_command(args=args, node_paths=node_paths)
  def run_cli(self, cli, args=None, node_paths=None):
    """Returns a command that when executed will run an installed cli via package manager."""
    cli_args = [cli]
    if args:
      # '--' separates manager options from args forwarded to the cli.
      cli_args.append('--')
      cli_args.extend(args)
    return self.run_command(args=cli_args, node_paths=node_paths)
class PackageManagerYarnpkg(PackageManager):
  """Yarn-flavoured implementation of the PackageManager interface."""
  def __init__(self, tool_installation):
    super(PackageManagerYarnpkg, self).__init__(PACKAGE_MANAGER_YARNPKG, tool_installation)
  def _get_run_script_args(self):
    """Yarn runs package.json scripts via `yarn run <script>`."""
    return ['run']
  def _get_installation_args(self, install_optional, production_only, force, frozen_lockfile):
    """Translate the generic install flags into yarn command-line options."""
    args = ['--non-interactive']
    if not install_optional:
      args.append('--ignore-optional')
    if production_only:
      args.append('--production=true')
    if force:
      args.append('--force')
    if frozen_lockfile:
      args.append('--frozen-lockfile')
    return args
  def _get_add_package_args(self, package, type_option, version_option):
    """Build `yarn add` arguments honouring the type/version options."""
    args = ['add', package]
    # Yarn flag per installation type: '' means "yarn's default behaviour",
    # None means the type is unsupported by yarn.
    type_flags = {
      PackageInstallationTypeOption.PROD: '',  # Yarn save production is the default.
      PackageInstallationTypeOption.DEV: '--dev',
      PackageInstallationTypeOption.PEER: '--peer',
      PackageInstallationTypeOption.OPTIONAL: '--optional',
      PackageInstallationTypeOption.BUNDLE: None,
      PackageInstallationTypeOption.NO_SAVE: None,
    }
    type_flag = type_flags.get(type_option)
    if type_flag is None:
      LOG.warning('{} does not support {} packages, ignored.'.format(self.name, type_option))
    elif type_flag:  # '' entries add no flag.
      args.append(type_flag)
    version_flags = {
      PackageInstallationVersionOption.EXACT: '--exact',
      PackageInstallationVersionOption.TILDE: '--tilde',
    }
    version_flag = version_flags.get(version_option)
    if version_flag is None:
      LOG.warning(
        '{} does not support install with {} version, ignored'.format(self.name, version_option))
    elif version_flag:  # '' entries add no flag.
      args.append(version_flag)
    return args
class PackageManagerNpm(PackageManager):
  """npm-flavoured implementation of the PackageManager interface."""
  def __init__(self, tool_installation):
    super(PackageManagerNpm, self).__init__(PACKAGE_MANAGER_NPM, tool_installation)
  def _get_run_script_args(self):
    """npm runs package.json scripts via `npm run-script <script>`."""
    return ['run-script']
  def _get_installation_args(self, install_optional, production_only, force, frozen_lockfile):
    """Translate the generic install flags into npm command-line options."""
    args = ['install']
    if not install_optional:
      args.append('--no-optional')
    if production_only:
      args.append('--production')
    if force:
      args.append('--force')
    if frozen_lockfile:
      # npm has no analogue of yarn's --frozen-lockfile; warn and continue.
      LOG.warning('{} does not support frozen lockfile option. Ignored.'.format(self.name))
    return args
  def _get_add_package_args(self, package, type_option, version_option):
    """Build `npm install <package>` arguments honouring the type/version options."""
    args = ['install', package]
    # npm save flag per installation type; None means unsupported by npm.
    type_flags = {
      PackageInstallationTypeOption.PROD: '--save-prod',
      PackageInstallationTypeOption.DEV: '--save-dev',
      PackageInstallationTypeOption.PEER: None,
      PackageInstallationTypeOption.OPTIONAL: '--save-optional',
      PackageInstallationTypeOption.BUNDLE: '--save-bundle',
      PackageInstallationTypeOption.NO_SAVE: '--no-save',
    }
    type_flag = type_flags.get(type_option)
    if type_flag is None:
      LOG.warning('{} does not support {} packages, ignored.'.format(self.name, type_option))
    elif type_flag:  # '' entries add no flag.
      args.append(type_flag)
    version_flags = {
      PackageInstallationVersionOption.EXACT: '--save-exact',
      PackageInstallationVersionOption.TILDE: None,
    }
    version_flag = version_flags.get(version_option)
    if version_flag is None:
      LOG.warning(
        '{} does not support install with {} version, ignored.'.format(self.name, version_option))
    elif version_flag:  # '' entries add no flag.
      args.append(version_flag)
    return args
  def run_cli(self, cli, args=None, node_paths=None):
    """npm cannot run an arbitrary installed cli directly; always raises."""
    raise RuntimeError('npm does not support run cli directly. Please use Yarn instead.')
| |
#!/usr/bin/env python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example adds a Dynamic Search Ads campaign.
To get campaigns, run get_campaigns.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
import datetime
import uuid
from googleads import adwords
def main(client):
  """Drives the end-to-end DSA example: budget -> campaign -> ad group ->
  expanded DSA -> webpage criterion.

  Args:
    client: an AdWordsClient instance.
  """
  budget = _CreateBudget(client)
  campaign_id = _CreateCampaign(client, budget)
  ad_group_id = _CreateAdGroup(client, campaign_id)
  _CreateExpandedDSA(client, ad_group_id)
  _AddWebPageCriteria(client, ad_group_id)
  print 'Dynamic Search Ads campaign setup is complete.'
def _CreateBudget(client):
  """Creates the budget.
  Args:
    client: an AdWordsClient instance.
  Returns:
    a suds.sudsobject.Object representation of the created budget.
  """
  budget_service = client.GetService('BudgetService', version='v201806')
  # Create the campaign budget
  operation = {
      'operand': {
          # %d formats the UUID via its integer value, giving a unique name.
          'name': 'Interplanetary Cruise Budget #%d' % uuid.uuid4(),
          'deliveryMethod': 'STANDARD',
          # Amount is expressed in micros of the account currency.
          'amount': {
              'microAmount': 500000
          }
      },
      'operator': 'ADD'
  }
  budget = budget_service.mutate([operation])['value'][0]
  print 'Budget with ID "%d" and name "%s" was created.' % (
      budget['budgetId'], budget['name'])
  return budget
def _CreateCampaign(client, budget):
  """Creates the campaign.
  Args:
    client: an AdWordsClient instance.
    budget: a suds.sudsobject.Object representation of a created budget.
  Returns:
    An integer campaign ID.
  """
  # NOTE(review): no API version is pinned here, unlike BudgetService
  # (v201806) above -- the client-library default is used; confirm intended.
  campaign_service = client.GetService('CampaignService')
  operations = [{
      'operator': 'ADD',
      'operand': {
          'name': 'Interplanetary Cruise #%d' % uuid.uuid4(),
          # Recommendation: Set the campaign to PAUSED when creating it to
          # prevent the ads from immediately serving. Set to ENABLED once you've
          # added targeting and the ads are ready to serve.
          'status': 'PAUSED',
          'advertisingChannelType': 'SEARCH',
          'biddingStrategyConfiguration': {
              'biddingStrategyType': 'MANUAL_CPC',
          },
          'budget': budget,
          # Required: Set the campaign's Dynamic Search Ad settings.
          'settings': [{
              'xsi_type': 'DynamicSearchAdsSetting',
              # Required: Set the domain name and language.
              'domainName': 'example.com',
              'languageCode': 'en'
          }],
          # Optional: Set the start date.
          'startDate': (datetime.datetime.now() +
                        datetime.timedelta(1)).strftime('%Y%m%d'),
          # Optional: Set the end date.
          'endDate': (datetime.datetime.now() +
                      datetime.timedelta(365)).strftime('%Y%m%d'),
      }
  }]
  campaign = campaign_service.mutate(operations)['value'][0]
  campaign_id = campaign['id']
  print 'Campaign with ID "%d" and name "%s" was added.' % (
      campaign_id, campaign['name'])
  return campaign_id
def _CreateAdGroup(client, campaign_id):
  """Creates an ad group.
  Args:
    client: an AdWordsClient instance.
    campaign_id: an integer campaign ID.
  Returns:
    An integer ad group ID.
  """
  ad_group_service = client.GetService('AdGroupService')
  operations = [{
      'operator': 'ADD',
      'operand': {
          'campaignId': campaign_id,
          # Required for DSA: the ad group must be of the dynamic-ads type.
          'adGroupType': 'SEARCH_DYNAMIC_ADS',
          'name': 'Earth to Mars Cruises #%d' % uuid.uuid4(),
          'status': 'PAUSED',
          'biddingStrategyConfiguration': {
              'bids': [{
                  # CPC bid in micros (string accepted by the API).
                  'xsi_type': 'CpcBid',
                  'bid': {
                      'microAmount': '3000000'
                  },
              }]
          }
      }
  }]
  ad_group = ad_group_service.mutate(operations)['value'][0]
  ad_group_id = ad_group['id']
  print 'Ad group with ID "%d" and name "%s" was created.' % (
      ad_group_id, ad_group['name'])
  return ad_group_id
def _CreateExpandedDSA(client, ad_group_id):
  """Creates the expanded Dynamic Search Ad.
  Args:
    client: an AdwordsClient instance.
    ad_group_id: an integer ID of the ad group in which the DSA is added.
  """
  # Get the AdGroupAdService.
  ad_group_ad_service = client.GetService('AdGroupAdService')
  # Create the operation
  operations = [{
      'operator': 'ADD',
      'operand': {
          'xsi_type': 'AdGroupAd',
          'adGroupId': ad_group_id,
          # Create the expanded dynamic search ad. This ad will have its
          # headline and final URL auto-generated at serving time according to
          # domain name specific information provided by DynamicSearchAdsSetting
          # at the campaign level.
          'ad': {
              'xsi_type': 'ExpandedDynamicSearchAd',
              # Set the ad description.
              'description': 'Buy your tickets now!'
          },
          # Optional: Set the status.
          'status': 'PAUSED',
      }
  }]
  # Create the ad.
  ad = ad_group_ad_service.mutate(operations)['value'][0]['ad']
  # Display the results.
  print ('Expanded dynamic search ad with ID "%d" and description "%s" was '
         'added' % (ad['id'], ad['description']))
def _AddWebPageCriteria(client, ad_group_id):
  """Adds a web page criterion to target Dynamic Search Ads.
  Args:
    client: an AdWordsClient instance.
    ad_group_id: an integer ID of the ad group the criteria is being added to.
  """
  ad_group_criterion_service = client.GetService('AdGroupCriterionService',
                                                 version='v201806')
  operations = [{
      'operator': 'ADD',
      # Create biddable ad group criterion.
      'operand': {
          'xsi_type': 'BiddableAdGroupCriterion',
          'adGroupId': ad_group_id,
          # Create a webpage criterion for special offers for children.
          'criterion': {
              'xsi_type': 'Webpage',
              'parameter': {
                  'criterionName': 'Special offers for children.',
                  # Conditions are AND-ed: pages must match URL and title.
                  'conditions': [
                      {
                          'operand': 'URL',
                          'argument': '/marscruise/children'
                      },
                      {
                          'operand': 'PAGE_TITLE',
                          'argument': 'Special Offer'
                      }
                  ]
              }
          },
          'userStatus': 'PAUSED',
          # Optional: set a custom bid.
          'biddingStrategyConfiguration': {
              'bids': [{
                  'xsi_type': 'CpcBid',
                  'bid': {
                      # Python 2 long literal; bid is 10,000,000 micros.
                      'microAmount': 10000000L
                  }
              }]
          }
      }
  }]
  criterion = ad_group_criterion_service.mutate(operations)['value'][0]
  print 'Webpage criterion with ID "%d" was added to ad group ID "%d".' % (
      criterion['criterion']['id'], criterion['adGroupId'])
if __name__ == '__main__':
  # Initialize client object.
  # Credentials/properties are read from ~/googleads.yaml (see module docstring).
  adwords_client = adwords.AdWordsClient.LoadFromStorage()
  main(adwords_client)
| |
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import json
import numbers
import time
from monascaclient.common import utils
import monascaclient.exc as exc
from monascaclient.openstack.common import jsonutils
# Alarm valid types
severity_types = ['LOW', 'MEDIUM', 'HIGH', 'CRITICAL']
state_types = ['UNDETERMINED', 'ALARM', 'OK']
enabled_types = ['True', 'true', 'False', 'false']
# Notification valid types
notification_types = ['EMAIL', 'WEBHOOK', 'PAGERDUTY']
@utils.arg('name', metavar='<METRIC_NAME>',
           help='Name of the metric to create.')
@utils.arg('--dimensions', metavar='<KEY1=VALUE1,KEY2=VALUE2...>',
           help='key value pair used to create a metric dimension. '
           'This can be specified multiple times, or once with parameters '
           'separated by a comma. '
           'Dimensions need quoting when they contain special chars [&,(,),{,},>,<] '
           'that confuse the CLI parser.',
           action='append')
@utils.arg('--value-meta', metavar='<KEY1=VALUE1,KEY2=VALUE2...>',
           help='key value pair for extra information about a value. '
           'This can be specified multiple times, or once with parameters '
           'separated by a comma. '
           'value_meta need quoting when they contain special chars [&,(,),{,},>,<] '
           'that confuse the CLI parser.',
           action='append')
# NOTE(review): this default is evaluated once, at module import time, so a
# long-running process would reuse a stale timestamp; also `type=int` is only
# applied to user-supplied strings, so the default stays a float.
@utils.arg('--time', metavar='<UNIX_TIMESTAMP>',
           default=time.time() * 1000, type=int,
           help='Metric timestamp in milliseconds. Default: current timestamp.')
@utils.arg('--project-id', metavar='<CROSS_PROJECT_ID>',
           help='The Project ID to create metric on behalf of. '
           'Requires monitoring-delegate role in keystone.',
           action='append')
@utils.arg('value', metavar='<METRIC_VALUE>',
           type=float,
           help='Metric value.')
def do_metric_create(mc, args):
    '''Create metric.'''
    # Assemble the POST body; optional CLI arguments are added only when set.
    fields = {}
    fields['name'] = args.name
    if args.dimensions:
        fields['dimensions'] = utils.format_parameters(args.dimensions)
    fields['timestamp'] = args.time
    fields['value'] = args.value
    if args.value_meta:
        fields['value_meta'] = utils.format_parameters(args.value_meta)
    if args.project_id:
        # Cross-project submission: the API expects the key 'tenant_id'.
        fields['tenant_id'] = args.project_id
    try:
        mc.metrics.create(**fields)
    except exc.HTTPException as he:
        # Re-raise as a CLI-friendly error carrying the HTTP status/message.
        raise exc.CommandError(
            'HTTPException code=%s message=%s' %
            (he.code, he.message))
    else:
        print('Successfully created metric')
@utils.arg('jsonbody', metavar='<JSON_BODY>',
           type=json.loads,
           help='The raw JSON body in single quotes. See api doc.')
def do_metric_create_raw(mc, args):
    '''Create metric from raw json body.'''
    # argparse has already parsed the body via json.loads (type= above).
    fields = {'jsonbody': args.jsonbody}
    try:
        mc.metrics.create(**fields)
    except exc.HTTPException as he:
        # Surface the HTTP failure as a CLI error.
        raise exc.CommandError(
            'HTTPException code=%s message=%s' % (he.code, he.message))
    else:
        print('Successfully created metric')
@utils.arg('--name', metavar='<METRIC_NAME>',
           help='Name of the metric to list.')
@utils.arg('--dimensions', metavar='<KEY1=VALUE1,KEY2=VALUE2...>',
           help='key value pair used to specify a metric dimension. '
           'This can be specified multiple times, or once with parameters '
           'separated by a comma. '
           'Dimensions need quoting when they contain special chars [&,(,),{,},>,<] '
           'that confuse the CLI parser.',
           action='append')
@utils.arg('--offset', metavar='<OFFSET LOCATION>',
           help='The offset used to paginate the return data.')
@utils.arg('--limit', metavar='<RETURN LIMIT>',
           help='The amount of data to be returned up to the API maximum limit.')
def do_metric_list(mc, args):
    '''List metrics for this tenant.'''
    # Only send the filters the user actually supplied.
    fields = {}
    if args.name:
        fields['name'] = args.name
    if args.dimensions:
        fields['dimensions'] = utils.format_parameters(args.dimensions)
    if args.limit:
        fields['limit'] = args.limit
    if args.offset:
        fields['offset'] = args.offset
    try:
        metric = mc.metrics.list(**fields)
    except exc.HTTPException as he:
        raise exc.CommandError(
            'HTTPException code=%s message=%s' %
            (he.code, he.message))
    else:
        # --json short-circuits the tabular rendering below.
        if args.json:
            print(utils.json_formatter(metric))
            return
        cols = ['name', 'dimensions']
        formatters = {
            'name': lambda x: x['name'],
            'dimensions': lambda x: utils.format_dict(x['dimensions']),
        }
        if isinstance(metric, list):
            # print the list
            utils.print_list(metric, cols, formatters=formatters)
        else:
            # add the dictionary to a list, so print_list works
            metric_list = list()
            metric_list.append(metric)
            utils.print_list(
                metric_list,
                cols,
                formatters=formatters)
def format_measure_timestamp(measurements):
    """Return the measurement timestamps as one newline-separated column."""
    lines = []
    for measurement in measurements:
        # Element 0 of each measurement row is its timestamp.
        lines.append(str(measurement[0]))
    return '\n'.join(lines)
def format_measure_value(measurements):
    """Return the measurement values, right-aligned to 12 chars with two
    decimals, one per line."""
    formatted = ('{:12.2f}'.format(measurement[1]) for measurement in measurements)
    return '\n'.join(formatted)
def format_value_meta(measurements):
    """Return each measurement's value_meta rendered as comma-joined
    'key: value' pairs, one line per measurement (blank when absent)."""
    rows = []
    for measurement in measurements:
        # Measurements shorter than 3 elements carry no value_meta.
        if len(measurement) < 3:
            rows.append("")
            continue
        pairs = []
        for key, value in measurement[2].items():
            if isinstance(value, numbers.Number):
                pairs.append(key + ': ' + str(value))
            else:
                pairs.append(key + ': ' + value)
        rows.append(','.join(pairs))
    return '\n'.join(rows)
def format_statistic_timestamp(statistics, columns, name):
    """Return the *name* column of *statistics* as a newline-separated string.

    *columns* gives the column ordering of each statistics row.
    """
    if statistics:
        idx = columns.index(name)
    else:
        # No rows: the index is never used, keep a defined value anyway.
        idx = 0
    return '\n'.join(str(row[idx]) for row in statistics)
def format_statistic_value(statistics, columns, stat_type):
    """Return the *stat_type* column of *statistics*, each value right-aligned
    to 12 chars with two decimals, one per line."""
    if not statistics:
        return ''
    idx = columns.index(stat_type)
    return '\n'.join('{:12.2f}'.format(row[idx]) for row in statistics)
def format_metric_name(metrics):
    """Return metric names, one per line, padded with single-space lines so
    each name lines up with its first dimension in the adjacent column."""
    lines = []
    for metric in metrics:
        lines.append(metric['name'])
        # One spacer line per dimension beyond the first keeps rows aligned.
        spacer_count = max(len(metric['dimensions']) - 1, 0)
        lines.extend([" "] * spacer_count)
    return '\n'.join(lines)
def format_metric_dimensions(metrics):
    """Return every metric's dimensions as 'key: value' lines, one line per
    dimension, concatenated across all metrics."""
    lines = []
    for metric in metrics:
        for key, value in metric['dimensions'].items():
            # Numeric values need an explicit str() before concatenation.
            if isinstance(value, numbers.Number):
                lines.append(key + ': ' + str(value))
            else:
                lines.append(key + ': ' + value)
    return '\n'.join(lines)
@utils.arg('name', metavar='<METRIC_NAME>',
           help='Name of the metric to list measurements.')
@utils.arg('--dimensions', metavar='<KEY1=VALUE1,KEY2=VALUE2...>',
           help='key value pair used to specify a metric dimension. '
           'This can be specified multiple times, or once with parameters '
           'separated by a comma. '
           'Dimensions need quoting when they contain special chars [&,(,),{,},>,<] '
           'that confuse the CLI parser.',
           action='append')
@utils.arg('starttime', metavar='<UTC_START_TIME>',
           help='measurements >= UTC time. format: 2014-01-01T00:00:00Z. OR Format: -120 (previous 120 minutes)')
@utils.arg('--endtime', metavar='<UTC_END_TIME>',
           help='measurements <= UTC time. format: 2014-01-01T00:00:00Z.')
@utils.arg('--offset', metavar='<OFFSET LOCATION>',
           help='The offset used to paginate the return data.')
@utils.arg('--limit', metavar='<RETURN LIMIT>',
           help='The amount of data to be returned up to the API maximum limit.')
@utils.arg('--merge_metrics', action='store_const',
           const=True,
           help='Merge multiple metrics into a single result.')
def do_measurement_list(mc, args):
    '''List measurements for the specified metric.'''
    fields = {}
    fields['name'] = args.name
    if args.dimensions:
        fields['dimensions'] = utils.format_parameters(args.dimensions)
    # A leading '-' means "N minutes ago": convert the relative offset into
    # the absolute ISO-8601 UTC form the API expects (YYYY-MM-DDTHH:MM:SSZ).
    if args.starttime[0] == '-':
        deltaT = time.time() + (int(args.starttime) * 60)
        # strftime replaces the old str(...)[:-7] microsecond-stripping
        # slice, which truncated the wrong characters whenever the timestamp
        # happened to fall on a whole second (no '.ffffff' suffix to strip).
        args.starttime = datetime.datetime.utcfromtimestamp(
            deltaT).strftime('%Y-%m-%dT%H:%M:%S') + 'Z'
    fields['start_time'] = args.starttime
    if args.endtime:
        fields['end_time'] = args.endtime
    if args.limit:
        fields['limit'] = args.limit
    if args.offset:
        fields['offset'] = args.offset
    if args.merge_metrics:
        fields['merge_metrics'] = args.merge_metrics
    try:
        metric = mc.metrics.list_measurements(**fields)
    except exc.HTTPException as he:
        # Surface the HTTP failure as a CLI error.
        raise exc.CommandError(
            'HTTPException code=%s message=%s' %
            (he.code, he.message))
    else:
        if args.json:
            print(utils.json_formatter(metric))
            return
        # Each column is rendered as a newline-separated block so one table
        # row can carry a whole series of measurements.
        cols = ['name', 'dimensions', 'timestamp', 'value', 'value_meta']
        formatters = {
            'name': lambda x: x['name'],
            'dimensions': lambda x: utils.format_dict(x['dimensions']),
            'timestamp': lambda x: format_measure_timestamp(x['measurements']),
            'value': lambda x: format_measure_value(x['measurements']),
            'value_meta': lambda x: format_value_meta(x['measurements']),
        }
        if isinstance(metric, list):
            # print the list
            utils.print_list(metric, cols, formatters=formatters)
        else:
            # add the dictionary to a list, so print_list works
            metric_list = list()
            metric_list.append(metric)
            utils.print_list(
                metric_list,
                cols,
                formatters=formatters)
@utils.arg('name', metavar='<METRIC_NAME>',
           help='Name of the metric to report measurement statistics.')
@utils.arg('statistics', metavar='<STATISTICS>',
           help='Statistics is one or more (separated by commas) of '
           '[AVG, MIN, MAX, COUNT, SUM].')
@utils.arg('--dimensions', metavar='<KEY1=VALUE1,KEY2=VALUE2...>',
           help='key value pair used to specify a metric dimension. '
           'This can be specified multiple times, or once with parameters '
           'separated by a comma. '
           'Dimensions need quoting when they contain special chars [&,(,),{,},>,<] '
           'that confuse the CLI parser.',
           action='append')
@utils.arg('starttime', metavar='<UTC_START_TIME>',
           help='measurements >= UTC time. format: 2014-01-01T00:00:00Z. OR Format: -120 (previous 120 minutes)')
@utils.arg('--endtime', metavar='<UTC_END_TIME>',
           help='measurements <= UTC time. format: 2014-01-01T00:00:00Z.')
@utils.arg('--period', metavar='<PERIOD>',
           help='number of seconds per interval (default is 300)')
@utils.arg('--offset', metavar='<OFFSET LOCATION>',
           help='The offset used to paginate the return data.')
@utils.arg('--limit', metavar='<RETURN LIMIT>',
           help='The amount of data to be returned up to the API maximum limit.')
@utils.arg('--merge_metrics', action='store_const',
           const=True,
           help='Merge multiple metrics into a single result.')
def do_metric_statistics(mc, args):
    '''List measurement statistics for the specified metric.'''
    # Validate the requested statistics client-side before hitting the API.
    statistic_types = ['AVG', 'MIN', 'MAX', 'COUNT', 'SUM']
    statlist = args.statistics.split(',')
    for stat in statlist:
        if stat.upper() not in statistic_types:
            errmsg = 'Invalid type, not one of [' + \
                ', '.join(statistic_types) + ']'
            print(errmsg)
            return
    fields = {}
    fields['name'] = args.name
    if args.dimensions:
        fields['dimensions'] = utils.format_parameters(args.dimensions)
    # A leading '-' means "N minutes ago": convert the relative offset into
    # the absolute ISO-8601 UTC form the API expects (YYYY-MM-DDTHH:MM:SSZ).
    if args.starttime[0] == '-':
        deltaT = time.time() + (int(args.starttime) * 60)
        # strftime replaces the old str(...)[:-7] microsecond-stripping
        # slice, which truncated the wrong characters whenever the timestamp
        # happened to fall on a whole second (no '.ffffff' suffix to strip).
        args.starttime = datetime.datetime.utcfromtimestamp(
            deltaT).strftime('%Y-%m-%dT%H:%M:%S') + 'Z'
    fields['start_time'] = args.starttime
    if args.endtime:
        fields['end_time'] = args.endtime
    if args.period:
        fields['period'] = args.period
    fields['statistics'] = args.statistics
    if args.limit:
        fields['limit'] = args.limit
    if args.offset:
        fields['offset'] = args.offset
    if args.merge_metrics:
        fields['merge_metrics'] = args.merge_metrics
    try:
        metric = mc.metrics.list_statistics(**fields)
    except exc.HTTPException as he:
        # Surface the HTTP failure as a CLI error.
        raise exc.CommandError(
            'HTTPException code=%s message=%s' %
            (he.code, he.message))
    else:
        if args.json:
            print(utils.json_formatter(metric))
            return
        cols = ['name', 'dimensions']
        # add dynamic column names from the API response
        if metric:
            column_names = metric[0]['columns']
            for name in column_names:
                cols.append(name)
        else:
            # when empty set, print_list needs a col
            cols.append('timestamp')
        formatters = {
            'name': lambda x: x['name'],
            'dimensions': lambda x: utils.format_dict(x['dimensions']),
            'timestamp': lambda x:
            format_statistic_timestamp(x['statistics'], x['columns'],
                                       'timestamp'),
            'avg': lambda x:
            format_statistic_value(x['statistics'], x['columns'], 'avg'),
            'min': lambda x:
            format_statistic_value(x['statistics'], x['columns'], 'min'),
            'max': lambda x:
            format_statistic_value(x['statistics'], x['columns'], 'max'),
            'count': lambda x:
            format_statistic_value(x['statistics'], x['columns'], 'count'),
            'sum': lambda x:
            format_statistic_value(x['statistics'], x['columns'], 'sum'),
        }
        if isinstance(metric, list):
            # print the list
            utils.print_list(metric, cols, formatters=formatters)
        else:
            # add the dictionary to a list, so print_list works
            metric_list = list()
            metric_list.append(metric)
            utils.print_list(
                metric_list,
                cols,
                formatters=formatters)
@utils.arg('name', metavar='<NOTIFICATION_NAME>',
           help='Name of the notification to create.')
@utils.arg('type', metavar='<TYPE>',
           help='The notification type. Type must be EMAIL, WEBHOOK, or PAGERDUTY.')
@utils.arg('address', metavar='<ADDRESS>',
           help='A valid EMAIL Address, URL, or SERVICE KEY')
def do_notification_create(mc, args):
    '''Create notification.'''
    # Validate the notification type client-side before calling the API.
    if args.type.upper() not in notification_types:
        print('Invalid type, not one of [' + ', '.join(notification_types) + ']')
        return
    fields = {
        'name': args.name,
        'type': args.type,
        'address': args.address,
    }
    try:
        notification = mc.notifications.create(**fields)
    except exc.HTTPException as he:
        # Surface the HTTP failure as a CLI error.
        raise exc.CommandError(
            'HTTPException code=%s message=%s' % (he.code, he.message))
    else:
        # Echo the created notification back as pretty-printed JSON.
        print(jsonutils.dumps(notification, indent=2))
@utils.arg('id', metavar='<NOTIFICATION_ID>',
           help='The ID of the notification.')
def do_notification_show(mc, args):
    '''Describe the notification.'''
    # NOTE: 'id' is a required positional argument, so the previous help
    # text ("If not specified returns all.") was misleading and is removed.
    fields = {}
    fields['notification_id'] = args.id
    try:
        notification = mc.notifications.get(**fields)
    except exc.HTTPException as he:
        # Surface API failures as CLI command errors.
        raise exc.CommandError(
            'HTTPException code=%s message=%s' %
            (he.code, he.message))
    else:
        if args.json:
            # Raw JSON output requested; skip table formatting.
            print(utils.json_formatter(notification))
            return
        formatters = {
            'name': utils.json_formatter,
            'id': utils.json_formatter,
            'type': utils.json_formatter,
            'address': utils.json_formatter,
            'links': utils.format_dictlist,
        }
        utils.print_dict(notification, formatters=formatters)
@utils.arg('--offset', metavar='<OFFSET LOCATION>',
           help='The offset used to paginate the return data.')
@utils.arg('--limit', metavar='<RETURN LIMIT>',
           help='The amount of data to be returned up to the API maximum limit.')
def do_notification_list(mc, args):
    '''List notifications for this tenant.'''
    # Only pass pagination parameters the user actually supplied.
    fields = {}
    if args.limit:
        fields['limit'] = args.limit
    if args.offset:
        fields['offset'] = args.offset
    try:
        notification = mc.notifications.list(**fields)
    except exc.HTTPException as he:
        raise exc.CommandError(
            'HTTPException code=%s message=%s' %
            (he.code, he.message))
    else:
        if args.json:
            print(utils.json_formatter(notification))
            return
        cols = ['name', 'id', 'type', 'address']
        # One column formatter per field; bind the column name as a
        # default argument so each lambda keeps its own key.
        formatters = {col: (lambda x, key=col: x[key]) for col in cols}
        # print_list needs a list; wrap a single dict result.
        rows = notification if isinstance(notification, list) else [notification]
        utils.print_list(rows, cols, formatters=formatters)
@utils.arg('id', metavar='<NOTIFICATION_ID>',
           help='The ID of the notification.')
def do_notification_delete(mc, args):
    '''Delete notification.'''
    try:
        mc.notifications.delete(notification_id=args.id)
    except exc.HTTPException as he:
        raise exc.CommandError(
            'HTTPException code=%s message=%s' %
            (he.code, he.message))
    # Only reached when the delete call did not raise.
    print('Successfully deleted notification')
@utils.arg('id', metavar='<NOTIFICATION_ID>',
           help='The ID of the notification.')
@utils.arg('name', metavar='<NOTIFICATION_NAME>',
           help='Name of the notification.')
@utils.arg('type', metavar='<TYPE>',
           help='The notification type. Type must be either EMAIL, WEBHOOK, or PAGERDUTY.')
@utils.arg('address', metavar='<ADDRESS>',
           help='A valid EMAIL Address, URL, or SERVICE KEY')
def do_notification_update(mc, args):
    '''Update notification.'''
    fields = {}
    fields['notification_id'] = args.id
    fields['name'] = args.name
    if args.type.upper() not in notification_types:
        # BUG FIX: the error message previously joined state_types (alarm
        # states) instead of the valid notification types, matching the
        # check in do_notification_create above.
        errmsg = 'Invalid type, not one of [' + \
            ', '.join(notification_types) + ']'
        print(errmsg)
        return
    fields['type'] = args.type
    fields['address'] = args.address
    try:
        notification = mc.notifications.update(**fields)
    except exc.HTTPException as he:
        raise exc.CommandError(
            'HTTPException code=%s message=%s' %
            (he.code, he.message))
    else:
        print(jsonutils.dumps(notification, indent=2))
@utils.arg('name', metavar='<ALARM_DEFINITION_NAME>',
           help='Name of the alarm definition to create.')
@utils.arg('--description', metavar='<DESCRIPTION>',
           help='Description of the alarm.')
@utils.arg('expression', metavar='<EXPRESSION>',
           help='The alarm expression to evaluate. Quoted.')
@utils.arg('--severity', metavar='<SEVERITY>',
           help='Severity is one of [LOW, MEDIUM, HIGH, CRITICAL].')
@utils.arg('--match-by', metavar='<DIMENSION_KEY1,DIMENSION_KEY2,...>',
           help='The metric dimensions to match to the alarm dimensions. '
           'One or more dimension key names separated by a comma. '
           'Key names need quoting when they contain special chars [&,(,),{,},>,<] '
           'that confuse the CLI parser.')
@utils.arg('--alarm-actions', metavar='<NOTIFICATION-ID>',
           help='The notification method to use when an alarm state is ALARM. '
           'This param may be specified multiple times.',
           action='append')
@utils.arg('--ok-actions', metavar='<NOTIFICATION-ID>',
           help='The notification method to use when an alarm state is OK. '
           'This param may be specified multiple times.',
           action='append')
@utils.arg('--undetermined-actions', metavar='<NOTIFICATION-ID>',
           help='The notification method to use when an alarm state is '
           'UNDETERMINED. This param may be specified multiple times.',
           action='append')
def do_alarm_definition_create(mc, args):
    '''Create an alarm definition.'''
    # Validate the optional severity before doing anything else; the
    # original printed the same message and returned without calling
    # the API.
    if args.severity and args.severity.upper() not in severity_types:
        print('Invalid severity, not one of [' +
              ', '.join(severity_types) + ']')
        return
    fields = {'name': args.name, 'expression': args.expression}
    if args.description:
        fields['description'] = args.description
    # The three action lists share identical handling.
    for attr in ('alarm_actions', 'ok_actions', 'undetermined_actions'):
        actions = getattr(args, attr)
        if actions:
            fields[attr] = actions
    if args.severity:
        fields['severity'] = args.severity
    if args.match_by:
        fields['match_by'] = args.match_by.split(',')
    try:
        alarm = mc.alarm_definitions.create(**fields)
    except exc.HTTPException as he:
        raise exc.CommandError(
            'HTTPException code=%s message=%s' %
            (he.code, he.message))
    else:
        print(jsonutils.dumps(alarm, indent=2))
@utils.arg('id', metavar='<ALARM_DEFINITION_ID>',
           help='The ID of the alarm definition.')
def do_alarm_definition_show(mc, args):
    '''Describe the alarm definition.'''
    try:
        alarm = mc.alarm_definitions.get(alarm_id=args.id)
    except exc.HTTPException as he:
        raise exc.CommandError(
            'HTTPException code=%s message=%s' %
            (he.code, he.message))
    if args.json:
        print(utils.json_formatter(alarm))
        return
    # Most fields are rendered as plain JSON; only the expression data
    # and links get specialized formatting.
    formatters = dict.fromkeys(
        ['name', 'id', 'expression', 'match_by', 'actions_enabled',
         'alarm_actions', 'ok_actions', 'severity',
         'undetermined_actions', 'description'],
        utils.json_formatter)
    formatters['expression_data'] = utils.format_expression_data
    formatters['links'] = utils.format_dictlist
    utils.print_dict(alarm, formatters=formatters)
@utils.arg('--name', metavar='<ALARM_DEFINITION_NAME>',
           help='Name of the alarm definition.')
@utils.arg('--dimensions', metavar='<KEY1=VALUE1,KEY2=VALUE2...>',
           help='key value pair used to specify a metric dimension. '
           'This can be specified multiple times, or once with parameters '
           'separated by a comma. '
           'Dimensions need quoting when they contain special chars [&,(,),{,},>,<] '
           'that confuse the CLI parser.',
           action='append')
@utils.arg('--offset', metavar='<OFFSET LOCATION>',
           help='The offset used to paginate the return data.')
@utils.arg('--limit', metavar='<RETURN LIMIT>',
           help='The amount of data to be returned up to the API maximum limit.')
def do_alarm_definition_list(mc, args):
    '''List alarm definitions for this tenant.'''
    # Build the query from only the options that were supplied.
    fields = {}
    if args.name:
        fields['name'] = args.name
    if args.dimensions:
        fields['dimensions'] = utils.format_parameters(args.dimensions)
    if args.limit:
        fields['limit'] = args.limit
    if args.offset:
        fields['offset'] = args.offset
    try:
        alarm = mc.alarm_definitions.list(**fields)
    except exc.HTTPException as he:
        raise exc.CommandError(
            'HTTPException code=%s message=%s' %
            (he.code, he.message))
    else:
        if args.json:
            print(utils.json_formatter(alarm))
            return
        cols = ['name', 'id', 'expression', 'match_by', 'actions_enabled']
        formatters = {
            'name': lambda x: x['name'],
            'id': lambda x: x['id'],
            'expression': lambda x: x['expression'],
            'match_by': lambda x: utils.format_list(x['match_by']),
            'actions_enabled': lambda x: x['actions_enabled'],
        }
        # print_list needs a list; wrap a single dict result.
        rows = alarm if isinstance(alarm, list) else [alarm]
        utils.print_list(rows, cols, formatters=formatters)
@utils.arg('id', metavar='<ALARM_DEFINITION_ID>',
           help='The ID of the alarm definition.')
def do_alarm_definition_delete(mc, args):
    '''Delete the alarm definition.'''
    try:
        mc.alarm_definitions.delete(alarm_id=args.id)
    except exc.HTTPException as he:
        raise exc.CommandError(
            'HTTPException code=%s message=%s' %
            (he.code, he.message))
    # Only reached when the delete call did not raise.
    print('Successfully deleted alarm definition')
@utils.arg('id', metavar='<ALARM_DEFINITION_ID>',
           help='The ID of the alarm definition.')
@utils.arg('name', metavar='<ALARM_DEFINITION_NAME>',
           help='Name of the alarm definition.')
@utils.arg('--description', metavar='<DESCRIPTION>',
           help='Description of the alarm.')
@utils.arg('expression', metavar='<EXPRESSION>',
           help='The alarm expression to evaluate. Quoted.')
@utils.arg('--alarm-actions', metavar='<NOTIFICATION-ID>',
           help='The notification method to use when an alarm state is ALARM. '
           'This param may be specified multiple times.',
           action='append')
@utils.arg('--ok-actions', metavar='<NOTIFICATION-ID>',
           help='The notification method to use when an alarm state is OK. '
           'This param may be specified multiple times.',
           action='append')
@utils.arg('--undetermined-actions', metavar='<NOTIFICATION-ID>',
           help='The notification method to use when an alarm state is '
           'UNDETERMINED. This param may be specified multiple times.',
           action='append')
@utils.arg('actions_enabled', metavar='<ACTIONS-ENABLED>',
           help='The actions-enabled boolean is one of [true,false]')
@utils.arg('--match-by', metavar='<DIMENSION_KEY1,DIMENSION_KEY2,...>',
           help='The metric dimensions to match to the alarm dimensions. '
           'One or more dimension key names separated by a comma. '
           'Key names need quoting when they contain special chars [&,(,),{,},>,<] '
           'that confuse the CLI parser.')
@utils.arg('--severity', metavar='<SEVERITY>',
           help='Severity is one of [LOW, MEDIUM, HIGH, CRITICAL].')
def do_alarm_definition_update(mc, args):
    '''Update the alarm definition.'''
    # Required fields: id, name and expression are positional CLI args.
    fields = {}
    fields['alarm_id'] = args.id
    fields['name'] = args.name
    if args.description:
        fields['description'] = args.description
    fields['expression'] = args.expression
    # Optional notification action lists; only sent when supplied.
    if args.alarm_actions:
        fields['alarm_actions'] = args.alarm_actions
    if args.ok_actions:
        fields['ok_actions'] = args.ok_actions
    if args.undetermined_actions:
        fields['undetermined_actions'] = args.undetermined_actions
    if args.actions_enabled:
        # Validate against the allowed literals, then convert the string
        # to a real boolean for the API payload.
        if args.actions_enabled not in enabled_types:
            errmsg = 'Invalid value, not one of [' + \
                ', '.join(enabled_types) + ']'
            print(errmsg)
            return
        fields['actions_enabled'] = args.actions_enabled in ['true', 'True']
    if args.match_by:
        # Comma-separated dimension keys -> list for the API.
        fields['match_by'] = args.match_by.split(',')
    if args.severity:
        # Severity is validated case-insensitively but sent as given.
        if args.severity.upper() not in severity_types:
            errmsg = 'Invalid severity, not one of [' + \
                ', '.join(severity_types) + ']'
            print(errmsg)
            return
        fields['severity'] = args.severity
    try:
        alarm = mc.alarm_definitions.update(**fields)
    except exc.HTTPException as he:
        # Surface API failures as CLI command errors.
        raise exc.CommandError(
            'HTTPException code=%s message=%s' %
            (he.code, he.message))
    else:
        # Echo the updated definition back as pretty-printed JSON.
        print(jsonutils.dumps(alarm, indent=2))
@utils.arg('id', metavar='<ALARM_DEFINITION_ID>',
           help='The ID of the alarm definition.')
@utils.arg('--name', metavar='<ALARM_DEFINITION_NAME>',
           help='Name of the alarm definition.')
@utils.arg('--description', metavar='<DESCRIPTION>',
           help='Description of the alarm.')
@utils.arg('--expression', metavar='<EXPRESSION>',
           help='The alarm expression to evaluate. Quoted.')
@utils.arg('--alarm-actions', metavar='<NOTIFICATION-ID>',
           help='The notification method to use when an alarm state is ALARM. '
           'This param may be specified multiple times.',
           action='append')
@utils.arg('--ok-actions', metavar='<NOTIFICATION-ID>',
           help='The notification method to use when an alarm state is OK. '
           'This param may be specified multiple times.',
           action='append')
@utils.arg('--undetermined-actions', metavar='<NOTIFICATION-ID>',
           help='The notification method to use when an alarm state is '
           'UNDETERMINED. This param may be specified multiple times.',
           action='append')
@utils.arg('--actions-enabled', metavar='<ACTIONS-ENABLED>',
           help='The actions-enabled boolean is one of [true,false]')
@utils.arg('--severity', metavar='<SEVERITY>',
           help='Severity is one of [LOW, MEDIUM, HIGH, CRITICAL].')
def do_alarm_definition_patch(mc, args):
    '''Patch the alarm definition.'''
    # Unlike update, every field except the ID is optional here: only
    # the supplied options are sent to the API.
    fields = {}
    fields['alarm_id'] = args.id
    if args.name:
        fields['name'] = args.name
    if args.description:
        fields['description'] = args.description
    if args.expression:
        fields['expression'] = args.expression
    if args.alarm_actions:
        fields['alarm_actions'] = args.alarm_actions
    if args.ok_actions:
        fields['ok_actions'] = args.ok_actions
    if args.undetermined_actions:
        fields['undetermined_actions'] = args.undetermined_actions
    if args.actions_enabled:
        # Validate the literal then convert to a real boolean.
        if args.actions_enabled not in enabled_types:
            errmsg = 'Invalid value, not one of [' + \
                ', '.join(enabled_types) + ']'
            print(errmsg)
            return
        fields['actions_enabled'] = args.actions_enabled in ['true', 'True']
    if args.severity:
        # Severity is validated case-insensitively but sent as given.
        if args.severity.upper() not in severity_types:
            errmsg = 'Invalid severity, not one of [' + \
                ', '.join(severity_types) + ']'
            print(errmsg)
            return
        fields['severity'] = args.severity
    try:
        alarm = mc.alarm_definitions.patch(**fields)
    except exc.HTTPException as he:
        # Surface API failures as CLI command errors.
        raise exc.CommandError(
            'HTTPException code=%s message=%s' %
            (he.code, he.message))
    else:
        # Echo the patched definition back as pretty-printed JSON.
        print(jsonutils.dumps(alarm, indent=2))
@utils.arg('--alarm-definition-id', metavar='<ALARM_DEFINITION_ID>',
           help='The ID of the alarm definition.')
@utils.arg('--metric-name', metavar='<METRIC_NAME>',
           help='Name of the metric.')
@utils.arg('--metric-dimensions', metavar='<KEY1=VALUE1,KEY2=VALUE2...>',
           help='key value pair used to specify a metric dimension. '
           'This can be specified multiple times, or once with parameters '
           'separated by a comma. '
           'Dimensions need quoting when they contain special chars [&,(,),{,},>,<] '
           'that confuse the CLI parser.',
           action='append')
@utils.arg('--state', metavar='<ALARM_STATE>',
           help='ALARM_STATE is one of [UNDETERMINED, OK, ALARM].')
@utils.arg('--state-updated-start-time', metavar='<UTC_STATE_UPDATED_START>',
           help='Return all alarms whose state was updated on or after the time specified')
@utils.arg('--lifecycle-state', metavar='<LIFECYCLE_STATE>',
           help='The lifecycle state of the alarm')
@utils.arg('--link', metavar='<LINK>',
           help='The link to external data associated with the alarm')
@utils.arg('--offset', metavar='<OFFSET LOCATION>',
           help='The offset used to paginate the return data.')
@utils.arg('--limit', metavar='<RETURN LIMIT>',
           help='The amount of data to be returned up to the API maximum limit.')
def do_alarm_list(mc, args):
    '''List alarms for this tenant.'''
    # Only supplied filter options are forwarded to the API.
    fields = {}
    if args.alarm_definition_id:
        fields['alarm_definition_id'] = args.alarm_definition_id
    if args.metric_name:
        fields['metric_name'] = args.metric_name
    if args.metric_dimensions:
        # KEY=VALUE pairs -> dict for the API query.
        fields['metric_dimensions'] = utils.format_parameters(args.metric_dimensions)
    if args.state:
        # Validate the alarm state case-insensitively before querying.
        if args.state.upper() not in state_types:
            errmsg = 'Invalid state, not one of [' + \
                ', '.join(state_types) + ']'
            print(errmsg)
            return
        fields['state'] = args.state
    if args.state_updated_start_time:
        fields['state_updated_start_time'] = args.state_updated_start_time
    if args.lifecycle_state:
        fields['lifecycle_state'] = args.lifecycle_state
    if args.link:
        fields['link'] = args.link
    if args.limit:
        fields['limit'] = args.limit
    if args.offset:
        fields['offset'] = args.offset
    try:
        alarm = mc.alarms.list(**fields)
    except exc.HTTPException as he:
        # Surface API failures as CLI command errors.
        raise exc.CommandError(
            'HTTPException code=%s message=%s' %
            (he.code, he.message))
    else:
        if args.json:
            # Raw JSON output requested; skip table formatting.
            print(utils.json_formatter(alarm))
            return
        cols = ['id', 'alarm_definition_id', 'alarm_definition_name', 'metric_name', 'metric_dimensions',
                'severity', 'state', 'lifecycle_state', 'link', 'state_updated_timestamp',
                'updated_timestamp', "created_timestamp"]
        # Definition-related columns are pulled from the nested
        # 'alarm_definition' dict; metric columns are flattened from the
        # alarm's 'metrics' list by the format_metric_* helpers.
        formatters = {
            'id': lambda x: x['id'],
            'alarm_definition_id': lambda x: x['alarm_definition']['id'],
            'alarm_definition_name': lambda x: x['alarm_definition']['name'],
            'metric_name': lambda x: format_metric_name(x['metrics']),
            'metric_dimensions': lambda x: format_metric_dimensions(x['metrics']),
            'severity': lambda x: x['alarm_definition']['severity'],
            'state': lambda x: x['state'],
            'lifecycle_state': lambda x: x['lifecycle_state'],
            'link': lambda x: x['link'],
            'state_updated_timestamp': lambda x: x['state_updated_timestamp'],
            'updated_timestamp': lambda x: x['updated_timestamp'],
            'created_timestamp': lambda x: x['created_timestamp'],
        }
        if isinstance(alarm, list):
            # print the list
            utils.print_list(alarm, cols, formatters=formatters)
        else:
            # add the dictionary to a list, so print_list works
            alarm_list = list()
            alarm_list.append(alarm)
            utils.print_list(alarm_list, cols, formatters=formatters)
@utils.arg('id', metavar='<ALARM_ID>',
           help='The ID of the alarm.')
def do_alarm_show(mc, args):
    '''Describe the alarm.'''
    try:
        alarm = mc.alarms.get(alarm_id=args.id)
    except exc.HTTPException as he:
        raise exc.CommandError(
            'HTTPException code=%s message=%s' %
            (he.code, he.message))
    if args.json:
        print(utils.json_formatter(alarm))
        return
    # Render every field as JSON except links, which get list formatting.
    formatters = dict.fromkeys(
        ['id', 'alarm_definition', 'metrics', 'state'],
        utils.json_formatter)
    formatters['links'] = utils.format_dictlist
    utils.print_dict(alarm, formatters=formatters)
@utils.arg('id', metavar='<ALARM_ID>',
           help='The ID of the alarm.')
@utils.arg('state', metavar='<ALARM_STATE>',
           help='ALARM_STATE is one of [UNDETERMINED, OK, ALARM].')
@utils.arg('lifecycle_state', metavar='<LIFECYCLE_STATE>',
           help='The lifecycle state of the alarm')
@utils.arg('link', metavar='<LINK>',
           help='A link to an external resource with information about the alarm')
def do_alarm_update(mc, args):
    '''Update the alarm state.'''
    # Reject unknown states before touching the API.
    if args.state.upper() not in state_types:
        print('Invalid state, not one of [' +
              ', '.join(state_types) + ']')
        return
    payload = {'alarm_id': args.id,
               'state': args.state,
               'lifecycle_state': args.lifecycle_state,
               'link': args.link}
    try:
        alarm = mc.alarms.update(**payload)
    except exc.HTTPException as he:
        raise exc.CommandError(
            'HTTPException code=%s message=%s' %
            (he.code, he.message))
    else:
        print(jsonutils.dumps(alarm, indent=2))
@utils.arg('id', metavar='<ALARM_ID>',
           help='The ID of the alarm.')
@utils.arg('--state', metavar='<ALARM_STATE>',
           help='ALARM_STATE is one of [UNDETERMINED, OK, ALARM].')
@utils.arg('--lifecycle-state', metavar='<LIFECYCLE_STATE>',
           help='The lifecycle state of the alarm')
@utils.arg('--link', metavar='<LINK>',
           help='A link to an external resource with information about the alarm')
def do_alarm_patch(mc, args):
    '''Patch the alarm state.'''
    # Validate the optional state before building the request.
    if args.state and args.state.upper() not in state_types:
        print('Invalid state, not one of [' +
              ', '.join(state_types) + ']')
        return
    payload = {'alarm_id': args.id}
    # The remaining options are all optional and share identical handling.
    for attr in ('state', 'lifecycle_state', 'link'):
        value = getattr(args, attr)
        if value:
            payload[attr] = value
    try:
        alarm = mc.alarms.patch(**payload)
    except exc.HTTPException as he:
        raise exc.CommandError(
            'HTTPException code=%s message=%s' %
            (he.code, he.message))
    else:
        print(jsonutils.dumps(alarm, indent=2))
@utils.arg('id', metavar='<ALARM_ID>',
           help='The ID of the alarm.')
def do_alarm_delete(mc, args):
    '''Delete the alarm.'''
    try:
        mc.alarms.delete(alarm_id=args.id)
    except exc.HTTPException as he:
        raise exc.CommandError(
            'HTTPException code=%s message=%s' %
            (he.code, he.message))
    # Only reached when the delete call did not raise.
    print('Successfully deleted alarm')
def output_alarm_history(args, alarm_history):
    '''Print alarm state-transition history as JSON or a table.'''
    if args.json:
        print(utils.json_formatter(alarm_history))
        return
    # format output
    cols = ['alarm_id', 'new_state', 'old_state', 'reason',
            'reason_data', 'metric_name', 'metric_dimensions', 'timestamp']
    # Simple columns read straight from the row dict; bind the key as a
    # default argument so each lambda keeps its own column.
    formatters = {col: (lambda x, key=col: x[key])
                  for col in cols
                  if col not in ('metric_name', 'metric_dimensions')}
    # Metric columns are flattened from the row's 'metrics' list.
    formatters['metric_name'] = lambda x: format_metric_name(x['metrics'])
    formatters['metric_dimensions'] = \
        lambda x: format_metric_dimensions(x['metrics'])
    # print_list needs a list; wrap a single dict result.
    rows = alarm_history if isinstance(alarm_history, list) else [alarm_history]
    utils.print_list(rows, cols, formatters=formatters)
@utils.arg('id', metavar='<ALARM_ID>',
           help='The ID of the alarm.')
@utils.arg('--offset', metavar='<OFFSET LOCATION>',
           help='The offset used to paginate the return data.')
@utils.arg('--limit', metavar='<RETURN LIMIT>',
           help='The amount of data to be returned up to the API maximum limit.')
def do_alarm_history(mc, args):
    '''Alarm state transition history.'''
    query = {'alarm_id': args.id}
    if args.limit:
        query['limit'] = args.limit
    if args.offset:
        query['offset'] = args.offset
    try:
        alarm = mc.alarms.history(**query)
    except exc.HTTPException as he:
        raise exc.CommandError(
            'HTTPException code=%s message=%s' %
            (he.code, he.message))
    else:
        # Shared renderer handles both JSON and table output.
        output_alarm_history(args, alarm)
@utils.arg('--dimensions', metavar='<KEY1=VALUE1,KEY2=VALUE2...>',
           help='key value pair used to specify a metric dimension. '
           'This can be specified multiple times, or once with parameters '
           'separated by a comma. '
           'Dimensions need quoting when they contain special chars [&,(,),{,},>,<] '
           'that confuse the CLI parser.',
           action='append')
@utils.arg('--starttime', metavar='<UTC_START_TIME>',
           help='measurements >= UTC time. format: 2014-01-01T00:00:00Z. OR format: -120 (for previous 2 hours)')
@utils.arg('--endtime', metavar='<UTC_END_TIME>',
           help='measurements <= UTC time. format: 2014-01-01T00:00:00Z.')
@utils.arg('--offset', metavar='<OFFSET LOCATION>',
           help='The offset used to paginate the return data.')
@utils.arg('--limit', metavar='<RETURN LIMIT>',
           help='The amount of data to be returned up to the API maximum limit.')
def do_alarm_history_list(mc, args):
    '''List alarms state history.'''
    fields = {}
    if args.dimensions:
        # KEY=VALUE pairs -> dict for the API query.
        fields['dimensions'] = utils.format_parameters(args.dimensions)
    if args.starttime:
        # A leading '-' means "minutes before now": convert the relative
        # offset to an absolute UTC timestamp in ISO-8601 'Z' form
        # (drop the microseconds, swap the space for 'T').
        if args.starttime[0] == '-':
            deltaT = time.time() + (int(args.starttime) * 60)
            utc = str(datetime.datetime.utcfromtimestamp(deltaT))
            utc = utc.replace(" ", "T")[:-7] + 'Z'
            args.starttime = utc
        fields['start_time'] = args.starttime
    if args.endtime:
        fields['end_time'] = args.endtime
    if args.limit:
        fields['limit'] = args.limit
    if args.offset:
        fields['offset'] = args.offset
    try:
        alarm = mc.alarms.history_list(**fields)
    except exc.HTTPException as he:
        # Surface API failures as CLI command errors.
        raise exc.CommandError(
            'HTTPException code=%s message=%s' %
            (he.code, he.message))
    else:
        # Shared renderer handles both JSON and table output.
        output_alarm_history(args, alarm)
| |
import functools, hashlib
import numpy as np
from scipy.stats import norm as normal_dbn
import rpy2.robjects as rpy
from rpy2.robjects import numpy2ri
numpy2ri.activate()
from selection.learning.learners import mixture_learner
from selection.learning.utils import naive_partial_model_inference, pivot_plot
from selection.learning.core import gbm_fit_sk, infer_general_target
def probit_MLE(X, y, formula_terms, truth=None, alpha=0.1):
    """Fit a probit GLM in R and return naive (unadjusted) inference.

    Parameters
    ----------
    X : ndarray, (n, p) design matrix (columns become X1..Xp in R).
    y : ndarray, (n,) binary response.
    formula_terms : sequence of str
        R formula terms joined with ' + ' on the right-hand side.
    truth : ndarray or None
        True coefficients used for pivots/coverage; defaults to zeros.
    alpha : float
        Nominal level; intervals are (1 - alpha) two-sided.

    Returns
    -------
    (beta_hat, target_cov, results_df) where results_df holds the naive
    pivots, p-values, coverage indicators and interval endpoints.
    """
    # BUG FIX: pandas was only imported inside the __main__ block, so this
    # function raised NameError on `pd` when the file was imported as a
    # module. Import it locally to keep the module self-contained.
    import pandas as pd

    numpy2ri.activate()
    rpy.r.assign('X', X)
    rpy.r.assign('y', y)
    rpy.r('D = data.frame(X, y)')
    rpy.r('M = glm(y ~ %s, family=binomial(link="probit"), data=D)' %
          ' + '.join(formula_terms))
    beta_hat = rpy.r('coef(M)')
    target_cov = rpy.r('vcov(M)')

    if truth is None:
        truth = np.zeros_like(beta_hat)

    SE = np.sqrt(np.diag(target_cov))
    Z = (beta_hat - truth) / SE   # pivot statistic centered at the truth
    Z0 = beta_hat / SE            # test statistic for H0: beta = 0

    # Two-sided p-values / pivots from the standard normal reference.
    pvalues = normal_dbn.cdf(Z0)
    pvalues = 2 * np.minimum(pvalues, 1 - pvalues)
    pivots = normal_dbn.cdf(Z)
    pivots = 2 * np.minimum(pivots, 1 - pivots)

    # Symmetric Wald-style (1 - alpha) confidence limits.
    upper = beta_hat + normal_dbn.ppf(1 - 0.5 * alpha) * SE
    lower = beta_hat - normal_dbn.ppf(1 - 0.5 * alpha) * SE
    covered = (upper > truth) * (lower < truth)

    results_df = pd.DataFrame({'naive_pivot':pivots,
                               'naive_pvalue':pvalues,
                               'naive_coverage':covered,
                               'naive_length':upper - lower,
                               'naive_upper':upper,
                               'naive_lower':lower,
                               'variable':formula_terms,
                               })

    return beta_hat, target_cov, results_df
#### A parametric model will need something like this
class probit_step_learner(mixture_learner):
    """Mixture learner that resamples binary responses from a perturbed
    probit fit on the selected model.

    Used to learn the selection probability for the stepwise probit
    example: each proposal perturbs a few MLE coordinates, simulates new
    responses from the implied probit model, and refits the MLE.
    """

    def __init__(self,
                 algorithm,
                 observed_selection,
                 target_cov,
                 X,
                 observed_MLE,
                 observed_Y):
        # NOTE(review): mixture_learner.__init__ is not called here —
        # this class sets its own attributes directly; confirm the base
        # class tolerates that.
        (self.algorithm,
         self.observed_outcome,
         self.target_cov,
         self.X,
         self.observed_MLE,
         self.observed_Y) = (algorithm,
                             observed_selection,
                             target_cov,
                             X,
                             observed_MLE,
                             observed_Y)

        n, p = X.shape
        # Columns are named X1..Xp on the R side; keep the ones that
        # appear in the selected model, plus an explicit intercept column.
        var_select = ['X%d' % (i+1) in observed_selection for i in range(p)]
        self.X_select = np.hstack([np.ones((n, 1)), X[:,var_select]])
        self.observed_target = observed_MLE
        self._chol = np.linalg.cholesky(self.target_cov)
        self._beta_cov = self.target_cov

    def learning_proposal(self):
        """
        Return perturbed data and perturbed MLE.
        """
        n, s = self.X_select.shape
        beta_hat = self.observed_MLE
        perturbed_beta = beta_hat.copy()
        # Perturb up to 3 randomly chosen coordinates, scaled by their
        # own standard errors and a randomly drawn scale factor.
        nidx = np.random.choice(np.arange(s), min(3, s), replace=False)
        for idx in nidx:
            scale = np.random.choice(self.scales, 1)
            perturbed_beta[idx] += (scale * np.random.standard_normal() *
                                    np.sqrt(self._beta_cov[idx, idx]))
        # Simulate new binary responses from the perturbed probit model,
        # then refit the MLE on the selected model.
        linpred = self.X_select.dot(perturbed_beta)
        prob = normal_dbn.cdf(linpred)
        perturbed_Y = np.random.binomial(1, prob)
        perturbed_MLE = probit_MLE(self.X, perturbed_Y, self.observed_outcome)[0]
        return perturbed_MLE, perturbed_Y
####
def simulate(n=500, p=10, alpha=0.1, B=2000):
    """Run one null simulation of stepwise probit selection + inference.

    Generates independent Gaussian features and Bernoulli(0.5) responses
    (so every coefficient is truly zero), runs R's bidirectional stepwise
    probit selection, then computes both naive and learner-adjusted
    inference for the selected coefficients.

    Parameters
    ----------
    n, p : int, sample size and number of features.
    alpha : float, nominal level for intervals.
    B : int, number of learning samples for infer_general_target.

    Returns
    -------
    pandas.DataFrame with one row per selected variable, merged with the
    naive results on 'variable'.
    """
    # BUG FIX: pandas was only imported in the __main__ block; import it
    # locally so simulate() also works when this file is imported.
    import pandas as pd

    # description of statistical problem
    X = np.random.standard_normal((n, p))
    y = np.random.binomial(1, 0.5, size=(n,))
    truth = np.zeros(p+1)

    def algorithm(X, y):
        # Bidirectional stepwise probit selection in R; returns the
        # sorted tuple of selected formula terms ('1' for the intercept).
        numpy2ri.activate()
        rpy.r.assign('X', X)
        rpy.r.assign('y', y)
        # NOTE: an extra unassigned glm(...) call that fit the same model
        # and discarded the result has been removed.
        rpy.r('''
        y = as.matrix(y)
        D = data.frame(X, y)
        M = glm(y ~ ., family=binomial(link='probit'), data=D)
        M0 = glm(y ~ 1, family=binomial(link='probit'), data=D)
        Mselect = step(M, direction='both', scope=list(upper=M, lower=M0), trace=FALSE)
        selected_vars = names(coef(Mselect))
        ''')
        selected_vars = ' + '.join(sorted(list(rpy.r('selected_vars'))))
        selected_vars = selected_vars.replace('(Intercept)', '1')
        numpy2ri.deactivate()
        return tuple(selected_vars.split(' + '))

    # run selection algorithm
    selection_algorithm = functools.partial(algorithm, X)

    # Hash the simulated instance so repeated runs can be grouped by id.
    instance_hash = hashlib.md5()
    instance_hash.update(X.tobytes())
    instance_hash.update(y.tobytes())
    instance_hash.update(truth.tobytes())
    instance_id = instance_hash.hexdigest()

    observed_model = selection_algorithm(y)
    proj_truth = np.zeros(len(observed_model))  # null simulation here

    # Naive (unadjusted) inference on the selected model.
    MLE, target_cov, naive_df = probit_MLE(X,
                                           y,
                                           observed_model,
                                           truth=proj_truth,
                                           alpha=alpha)

    learner = probit_step_learner(selection_algorithm,
                                  observed_model,
                                  target_cov,
                                  X,
                                  MLE,
                                  y)

    print(observed_model)
    results = infer_general_target(observed_model,
                                   MLE,
                                   target_cov,
                                   learner,
                                   hypothesis=proj_truth,
                                   fit_probability=gbm_fit_sk,
                                   fit_args={'n_estimators':5000},
                                   alpha=alpha,
                                   B=B)

    # Collect the adjusted per-coefficient results.
    (pivots,
     covered,
     lengths,
     pvalues,
     lower,
     upper) = [], [], [], [], [], []
    for result, true_target in zip(results, proj_truth):
        (pivot,
         interval,
         pvalue,
         _) = result
        pvalues.append(pvalue)
        pivots.append(pivot)
        covered.append((interval[0] < true_target) * (interval[1] > true_target))
        lengths.append(interval[1] - interval[0])
        lower.append(interval[0])
        upper.append(interval[1])

    df = pd.DataFrame({'pivot':pivots,
                       'pvalue':pvalues,
                       'coverage':covered,
                       'length':lengths,
                       'upper':upper,
                       'lower':lower,
                       'target':proj_truth,
                       'variable':list(observed_model),
                       'id':[instance_id]*len(pivots),
                       })
    df = pd.merge(df, naive_df, on='variable')
    return df
if __name__ == "__main__":
    # Heavy third-party imports are deferred to script execution. Note
    # that `pd` used above in simulate()/probit_MLE() is only bound here,
    # so the file works as a script but not when imported as a module.
    import statsmodels.api as sm
    import matplotlib.pyplot as plt
    import pandas as pd
    # Repeatedly simulate, append results to a CSV, and refresh plots.
    for i in range(2000):
        df = simulate(B=1000)
        csvfile = 'probit_step_both.csv'
        outbase = csvfile[:-4]
        # NOTE(review): the i > 0 guard means the first iteration's
        # results are never written to disk — confirm this is intended.
        if df is not None and i > 0:
            try: # concatenate to disk
                df = pd.concat([df, pd.read_csv(csvfile)])
            except FileNotFoundError:
                pass
            df.to_csv(csvfile, index=False)
            if len(df['pivot']) > 0:
                # Write pivot/length diagnostic plots next to the CSV.
                pivot_ax, length_ax = pivot_plot(df, outbase)
| |
# Copyright (c) 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import collections
import itertools
import random
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_log import log as logging
import six
from sqlalchemy import sql
from neutron._i18n import _LE, _LW
from neutron.common import constants
from neutron.common import utils
from neutron.db import api as db_api
from neutron.db import l3_agentschedulers_db
from neutron.db import l3_db
from neutron.db import l3_hamode_db
from neutron.extensions import availability_zone as az_ext
# Module-level logger for the L3 scheduler.
LOG = logging.getLogger(__name__)
# Register the L3 HA options (e.g. min/max agents per router) on the
# global config object; L3Scheduler.__init__ reads them below.
cfg.CONF.register_opts(l3_hamode_db.L3_HA_OPTS)
@six.add_metaclass(abc.ABCMeta)
class L3Scheduler(object):
    def __init__(self):
        """Cache the configured HA agent-count bounds for this scheduler."""
        self.min_ha_agents = cfg.CONF.min_l3_agents_per_router
        self.max_ha_agents = cfg.CONF.max_l3_agents_per_router
    @abc.abstractmethod
    def schedule(self, plugin, context, router_id,
                 candidates=None, hints=None):
        """Schedule the router to an active L3 agent.

        Schedule the router only if it is not already scheduled.

        :param plugin: the L3 service plugin
        :param context: request context
        :param router_id: ID of the router to schedule
        :param candidates: optional pre-computed list of candidate agents
        :param hints: optional scheduling hints
        """
        pass
def _router_has_binding(self, context, router_id, l3_agent_id):
router_binding_model = l3_agentschedulers_db.RouterL3AgentBinding
query = context.session.query(router_binding_model)
query = query.filter(router_binding_model.router_id == router_id,
router_binding_model.l3_agent_id == l3_agent_id)
return query.count() > 0
def _filter_unscheduled_routers(self, context, plugin, routers):
"""Filter from list of routers the ones that are not scheduled."""
unscheduled_routers = []
for router in routers:
l3_agents = plugin.get_l3_agents_hosting_routers(
context, [router['id']])
if l3_agents:
LOG.debug('Router %(router_id)s has already been '
'hosted by L3 agent %(agent_id)s',
{'router_id': router['id'],
'agent_id': l3_agents[0]['id']})
else:
unscheduled_routers.append(router)
return unscheduled_routers
    def _get_unscheduled_routers(self, context, plugin):
        """Get routers with no agent binding."""
        # TODO(gongysh) consider the disabled agent's router
        # A router is unscheduled when no RouterL3AgentBinding row
        # references it.
        no_agent_binding = ~sql.exists().where(
            l3_db.Router.id ==
            l3_agentschedulers_db.RouterL3AgentBinding.router_id)
        query = context.session.query(l3_db.Router.id).filter(no_agent_binding)
        # The query yields single-column rows; unpack the router IDs.
        unscheduled_router_ids = [router_id_[0] for router_id_ in query]
        # Fetch full router dicts only if something is actually unscheduled.
        if unscheduled_router_ids:
            return plugin.get_routers(
                context, filters={'id': unscheduled_router_ids})
        return []
def _get_routers_to_schedule(self, context, plugin, router_ids=None):
"""Verify that the routers specified need to be scheduled.
:param context: the context
:param plugin: the core plugin
:param router_ids: the list of routers to be checked for scheduling
:returns: the list of routers to be scheduled
"""
if router_ids is not None:
routers = plugin.get_routers(context, filters={'id': router_ids})
return self._filter_unscheduled_routers(context, plugin, routers)
else:
return self._get_unscheduled_routers(context, plugin)
def _get_routers_can_schedule(self, context, plugin, routers, l3_agent):
    """Get the subset of routers that can be scheduled on the L3 agent."""
    # Collect ids of routers the agent is not compatible with, then keep
    # everything else.
    incompatible_ids = {
        router['id'] for router in routers
        if not plugin.get_l3_agent_candidates(context, router, [l3_agent])}
    return [router for router in routers
            if router['id'] not in incompatible_ids]
def auto_schedule_routers(self, plugin, context, host, router_ids):
    """Schedule non-hosted routers to L3 Agent running on host.

    If router_ids is given, each router in router_ids is scheduled
    if it is not scheduled yet. Otherwise all unscheduled routers
    are scheduled.
    Do not schedule the routers which are hosted already
    by active l3 agents.

    :returns: True if routers have been successfully assigned to host
    """
    l3_agent = plugin.get_enabled_agent_on_host(
        context, constants.AGENT_TYPE_L3, host)
    if not l3_agent:
        # No enabled L3 agent on this host: nothing can be scheduled.
        return False
    unscheduled_routers = self._get_routers_to_schedule(
        context, plugin, router_ids)
    if not unscheduled_routers:
        # Nothing new to bind; with the HA extension, existing HA routers
        # may still gain an additional agent on this host.
        if utils.is_extension_supported(
                plugin, constants.L3_HA_MODE_EXT_ALIAS):
            return self._schedule_ha_routers_to_additional_agent(
                plugin, context, l3_agent)
    target_routers = self._get_routers_can_schedule(
        context, plugin, unscheduled_routers, l3_agent)
    if not target_routers:
        LOG.warn(_LW('No routers compatible with L3 agent configuration '
                     'on host %s'), host)
        return False
    self._bind_routers(context, plugin, target_routers, l3_agent)
    return True
def _get_candidates(self, plugin, context, sync_router):
    """Return L3 agents where a router could be scheduled."""
    with context.session.begin(subtransactions=True):
        # allow one router is hosted by just
        # one enabled l3 agent hosting since active is just a
        # timing problem. Non-active l3 agent can return to
        # active any time
        current_l3_agents = plugin.get_l3_agents_hosting_routers(
            context, [sync_router['id']], admin_state_up=True)
        if current_l3_agents:
            # Already hosted; no candidates needed.
            LOG.debug('Router %(router_id)s has already been hosted '
                      'by L3 agent %(agent_id)s',
                      {'router_id': sync_router['id'],
                       'agent_id': current_l3_agents[0]['id']})
            return []
        active_l3_agents = plugin.get_l3_agents(context, active=True)
        if not active_l3_agents:
            LOG.warn(_LW('No active L3 agents'))
            return []
        # Narrow the active agents to those actually able to host this
        # router (per plugin compatibility rules).
        candidates = plugin.get_l3_agent_candidates(context,
                                                    sync_router,
                                                    active_l3_agents)
        if not candidates:
            LOG.warn(_LW('No L3 agents can host the router %s'),
                     sync_router['id'])
        return candidates
def _bind_routers(self, context, plugin, routers, l3_agent):
    """Bind each router to l3_agent; HA routers get an HA port first."""
    for router in routers:
        if not router.get('ha'):
            self.bind_router(context, router['id'], l3_agent)
            continue
        already_bound = self._router_has_binding(
            context, router['id'], l3_agent.id)
        if not already_bound:
            self.create_ha_port_and_bind(
                plugin, context, router['id'],
                router['tenant_id'], l3_agent)
def bind_router(self, context, router_id, chosen_agent):
    """Bind the router to the l3 agent which has been chosen.

    Races with concurrent scheduling (duplicate binding) or concurrent
    router deletion (missing FK target) are logged and ignored.
    """
    try:
        with context.session.begin(subtransactions=True):
            binding = l3_agentschedulers_db.RouterL3AgentBinding()
            binding.l3_agent = chosen_agent
            binding.router_id = router_id
            context.session.add(binding)
    except db_exc.DBDuplicateEntry:
        # Another worker bound this router first; treat as done.
        LOG.debug('Router %(router_id)s has already been scheduled '
                  'to L3 agent %(agent_id)s.',
                  {'agent_id': chosen_agent.id,
                   'router_id': router_id})
        return
    except db_exc.DBReferenceError:
        # The router row vanished underneath us (concurrent delete).
        LOG.debug('Router %s has already been removed '
                  'by concurrent operation', router_id)
        return
    LOG.debug('Router %(router_id)s is scheduled to L3 agent '
              '%(agent_id)s', {'router_id': router_id,
                               'agent_id': chosen_agent.id})
def _schedule_router(self, plugin, context, router_id,
                     candidates=None):
    """Pick agent(s) for the router and create the binding(s).

    :returns: the chosen agent (for HA routers, the last of the chosen
        set), or None when no candidate is available.
    """
    sync_router = plugin.get_router(context, router_id)
    candidates = candidates or self._get_candidates(
        plugin, context, sync_router)
    if not candidates:
        return
    elif sync_router.get('ha', False):
        # HA routers are bound to several agents at once.
        chosen_agents = self._bind_ha_router(plugin, context,
                                             router_id, candidates)
        if not chosen_agents:
            return
        chosen_agent = chosen_agents[-1]
    else:
        chosen_agent = self._choose_router_agent(
            plugin, context, candidates)
        self.bind_router(context, router_id, chosen_agent)
    return chosen_agent
@abc.abstractmethod
def _choose_router_agent(self, plugin, context, candidates):
    """Choose an agent from candidates based on a specific policy."""
    pass
@abc.abstractmethod
def _choose_router_agents_for_ha(self, plugin, context, candidates):
    """Choose agents from candidates based on a specific policy."""
    pass
def _get_num_of_agents_for_ha(self, candidates_count):
    """Cap the HA replica count at max_ha_agents when it is configured."""
    if self.max_ha_agents:
        return min(self.max_ha_agents, candidates_count)
    return candidates_count
def _enough_candidates_for_ha(self, candidates):
    """Return True when enough agents exist to host an HA router."""
    if candidates and len(candidates) >= self.min_ha_agents:
        return True
    LOG.error(_LE("Not enough candidates, a HA router needs at least "
                  "%s agents"), self.min_ha_agents)
    return False
def create_ha_port_and_bind(self, plugin, context, router_id,
                            tenant_id, agent):
    """Creates and binds a new HA port for this agent."""
    ha_network = plugin.get_ha_network(context, tenant_id)
    try:
        # The HA port lives on the tenant's HA network; creation needs an
        # elevated context.
        port_binding = plugin.add_ha_port(context.elevated(), router_id,
                                          ha_network.network.id, tenant_id)
        with db_api.autonested_transaction(context.session):
            port_binding.l3_agent_id = agent['id']
    except db_exc.DBDuplicateEntry:
        # Port/agent pairing already exists; still ensure the router
        # itself is bound below.
        LOG.debug("Router %(router)s already scheduled for agent "
                  "%(agent)s", {'router': router_id, 'agent': agent['id']})
    self.bind_router(context, router_id, agent)
def get_ha_routers_l3_agents_counts(self, context, plugin, filters=None):
    """Return a mapping (router, # agents) matching specified filters.

    NOTE(review): this base implementation ignores ``filters``; the
    AZ-aware subclass applies filtering on top of this result.
    """
    return plugin.get_ha_routers_l3_agents_count(context)
def _schedule_ha_routers_to_additional_agent(self, plugin, context, agent):
    """Bind already scheduled routers to the agent.

    Retrieve the number of agents per router and check if the router has
    to be scheduled on the given agent if max_l3_agents_per_router
    is not yet reached.
    """
    # NOTE(review): 'agent' is passed where the signature declares
    # 'filters'; the AZ-aware override reads it as a mapping — confirm.
    routers_agents = self.get_ha_routers_l3_agents_counts(context, plugin,
                                                          agent)
    scheduled = False
    admin_ctx = context.elevated()
    for router, agents in routers_agents:
        max_agents_not_reached = (
            not self.max_ha_agents or agents < self.max_ha_agents)
        if max_agents_not_reached:
            if not self._router_has_binding(admin_ctx, router['id'],
                                            agent.id):
                self.create_ha_port_and_bind(plugin, admin_ctx,
                                             router['id'],
                                             router['tenant_id'],
                                             agent)
                scheduled = True
    return scheduled
def _bind_ha_router_to_agents(self, plugin, context, router_id,
                              chosen_agents):
    """Pair each HA port binding of the router with one chosen agent."""
    port_bindings = plugin.get_ha_router_port_bindings(context,
                                                       [router_id])
    # zip() pairs bindings and agents one-to-one; any surplus on either
    # side is simply left unbound.
    for port_binding, agent in zip(port_bindings, chosen_agents):
        try:
            with db_api.autonested_transaction(context.session):
                port_binding.l3_agent_id = agent.id
                self.bind_router(context, router_id, agent)
        except db_exc.DBDuplicateEntry:
            LOG.debug("Router %(router)s already scheduled for agent "
                      "%(agent)s", {'router': router_id,
                                    'agent': agent.id})
        else:
            LOG.debug('HA Router %(router_id)s is scheduled to L3 agent '
                      '%(agent_id)s)',
                      {'router_id': router_id, 'agent_id': agent.id})
def _bind_ha_router(self, plugin, context, router_id, candidates):
    """Bind a HA router to agents based on a specific policy.

    Returns the chosen agents, or None when there are not enough
    candidates for an HA deployment.
    """
    if not self._enough_candidates_for_ha(candidates):
        return
    agents = self._choose_router_agents_for_ha(plugin, context, candidates)
    self._bind_ha_router_to_agents(plugin, context, router_id, agents)
    return agents
class ChanceScheduler(L3Scheduler):
    """Randomly allocate an L3 agent for a router."""

    def schedule(self, plugin, context, router_id,
                 candidates=None):
        # Shared scheduling flow from the base class; only the agent
        # selection policy below differs.
        return self._schedule_router(
            plugin, context, router_id, candidates=candidates)

    def _choose_router_agent(self, plugin, context, candidates):
        # Uniform random pick among compatible agents.
        return random.choice(candidates)

    def _choose_router_agents_for_ha(self, plugin, context, candidates):
        # Sample without replacement as many agents as HA requires.
        wanted = self._get_num_of_agents_for_ha(len(candidates))
        return random.sample(candidates, wanted)
class LeastRoutersScheduler(L3Scheduler):
    """Allocate to an L3 agent with the least number of routers bound."""

    def schedule(self, plugin, context, router_id,
                 candidates=None):
        # Shared scheduling flow; agent selection is load-based.
        return self._schedule_router(
            plugin, context, router_id, candidates=candidates)

    def _choose_router_agent(self, plugin, context, candidates):
        # The plugin knows per-agent router counts; ask it for the
        # least-loaded candidate.
        candidate_ids = [agent['id'] for agent in candidates]
        return plugin.get_l3_agent_with_min_routers(context, candidate_ids)

    def _choose_router_agents_for_ha(self, plugin, context, candidates):
        wanted = self._get_num_of_agents_for_ha(len(candidates))
        by_load = plugin.get_l3_agents_ordered_by_num_routers(
            context, [agent['id'] for agent in candidates])
        return by_load[:wanted]
class AZLeastRoutersScheduler(LeastRoutersScheduler):
    """Availability zone aware scheduler.

    If a router is ha router, allocate L3 agents distributed AZs
    according to router's az_hints.
    """

    def _get_az_hints(self, router):
        # Router-specific AZ hints win; otherwise fall back to the
        # deployment-wide default AZ list.
        return (router.get(az_ext.AZ_HINTS) or
                cfg.CONF.default_availability_zones)

    def _get_routers_can_schedule(self, context, plugin, routers, l3_agent):
        """Overwrite L3Scheduler's method to filter by availability zone."""
        target_routers = []
        for r in routers:
            az_hints = self._get_az_hints(r)
            if not az_hints or l3_agent['availability_zone'] in az_hints:
                target_routers.append(r)
        if not target_routers:
            return
        return super(AZLeastRoutersScheduler, self)._get_routers_can_schedule(
            context, plugin, target_routers, l3_agent)

    def _get_candidates(self, plugin, context, sync_router):
        """Overwrite L3Scheduler's method to filter by availability zone."""
        all_candidates = (
            super(AZLeastRoutersScheduler, self)._get_candidates(
                plugin, context, sync_router))
        candidates = []
        az_hints = self._get_az_hints(sync_router)
        for agent in all_candidates:
            if not az_hints or agent['availability_zone'] in az_hints:
                candidates.append(agent)
        return candidates

    def get_ha_routers_l3_agents_counts(self, context, plugin, filters=None):
        """Overwrite L3Scheduler's method to filter by availability zone."""
        all_routers_agents = (
            super(AZLeastRoutersScheduler, self).
            get_ha_routers_l3_agents_counts(context, plugin, filters))
        if filters is None:
            return all_routers_agents
        routers_agents = []
        for router, agents in all_routers_agents:
            az_hints = self._get_az_hints(router)
            # Drop routers whose AZ hints exclude the filtered zone.
            if az_hints and filters['availability_zone'] not in az_hints:
                continue
            routers_agents.append((router, agents))
        return routers_agents

    def _choose_router_agents_for_ha(self, plugin, context, candidates):
        ordered_agents = plugin.get_l3_agents_ordered_by_num_routers(
            context, [candidate['id'] for candidate in candidates])
        num_agents = self._get_num_of_agents_for_ha(len(ordered_agents))
        # Order is kept in each az
        group_by_az = collections.defaultdict(list)
        for agent in ordered_agents:
            az = agent['availability_zone']
            group_by_az[az].append(agent)
        selected_agents = []
        # Round-robin across AZs, popping the least-loaded agent of each
        # zone in turn, so HA replicas spread over availability zones.
        # Terminates because num_agents <= len(ordered_agents).
        for az, agents in itertools.cycle(group_by_az.items()):
            if not agents:
                continue
            selected_agents.append(agents.pop(0))
            if len(selected_agents) >= num_agents:
                break
        return selected_agents
| |
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
# from scipy.misc import imread, imresize, imsave, fromimage, toimage
from utils import imread, imresize, imsave, fromimage, toimage
from scipy.optimize import fmin_l_bfgs_b
import numpy as np
import time
import argparse
import warnings
from keras.models import Model
from keras.layers import Input
from keras.layers.convolutional import Convolution2D, AveragePooling2D, MaxPooling2D
from keras import backend as K
from keras.utils.data_utils import get_file
from keras.utils.layer_utils import convert_all_kernels_in_model
"""
Neural Style Transfer with Keras 2.0.5
Based on:
https://github.com/keras-team/keras-io/blob/master/examples/generative/neural_style_transfer.py
Contains few improvements suggested in the paper Improving the Neural Algorithm of Artistic Style
(http://arxiv.org/abs/1605.04603).
-----------------------------------------------------------------------------------------------------------------------
"""
THEANO_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg16_weights_th_dim_ordering_th_kernels_notop.h5'
TF_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5'
TH_19_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg19_weights_th_dim_ordering_th_kernels_notop.h5'
TF_19_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5'
# Command-line interface.
# NOTE(review): the style positional is registered as 'syle_image_paths'
# (typo); args.syle_image_paths is read by that spelling later, so renaming
# it here alone would break the script.
parser = argparse.ArgumentParser(description='Neural style transfer with Keras.')
parser.add_argument('base_image_path', metavar='base', type=str,
                    help='Path to the image to transform.')
parser.add_argument('syle_image_paths', metavar='ref', nargs='+', type=str,
                    help='Path to the style reference image.')
parser.add_argument('result_prefix', metavar='res_prefix', type=str,
                    help='Prefix for the saved results.')
parser.add_argument("--style_masks", type=str, default=None, nargs='+',
                    help='Masks for style images')
parser.add_argument("--content_mask", type=str, default=None,
                    help='Masks for the content image')
parser.add_argument("--color_mask", type=str, default=None,
                    help='Mask for color preservation')
parser.add_argument("--image_size", dest="img_size", default=400, type=int,
                    help='Minimum image size')
parser.add_argument("--content_weight", dest="content_weight", default=0.025, type=float,
                    help="Weight of content")
parser.add_argument("--style_weight", dest="style_weight", nargs='+', default=[1], type=float,
                    help="Weight of style, can be multiple for multiple styles")
parser.add_argument("--style_scale", dest="style_scale", default=1.0, type=float,
                    help="Scale the weighing of the style")
parser.add_argument("--total_variation_weight", dest="tv_weight", default=8.5e-5, type=float,
                    help="Total Variation weight")
parser.add_argument("--num_iter", dest="num_iter", default=10, type=int,
                    help="Number of iterations")
parser.add_argument("--model", default="vgg16", type=str,
                    help="Choices are 'vgg16' and 'vgg19'")
parser.add_argument("--content_loss_type", default=0, type=int,
                    help='Can be one of 0, 1 or 2. Readme contains the required information of each mode.')
# Boolean-ish flags below are parsed as strings and interpreted by
# str_to_bool() so "True"/"False" on the command line behave intuitively.
parser.add_argument("--rescale_image", dest="rescale_image", default="False", type=str,
                    help="Rescale image after execution to original dimentions")
parser.add_argument("--rescale_method", dest="rescale_method", default="bilinear", type=str,
                    help="Rescale image algorithm")
parser.add_argument("--maintain_aspect_ratio", dest="maintain_aspect_ratio", default="True", type=str,
                    help="Maintain aspect ratio of loaded images")
parser.add_argument("--content_layer", dest="content_layer", default="conv5_2", type=str,
                    help="Content layer used for content loss.")
parser.add_argument("--init_image", dest="init_image", default="content", type=str,
                    help="Initial image used to generate the final image. Options are 'content', 'noise', or 'gray'")
parser.add_argument("--pool_type", dest="pool", default="max", type=str,
                    help='Pooling type. Can be "ave" for average pooling or "max" for max pooling')
parser.add_argument('--preserve_color', dest='color', default="False", type=str,
                    help='Preserve original color in image')
parser.add_argument('--min_improvement', default=0.0, type=float,
                    help='Defines minimum improvement required to continue script')
def str_to_bool(v):
    """Interpret common truthy strings ("true", "yes", "t", "1") as True."""
    truthy = {"true", "yes", "t", "1"}
    return v.lower() in truthy
''' Arguments '''

args = parser.parse_args()

base_image_path = args.base_image_path
style_reference_image_paths = args.syle_image_paths
result_prefix = args.result_prefix

style_image_paths = []
for style_image_path in style_reference_image_paths:
    style_image_paths.append(style_image_path)

style_masks_present = args.style_masks is not None
mask_paths = []

if style_masks_present:
    for mask_path in args.style_masks:
        mask_paths.append(mask_path)

if style_masks_present:
    # Bug fix: the failure message previously formatted
    # len(style_masks_present) — len() of a bool raises TypeError, masking
    # the real assertion. Report the number of mask paths instead.
    assert len(style_image_paths) == len(mask_paths), "Wrong number of style masks provided.\n" \
                                                      "Number of style images = %d, \n" \
                                                      "Number of style mask paths = %d." % \
                                                      (len(style_image_paths), len(mask_paths))

content_mask_present = args.content_mask is not None
content_mask_path = args.content_mask

color_mask_present = args.color_mask is not None

# String "booleans" from the CLI.
rescale_image = str_to_bool(args.rescale_image)
maintain_aspect_ratio = str_to_bool(args.maintain_aspect_ratio)
preserve_color = str_to_bool(args.color)

# these are the weights of the different loss components
content_weight = args.content_weight
total_variation_weight = args.tv_weight

style_weights = []

if len(style_image_paths) != len(args.style_weight):
    # Not one weight per style image: spread the total weight evenly.
    print("Mismatch in number of style images provided and number of style weights provided. \n"
          "Found %d style images and %d style weights. \n"
          "Equally distributing weights to all other styles." % (len(style_image_paths), len(args.style_weight)))

    weight_sum = sum(args.style_weight) * args.style_scale
    count = len(style_image_paths)

    for i in range(len(style_image_paths)):
        style_weights.append(weight_sum / count)
else:
    for style_weight in args.style_weight:
        style_weights.append(style_weight * args.style_scale)

# Decide pooling function
pooltype = str(args.pool).lower()
assert pooltype in ["ave", "max"], 'Pooling argument is wrong. Needs to be either "ave" or "max".'

pooltype = 1 if pooltype == "ave" else 0

read_mode = "gray" if args.init_image == "gray" else "color"

# dimensions of the generated picture.
img_width = img_height = 0
img_WIDTH = img_HEIGHT = 0
aspect_ratio = 0

assert args.content_loss_type in [0, 1, 2], "Content Loss Type must be one of 0, 1 or 2"
# util function to open, resize and format pictures into appropriate tensors
def preprocess_image(image_path, load_dims=False, read_mode="color"):
    """Load an image and turn it into a VGG-ready batch tensor.

    When load_dims is True, also records the original and working image
    dimensions in module-level globals for later rescaling.
    """
    global img_width, img_height, img_WIDTH, img_HEIGHT, aspect_ratio

    mode = "RGB" if read_mode == "color" else "L"
    img = imread(image_path, mode=mode)  # Prevents crashes due to PNG images (ARGB)

    if mode == "L":
        # Expand the 1 channel grayscale to 3 channel grayscale image
        temp = np.zeros(img.shape + (3,), dtype=np.uint8)
        temp[:, :, 0] = img
        temp[:, :, 1] = img.copy()
        temp[:, :, 2] = img.copy()
        img = temp

    if load_dims:
        img_WIDTH = img.shape[0]
        img_HEIGHT = img.shape[1]
        aspect_ratio = float(img_HEIGHT) / img_WIDTH

        img_width = args.img_size
        if maintain_aspect_ratio:
            img_height = int(img_width * aspect_ratio)
        else:
            img_height = args.img_size

    img = imresize(img, (img_width, img_height)).astype('float32')

    # RGB -> BGR
    img = img[:, :, ::-1]

    # Subtract the ImageNet channel means the VGG weights were trained with.
    img[:, :, 0] -= 103.939
    img[:, :, 1] -= 116.779
    img[:, :, 2] -= 123.68

    if K.image_data_format() == "channels_first":
        img = img.transpose((2, 0, 1)).astype('float32')

    # Add the batch dimension.
    img = np.expand_dims(img, axis=0)
    return img
# util function to convert a tensor into a valid image
def deprocess_image(x):
    """Undo preprocess_image: reshape, re-add the VGG means, BGR -> RGB."""
    channels_first = K.image_data_format() == "channels_first"
    if channels_first:
        x = x.reshape((3, img_width, img_height)).transpose((1, 2, 0))
    else:
        x = x.reshape((img_width, img_height, 3))

    # Restore the ImageNet channel means removed during preprocessing.
    x[:, :, 0] += 103.939
    x[:, :, 1] += 116.779
    x[:, :, 2] += 123.68

    x = x[:, :, ::-1]  # BGR -> RGB
    return np.clip(x, 0, 255).astype('uint8')
# util function to preserve image color
def original_color_transform(content, generated, mask=None):
    """Transplant the content image's chroma (CbCr) onto the generated image."""
    # Work in YCbCr so luminance (style) and color channels separate cleanly.
    generated = fromimage(toimage(generated, mode='RGB'), mode='YCbCr')

    if mask is None:
        generated[:, :, 1:] = content[:, :, 1:]
    else:
        rows, cols, _ = generated.shape
        for r in range(rows):
            for c in range(cols):
                if mask[r, c] == 1:
                    generated[r, c, 1:] = content[r, c, 1:]

    return fromimage(toimage(generated, mode='YCbCr'), mode='RGB')
def load_mask(mask_path, shape, return_mask_img=False):
    """Load a grayscale mask, binarize it, and broadcast it across channels."""
    if K.image_data_format() == "channels_first":
        _, channels, width, height = shape
    else:
        _, width, height, channels = shape

    mask = imread(mask_path, mode="L")  # Grayscale mask load
    mask = imresize(mask, (width, height)).astype('float32')

    # Binarize, then normalize to [0, 1].
    # NOTE(review): pixels exactly equal to 128 fall in neither bucket and
    # survive unchanged — kept as-is to preserve the original behavior.
    mask[mask <= 127] = 0
    mask[mask > 128] = 255
    peak = np.amax(mask)
    mask /= peak

    if return_mask_img:
        return mask

    # Replicate the 2D mask over every channel of the feature tensor.
    mask_tensor = np.empty(shape[1:])
    for ch in range(channels):
        if K.image_data_format() == "channels_first":
            mask_tensor[ch, :, :] = mask
        else:
            mask_tensor[:, :, ch] = mask
    return mask_tensor
def pooling_func(x):
    """Apply the pooling layer selected by --pool_type (1 = average)."""
    pool_cls = AveragePooling2D if pooltype == 1 else MaxPooling2D
    return pool_cls((2, 2), strides=(2, 2))(x)
# get tensor representations of our images
base_image = K.variable(preprocess_image(base_image_path, True, read_mode=read_mode))

style_reference_images = []
for style_path in style_image_paths:
    style_reference_images.append(K.variable(preprocess_image(style_path)))

# this will contain our generated image
if K.image_data_format() == "channels_first":
    combination_image = K.placeholder((1, 3, img_width, img_height))
else:
    combination_image = K.placeholder((1, img_width, img_height, 3))

# Batch layout: [content, style_1 .. style_n, combination] — the slicing
# done in the loss-assembly section relies on this ordering.
image_tensors = [base_image]
for style_image_tensor in style_reference_images:
    image_tensors.append(style_image_tensor)
image_tensors.append(combination_image)

nb_tensors = len(image_tensors)
nb_style_images = nb_tensors - 2  # Content and Output image not considered

# combine the various images into a single Keras tensor
input_tensor = K.concatenate(image_tensors, axis=0)

if K.image_data_format() == "channels_first":
    shape = (nb_tensors, 3, img_width, img_height)
else:
    shape = (nb_tensors, img_width, img_height, 3)

ip = Input(tensor=input_tensor, batch_shape=shape)
# build the VGG16 network with our 3 images as input
# (conv3_4 / conv4_4 / conv5_4 are added only for VGG19)
x = Convolution2D(64, (3, 3), activation='relu', name='conv1_1', padding='same')(ip)
x = Convolution2D(64, (3, 3), activation='relu', name='conv1_2', padding='same')(x)
x = pooling_func(x)

x = Convolution2D(128, (3, 3), activation='relu', name='conv2_1', padding='same')(x)
x = Convolution2D(128, (3, 3), activation='relu', name='conv2_2', padding='same')(x)
x = pooling_func(x)

x = Convolution2D(256, (3, 3), activation='relu', name='conv3_1', padding='same')(x)
x = Convolution2D(256, (3, 3), activation='relu', name='conv3_2', padding='same')(x)
x = Convolution2D(256, (3, 3), activation='relu', name='conv3_3', padding='same')(x)
if args.model == "vgg19":
    x = Convolution2D(256, (3, 3), activation='relu', name='conv3_4', padding='same')(x)
x = pooling_func(x)

x = Convolution2D(512, (3, 3), activation='relu', name='conv4_1', padding='same')(x)
x = Convolution2D(512, (3, 3), activation='relu', name='conv4_2', padding='same')(x)
x = Convolution2D(512, (3, 3), activation='relu', name='conv4_3', padding='same')(x)
if args.model == "vgg19":
    x = Convolution2D(512, (3, 3), activation='relu', name='conv4_4', padding='same')(x)
x = pooling_func(x)

x = Convolution2D(512, (3, 3), activation='relu', name='conv5_1', padding='same')(x)
x = Convolution2D(512, (3, 3), activation='relu', name='conv5_2', padding='same')(x)
x = Convolution2D(512, (3, 3), activation='relu', name='conv5_3', padding='same')(x)
if args.model == "vgg19":
    x = Convolution2D(512, (3, 3), activation='relu', name='conv5_4', padding='same')(x)
x = pooling_func(x)

model = Model(ip, x)

# Download the matching no-top ImageNet weights for the chosen architecture
# and backend dim ordering.
if K.image_data_format() == "channels_first":
    if args.model == "vgg19":
        weights = get_file('vgg19_weights_th_dim_ordering_th_kernels_notop.h5', TH_19_WEIGHTS_PATH_NO_TOP, cache_subdir='models')
    else:
        weights = get_file('vgg16_weights_th_dim_ordering_th_kernels_notop.h5', THEANO_WEIGHTS_PATH_NO_TOP, cache_subdir='models')
else:
    if args.model == "vgg19":
        weights = get_file('vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5', TF_19_WEIGHTS_PATH_NO_TOP, cache_subdir='models')
    else:
        weights = get_file('vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5', TF_WEIGHTS_PATH_NO_TOP, cache_subdir='models')

model.load_weights(weights)

if K.backend() == 'tensorflow' and K.image_data_format() == "channels_first":
    warnings.warn('You are using the TensorFlow backend, yet you '
                  'are using the Theano '
                  'image dimension ordering convention '
                  '(`image_dim_ordering="th"`). '
                  'For best performance, set '
                  '`image_dim_ordering="tf"` in '
                  'your Keras config '
                  'at ~/.keras/keras.json.')
    convert_all_kernels_in_model(model)

print('Model loaded.')

# get the symbolic outputs of each "key" layer (we gave them unique names).
outputs_dict = dict([(layer.name, layer.output) for layer in model.layers])
shape_dict = dict([(layer.name, layer.output_shape) for layer in model.layers])
# compute the neural style loss
# first we need to define 4 util functions

# Improvement 1
# the gram matrix of an image tensor (feature-wise outer product) using shifted activations
def gram_matrix(x):
    """Gram matrix of a 3D feature tensor with activations shifted by -1.

    The -1 shift follows "Improving the Neural Algorithm of Artistic
    Style" (arXiv:1605.04603).
    """
    assert K.ndim(x) == 3
    if K.image_data_format() == "channels_first":
        features = K.batch_flatten(x)
    else:
        # Move channels first before flattening so rows index feature maps.
        features = K.batch_flatten(K.permute_dimensions(x, (2, 0, 1)))
    gram = K.dot(features - 1, K.transpose(features - 1))
    return gram
# the "style loss" is designed to maintain
# the style of the reference image in the generated image.
# It is based on the gram matrices (which capture style) of
# feature maps from the style reference image
# and from the generated image
def style_loss(style, combination, mask_path=None, nb_channels=None):
assert K.ndim(style) == 3
assert K.ndim(combination) == 3
if content_mask_path is not None:
content_mask = K.variable(load_mask(content_mask_path, nb_channels))
combination = combination * K.stop_gradient(content_mask)
del content_mask
if mask_path is not None:
style_mask = K.variable(load_mask(mask_path, nb_channels))
style = style * K.stop_gradient(style_mask)
if content_mask_path is None:
combination = combination * K.stop_gradient(style_mask)
del style_mask
S = gram_matrix(style)
C = gram_matrix(combination)
channels = 3
size = img_width * img_height
return K.sum(K.square(S - C)) / (4. * (channels ** 2) * (size ** 2))
# an auxiliary loss function
# designed to maintain the "content" of the
# base image in the generated image
def content_loss(base, combination):
    """Scaled squared error between base and combination feature maps."""
    channel_dim = 0 if K.image_data_format() == "channels_first" else -1

    try:
        channels = K.int_shape(base)[channel_dim]
    except TypeError:
        # Static shape unavailable; fall back to the symbolic shape.
        channels = K.shape(base)[channel_dim]
    size = img_width * img_height

    # --content_loss_type picks the normalization scheme (see README).
    if args.content_loss_type == 1:
        scale = 1. / (2. * (channels ** 0.5) * (size ** 0.5))
    elif args.content_loss_type == 2:
        scale = 1. / (channels * size)
    else:
        scale = 1.

    return scale * K.sum(K.square(combination - base))
# the 3rd loss function, total variation loss,
# designed to keep the generated image locally coherent
def total_variation_loss(x):
    """Total-variation regularizer (exponent 1.25) over neighboring pixels."""
    assert K.ndim(x) == 4
    if K.image_data_format() == "channels_first":
        # Squared differences with the pixel below (a) and to the right (b).
        a = K.square(x[:, :, :img_width - 1, :img_height - 1] - x[:, :, 1:, :img_height - 1])
        b = K.square(x[:, :, :img_width - 1, :img_height - 1] - x[:, :, :img_width - 1, 1:])
    else:
        a = K.square(x[:, :img_width - 1, :img_height - 1, :] - x[:, 1:, :img_height - 1, :])
        b = K.square(x[:, :img_width - 1, :img_height - 1, :] - x[:, :img_width - 1, 1:, :])
    return K.sum(K.pow(a + b, 1.25))
if args.model == "vgg19":
feature_layers = ['conv1_1', 'conv1_2', 'conv2_1', 'conv2_2', 'conv3_1', 'conv3_2', 'conv3_3', 'conv3_4',
'conv4_1', 'conv4_2', 'conv4_3', 'conv4_4', 'conv5_1', 'conv5_2', 'conv5_3', 'conv5_4']
else:
feature_layers = ['conv1_1', 'conv1_2', 'conv2_1', 'conv2_2', 'conv3_1', 'conv3_2', 'conv3_3',
'conv4_1', 'conv4_2', 'conv4_3', 'conv5_1', 'conv5_2', 'conv5_3']
# combine these loss functions into a single scalar
loss = K.variable(0.)
layer_features = outputs_dict[args.content_layer]
base_image_features = layer_features[0, :, :, :]
combination_features = layer_features[nb_tensors - 1, :, :, :]
loss = loss + content_weight * content_loss(base_image_features,
combination_features)
# Improvement 2
# Use all layers for style feature extraction and reconstruction
nb_layers = len(feature_layers) - 1
style_masks = []
if style_masks_present:
style_masks = mask_paths # If mask present, pass dictionary of masks to style loss
else:
style_masks = [None for _ in range(nb_style_images)] # If masks not present, pass None to the style loss
channel_index = 1 if K.image_data_format() == "channels_first" else -1
# Improvement 3 : Chained Inference without blurring
for i in range(len(feature_layers) - 1):
layer_features = outputs_dict[feature_layers[i]]
shape = shape_dict[feature_layers[i]]
combination_features = layer_features[nb_tensors - 1, :, :, :]
style_reference_features = layer_features[1:nb_tensors - 1, :, :, :]
sl1 = []
for j in range(nb_style_images):
sl1.append(style_loss(style_reference_features[j], combination_features, style_masks[j], shape))
layer_features = outputs_dict[feature_layers[i + 1]]
shape = shape_dict[feature_layers[i + 1]]
combination_features = layer_features[nb_tensors - 1, :, :, :]
style_reference_features = layer_features[1:nb_tensors - 1, :, :, :]
sl2 = []
for j in range(nb_style_images):
sl2.append(style_loss(style_reference_features[j], combination_features, style_masks[j], shape))
for j in range(nb_style_images):
sl = sl1[j] - sl2[j]
# Improvement 4
# Geometric weighted scaling of style loss
loss = loss + (style_weights[j] / (2 ** (nb_layers - (i + 1)))) * sl
loss = loss + total_variation_weight * total_variation_loss(combination_image)
# get the gradients of the generated image wrt the loss
grads = K.gradients(loss, combination_image)
outputs = [loss]
if type(grads) in {list, tuple}:
outputs += grads
else:
outputs.append(grads)
f_outputs = K.function([combination_image], outputs)
def eval_loss_and_grads(x):
    """Run one forward/backward pass; return (loss, flattened float64 grads)."""
    if K.image_data_format() == "channels_first":
        tensor_shape = (1, 3, img_width, img_height)
    else:
        tensor_shape = (1, img_width, img_height, 3)

    outs = f_outputs([x.reshape(tensor_shape)])
    loss_value = outs[0]

    grads = outs[1:]
    if len(grads) == 1:
        grad_values = grads[0].flatten().astype('float64')
    else:
        grad_values = np.array(grads).flatten().astype('float64')
    return loss_value, grad_values
# this Evaluator class makes it possible
# to compute loss and gradients in one pass
# while retrieving them via two separate functions,
# "loss" and "grads". This is done because scipy.optimize
# requires separate functions for loss and gradients,
# but computing them separately would be inefficient.
class Evaluator(object):
    """Caches loss and gradients from one combined evaluation.

    loss() must be called first; grads() then returns (and clears) the
    gradients cached by that call.
    """

    def __init__(self):
        self.loss_value = None
        # Bug fix: this was misspelled 'grads_values', which left the
        # 'grad_values' attribute read by grads() undefined until the
        # first loss() call.
        self.grad_values = None

    def loss(self, x):
        assert self.loss_value is None
        loss_value, grad_values = eval_loss_and_grads(x)
        self.loss_value = loss_value
        self.grad_values = grad_values
        return self.loss_value

    def grads(self, x):
        assert self.loss_value is not None
        grad_values = np.copy(self.grad_values)
        # Clear the cache so the next loss() call recomputes both.
        self.loss_value = None
        self.grad_values = None
        return grad_values
evaluator = Evaluator()

# run scipy-based optimization (L-BFGS) over the pixels of the generated image
# so as to minimize the neural style loss
if "content" in args.init_image or "gray" in args.init_image:
    x = preprocess_image(base_image_path, True, read_mode=read_mode)
elif "noise" in args.init_image:
    x = np.random.uniform(0, 255, (1, img_width, img_height, 3)) - 128.

    if K.image_data_format() == "channels_first":
        x = x.transpose((0, 3, 1, 2))
else:
    # Any other value is treated as a path to a custom starting image.
    print("Using initial image : ", args.init_image)
    x = preprocess_image(args.init_image, read_mode=read_mode)

# We require original image if we are to preserve color in YCbCr mode
if preserve_color:
    content = imread(base_image_path, mode="YCbCr")
    content = imresize(content, (img_width, img_height))

    if color_mask_present:
        if K.image_data_format() == "channels_first":
            color_mask_shape = (None, None, img_width, img_height)
        else:
            color_mask_shape = (None, img_width, img_height, None)

        color_mask = load_mask(args.color_mask, color_mask_shape, return_mask_img=True)
    else:
        color_mask = None
else:
    color_mask = None
for i in range(num_iter):
print("Starting iteration %d of %d" % ((i + 1), num_iter))
start_time = time.time()
x, min_val, info = fmin_l_bfgs_b(evaluator.loss, x.flatten(), fprime=evaluator.grads, maxfun=20)
if prev_min_val == -1:
prev_min_val = min_val
improvement = (prev_min_val - min_val) / prev_min_val * 100
print("Current loss value:", min_val, " Improvement : %0.3f" % improvement, "%")
prev_min_val = min_val
# save current generated image
img = deprocess_image(x.copy())
if preserve_color and content is not None:
img = original_color_transform(content, img, mask=color_mask)
if not rescale_image:
img_ht = int(img_width * aspect_ratio)
print("Rescaling Image to (%d, %d)" % (img_width, img_ht))
img = imresize(img, (img_width, img_ht), interp=args.rescale_method)
if rescale_image:
print("Rescaling Image to (%d, %d)" % (img_WIDTH, img_HEIGHT))
img = imresize(img, (img_WIDTH, img_HEIGHT), interp=args.rescale_method)
fname = result_prefix + "_at_iteration_%d.png" % (i + 1)
imsave(fname, img)
end_time = time.time()
print("Image saved as", fname)
print("Iteration %d completed in %ds" % (i + 1, end_time - start_time))
if improvement_threshold is not 0.0:
if improvement < improvement_threshold and improvement is not 0.0:
print("Improvement (%f) is less than improvement threshold (%f). Early stopping script." %
(improvement, improvement_threshold))
exit()
| |
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 OpenStack Foundation
# Copyright 2012 Nebula, Inc.
# Copyright (c) 2012 X.commerce, a business unit of eBay Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
import logging
from django.conf import settings
from django.utils.translation import pgettext_lazy
from django.utils.translation import ugettext_lazy as _
from cinderclient import exceptions as cinder_exception
from cinderclient.v2.contrib import list_extensions as cinder_list_extensions
from horizon import exceptions
from horizon.utils.memoized import memoized # noqa
from openstack_dashboard.api import base
from openstack_dashboard.api import nova
# Module-level logger for this API wrapper.
LOG = logging.getLogger(__name__)
# API static values
VOLUME_STATE_AVAILABLE = "available"
# Quota-class name used when updating the cloud-wide default quotas.
DEFAULT_QUOTA_NAME = 'default'
# Available consumer choices associated with QOS Specs
CONSUMER_CHOICES = (
    ('back-end', _('back-end')),
    ('front-end', _('front-end')),
    ('both', pgettext_lazy('Both of front-end and back-end', u'both')),
)
# Register the supported cinder API versions; v2 is preferred and is
# only registered when the v2 client library is importable.
VERSIONS = base.APIVersionManager("volume", preferred_version=2)
try:
    from cinderclient.v2 import client as cinder_client_v2
    VERSIONS.load_supported_version(2, {"client": cinder_client_v2,
                                        "version": 2})
except ImportError:
    pass
class BaseCinderAPIResourceWrapper(base.APIResourceWrapper):
    """Wrapper smoothing over v1/v2 attribute-name differences."""

    @property
    def name(self):
        # v2 exposes 'name', v1 'display_name'; fall back to the id
        # for volumes that were never given a name.
        for attr in ('name', 'display_name'):
            value = getattr(self._apiresource, attr, None)
            if value:
                return value
        return getattr(self._apiresource, 'id', None)

    @property
    def description(self):
        # v2 exposes 'description', v1 'display_description'.
        value = getattr(self._apiresource, 'description', None)
        if value:
            return value
        return getattr(self._apiresource, 'display_description', None)
class Volume(BaseCinderAPIResourceWrapper):
    """A cinder volume, exposing the attributes listed in ``_attrs``."""

    _attrs = ['id', 'name', 'description', 'size', 'status', 'created_at',
              'volume_type', 'availability_zone', 'imageRef', 'bootable',
              'snapshot_id', 'source_volid', 'attachments', 'tenant_name',
              'os-vol-host-attr:host', 'os-vol-tenant-attr:tenant_id',
              'metadata', 'volume_image_metadata', 'encrypted', 'transfer']

    @property
    def is_bootable(self):
        # The API reports bootable as the string 'true'/'false'.
        return self.bootable == 'true'
class VolumeSnapshot(BaseCinderAPIResourceWrapper):
    """A snapshot of a cinder volume."""

    _attrs = ['id', 'name', 'description', 'size', 'status',
              'created_at', 'volume_id',
              'os-extended-snapshot-attributes:project_id']
class VolumeType(BaseCinderAPIResourceWrapper):
    """A cinder volume type.

    NOTE(review): the 'os-extended-snapshot-attributes:project_id' entry
    looks copy-pasted from VolumeSnapshot — confirm volume types actually
    expose it.
    """

    _attrs = ['id', 'name', 'extra_specs', 'created_at', 'encryption',
              'associated_qos_spec', 'description',
              'os-extended-snapshot-attributes:project_id']
class VolumeBackup(BaseCinderAPIResourceWrapper):
    """A cinder backup, optionally carrying its source Volume object."""

    _attrs = ['id', 'name', 'description', 'container', 'size', 'status',
              'created_at', 'volume_id', 'availability_zone']
    # Default when no source volume has been attached via the setter.
    _volume = None

    @property
    def volume(self):
        return self._volume

    @volume.setter
    def volume(self, value):
        self._volume = value
class QosSpecs(BaseCinderAPIResourceWrapper):
    """A cinder QOS spec resource."""

    _attrs = ['id', 'name', 'consumer', 'specs']
class VolTypeExtraSpec(object):
    """One extra-spec entry of a volume type.

    ``id`` mirrors ``key`` so consumers expecting an ``id`` attribute
    (e.g. table rows) can use this object directly.
    """

    def __init__(self, type_id, key, val):
        self.type_id = type_id
        self.id = self.key = key
        self.value = val
class QosSpec(object):
    """A single key/value pair of a QOS spec, tagged with the spec id."""

    def __init__(self, id, key, val):
        self.id, self.key, self.value = id, key, val
class VolumeTransfer(base.APIResourceWrapper):
    """A volume ownership transfer."""

    _attrs = ['id', 'name', 'created_at', 'volume_id', 'auth_key']
class VolumePool(base.APIResourceWrapper):
    """A storage pool reported by the cinder scheduler."""

    _attrs = ['name', 'pool_name', 'total_capacity_gb', 'free_capacity_gb',
              'allocated_capacity_gb', 'QoS_support', 'reserved_percentage',
              'volume_backend_name', 'vendor_name', 'driver_version',
              'storage_protocol', 'extra_specs']
@memoized
def cinderclient(request):
    """Build (and memoize) a cinderclient authenticated as the request user.

    The endpoint URL is resolved from the Keystone service catalog; for
    the v2 API the catalog entry is expected under the 'volumev2'
    service type.  Raises ServiceCatalogException when no volume service
    is configured at all.
    """
    api_version = VERSIONS.get_active_version()
    insecure = getattr(settings, 'OPENSTACK_SSL_NO_VERIFY', False)
    cacert = getattr(settings, 'OPENSTACK_SSL_CACERT', None)
    cinder_url = ""
    try:
        # The cinder client assumes that the v2 endpoint type will be
        # 'volumev2'.
        if api_version['version'] == 2:
            try:
                cinder_url = base.url_for(request, 'volumev2')
            except exceptions.ServiceCatalogException:
                LOG.warning("Cinder v2 requested but no 'volumev2' service "
                            "type available in Keystone catalog.")
    except exceptions.ServiceCatalogException:
        LOG.debug('no volume service configured.')
        raise
    # Reuse the token the dashboard user already holds; no re-auth.
    c = api_version['client'].Client(request.user.username,
                                     request.user.token.id,
                                     project_id=request.user.tenant_id,
                                     auth_url=cinder_url,
                                     insecure=insecure,
                                     cacert=cacert,
                                     http_log_debug=settings.DEBUG)
    c.client.auth_token = request.user.token.id
    c.client.management_url = cinder_url
    return c
def _replace_v2_parameters(data):
    """Rename v2 field names to their v1 equivalents when on the v1 API.

    Mutates and returns *data*: 'name'/'description' become
    'display_name'/'display_description' for API versions below 2.
    """
    if VERSIONS.active < 2:
        data['display_name'] = data.pop('name')
        data['display_description'] = data.pop('description')
    return data
def version_get():
    """Return the active cinder API version number (e.g. 2)."""
    api_version = VERSIONS.get_active_version()
    return api_version['version']
def volume_list(request, search_opts=None):
    """List volumes as Volume wrappers.

    As an admin, pass the special search option {'all_tenants': 1} to
    see every tenant's volumes.  Each Volume carries its pending
    transfer (or None) on ``.transfer``.
    """
    c_client = cinderclient(request)
    if c_client is None:
        return []
    # Index pending transfers by volume id for O(1) lookup below.
    transfers = {}
    for transfer in transfer_list(request, search_opts=search_opts):
        transfers[transfer.volume_id] = transfer
    result = []
    for item in c_client.volumes.list(search_opts=search_opts):
        item.transfer = transfers.get(item.id)
        result.append(Volume(item))
    return result
def volume_get(request, volume_id):
    """Return one Volume with attachment instance names resolved and,
    when the volume awaits transfer, the matching transfer attached.
    """
    volume_data = cinderclient(request).volumes.get(volume_id)
    for attachment in volume_data.attachments:
        if "server_id" in attachment:
            instance = nova.server_get(request, attachment['server_id'])
            attachment['instance_name'] = instance.name
        else:
            # Nova volume can occasionally send back error'd attachments
            # that lack a server_id property; to work around that we'll
            # give the attached instance a generic name.
            attachment['instance_name'] = _("Unknown instance")
    volume_data.transfer = None
    if volume_data.status == 'awaiting-transfer':
        for transfer in transfer_list(request):
            if transfer.volume_id == volume_id:
                volume_data.transfer = transfer
                break
    return Volume(volume_data)
def volume_create(request, size, name, description, volume_type,
                  snapshot_id=None, metadata=None, image_id=None,
                  availability_zone=None, source_volid=None):
    """Create a volume of `size`, optionally from a snapshot, image or
    another volume, and return it wrapped as Volume.
    """
    data = {'name': name,
            'description': description,
            'volume_type': volume_type,
            'snapshot_id': snapshot_id,
            'metadata': metadata,
            'imageRef': image_id,
            'availability_zone': availability_zone,
            'source_volid': source_volid}
    # Translate field names when running against the v1 API.
    data = _replace_v2_parameters(data)
    volume = cinderclient(request).volumes.create(size, **data)
    return Volume(volume)
def volume_extend(request, volume_id, new_size):
    """Grow volume `volume_id` to `new_size`."""
    return cinderclient(request).volumes.extend(volume_id, new_size)
def volume_delete(request, volume_id):
    """Delete volume `volume_id`."""
    return cinderclient(request).volumes.delete(volume_id)
def volume_retype(request, volume_id, new_type, migration_policy):
    """Change the volume's type, with the given migration policy."""
    return cinderclient(request).volumes.retype(volume_id,
                                                new_type,
                                                migration_policy)
def volume_set_bootable(request, volume_id, bootable):
    """Set/unset the bootable flag of the volume."""
    return cinderclient(request).volumes.set_bootable(volume_id,
                                                      bootable)
def volume_update(request, volume_id, name, description):
    """Update a volume's name and description."""
    vol_data = {'name': name,
                'description': description}
    # Translate field names when running against the v1 API.
    vol_data = _replace_v2_parameters(vol_data)
    return cinderclient(request).volumes.update(volume_id,
                                                **vol_data)
def volume_reset_state(request, volume_id, state):
    """Administratively force the volume into the given state."""
    return cinderclient(request).volumes.reset_state(volume_id, state)
def volume_upload_to_image(request, volume_id, force, image_name,
                           container_format, disk_format):
    """Upload the volume's contents to Glance as a new image."""
    return cinderclient(request).volumes.upload_to_image(volume_id,
                                                         force,
                                                         image_name,
                                                         container_format,
                                                         disk_format)
def volume_get_encryption_metadata(request, volume_id):
    """Return the encryption metadata of the volume."""
    return cinderclient(request).volumes.get_encryption_metadata(volume_id)
def volume_migrate(request, volume_id, host, force_host_copy=False):
    """Migrate the volume to another backend host."""
    return cinderclient(request).volumes.migrate_volume(volume_id,
                                                        host,
                                                        force_host_copy)
def volume_snapshot_get(request, snapshot_id):
    """Return one snapshot wrapped as VolumeSnapshot."""
    snapshot = cinderclient(request).volume_snapshots.get(snapshot_id)
    return VolumeSnapshot(snapshot)
def volume_snapshot_list(request, search_opts=None):
    """List snapshots as VolumeSnapshot wrappers; [] without a client."""
    c_client = cinderclient(request)
    if c_client is None:
        return []
    return [VolumeSnapshot(s) for s in c_client.volume_snapshots.list(
        search_opts=search_opts)]
def volume_snapshot_create(request, volume_id, name,
                           description=None, force=False):
    """Snapshot the volume; `force` allows snapshotting in-use volumes."""
    data = {'name': name,
            'description': description,
            'force': force}
    # Translate field names when running against the v1 API.
    data = _replace_v2_parameters(data)
    return VolumeSnapshot(cinderclient(request).volume_snapshots.create(
        volume_id, **data))
def volume_snapshot_delete(request, snapshot_id):
    """Delete snapshot `snapshot_id`."""
    return cinderclient(request).volume_snapshots.delete(snapshot_id)
def volume_snapshot_update(request, snapshot_id, name, description):
    """Update a snapshot's name and description."""
    snapshot_data = {'name': name,
                     'description': description}
    # Translate field names when running against the v1 API.
    snapshot_data = _replace_v2_parameters(snapshot_data)
    return cinderclient(request).volume_snapshots.update(snapshot_id,
                                                         **snapshot_data)
def volume_snapshot_reset_state(request, snapshot_id, state):
    """Administratively force the snapshot into the given state."""
    return cinderclient(request).volume_snapshots.reset_state(
        snapshot_id, state)
@memoized
def volume_backup_supported(request):
    """This method will determine if cinder supports backup.

    Driven purely by the OPENSTACK_CINDER_FEATURES['enable_backup']
    setting; defaults to False.
    """
    # TODO(lcheng) Cinder does not expose the information if cinder
    # backup is configured yet. This is a workaround until that
    # capability is available.
    # https://bugs.launchpad.net/cinder/+bug/1334856
    cinder_config = getattr(settings, 'OPENSTACK_CINDER_FEATURES', {})
    return cinder_config.get('enable_backup', False)
def volume_backup_get(request, backup_id):
    """Return one backup wrapped as VolumeBackup."""
    backup = cinderclient(request).backups.get(backup_id)
    return VolumeBackup(backup)
def volume_backup_list(request):
    """List backups as VolumeBackup wrappers; [] without a client."""
    c_client = cinderclient(request)
    if c_client is None:
        return []
    return [VolumeBackup(b) for b in c_client.backups.list()]
def volume_backup_create(request,
                         volume_id,
                         container_name,
                         name,
                         description):
    """Back up the volume into the given container."""
    backup = cinderclient(request).backups.create(
        volume_id,
        container=container_name,
        name=name,
        description=description)
    return VolumeBackup(backup)
def volume_backup_delete(request, backup_id):
    """Delete backup `backup_id`."""
    return cinderclient(request).backups.delete(backup_id)
def volume_backup_restore(request, backup_id, volume_id):
    """Restore a backup onto the given volume."""
    return cinderclient(request).restores.restore(backup_id=backup_id,
                                                  volume_id=volume_id)
def volume_manage(request,
                  host,
                  identifier,
                  id_type,
                  name,
                  description,
                  volume_type,
                  availability_zone,
                  metadata,
                  bootable):
    """Bring an existing backend volume under cinder management.

    `identifier`/`id_type` form the backend reference, e.g.
    {'source-name': <identifier>}.
    """
    source = {id_type: identifier}
    return cinderclient(request).volumes.manage(
        host=host,
        ref=source,
        name=name,
        description=description,
        volume_type=volume_type,
        availability_zone=availability_zone,
        metadata=metadata,
        bootable=bootable)
def volume_unmanage(request, volume_id):
    """Remove the volume from cinder management without deleting data."""
    return cinderclient(request).volumes.unmanage(volume=volume_id)
def tenant_quota_get(request, tenant_id):
    """Return the tenant's volume quotas; an empty QuotaSet without a client."""
    c_client = cinderclient(request)
    if c_client is None:
        return base.QuotaSet()
    return base.QuotaSet(c_client.quotas.get(tenant_id))
def tenant_quota_update(request, tenant_id, **kwargs):
    """Update the tenant's volume quotas with the given values."""
    return cinderclient(request).quotas.update(tenant_id, **kwargs)
def default_quota_get(request, tenant_id):
    """Return the default volume quotas as a QuotaSet."""
    return base.QuotaSet(cinderclient(request).quotas.defaults(tenant_id))
def volume_type_list_with_qos_associations(request):
    """Return all volume types, each annotated with the name of its
    associated QOS spec (empty string when unassociated).
    """
    vol_types = volume_type_list(request)
    vol_types_dict = {}
    # initialize and build a dictionary for lookup access below
    for vol_type in vol_types:
        vol_type.associated_qos_spec = ""
        vol_types_dict[vol_type.id] = vol_type
    # get all currently defined qos specs
    qos_specs = qos_spec_list(request)
    for qos_spec in qos_specs:
        # get all volume types this qos spec is associated with
        assoc_vol_types = qos_spec_get_associations(request, qos_spec.id)
        for assoc_vol_type in assoc_vol_types:
            # BUGFIX: an association may reference a type that is no
            # longer in the list (e.g. deleted concurrently); use .get()
            # and skip instead of raising KeyError.
            vol_type = vol_types_dict.get(assoc_vol_type.id)
            if vol_type is None:
                continue
            # update volume type to hold this association info
            vol_type.associated_qos_spec = qos_spec.name
    return vol_types
def volume_type_get_with_qos_association(request, volume_type_id):
    """Fetch one volume type, annotated with its associated QOS spec
    name (empty string when unassociated).
    """
    vol_type = volume_type_get(request, volume_type_id)
    vol_type.associated_qos_spec = ""
    # Walk every qos spec until one is found that is associated with
    # this volume type; at most one association is reported.
    for qos_spec in qos_spec_list(request):
        associations = qos_spec_get_associations(request, qos_spec.id)
        if any(assoc.id == vol_type.id for assoc in associations):
            vol_type.associated_qos_spec = qos_spec.name
            return vol_type
    return vol_type
def default_quota_update(request, **kwargs):
    """Update the cloud-wide default volume quotas."""
    cinderclient(request).quota_classes.update(DEFAULT_QUOTA_NAME, **kwargs)
def volume_type_list(request):
    """List all volume types (raw cinderclient objects)."""
    return cinderclient(request).volume_types.list()
def volume_type_create(request, name, description=None):
    """Create a new volume type."""
    return cinderclient(request).volume_types.create(name, description)
def volume_type_update(request, volume_type_id, name=None, description=None):
    """Update a volume type's name and/or description."""
    return cinderclient(request).volume_types.update(volume_type_id,
                                                     name,
                                                     description)
@memoized
def volume_type_default(request):
    """Return (and memoize) the default volume type."""
    return cinderclient(request).volume_types.default()
def volume_type_delete(request, volume_type_id):
    """Delete a volume type.

    Raises horizon BadRequest with a friendly message when the type is
    still in use by volumes.
    """
    try:
        return cinderclient(request).volume_types.delete(volume_type_id)
    except cinder_exception.BadRequest:
        raise exceptions.BadRequest(_(
            "This volume type is used by one or more volumes."))
def volume_type_get(request, volume_type_id):
    """Return one volume type (raw cinderclient object)."""
    return cinderclient(request).volume_types.get(volume_type_id)
def volume_encryption_type_create(request, volume_type_id, data):
    """Attach an encryption type (specs dict) to the volume type."""
    return cinderclient(request).volume_encryption_types.create(volume_type_id,
                                                                specs=data)
def volume_encryption_type_delete(request, volume_type_id):
    """Remove the encryption type from the volume type."""
    return cinderclient(request).volume_encryption_types.delete(volume_type_id)
def volume_encryption_type_get(request, volume_type_id):
    """Return the encryption type of the volume type."""
    return cinderclient(request).volume_encryption_types.get(volume_type_id)
def volume_encryption_type_list(request):
    """List all volume encryption types."""
    return cinderclient(request).volume_encryption_types.list()
def volume_type_extra_get(request, type_id, raw=False):
    """Return the type's extra specs.

    With raw=True the plain key->value mapping is returned; otherwise a
    list of VolTypeExtraSpec objects.
    """
    vol_type = volume_type_get(request, type_id)
    extras = vol_type.get_keys()
    if raw:
        return extras
    return [VolTypeExtraSpec(type_id, key, value) for
            key, value in extras.items()]
def volume_type_extra_set(request, type_id, metadata):
    """Set extra specs on the type; no-op (returns None) for empty input."""
    vol_type = volume_type_get(request, type_id)
    if not metadata:
        return None
    return vol_type.set_keys(metadata)
def volume_type_extra_delete(request, type_id, keys):
    """Remove an extra spec from the type.

    NOTE(review): despite the plural name, `keys` is wrapped in a
    single-element list here, so callers pass ONE key name — confirm
    before passing a list.
    """
    vol_type = volume_type_get(request, type_id)
    return vol_type.unset_keys([keys])
def qos_spec_list(request):
    """List all QOS specs (raw cinderclient objects)."""
    return cinderclient(request).qos_specs.list()
def qos_spec_get(request, qos_spec_id):
    """Return one QOS spec (raw cinderclient object)."""
    return cinderclient(request).qos_specs.get(qos_spec_id)
def qos_spec_delete(request, qos_spec_id):
    """Force-delete a QOS spec, even if it is still associated."""
    return cinderclient(request).qos_specs.delete(qos_spec_id, force=True)
def qos_spec_create(request, name, specs):
    """Create a QOS spec with the given key/value specs."""
    return cinderclient(request).qos_specs.create(name, specs)
def qos_spec_get_keys(request, qos_spec_id, raw=False):
    """Return the spec's key/value pairs as QosSpec objects.

    NOTE(review): with raw=True the whole spec object (not just its
    keys) is returned — different from volume_type_extra_get's raw mode.
    """
    spec = qos_spec_get(request, qos_spec_id)
    qos_specs = spec.specs
    if raw:
        return spec
    return [QosSpec(qos_spec_id, key, value) for
            key, value in qos_specs.items()]
def qos_spec_set_keys(request, qos_spec_id, specs):
    """Add/update key/value pairs on the QOS spec."""
    return cinderclient(request).qos_specs.set_keys(qos_spec_id, specs)
def qos_spec_unset_keys(request, qos_spec_id, specs):
    """Remove the given keys from the QOS spec."""
    return cinderclient(request).qos_specs.unset_keys(qos_spec_id, specs)
def qos_spec_associate(request, qos_specs, vol_type_id):
    """Associate the QOS spec with a volume type."""
    return cinderclient(request).qos_specs.associate(qos_specs, vol_type_id)
def qos_spec_disassociate(request, qos_specs, vol_type_id):
    """Remove the association between the QOS spec and a volume type."""
    return cinderclient(request).qos_specs.disassociate(qos_specs, vol_type_id)
def qos_spec_get_associations(request, qos_spec_id):
    """List the volume types associated with the QOS spec."""
    return cinderclient(request).qos_specs.get_associations(qos_spec_id)
def qos_specs_list(request):
    """List all QOS specs wrapped as QosSpecs objects."""
    return [QosSpecs(s) for s in qos_spec_list(request)]
@memoized
def tenant_absolute_limits(request):
    """Return the tenant's absolute volume limits as {name: value}.

    Negative values are normalized: stale 'total...Used' counters are
    clamped to 0, and -1 (unlimited) becomes float('inf').
    """
    limits = cinderclient(request).limits.get().absolute
    limits_dict = {}
    for limit in limits:
        if limit.value < 0:
            # In some cases, the absolute limits data in Cinder can get
            # out of sync causing the total.*Used limits to return
            # negative values instead of 0. For such cases, replace
            # negative values with 0.
            if limit.name.startswith('total') and limit.name.endswith('Used'):
                limits_dict[limit.name] = 0
            else:
                # -1 is used to represent unlimited quotas
                limits_dict[limit.name] = float("inf")
        else:
            limits_dict[limit.name] = limit.value
    return limits_dict
def service_list(request):
    """List cinder services."""
    return cinderclient(request).services.list()
def availability_zone_list(request, detailed=False):
    """List volume availability zones."""
    return cinderclient(request).availability_zones.list(detailed=detailed)
@memoized
def list_extensions(request):
    """Return (and memoize) all cinder API extensions."""
    return cinder_list_extensions.ListExtManager(cinderclient(request))\
        .show_all()
@memoized
def extension_supported(request, extension_name):
    """Return True when Cinder advertises the named API extension."""
    return any(extension.name == extension_name
               for extension in list_extensions(request))
def transfer_list(request, detailed=True, search_opts=None):
    """To see all volumes transfers as an admin pass in a special
    search option: {'all_tenants': 1}
    """
    c_client = cinderclient(request)
    # Guard against a missing client, consistent with volume_list(),
    # volume_snapshot_list() and pool_list().
    if c_client is None:
        return []
    return [VolumeTransfer(v) for v in c_client.transfers.list(
        detailed=detailed, search_opts=search_opts)]
def transfer_get(request, transfer_id):
    """Return one transfer wrapped as VolumeTransfer."""
    transfer_data = cinderclient(request).transfers.get(transfer_id)
    return VolumeTransfer(transfer_data)
def transfer_create(request, transfer_id, name):
    """Start a volume transfer.

    NOTE(review): the first argument is forwarded as the volume to
    transfer, so `transfer_id` is really a volume id — the parameter
    name looks misleading; confirm with callers.
    """
    volume = cinderclient(request).transfers.create(transfer_id, name)
    return VolumeTransfer(volume)
def transfer_accept(request, transfer_id, auth_key):
    """Accept a transfer using its auth key."""
    return cinderclient(request).transfers.accept(transfer_id, auth_key)
def transfer_delete(request, transfer_id):
    """Cancel/delete a transfer."""
    return cinderclient(request).transfers.delete(transfer_id)
def pool_list(request, detailed=False):
    """List scheduler storage pools as VolumePool wrappers."""
    c_client = cinderclient(request)
    if c_client is None:
        return []
    return [VolumePool(v) for v in c_client.pools.list(
        detailed=detailed)]
| |
from typing import (
List,
Optional,
Set,
Union,
)
from pathlib import Path
from os import environ
from natsort import natsorted
import toml
from ninja_syntax import Writer, PathArg, Rule, Build, Command
def dir_files(path: Union[Path, str], suffix=None, filter_start=False) -> Set[Path]:
    """Return the files directly inside *path*.

    suffix: keep only files with this suffix (str) or one of these
        suffixes (collection); None keeps all. Must start with ".".
    filter_start: drop files whose stem starts with "_" or ".".
    """
    assert not suffix or suffix.startswith(".")
    base = Path(path)

    def keep(entry: Path) -> bool:
        if not entry.is_file():
            return False
        if suffix is not None:
            if isinstance(suffix, str):
                if entry.suffix != suffix:
                    return False
            elif entry.suffix not in suffix:
                return False
        if filter_start and entry.stem.startswith(("_", ".")):
            return False
        return True

    return {entry for entry in base.iterdir() if keep(entry)}
def sort_paths(paths) -> List[Path]:
    """Return *paths* as a list in natural (human) sort order."""
    return natsorted(list(paths))  # type: ignore
def subdirs(path: Union[Path, str]) -> Set[Path]:
    """Return the directories directly inside *path*."""
    found = set()
    for entry in Path(path).iterdir():
        if entry.is_dir():
            found.add(entry)
    return found
OUTPUT_FILE = environ.get("NG_OUTPUT", "build.ninja")
KERNEL_FEATURES = environ.get("KERNEL_FEATURES", "")
ROOT_DIR = Path(".")
# Directories created up-front by the setup_build_fs rule.
CREATE_DIRS = [
    ROOT_DIR / "build",
    ROOT_DIR / "build/boot",
    ROOT_DIR / "build/modules",
    ROOT_DIR / "build/asm_routines",
]
# Constant-codegen inputs: all non-underscored .toml files, plus options.
CCGEN_FILES = sort_paths(dir_files(ROOT_DIR / "build_config/constants/", ".toml", True))
CCGEN_OPTFILE = ROOT_DIR / "build_config/constants/_options.toml"
CCGEN_OUTDIR = ROOT_DIR / "build"
TARGET = "d7os"
# A disk of 0x5000 0x200-byte sectors, 10 * 2**20 bytes, ten mebibytes
DISK_SIZE_SECTORS = 0x5000
DISK_SIZE_BYTES = 0x200 * DISK_SIZE_SECTORS
IMAGE_MAX_SIZE_SECTORS = 0x500
# BUGFIX: this was computed from DISK_SIZE_SECTORS, which silently made
# the image size limit equal to the whole disk instead of 0x500 sectors.
IMAGE_MAX_SIZE_BYTES = 0x200 * IMAGE_MAX_SIZE_SECTORS
assert DISK_SIZE_BYTES % 0x10000 == 0
class files:
    """Well-known config and artifact paths shared by several rules."""
    # Config files
    KERNEL_LINKER_SCRIPT = ROOT_DIR / "build_config/linker.ld"
    # Output/intermediate artifacts
    BOOT0 = ROOT_DIR / "build/boot/stage0.bin"
    BOOT1 = ROOT_DIR / "build/boot/stage1.bin"
    BOOT2 = ROOT_DIR / "build/boot/stage2.bin"
    KERNEL_ORIGINAL = ROOT_DIR / "build/kernel_original.elf"
    KERNEL_STRIPPED = ROOT_DIR / "build/kernel_stripped.elf"
    DISK_IMG = ROOT_DIR / "build/disk.img"
def cmd_nasm(format: str, output: Path, inputs: List[Path]) -> Command:
    """Assemble *inputs* with nasm into *output* using the given
    object format (e.g. 'bin', 'elf64').
    """
    return Rule(
        "nasm",
        description="Nasm",
        command=f"nasm -f {format} -o {output} " + " ".join(map(str, inputs)),
        outputs=[output],
    ).extend_to_command(inputs=inputs)
def cmd_stripped_copy(original: Path, stripped: Path) -> Command:
    """Copy *original* to *stripped* and strip symbols from the copy."""
    return Rule(
        "strip",
        description=f"strip {original.stem} to {stripped}",
        command=[
            f"cp {original} {stripped}",
            f"strip {stripped}",
        ],
        outputs=[stripped],
    ).extend_to_command(inputs=[original])
def cmd_linker(linker_script: Path, output: Path, inputs: List[Path]) -> Command:
    """Link *inputs* into *output* with ld using *linker_script*.

    Unreferenced sections are garbage-collected and the max page size is
    pinned to 0x1000.
    """
    return Rule(
        "linker",
        description="Invoke linker",
        command=[
            f"ld -z max-page-size=0x1000 --gc-sections -T {linker_script} -o {output} "
            + " ".join(map(str, inputs)),
        ],
        outputs=[output],
    ).extend_to_command(inputs=inputs)
def cmd_cargo_bin(pdir: Path, binary: str) -> Command:
    """Build a native (host) binary named *binary* with cargo in *pdir*.

    Produces ``pdir/target/debug/<binary>``.
    """
    # NOTE: a Cargo.toml parse used to live here, but the crate name it
    # read was never used — the binary name alone determines all paths.
    return Rule(
        "cargo_bin",
        outputs=[pdir / "target" / "debug" / binary],
        description="Invoke cargo to build a native binary.",
        command=[
            f"cd {pdir.resolve(strict=True)}",
            f"cargo build --color=always --bin {binary}",
            "cd -",
        ],
        depfile=pdir / f"target/debug/{binary}.d",
    ).extend_to_command()
def cmd_cargo_cross(
    pdir: Path, target_json: Path, features: Optional[str] = None
) -> Command:
    """Cross-compile the crate in *pdir* for the target in *target_json*.

    features: optional cargo feature list passed via --features.
    Produces the release staticlib ``lib<crate>.a`` for the crate.
    """
    # The crate name determines the output archive and depfile names.
    with (pdir / "Cargo.toml").open("r") as f:
        cratename = toml.load(f)["package"]["name"]
    return Rule(
        "cargo_cross",
        description=f"Invoke cargo in cross-compiler mode for {pdir.stem or 'kernel'}",
        outputs=[pdir / "target" / target_json.stem / "release" / f"lib{cratename}.a"],
        command=[
            f"cd {pdir.resolve(strict=True)}",
            # BUGFIX: the env var was misspelled "RUSFLAGS", so the
            # debug-info/opt-level flags were never seen by rustc.
            "RUSTFLAGS='-g -C opt-level=2'"
            + f" cargo build --target {target_json.resolve(strict=True)} --color=always --release"
            + " -Z build-std=core,alloc -Z build-std-features=compiler-builtins-mem"
            + (f" --features {features}" if features else ""),
            "cd -",
        ],
        depfile=pdir / f"target/{target_json.stem}/release/lib{cratename}.d",
    ).extend_to_command()
# Read initrd file list
with open(ROOT_DIR / "build_config/initrd_files.txt") as f:
    initrd_files = {}
    for line in f:
        # Strip '#' comments and surrounding whitespace.
        line = line.split("#", 1)[0].strip()
        if line:
            # Each non-empty line must be exactly one 'name=path' pair;
            # values are later used as ninja inputs (host file paths).
            assert line.count("=") == 1, f"Invalid line {line !r}"
            l, r = line.split("=")
            initrd_files[l] = r
# Make sure the build directory exists before writing anything into it.
(ROOT_DIR / "build").mkdir(exist_ok=True)
# Emit the complete ninja build file describing the whole OS build.
with open(OUTPUT_FILE, "w") as f:
    w = Writer(f)
    # Settings
    w.variable("ninja_required_version", "1.10")
    w.variable("builddir", "build/")
    # Build steps
    w.command(
        Rule(
            "setup_build_fs",
            description="Build filesystem structure",
            command=[f"mkdir -p {d}" for d in CREATE_DIRS],
            outputs=CREATE_DIRS,
        ).extend_to_command(inputs=[])
    )
    # Assemble the disk image: zero-fill, then dd in the boot stages and
    # the stripped kernel, then append the initrd image right after the
    # kernel (offset computed from the kernel size at build time).
    w.command(
        Rule(
            "create_disk",
            description="Create a disk image and write bootloader and kernel",
            command=[
                f"dd if=/dev/zero of={files.DISK_IMG} bs={0x10000}"
                + f" count={DISK_SIZE_BYTES // 0x10000} conv=notrunc",
                f"dd if={files.BOOT0} of={files.DISK_IMG} conv=notrunc bs=512 seek=0 count=1",
                f"dd if={files.BOOT1} of={files.DISK_IMG} conv=notrunc bs=512 seek=1 count=1",
                f"dd if={files.BOOT2} of={files.DISK_IMG} conv=notrunc bs=512 seek=2 count=4",
                f"dd if={files.KERNEL_STRIPPED} of={files.DISK_IMG} conv=notrunc bs=512 seek=6",
                " ".join(
                    [
                        str(ROOT_DIR / "libs/d7initrd/target/debug/mkimg"),
                        str(files.DISK_IMG),
                        "$$(python -c 'import os; print(os.stat(\""
                        + str(files.KERNEL_STRIPPED)
                        + '").st_size // 0x200 + 8)'
                        "')",
                    ]
                    + [f"{l.strip()}={r.strip()}" for l, r in initrd_files.items()]
                ),
            ],
            outputs=[files.DISK_IMG],
        ).extend_to_command(
            inputs=[
                files.BOOT0,
                files.BOOT1,
                files.BOOT2,
                files.KERNEL_STRIPPED,
                ROOT_DIR / "build/process_common.bin",
                ROOT_DIR / "libs/d7initrd/target/debug/mkimg",
            ]
            + [Path(v) for v in initrd_files.values()]
        )
    )
    # Generate shared constants for both asm and Rust sources.
    w.command(
        Rule(
            "constcodegen",
            description="Run constcodegen",
            command=f"constcodegen --options {CCGEN_OPTFILE} -t {CCGEN_OUTDIR} "
            + " ".join(map(str, CCGEN_FILES)),
            outputs=[CCGEN_OUTDIR / "constants.rs", CCGEN_OUTDIR / "constants.asm"],
        ).extend_to_command(
            inputs=[CCGEN_OPTFILE] + CCGEN_FILES,
        )
    )
    # Bootloader stages 0 and 1 are flat binaries assembled with nasm.
    w.command(
        cmd_nasm(
            format="bin",
            output=ROOT_DIR / "build/boot/stage0.bin",
            inputs=[ROOT_DIR / "src/boot/stage0.asm"],
        ).add_input(ROOT_DIR / "build/constants.asm")
    )
    w.command(
        cmd_nasm(
            format="bin",
            output=ROOT_DIR / "build/boot/stage1.bin",
            inputs=[ROOT_DIR / "src/boot/stage1.asm"],
        ).add_input(ROOT_DIR / "build/constants.asm")
    )
    # Stage 2 is Rust (d7boot) plus an asm entry stub, linked to an elf
    # and then flattened to a binary with elf2bin.
    w.command(
        cmd_nasm(
            format="elf64",
            output=ROOT_DIR / "build/boot/entry.elf",
            inputs=[ROOT_DIR / "libs/d7boot/src/entry.asm"],
        ).add_input(ROOT_DIR / "build/constants.asm")
    )
    w.command(
        cmd_cargo_cross(
            pdir=ROOT_DIR / "libs/d7boot/",
            target_json=ROOT_DIR / "d7os.json",
        )
    )
    w.command(
        cmd_linker(
            linker_script=ROOT_DIR / "libs/d7boot/linker.ld",
            output=ROOT_DIR / "build/boot/stage2.elf",
            inputs=[
                ROOT_DIR / "build/boot/entry.elf",
                ROOT_DIR / "libs/d7boot/target/d7os/release/libd7boot.a",
            ],
        )
    )
    w.command(
        Rule(
            name="elf2bin_bootstage2",
            command=" ".join(
                map(
                    str,
                    [
                        ROOT_DIR / "libs/elf2bin/target/debug/elf2bin",
                        ROOT_DIR / "build/boot/stage2.elf",
                        ROOT_DIR / "build/boot/stage2.bin",
                    ],
                )
            ),
            outputs=[ROOT_DIR / "build/boot/stage2.bin"],
        ).extend_to_command(
            inputs=[
                ROOT_DIR / "build/boot/stage2.elf",
                ROOT_DIR / "libs/elf2bin/target/debug/elf2bin",
            ]
        )
    )
    # Kernel entry point and misc flat-binary asm blobs.
    w.command(
        cmd_nasm(
            format="elf64",
            output=ROOT_DIR / "build/kernel_entry.o",
            inputs=[ROOT_DIR / "src/entry.asm"],
        )
        .add_input(ROOT_DIR / "build/constants.asm")
        .add_input(ROOT_DIR / "build/constants.rs")
    )
    w.command(
        cmd_nasm(
            format="bin",
            output=ROOT_DIR / "build/smp_ap_startup.bin",
            inputs=[ROOT_DIR / "src/asm_misc/smp_ap_startup.asm"],
        ).add_input(ROOT_DIR / "build/constants.asm")
    )
    w.command(
        cmd_nasm(
            format="bin",
            output=ROOT_DIR / "build/process_common.bin",
            inputs=[ROOT_DIR / "src/asm_misc/process_common.asm"],
        ).add_input(ROOT_DIR / "build/constants.asm")
    )
    # Each .asm routine becomes its own object file linked into the kernel.
    asm_routines = []
    for path in dir_files(ROOT_DIR / "src/asm_routines", ".asm"):
        w.command(
            cmd_nasm(
                format="elf64",
                output=ROOT_DIR / "build/asm_routines" / (path.stem + ".o"),
                inputs=[path],
            ).add_input(ROOT_DIR / "build/constants.asm")
        )
        asm_routines.append(ROOT_DIR / "build/asm_routines" / (path.stem + ".o"))
    w.command(
        cmd_linker(
            linker_script=files.KERNEL_LINKER_SCRIPT,
            output=files.KERNEL_ORIGINAL,
            inputs=[
                ROOT_DIR / "build/kernel_entry.o",
                ROOT_DIR / "target/d7os/release/libd7os.a",
            ]
            + asm_routines,
        )
    )
    # Kernel
    w.command(
        cmd_cargo_cross(
            pdir=ROOT_DIR,
            target_json=ROOT_DIR / "d7os.json",
            features=KERNEL_FEATURES,
        )
        .add_input(ROOT_DIR / "build/constants.rs")
        .add_input(ROOT_DIR / "build/smp_ap_startup.bin")
    )
    w.command(
        cmd_stripped_copy(
            files.KERNEL_ORIGINAL,
            files.KERNEL_STRIPPED,
        )
    )
    # Utility binaries
    for (pdir, binary) in [
        (ROOT_DIR / "libs/d7initrd/", "mkimg"),
        (ROOT_DIR / "libs/elf2bin/", "elf2bin"),
    ]:
        w.command(cmd_cargo_bin(pdir, binary))
    # Modules
    for path in subdirs(ROOT_DIR / "modules/"):
        with (path / "Cargo.toml").open("r") as f:
            cratename = toml.load(f)["package"]["name"]
        w.comment(f"Module {cratename} at {path}")
        w.command(
            cmd_cargo_cross(pdir=path, target_json=ROOT_DIR / "libs/d7abi/d7abi.json")
        )
        w.command(
            cmd_linker(
                linker_script=ROOT_DIR / "libs/d7abi/linker.ld",
                inputs=[path / f"target/d7abi/release/lib{cratename}.a"],
                output=ROOT_DIR / "build/modules/" / (path.name + "_original.elf"),
            )
        )
        w.command(
            cmd_stripped_copy(
                original=ROOT_DIR / "build/modules" / (path.name + "_original.elf"),
                stripped=ROOT_DIR / "build/modules" / (path.name + ".elf"),
            )
        )
    # Final sanity check: verify the built image fits the size limit.
    w.command(
        Rule(
            name="check_ok",
            description="Check that the resulting image is valid",
            command=[f"python3 build_config/validate_build.py {IMAGE_MAX_SIZE_BYTES}"],
            outputs=[Path("pseudo-imgsize")],  # Pseudo path, not created
        ).extend_to_command(inputs=[files.KERNEL_STRIPPED])
    )
    w.default(["pseudo-imgsize", "build/disk.img"])
| |
import sys, os, time, traceback, types
import wx
import Histogram
import random
from manta import *
from csafe import *
from csafe_scene import setup
from wxManta import opj
colors = [] # (pos, r, g, b, a)
class TransferF(wx.Object):
    """An RGBA transfer function: a sorted list of (pos, r, g, b, a)
    control points in [0, 1], mirrored into a manta RGBAColorMap.
    """

    def __init__(self, parent, colorsn, id, title="untitled", cmap=None):
        """parent must provide UpdateColorMap(transferF).

        When *cmap* is given it overrides *colorsn* as the source of
        control points; otherwise a colormap is built from *colorsn*.
        """
        self.parent = parent
        self.colors = colorsn
        self.id = id
        if cmap is not None:
            # Rebuild the control points from the existing colormap slices.
            self.colors = []
            num = cmap.GetNumSlices()
            for i in range(num):
                slice = cmap.GetSlice(i)
                c = slice.color.color
                a = float(slice.color.a)
                p = float(slice.value)
                c1 = float(SWIGIFYGetColorValue(c, 0))
                c2 = float(SWIGIFYGetColorValue(c, 1))
                c3 = float(SWIGIFYGetColorValue(c, 2))
                self.colors.append((p, c1, c2, c3, a))
        empty = False
        if len(self.colors) < 1:
            # Guarantee at least a black-to-white ramp.
            empty = True
            self.colors.append((0.0, 0, 0, 0, 1))
            self.colors.append((1.0, 1, 1, 1, 1))
        if cmap is None:
            if not empty:
                slices = manta_new(vector_ColorSlice())
                for entry in colorsn:
                    slices.push_back(manta_new(ColorSlice(
                        float(entry[0]),
                        manta_new(RGBAColor(float(entry[1]), float(entry[2]),
                                            float(entry[3]), float(entry[4]))))))
                cmap = manta_new(RGBAColorMap(slices))
            else:
                cmap = manta_new(RGBAColorMap(1))
        self.colors.sort()
        self.label = title
        self.cmap = cmap

    def Clone(self, t):
        """Copy the control points of transfer function *t* into self."""
        if t == self:
            return
        self.colors = list(t.colors)
        self.colors.sort()
        self.UpdateColorMap()

    def UpdateColorMap(self):
        """Push the current control points out through the parent."""
        self.parent.UpdateColorMap(self)

    def GetLabel(self):
        """Return this transfer function's title."""
        return self.label

    def MoveColor(self, index, pos):
        """Move control point *index* to position *pos* (no re-sort)."""
        old = self.colors[index]
        self.colors[index] = (pos, old[1], old[2], old[3], old[4])

    def AddColor(self, color, pos):
        """Append an RGB or RGBA color at *pos*; alpha defaults to 1.0."""
        if len(color) == 3:
            self.colors.append((pos, color[0], color[1], color[2], 1.0))
        elif len(color) == 4:
            self.colors.append((pos, color[0], color[1], color[2], color[3]))
        else:
            # BUGFIX: was a bare undefined name ("blowuphorribly") that
            # crashed with NameError; raise a meaningful error instead.
            raise ValueError("color must have 3 or 4 components")

    def SetColor(self, index, color):
        """Replace the color of control point *index*, keeping its position."""
        pos = self.colors[index][0]
        if len(color) == 3:
            c = (pos, color[0], color[1], color[2], 1.0)
        elif len(color) == 4:
            c = (pos, color[0], color[1], color[2], color[3])
        else:
            # BUGFIX: see AddColor.
            raise ValueError("color must have 3 or 4 components")
        self.colors[index] = c

    def GetColor(self, pos):  # color at position pos, in range [0,1]
        """Return the (r, g, b, a) linearly interpolated at *pos*."""
        colors = sorted(self.colors)
        if len(colors) < 1:
            return (0, 0, 0, 1)
        # index1: last point at or left of pos; index2: first at or right.
        index1 = 0
        index2 = 0
        for i in range(len(colors)):
            if colors[i][0] <= pos:
                index1 = i
            if colors[i][0] >= pos and index2 == 0:
                index2 = i
        # Clamp when pos lies outside the control-point range.
        if pos < colors[0][0]:
            index2 = 0
        if colors[len(colors) - 1][0] < pos:
            index2 = len(colors) - 1
        pos1 = colors[index1][0]
        pos2 = colors[index2][0]
        amt1 = amt2 = 0.5
        length = pos2 - pos1
        if length > 0.0:
            amt1 = 1.0 - (pos - pos1) / length
            amt2 = 1.0 - (pos2 - pos) / length
        if index1 == index2:
            amt1 = amt2 = 0.5
        return (colors[index1][1] * amt1 + colors[index2][1] * amt2,
                colors[index1][2] * amt1 + colors[index2][2] * amt2,
                colors[index1][3] * amt1 + colors[index2][3] * amt2,
                colors[index1][4] * amt1 + colors[index2][4] * amt2)

    def GetColorAtIndex(self, index):  # get the non interpolated color value
        """Return the raw (r, g, b, a) of control point *index*."""
        colord = self.colors[index]
        return (colord[1], colord[2], colord[3], colord[4])

    def GetColorRGB(self, pos):
        """Return the alpha-weighted 0-255 RGB at *pos*."""
        color = self.GetColor(pos)
        return (color[0] * color[3] * 255.0,
                color[1] * color[3] * 255.0,
                color[2] * color[3] * 255.0)

    def RemoveColor(self, index):
        """Delete control point *index*."""
        self.colors.pop(index)
class TransferFPanel(wx.Panel):
    def __init__(self, parent, width, height, transferF, scene, updateFunction=None):
        """Create the editor panel for *transferF*.

        width/height give the drawing area; paddingW/H add a border.
        updateFunction is stored for later use (presumably invoked when
        the transfer function changes — confirm in Update()/callers).
        """
        path = setup.csafe_scene_path
        self.scene = scene
        self.backgroundIMG = wx.Image(opj(path+'images/bckgrnd.png'), wx.BITMAP_TYPE_PNG).ConvertToBitmap()
        self.mouse_value = 0.0
        self.paddingW = 20.0
        self.paddingH = 20.0
        self.transferF = transferF
        self.width = width
        self.height = height
        self.parentC = parent
        self.zoomMin = 0.0  # zoom into min value, [0,1]
        self.zoomMax = 1.0
        self.zoomDMin = 0.0  # data value of zoomMin/max
        self.zoomDMax = 1.0
        self.absoluteDMin = 0.0  # min/max data values
        self.absoluteDMax = 1.0
        self.updateFunction = updateFunction
        panel = wx.Panel.__init__(self, parent, -1, (0, 0), (width + self.paddingW, height + self.paddingH) )
        wx.EVT_PAINT(self, self.OnPaint)
        self.histogramGroup = None
        self.Update()
        # Mouse/keyboard interaction bindings.
        self.Bind(wx.EVT_LEFT_DOWN, self.OnClick)
        self.Bind(wx.EVT_LEFT_UP, self.OnLeftUp)
        self.Bind(wx.EVT_RIGHT_UP, self.OnRightClick)
        self.Bind(wx.EVT_MOTION, self.OnMotion)
        self.Bind(wx.EVT_KEY_DOWN, self.OnKeyDown)
        # self.SetFocus()
        # Pixel width of a control-point handle (used for hit testing).
        self.colorSelectorWidth = 10.0
        self.selected = None
        self.dSelected = None
        self.SetBackgroundColour(wx.Colour(90,90,90))
        self.Bind( wx.EVT_MOUSEWHEEL, self.OnMouseWheel )
def OnMouseWheel(self, evt):
pos = float(evt.GetPosition().x - self.paddingW/2.0)/float(self.width)
delta = evt.GetWheelDelta()
rot = evt.GetWheelRotation()/delta
# zoom in if rot > 0, out if ro < 0
zoomRange = self.zoomMax - self.zoomMin
zoomAmount = 0.75 # the smaller the more zooming
if (rot > 0):
# self.zooms.append( (self.zoomDMin, self.zoomDMax))
self.zoomMin = (pos-zoomAmount*pos)*zoomRange + self.zoomMin
self.zoomMax = (pos+zoomAmount*(1.0-pos))*zoomRange + self.zoomMin
if (rot < 0):
self.zoomMin -= (pos-zoomAmount*pos)*zoomRange
self.zoomMax += (pos+zoomAmount*(1.0-pos))*zoomRange
if (self.zoomMin < 0.0):
self.zoomMin = 0.0
if (self.zoomMax > 1.0):
self.zoomMax = 1.0
self.Update()
def SetUpdateFunction(self, function):
self.updateFunction = function
def SetTransferF(self, transferF):
self.transferF = transferF
self.Update()
def OnKeyDown(self, evt):
if evt.GetKeyCode() == wx.WXK_DELETE and self.dSelected != None:
self.transferF.RemoveColor(self.dSelected)
self.Update()
self.UpdateHistogram()
def OnRightClick(self, evt):
zoomRange = self.zoomMax-self.zoomMin
x = evt.GetPosition().x - self.paddingW/2.0
y = evt.GetPosition().y - self.paddingH/2.0
pos = float(x)/float(self.width)*zoomRange + self.zoomMin
# did they click on a color picker?
clicked = False
index = -1
for i in range(len(self.transferF.colors)):
colorx = (self.transferF.colors[i][0] - self.zoomMin)/zoomRange
if abs(x - colorx*self.width) < self.colorSelectorWidth/2.0:
clicked = True
index = i
if clicked:
c = self.transferF.colors[index]
color = wx.ColourData()
color.SetColour(wx.Colour(c[1]*255, c[2]*255, c[3]*255))
dlg = wx.ColourDialog(self, color)
dlg.GetColourData().SetChooseFull(True)
if dlg.ShowModal() == wx.ID_OK:
originalC = self.transferF.GetColorAtIndex(index)
data = dlg.GetColourData()
c = data.GetColour().Get()
color = []
color.append(c[0])
color.append(c[1])
color.append(c[2])
color.append(originalC[3])
color[0] /= 255.0
color[1] /= 255.0
color[2] /= 255.0
self.transferF.SetColor(index, color)
self.Update()
self.Refresh()
dlg.Destroy()
else: # add a new one
dlg = wx.ColourDialog(self)
dlg.GetColourData().SetChooseFull(True)
data = dlg.GetColourData()
if dlg.ShowModal() == wx.ID_OK:
data = dlg.GetColourData()
c = data.GetColour().Get()
color = []
color.append(c[0])
color.append(c[1])
color.append(c[2])
color.append(1.0 - float(y)/float(self.height))
color[0] /= 255.0
color[1] /= 255.0
color[2] /= 255.0
self.transferF.AddColor( color, pos)
self.Update()
dlg.Destroy()
self.UpdateHistogram()
def SetHistogramGroup(self, histo):
if (histo.group == 0): # group 0 == Particles.
if self.scene.currentParticleHistogram != None:
self.scene.currentParticleHistogram.Deselect()
histo.Select()
else:
if self.scene.currentVolumeHistogram != None:
self.scene.currentVolumeHistogram.Deselect()
histo.Select()
self.histogramGroup = histo
def UpdateHistogram(self):
self.transferF.UpdateColorMap()
for i in range(len(self.scene.frame.histoGroups)):
histoGroup = self.scene.frame.histoGroups[i]
if histoGroup.transferF == self.transferF:
histoGroup.Update()
def OnClick(self, evt):
zoomRange = self.zoomMax-self.zoomMin
x = evt.GetPosition().x - self.paddingW/2.0
if self.selected == None:
index = -1
for i in range(len(self.transferF.colors)):
colorx = (self.transferF.colors[i][0] - self.zoomMin)/zoomRange
if abs(x - colorx*self.width) < self.colorSelectorWidth/2.0:
clicked = True
index = i
if index >= 0:
self.selected = index
self.dSelected = index
else:
self.dSelected = None
self.selected = None
self.Refresh()
def OnLeftUp(self, evt):
self.selected = None
self.UpdateHistogram()
def OnMotion(self, evt):
zoomRange = self.zoomMax-self.zoomMin
x = evt.GetPosition().x - self.paddingW/2.0
if (x < 0.0):
x = 0.0
if x > self.width:
x = self.width
zoomDRange = self.zoomDMax - self.zoomDMin
self.mouse_value = float(x)/float(self.width)*float(zoomDRange)+float(self.zoomDMin)
self.Refresh()
y = evt.GetPosition().y - self.paddingH/2.0
if y < 0.0:
y = 0.0
if y > self.height:
y = self.height
# Determine if this is a Particle historgram... and if so, don't allow modification of the
# opacity.
if self.histogramGroup != None and self.histogramGroup.group == 0:
y = 0
if self.selected != None:
pos = (float(x) / float(self.width))*zoomRange + self.zoomMin
self.transferF.MoveColor(self.selected, pos)
colord = self.transferF.GetColorAtIndex(self.selected)
a = float(self.height - y)/float(self.height)
if a > 1.0:
a = 1.0
elif a < 0.0:
a = 0.0
color = (colord[0], colord[1], colord[2], a)
self.transferF.SetColor(self.selected, color)
self.Update()
def Update(self):
if (self.histogramGroup != None):
histo = self.histogramGroup.histogram
self.absoluteDMin = histo.colorDMin
self.absoluteDMax = histo.colorDMax
absoluteRange = self.absoluteDMax - self.absoluteDMin
self.zoomDMin = self.absoluteDMin + self.zoomMin*absoluteRange
self.zoomDMax = self.absoluteDMin + self.zoomMax*absoluteRange
width = self.width - 2.0
height = self.height - 2.0
self.barWidth = 1.0
blx = 1.0 + self.barWidth/2.0 + self.paddingW/2.0 # bottom left x
bly = 0.0 + self.height + self.barWidth/2.0 - 2.0 + self.paddingH/2.0
self.lines = []
zoomRange = self.zoomMax - self.zoomMin
for i in range(0, int(width)):
color = self.transferF.GetColor( (float(i)/float(width))*zoomRange +self.zoomMin )
self.lines.append( (color, ( blx + i*self.barWidth, bly, blx + i*self.barWidth, (bly - height) ) ) )
self.parentC.Refresh()
self.Refresh()
if (self.updateFunction != None):
self.updateFunction()
def AddNewColor(self):
dlg = wx.ColourDialog(self)
dlg.GetColourData().SetChooseFull(True)
data = dlg.GetColourData()
if dlg.ShowModal() == wx.ID_OK:
data = dlg.GetColourData()
c = data.GetColour().Get()
color = []
color.append(c[0])
color.append(c[1])
color.append(c[2])
color[0] /= 255.0
color[1] /= 255.0
color[2] /= 255.0
self.transferF.AddColor( color, random.random())
self.Update()
dlg.Destroy()
self.UpdateHistogram()
def DeleteSelected(self):
if self.dSelected != None:
self.transferF.RemoveColor(self.dSelected)
self.Update()
self.UpdateHistogram()
def ChooseColorSelected(self):
if self.dSelected != None:
c = self.transferF.colors[self.dSelected]
color = wx.ColourData()
color.SetColour(wx.Colour(c[1]*255, c[2]*255, c[3]*255))
dlg = wx.ColourDialog(self)
dlg.GetColourData().SetChooseFull(True)
if dlg.ShowModal() == wx.ID_OK:
data = dlg.GetColourData()
c = data.GetColour().Get()
color = []
color.append(c[0])
color.append(c[1])
color.append(c[2])
color[0] /= 255.0
color[1] /= 255.0
color[2] /= 255.0
self.transferF.SetColor(self.dSelected, color)
self.Update()
self.Refresh()
dlg.Destroy()
self.UpdateHistogram()
def OnPaint(self, evt=None):
pdc = wx.PaintDC(self)
try:
dc = wx.GCDC(pdc)
except:
dc = pdc
lines = self.lines
colors = self.transferF.colors
dc.SetPen(wx.Pen('BLACK', 2) )
left = self.paddingW/2.0
top = self.paddingH/2.0
originalSize = self.GetClientSize()
dc.SetClippingRegion(self.paddingW/2.0,self.paddingH/2.0 ,self.width-1, self.height)
imgWidth = self.backgroundIMG.GetWidth()
imgHeight = self.backgroundIMG.GetHeight()
for x in range(int(self.paddingW/2) + 1, int(self.width ), int(imgWidth)):
for y in range(int(self.paddingH/2.0) +2, int(self.height) , int(imgHeight)):
dc.DrawBitmap(self.backgroundIMG,x,y, True)
# IS THIS THE BROKEN LINE? ->
# dc.SetClippingRegion(0-self.paddingW/2.0,0-self.paddingH/2.0,originalSize.width, originalSize.height)
try:
dc = wx.GCDC(pdc)
except:
dc = pdc
numPixels = self.width
for i in range(0, len(lines)):
a = lines[i][0][3]*255.0
r = lines[i][0][0]*255.0
g = lines[i][0][1]*255.0
b = lines[i][0][2]*255.0
try:
penColor = wx.Colour(r,g,b,a)
except:
penColor = wx.Colour(r,g,b)
dc.SetPen(wx.Pen(penColor, self.barWidth + 1) )
dc.DrawLine( lines[i][1][0], lines[i][1][1], lines[i][1][2], lines[i][1][3])
zoomRange = self.zoomMax-self.zoomMin
for i in range(len(colors)):
colorx = (colors[i][0]-self.zoomMin)/zoomRange
if (colorx < 0.0 or colorx > 1.0):
continue
dc.SetPen(wx.Pen('GRAY', 2) )
color = self.transferF.GetColor( colors[i][0] )
try:
dc.SetBrush(wx.Brush( color, wx.SOLID ) )
except:
dc.SetBrush(wx.Brush( (color[0], color[1], color[2]), wx.SOLID))
if i == self.dSelected:
dc.SetBrush(wx.Brush( (128,128,128), wx.SOLID ) )
recWidth = self.colorSelectorWidth
x = colorx*self.width - recWidth/2.0 + left
y = self.height - recWidth + top
dc.DrawRectangle(x,y - color[3]*self.height + recWidth/2.0, recWidth, recWidth)
dc.SetTextForeground(wx.Colour(0,0,0))
self.SetForegroundColour(wx.Colour(255,0,0))
dc.SetPen(wx.Pen(wx.Colour(255,255,255), 1))
dc.SetBrush(wx.Brush(wx.Colour(255,255,255)))
fontSize = 10
if self.scene.biggify == True:
fontSize = 12
dc.SetFont(wx.Font(fontSize, wx.FONTFAMILY_DEFAULT, wx.NORMAL, wx.FONTWEIGHT_BOLD))
string = ""
if (self.scene.labels == True):
string += "zoom min: "
string += str("%1.2g" % self.zoomDMin)
extent = dc.GetTextExtent(string)
#xpos = extent[0]/2.0 - self.paddingW/2.0
xpos = self.paddingW/2.0
diff = xpos - self.paddingW/2.0
#if diff < 0:
# xpos -= diff
ypos = self.height - 2
dc.DrawTextPoint(string, (xpos,ypos))
string = ""
if (self.scene.labels == True):
string += "zoom max: "
string += str("%1.2g" % self.zoomDMax)
extent = dc.GetTextExtent(string)
xpos = self.width - extent[0]/2.0 + self.paddingW/2.0
diff = xpos + extent[0] - (self.width + self.paddingW/2.0)
if (diff > 0 ):
xpos -= diff
dc.DrawTextPoint(string, (xpos,ypos))
# draw min/max text
ypos += extent[1]
if self.scene.biggify:
ypos -= 2
string = ""
if (self.scene.labels == True):
string += "color min: "
string += str("%1.2g" %self.absoluteDMin)
extent = dc.GetTextExtent(string)
#xpos = extent[0]/2.0 + self.paddingW/2.0
xpos = self.paddingW/2.0
dc.DrawTextPoint(string, (xpos,ypos))
string = ""
if (self.scene.labels == True):
string += "color max: "
string += str("%1.2g" % self.absoluteDMax)
extent = dc.GetTextExtent(string)
xpos = self.width - extent[0]/2.0 + self.paddingW/2.0
diff = xpos + extent[0] - (self.width + self.paddingW/2.0)
if (diff > 0 ):
xpos -= diff
dc.DrawTextPoint(string, (xpos,ypos))
ypos = self.paddingH/2.0 - extent[1]
string = ""
if (self.scene.labels == True):
string += "mouse value: "
string += str("%1.4g" % self.mouse_value)
extent = dc.GetTextExtent(string)
xpos = self.paddingW/2.0 + self.width/2.0 - extent[0]/2.0
if self.scene.biggify:
ypos += 2
dc.DrawTextPoint(string, (xpos,ypos))
class TransferFGroup(wx.Panel):
    """Boxed UI group: a TransferFPanel plus add/delete/modify/preset buttons."""
    def __init__(self, parent, width, height, transferF, title, scene):
        """Lay out the transfer-function editor panel and its toolbar buttons."""
        path = setup.csafe_scene_path
        self.parentC = parent
        self.height = height
        self.width = width
        self.transferF = transferF
        wx.Panel.__init__(self, parent, -1, (0, 0) , (width, height) )
        self.vs = vs = wx.BoxSizer( wx.VERTICAL )
        self.box1_title = wx.StaticBox( self, -1, title )
        self.box1_title.SetForegroundColour( wx.WHITE ) # Make label readable!
        self.transferFPanel = TransferFPanel(self, width, height, transferF, scene)
        box1 = self.box1 = wx.StaticBoxSizer( self.box1_title, wx.VERTICAL )
        self.gbs = gbs = wx.GridBagSizer(5,5)
        self.sizer = box1
        self.scene = scene
        gbs.Add(self.transferFPanel,(0, 0), (5, 2) )
        bmpNew = wx.Bitmap(opj(path+'images/new_16x16.png'))
        bmpDel = wx.Bitmap(opj(path+'images/delete_16x16.png'))
        bmpMod = wx.Bitmap(opj(path+'images/color_16x16.png'))
        self.newColorB = wx.BitmapButton(self, -1, bmpNew, (0,0), style=wx.NO_BORDER)
        self.newColorB.SetToolTip( wx.ToolTip( "Press to choose a new color for chosen color map position." ) )
        self.delColorB = wx.BitmapButton(self, -1, bmpDel, (0,0), style=wx.NO_BORDER)
        self.delColorB.SetToolTip( wx.ToolTip( "delcolorb: fix me" ) )
        self.modifyColorB = wx.BitmapButton(self, -1, bmpMod, (0,0), style=wx.NO_BORDER)
        self.modifyColorB.SetToolTip( wx.ToolTip( "Modify Colormap Node (Make sure you select a gray square first.)" ) )
        self.presetsB = wx.BitmapButton(self, -1, bmpMod, (0,0), style=wx.NO_BORDER)
        self.presetsB.SetToolTip( wx.ToolTip( "Choose Colormap Preset" ) )
        gbs.Add( self.newColorB, (0, 2) )
        gbs.Add( self.delColorB, (1, 2) )
        gbs.Add( self.modifyColorB, (2, 2) )
        gbs.Add( self.presetsB, (3,2) )
        gbs.Layout()
        box1.Add( gbs, -1, wx.ALIGN_CENTRE|wx.ALL, 5 )
        vs.Add( box1, -1, wx.ALIGN_CENTRE|wx.ALL, 5 )
        #vs.Add(grid1, 0, wx.ALIGN_CENTRE|wx.ALL, 5 )
        self.SetSizer( vs )
        vs.Fit( self )
        self.visible = True
        self.Bind(wx.EVT_BUTTON, self.OnClickNew, self.newColorB)
        self.Bind(wx.EVT_BUTTON, self.OnClickDelete, self.delColorB)
        self.Bind(wx.EVT_BUTTON, self.OnClickModify, self.modifyColorB)
        self.Bind(wx.EVT_BUTTON, self.OnClickPresets, self.presetsB)
        self.box1_title.SetBackgroundColour(self.scene.bgColor)
        self.transferFPanel.SetBackgroundColour(self.scene.bgColor)
    def OnClickPresets(self, evt):
        """Build and show the preset popup menu (switch-to, copy-from, or create-new)."""
        menu = wx.Menu()
        self.ids = []
        for i in range (len(self.scene.frame.transferFunctions)):
            popupID = wx.NewId()
            menu.Append(popupID, self.scene.frame.transferFunctions[i].label)
            self.Bind(wx.EVT_MENU, self.OnPopUp, id=popupID)
            self.ids.append(popupID)
        self.puNewID = wx.NewId()
        self.copyID = wx.NewId()
        copy_menu = wx.Menu()
        menu.AppendMenu(self.copyID, "Copy From...", copy_menu)
        self.copy_ids = []
        # the first seven "Copy From" entries are built-in RGBAColorMap presets;
        # OnPopUp relies on this ordering (the `i < 7` check)
        popupID = wx.NewId()
        copy_menu.Append(popupID, "Default: InvRainbowIso")
        self.Bind(wx.EVT_MENU, self.OnPopUp, id=popupID)
        self.copy_ids.append(popupID)
        popupID = wx.NewId()
        copy_menu.Append(popupID, "Default: InvRainbow")
        self.Bind(wx.EVT_MENU, self.OnPopUp, id=popupID)
        self.copy_ids.append(popupID)
        popupID = wx.NewId()
        copy_menu.Append(popupID, "Default: Rainbow")
        self.Bind(wx.EVT_MENU, self.OnPopUp, id=popupID)
        self.copy_ids.append(popupID)
        popupID = wx.NewId()
        copy_menu.Append(popupID, "Default: InvGrayscale")
        self.Bind(wx.EVT_MENU, self.OnPopUp, id=popupID)
        self.copy_ids.append(popupID)
        popupID = wx.NewId()
        copy_menu.Append(popupID, "Default: InvBlackBody")
        self.Bind(wx.EVT_MENU, self.OnPopUp, id=popupID)
        self.copy_ids.append(popupID)
        popupID = wx.NewId()
        copy_menu.Append(popupID, "Default: BlackBody")
        self.Bind(wx.EVT_MENU, self.OnPopUp, id=popupID)
        self.copy_ids.append(popupID)
        popupID = wx.NewId()
        copy_menu.Append(popupID, "Default: GreyScale")
        self.Bind(wx.EVT_MENU, self.OnPopUp, id=popupID)
        self.copy_ids.append(popupID)
        for i in range (len(self.scene.frame.transferFunctions)):
            popupID = wx.NewId()
            copy_menu.Append(popupID, self.scene.frame.transferFunctions[i].label)
            self.Bind(wx.EVT_MENU, self.OnPopUp, id=popupID)
            self.copy_ids.append(popupID)
        menu.Append(self.puNewID, "New...")
        self.Bind(wx.EVT_MENU, self.OnPopUp, id=self.puNewID)
        self.PopupMenu(menu)
        menu.Destroy()
    def OnPopUp(self, evt):
        """Handle a preset-menu choice: create a new TF, switch to one, or copy one in."""
        if evt.GetId() == self.puNewID:
            dlg = wx.TextEntryDialog(self, 'Set Name of new TF:', 'TF', 'TF')
            dlg.SetValue('Untitled')
            if dlg.ShowModal() == wx.ID_OK:
                name = dlg.GetValue()
                colors = []
                index = len(self.scene.frame.transferFunctions)
                slices = manta_new(vector_ColorSlice());
                for i in range(len(self.transferF.colors)):
                    c = self.transferF.colors[i]
                    slices.push_back(ColorSlice(c[0],RGBAColor(Color(RGBColor(c[1],c[2],c[3])), c[4])))
                    colors.append(c)
                cmap = self.scene.frame.sphereVolCMaps.append(manta_new(RGBAColorMap(slices, 64)))
                t = TransferF(self.scene.frame, colors, index, name, cmap)
                self.scene.frame.transferFunctions.append(t)
                self.SetTransferF(t)
        else:
            for i in range(len(self.ids)):
                if evt.GetId() == self.ids[i]:
                    self.SetTransferF(self.scene.frame.transferFunctions[i])
            for i in range(len(self.copy_ids)):
                if evt.GetId() == self.copy_ids[i]:
                    #copy transfer function to current transfer function
                    if i < 7:
                        temp = TransferF(self, [], -1, "GreyScale", manta_new(RGBAColorMap(i)))
                        self.transferF.Clone(temp)
                    else:
                        self.transferF.Clone(self.scene.frame.transferFunctions[i- 7])
                    self.transferFPanel.histogramGroup.SetTransferF(self.transferF)
                    self.transferFPanel.UpdateHistogram()
                    self.transferFPanel.Update()
    def OnClickNew(self, evt):
        """Toolbar: add a new color node."""
        self.transferFPanel.AddNewColor()
    def OnClickDelete(self, evt):
        """Toolbar: delete the selected color node."""
        self.transferFPanel.DeleteSelected()
    def OnClickModify(self, evt):
        """Toolbar: recolor the selected color node."""
        self.transferFPanel.ChooseColorSelected()
    def SetLabel(self, label):
        """Set the text of the surrounding StaticBox."""
        self.box1_title.SetLabel(label)
    def SetTransferF(self, transferF):
        """Switch this group (panel + histogram) to *transferF* and retitle the box."""
        self.transferF = transferF
        self.transferFPanel.SetTransferF(transferF)
        label = transferF.label
        if self.transferFPanel.histogramGroup != None:
            label = str(label) + str(" : ") + str(self.transferFPanel.histogramGroup.title)
        self.SetLabel(label)
        if self.transferFPanel.histogramGroup != None:
            self.transferFPanel.histogramGroup.SetTransferF(transferF)
    def SetUpdateFunction(self, function):
        """Intentional no-op placeholder (kept for interface compatibility)."""
        None
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Base shape class for pyprototypr
"""
# lib
import copy
import json
import math
# third party
from reportlab.pdfgen import canvas as reportlab_canvas
from reportlab.lib.units import cm, inch
from reportlab.lib.pagesizes import (
A6, A5, A4, A3, A2, A1, A0, LETTER, LEGAL, ELEVENSEVENTEEN,
letter, legal, elevenSeventeen, B6, B5, B4, B3, B2, B0, landscape)
from reportlab.lib.utils import ImageReader
from reportlab.lib.styles import getSampleStyleSheet
from reportlab.lib.enums import TA_LEFT, TA_RIGHT, TA_CENTER, TA_JUSTIFY
from reportlab.lib.colors import (
aliceblue, antiquewhite, aqua, aquamarine, azure, beige, bisque, black,
blanchedalmond, blue, blueviolet, brown, burlywood, cadetblue, chartreuse,
chocolate, coral, cornflowerblue, cornsilk, crimson, cyan, darkblue,
darkcyan, darkgoldenrod, darkgray, darkgrey, darkgreen, darkkhaki,
darkmagenta, darkolivegreen, darkorange, darkorchid, darkred, darksalmon,
darkseagreen, darkslateblue, darkslategray, darkslategrey, darkturquoise,
darkviolet, deeppink, deepskyblue, dimgray, dimgrey, dodgerblue,
floralwhite, forestgreen, fuchsia, gainsboro, ghostwhite, gold, goldenrod,
gray, grey, green, greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen, lemonchiffon, lightblue,
lightcoral, lightcyan, lightgoldenrodyellow, lightgreen, lightgrey,
lightpink, lightsalmon, lightseagreen, lightskyblue, lightslategray,
lightslategrey, lightsteelblue, lightyellow, lime, limegreen, linen,
magenta, maroon, mediumaquamarine, mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen, mediumturquoise,
mediumvioletred, midnightblue, mintcream, mistyrose, moccasin,
navajowhite, navy, oldlace, olive, olivedrab, orange, orangered, orchid,
palegoldenrod, palegreen, paleturquoise, palevioletred, papayawhip,
peachpuff, peru, pink, plum, powderblue, purple, red, rosybrown,
royalblue, saddlebrown, salmon, sandybrown, seagreen, seashell, sienna,
silver, skyblue, slateblue, slategray, slategrey, snow, springgreen,
steelblue, tan, teal, thistle, tomato, turquoise, violet, wheat, white,
whitesmoke, yellow, yellowgreen, fidblue, fidred, fidlightblue,
cornflower, firebrick)
# local
from pyprototypr.utils import tools
# Module-wide debug flag (no logging is emitted when False).
DEBUG = False
# Unit-name -> ReportLab unit conversion factor (points per unit).
UNITS = {
    "cm": cm,
    "inch": inch
}
# Color-name (string key) -> ReportLab color object; used by get_color().
COLORS = {
    "aliceblue": aliceblue,
    "antiquewhite": antiquewhite,
    "aqua": aqua,
    "aquamarine": aquamarine,
    "azure": azure,
    "beige": beige,
    "bisque": bisque,
    "black": black,
    "blanchedalmond": blanchedalmond,
    "blue": blue,
    "blueviolet": blueviolet,
    "brown": brown,
    "burlywood": burlywood,
    "cadetblue": cadetblue,
    "chartreuse": chartreuse,
    "chocolate": chocolate,
    "coral": coral,
    "cornflower": cornflower,
    "cornflowerblue": cornflowerblue,
    "cornsilk": cornsilk,
    "crimson": crimson,
    "cyan": cyan,
    "darkblue": darkblue,
    "darkcyan": darkcyan,
    "darkgoldenrod": darkgoldenrod,
    "darkgray": darkgray,
    "darkgreen": darkgreen,
    "darkgrey": darkgrey,
    "darkkhaki": darkkhaki,
    "darkmagenta": darkmagenta,
    "darkolivegreen": darkolivegreen,
    "darkorange": darkorange,
    "darkorchid": darkorchid,
    "darkred": darkred,
    "darksalmon": darksalmon,
    "darkseagreen": darkseagreen,
    "darkslateblue": darkslateblue,
    "darkslategray": darkslategray,
    "darkslategrey": darkslategrey,
    "darkturquoise": darkturquoise,
    "darkviolet": darkviolet,
    "deeppink": deeppink,
    "deepskyblue": deepskyblue,
    "dimgray": dimgray,
    "dimgrey": dimgrey,
    "dodgerblue": dodgerblue,
    "fidblue": fidblue,
    "fidlightblue": fidlightblue,
    "fidred": fidred,
    "firebrick": firebrick,
    "floralwhite": floralwhite,
    "forestgreen": forestgreen,
    "fuchsia": fuchsia,
    "gainsboro": gainsboro,
    "ghostwhite": ghostwhite,
    "goldenrod": goldenrod,
    "gold": gold,
    "gray": gray,
    "green": green,
    "greenyellow": greenyellow,
    "grey": grey,
    "honeydew": honeydew,
    "hotpink": hotpink,
    "indianred": indianred,
    "indigo": indigo,
    "ivory": ivory,
    "khaki": khaki,
    "lavenderblush": lavenderblush,
    "lavender": lavender,
    "lawngreen": lawngreen,
    "lemonchiffon": lemonchiffon,
    "lightblue": lightblue,
    "lightcoral": lightcoral,
    "lightcyan": lightcyan,
    "lightgoldenrodyellow": lightgoldenrodyellow,
    "lightgreen": lightgreen,
    "lightgrey": lightgrey,
    "lightpink": lightpink,
    "lightsalmon": lightsalmon,
    "lightseagreen": lightseagreen,
    "lightskyblue": lightskyblue,
    "lightslategray": lightslategray,
    "lightslategrey": lightslategrey,
    "lightsteelblue": lightsteelblue,
    "lightyellow": lightyellow,
    "limegreen": limegreen,
    "lime": lime,
    "linen": linen,
    "magenta": magenta,
    "maroon": maroon,
    "mediumaquamarine": mediumaquamarine,
    "mediumblue": mediumblue,
    "mediumorchid": mediumorchid,
    "mediumpurple": mediumpurple,
    "mediumseagreen": mediumseagreen,
    "mediumslateblue": mediumslateblue,
    "mediumspringgreen": mediumspringgreen,
    "mediumturquoise": mediumturquoise,
    "mediumvioletred": mediumvioletred,
    "midnightblue": midnightblue,
    "mintcream": mintcream,
    "mistyrose": mistyrose,
    "moccasin": moccasin,
    "navajowhite": navajowhite,
    "navy": navy,
    "oldlace": oldlace,
    "olivedrab": olivedrab,
    "olive": olive,
    "orange": orange,
    "orangered": orangered,
    "orchid": orchid,
    "palegoldenrod": palegoldenrod,
    "palegreen": palegreen,
    "paleturquoise": paleturquoise,
    "palevioletred": palevioletred,
    "papayawhip": papayawhip,
    "peachpuff": peachpuff,
    "peru": peru,
    "pink": pink,
    "plum": plum,
    "powderblue": powderblue,
    "purple": purple,
    "red": red,
    "rosybrown": rosybrown,
    "royalblue": royalblue,
    "saddlebrown": saddlebrown,
    "salmon": salmon,
    "sandybrown": sandybrown,
    "seagreen": seagreen,
    "seashell": seashell,
    "sienna": sienna,
    "silver": silver,
    "skyblue": skyblue,
    "slateblue": slateblue,
    "slategray": slategray,
    "slategrey": slategrey,
    "snow": snow,
    "springgreen": springgreen,
    "steelblue": steelblue,
    "tan": tan,
    "teal": teal,
    "thistle": thistle,
    "tomato": tomato,
    "turquoise": turquoise,
    "violet": violet,
    "wheat": wheat,
    "whitesmoke": whitesmoke,
    "white": white,
    "yellowgreen": yellowgreen,
    "yellow": yellow,
}
# Page-size name -> ReportLab page-size tuple (plus the `landscape` helper);
# used by get_page().
PAGES = {
    "LETTER": LETTER,
    "landscape": landscape,
    "legal": legal,
    "A1": A1,
    "A0": A0,
    "A3": A3,
    "A2": A2,
    "A5": A5,
    "A4": A4,
    "A6": A6,
    "elevenSeventeen": elevenSeventeen,
    "LEGAL": LEGAL,
    "letter": letter,
    "B4": B4,
    "B5": B5,
    "B6": B6,
    "B0": B0,
    "B2": B2,
    "B3": B3,
    "ELEVENSEVENTEEN": ELEVENSEVENTEEN,
}
# Default stroke width (in points) used when none is specified.
WIDTH = 0.1
class BaseCanvas:
    """Wrapper/extended class for a ReportLab canvas.

    Holds a reportlab ``Canvas`` plus a flat set of default drawing
    attributes (page geometry, units, colors, fonts, shape parameters).
    Defaults may be overridden via a JSON file whose path is passed as
    ``kwargs['defaults']``; load failures are reported through
    ``tools.feedback`` rather than raised.
    """

    def __init__(self, filename=None, pagesize=None, **kwargs):
        """Create the canvas and populate all default attributes.

        Args:
            filename: output PDF path handed to the ReportLab canvas.
            pagesize: ReportLab page-size tuple; defaults to A4.
            kwargs: may contain 'defaults' — path of a JSON file seeding
                the default attribute values below.
        """
        self.canvas = reportlab_canvas.Canvas(
            filename=filename, pagesize=pagesize or A4)
        self.jsonfile = kwargs.get('defaults', None)
        self.defaults = {}
        # override: load defaults from the JSON file, if one was supplied
        if self.jsonfile:
            try:
                with open(self.jsonfile) as data_file:
                    self.defaults = json.load(data_file)
            except IOError:
                tools.feedback('Unable to find or load the file: "%s"' %
                               self.jsonfile)
            except ValueError:
                tools.feedback('Unable to load data from the file: "%s"' %
                               self.jsonfile)
        # constants
        self.default_length = 1
        self.show_id = False  # True
        # general
        self.shape = self.defaults.get('shape', 'rectangle')
        self.shape_id = None
        self.sequence = self.defaults.get('sequence', [])
        self.dataset = []
        self.members = []  # card IDs, of which current card is a member
        self._object = None
        self.kwargs = kwargs
        # page
        self.pagesize = self.get_page(self.defaults.get('pagesize'), A4)
        self.margin = self.defaults.get('margin', 1)
        self.margin_top = self.defaults.get('margin_top', self.margin)
        self.margin_bottom = self.defaults.get('margin_bottom', self.margin)
        self.margin_left = self.defaults.get('margin_left', self.margin)
        self.margin_right = self.defaults.get('margin_right', self.margin)
        self.grid_marks = self.defaults.get('grid_marks', 0)
        self.grid_color = self.get_color(
            self.defaults.get('grid_color'), grey)
        self.grid_stroke_width = self.defaults.get('grid_stroke_width', WIDTH)
        self.grid_length = self.defaults.get('grid_length', 0.33)
        # sizes and positions
        self.units = self.get_units(self.defaults.get('units'), cm)
        self.row = self.defaults.get('row', None)
        self.col = self.defaults.get('col', self.defaults.get('column', None))
        self.height = self.defaults.get('height', 1)
        self.width = self.defaults.get('width', 1)
        self.size = self.defaults.get('size', None)  # proxy for equal H/W
        self.x = self.defaults.get('x', self.defaults.get('left', 1))
        self.y = self.defaults.get('y', self.defaults.get('bottom', 1))
        self.cx = self.defaults.get('cx', None)
        self.cy = self.defaults.get('cy', None)
        # repeats
        self.offset = self.defaults.get('offset', 0)
        self.offset_across = self.defaults.get('offset_across', self.offset)
        self.offset_down = self.defaults.get('offset_down', self.offset)
        self.gap = self.defaults.get('gap', 0)
        self.gap_across = self.defaults.get('gap_across', self.gap)
        self.gap_down = self.defaults.get('gap_down', self.gap)
        # rotate in degrees
        self.rotate = self.defaults.get('rotate',
                                        self.defaults.get('rotation', 0))
        self.orientation = self.defaults.get('orientation', 'vertical')
        self.position = self.defaults.get('position', None)
        # line
        # FIX: previously defaulted to WIDTH (0.1 — a stroke width, not a
        # color) and never resolved a color *name*; now resolved through
        # get_color() like every other *_color attribute.
        self.line_color = self.get_color(self.defaults.get('line_color'), black)
        self.line_width = self.defaults.get('line_width', WIDTH)
        self.line_dots = self.defaults.get('line_dots',
                                           self.defaults.get('dots', False))
        self.line_dashes = self.defaults.get('line_dashes',
                                             self.defaults.get('dashes', False))
        self.line_dotdash = self.defaults.get('line_dotdash',
                                              self.defaults.get('dotdash', None))
        # color and fill
        fill = self.defaults.get('fill', self.defaults.get('fill_color'))
        self.fill = self.get_color(fill, white)
        self.pattern = self.defaults.get('pattern', None)
        self.repeat = self.defaults.get('repeat', True)
        # text
        self.align = self.defaults.get('align', 'centre')  # left,right,justify
        self._alignment = TA_LEFT  # see to_alignment()
        self.font_face = self.defaults.get('font_face', 'Helvetica')
        self.font_size = self.defaults.get('font_size', 12)
        self.label_size = self.defaults.get('label_size', self.font_size)
        self.title_size = self.defaults.get('title_size', self.font_size)
        self.heading_size = self.defaults.get('heading_size', self.font_size)
        self.text = self.defaults.get('text', '')
        self.label = self.defaults.get('label', '')
        self.title = self.defaults.get('title', '')
        self.heading = self.defaults.get('heading', '')
        self.style = self.defaults.get('style', None)  # Normal? from reportlab
        self.wrap = self.defaults.get('wrap', False)
        # text block
        # FIX: resolve an outline_color *name* via get_color (was stored raw)
        self.outline_color = self.get_color(
            self.defaults.get('outline_color'), self.fill)
        self.outline_width = self.defaults.get('outline_width', 0)
        self.leading = self.defaults.get('leading', 12)
        # line colors
        stroke = self.defaults.get('stroke', self.defaults.get('stroke_color'))
        self.stroke = self.get_color(stroke, black)
        self.stroke_width = self.defaults.get('stroke_width', WIDTH)
        self.stroke_text = self.get_color(
            self.defaults.get('stroke_text'), self.stroke)
        self.stroke_label = self.get_color(
            self.defaults.get('stroke_label'), self.stroke)
        self.stroke_title = self.get_color(
            self.defaults.get('stroke_title'), self.stroke)
        self.stroke_heading = self.get_color(
            self.defaults.get('stroke_heading'), self.stroke)
        # line and fill
        self.transparent = self.defaults.get('transparent', False)
        # image / file
        self.source = self.defaults.get('source', None)  # file or http://
        # line / ellipse / bezier
        self.length = self.defaults.get('length', 0)
        self.angle = self.defaults.get('angle', 0)
        self.x_1 = self.defaults.get('x1', 1)
        self.y_1 = self.defaults.get('y1', 1)
        # bezier
        self.x_2 = self.defaults.get('x2', 1)
        self.y_2 = self.defaults.get('y2', 1)
        self.x_3 = self.defaults.get('x3', 1)
        self.y_3 = self.defaults.get('y3', 1)
        # rect / card
        self.rounding = self.defaults.get('rounding', 0)
        self.rounded = self.defaults.get('rounded', False)
        # grid / card layout
        self.rows = self.defaults.get('rows', 1)
        self.cols = self.defaults.get('cols', self.defaults.get('columns', 1))
        # circle / star / poly
        self.radius = self.defaults.get('radius', 1)
        self.vertices = self.defaults.get('vertices', 5)
        self.sides = self.defaults.get('sides', 6)
        self.points = self.defaults.get('points', [])
        # hexes
        self.side = self.defaults.get('side', 1)  # length of sides
        self.dot_color = self.get_color(self.defaults.get('dot_color'), black)
        self.dot_size = self.defaults.get('dot_size', 0)
        self.hid = self.defaults.get('id', '')  # HEX ID

    def get_canvas(self):
        """Return the underlying reportlab canvas object."""
        return self.canvas

    def get_color(self, name=None, default=black):
        """Return the color registered under *name*; *default* if absent or falsy."""
        return COLORS.get(name, default) if name else default

    def get_units(self, name=None, default=cm):
        """Return the unit factor registered under *name*; *default* if absent or falsy."""
        return UNITS.get(name, default) if name else default

    def get_page(self, name=None, default=A4):
        """Return the page size registered under *name*; *default* if absent or falsy."""
        return PAGES.get(name, default) if name else default
class BaseShape:
    """Base class for objects that are drawn on a given canvas.

    Every visual property is resolved with the same precedence: an explicit
    keyword argument wins, otherwise the default held by the associated
    BaseCanvas (``cnv``) is used.  Several properties accept an alias
    keyword (e.g. ``columns`` for ``cols``, ``rotation`` for ``rotate``).

    Args:
        _object: placeholder for an incoming Shape object.
        canvas: a BaseCanvas supplying defaults; a fresh one is created
            when omitted.
        kwargs: any drawable property (position, size, color, font, ...).
    """

    def __init__(self, _object=None, canvas=None, **kwargs):
        # constants
        self.default_length = 1
        self.show_id = False  # set True to draw the shape's ID for debugging
        # KEY
        self.canvas = canvas or BaseCanvas()  # BaseCanvas object
        cnv = self.canvas  # shortcut for use in getting defaults
        self._object = _object  # placeholder for an incoming Shape object
        self.kwargs = kwargs
        self.shape_id = None
        self.stylesheet = getSampleStyleSheet()
        self.sequence = kwargs.get('sequence', [])  # e.g. card numbers
        self.dataset = []  # list of dict data (loaded from file)
        self.members = []  # card IDs, of which current card is a member
        # general
        self.common = kwargs.get('common', None)
        self.shape = kwargs.get('shape', cnv.shape)
        # page
        self.pagesize = kwargs.get('pagesize', cnv.pagesize)
        self.margin = kwargs.get('margin', cnv.margin)
        self.margin_top = kwargs.get('margin_top', cnv.margin_top)
        self.margin_bottom = kwargs.get('margin_bottom', cnv.margin_bottom)
        self.margin_left = kwargs.get('margin_left', cnv.margin_left)
        self.margin_right = kwargs.get('margin_right', cnv.margin_right)
        self.grid_marks = kwargs.get('grid_marks', cnv.grid_marks)
        self.grid_color = kwargs.get('grid_color', cnv.grid_color)
        self.grid_stroke_width = kwargs.get('grid_stroke_width',
                                            cnv.grid_stroke_width)
        self.grid_length = kwargs.get('grid_length', cnv.grid_length)
        # sizes and positions
        self.units = kwargs.get('units', cnv.units)
        self.row = kwargs.get('row', cnv.row)
        self.col = kwargs.get('col', kwargs.get('column', cnv.col))
        self.height = kwargs.get('height', cnv.height)
        self.width = kwargs.get('width', cnv.width)
        self.size = kwargs.get('size', cnv.size)  # for equal height/width
        self.x = kwargs.get('x', kwargs.get('left', cnv.x))
        self.y = kwargs.get('y', kwargs.get('bottom', cnv.y))
        self.cx = kwargs.get('cx', cnv.cx)
        self.cy = kwargs.get('cy', cnv.cy)
        # repeats
        # BUGFIX: the 'across' and 'down' keywords were previously swapped
        # (offset_across read 'offset_down' and vice versa; same for gap_*).
        self.offset = kwargs.get('offset', cnv.offset)
        self.offset_across = kwargs.get('offset_across', cnv.offset_across)
        self.offset_down = kwargs.get('offset_down', cnv.offset_down)
        self.gap = kwargs.get('gap', cnv.gap)
        self.gap_across = kwargs.get('gap_across', cnv.gap_across)
        self.gap_down = kwargs.get('gap_down', cnv.gap_down)
        # rotate in degrees
        self.rotate = kwargs.get('rotate', kwargs.get('rotation', cnv.rotate))
        self._rotate_theta = self.rotate * math.pi / 180.0  # radians
        self.orientation = kwargs.get('orientation', cnv.orientation)
        self.position = kwargs.get('position', cnv.position)
        # line
        self.line_width = kwargs.get('line_width', cnv.line_width)
        self.line_dots = kwargs.get('line_dots',
                                    kwargs.get('dots', cnv.line_dots))
        self.line_dashes = kwargs.get('line_dashes',
                                      kwargs.get('dashes', cnv.line_dashes))
        self.line_dotdash = kwargs.get('line_dotdash',
                                       kwargs.get('dotdash', cnv.line_dotdash))
        # text
        self.align = kwargs.get('align', cnv.align)  # left, right, justify
        self._alignment = TA_LEFT  # see to_alignment()
        self.font_face = kwargs.get('font_face', cnv.font_face)
        self.font_size = kwargs.get('font_size', cnv.font_size)
        self.label_size = kwargs.get('label_size', cnv.label_size)
        self.title_size = kwargs.get('title_size', cnv.title_size)
        self.heading_size = kwargs.get('heading_size', cnv.heading_size)
        self.text = kwargs.get('text', cnv.text)
        self.label = kwargs.get('label', cnv.label)
        self.title = kwargs.get('title', cnv.title)
        self.heading = kwargs.get('heading', cnv.heading)
        self.style = kwargs.get('style', cnv.style)  # Normal? from reportlab
        self.wrap = kwargs.get('wrap', cnv.wrap)
        # text block
        self.outline_color = kwargs.get('outline_color', cnv.outline_color)
        self.outline_width = kwargs.get('outline_width', cnv.outline_width)
        self.leading = kwargs.get('leading', cnv.leading)
        # color and fill
        self.fill = kwargs.get('fill', kwargs.get('fill_color', cnv.fill))
        self.pattern = kwargs.get('pattern', cnv.pattern)
        self.repeat = kwargs.get('repeat', cnv.repeat)
        # lines
        self.stroke = kwargs.get('stroke', kwargs.get('stroke_color',
                                                      cnv.stroke))
        self.stroke_width = kwargs.get('stroke_width', cnv.stroke_width)
        self.stroke_text = kwargs.get('stroke_text', cnv.stroke_text)
        self.stroke_label = kwargs.get('stroke_label', cnv.stroke_label)
        self.stroke_title = kwargs.get('stroke_title', cnv.stroke_title)
        self.stroke_heading = kwargs.get('stroke_heading', cnv.stroke_heading)
        # line and fill
        self.transparent = kwargs.get('transparent', cnv.transparent)
        # image / file
        self.source = kwargs.get('source', cnv.source)  # file or http://
        # line / ellipse / bezier
        self.length = kwargs.get('length', cnv.length)
        self.angle = kwargs.get('angle', cnv.angle)  # anti-clock from flat
        self._angle_theta = self.angle * math.pi / 180.0  # radians
        self.x_1 = kwargs.get('x1', cnv.x_1)
        self.y_1 = kwargs.get('y1', cnv.y_1)
        # bezier
        self.x_2 = kwargs.get('x2', cnv.x_2)
        self.y_2 = kwargs.get('y2', cnv.y_2)
        self.x_3 = kwargs.get('x3', cnv.x_3)
        self.y_3 = kwargs.get('y3', cnv.y_3)
        # rect / card
        self.rounding = kwargs.get('rounding', cnv.rounding)
        self.rounded = kwargs.get('rounded', cnv.rounded)
        # grid / card layout
        self.rows = kwargs.get('rows', cnv.rows)
        self.cols = kwargs.get('cols', kwargs.get('columns', cnv.cols))
        # circle / star / poly
        self.radius = kwargs.get('radius', cnv.radius)
        self.vertices = kwargs.get('vertices', cnv.vertices)
        self.sides = kwargs.get('sides', cnv.sides)
        self.points = kwargs.get('points', cnv.points)
        # hexes
        self.side = kwargs.get('side', cnv.side)  # length of sides
        self.dot_color = kwargs.get('dot_color', cnv.dot_color)
        self.dot_size = kwargs.get('dot_size', cnv.dot_size)
        self.hid = kwargs.get('id', cnv.hid)  # HEX ID
        # CHECK
        correct, issue = self.check_settings()
        if not correct:
            tools.feedback("Problem with settings: %s" % issue)
        # UPDATE SELF WITH COMMON
        if self.common:
            # hoisted: BaseCanvas() was previously constructed once per attr
            base = BaseCanvas()
            attrs = vars(self.common)
            for attr in attrs.keys():
                if attr not in ['canvas', 'common', 'stylesheet'] and \
                        attr[0] != '_':
                    common_attr = getattr(self.common, attr)
                    base_attr = getattr(base, attr)
                    # only override values the user changed from the default
                    if common_attr != base_attr:
                        setattr(self, attr, common_attr)

    def unit(self, item, units=None, skip_none=False):
        """Convert *item* into the appropriate unit system.

        Args:
            item: numeric value to scale (or None when skip_none is True).
            units: multiplier to apply; defaults to self.units.
            skip_none: return None for a None item instead of failing.
        """
        if item is None and skip_none:
            return None
        if not units:
            units = self.units
        try:
            return item * units
        except (TypeError, ValueError):
            tools.feedback(
                'Unable to set units: "%s".'
                ' Please check that this is a valid value.' % item,
                stop=True)

    def draw(self, cnv=None, off_x=0, off_y=0, ID=None, **kwargs):
        """Draw an element on a given canvas. Subclasses must implement."""
        raise NotImplementedError

    def set_canvas_props(self, cnv=None, fill=None,
                         stroke=None, stroke_width=None):
        """Set reportlab canvas properties for font and colors.

        Any argument left as None falls back to the shape's own property.
        NOTE(review): the ``or`` fallbacks below also treat explicit
        zero/empty overrides as "not supplied" - confirm before changing.
        """
        canvas = cnv if cnv else self.canvas.canvas
        try:
            canvas.setFont(self.font_face, self.font_size)
        except AttributeError:
            pass
        except KeyError:
            tools.feedback(
                'Unable to find font: "%s".'
                ' Please check that this is installed on your system.' %
                self.font_face,
                stop=True)
        try:
            canvas.setFillColor(fill or self.fill)
        except AttributeError:
            pass
        try:
            canvas.setStrokeColor(stroke or self.stroke)
        except AttributeError:
            pass
        try:
            canvas.setLineWidth(stroke_width or self.stroke_width)
        except AttributeError:
            pass
        if self.line_dashes:
            canvas.setDash(array=[6, 2])
        if self.line_dots:
            canvas.setDash(array=[1, 1])
        if self.line_dotdash:
            canvas.setDash(array=self.line_dotdash)

    def check_settings(self):
        """Check that the user-supplied parameters are correct.

        Returns:
            tuple: (bool, list) - validity flag plus list of issue strings.
        """
        correct = True
        issue = []
        if self.position:
            if str(self.position).lower() not in \
                    ['top', 'bottom', 'center', 'middle']:
                issue.append('Invalid position!')
                correct = False
        if self.align:
            # both 'centre' and 'center' spellings are accepted
            if str(self.align).lower() not in \
                    ['left', 'right', 'justify', 'centre', 'center']:
                issue.append('Invalid align!')
                correct = False
        if self.orientation:
            if str(self.orientation).lower() not in \
                    ['vertical', 'horizontal']:
                issue.append('Invalid orientation!')
                correct = False
        return correct, issue

    def to_alignment(self):
        """Convert local, English-friendly alignments to reportlab enums."""
        if self.align in ('centre', 'center'):
            self._alignment = TA_CENTER
        elif self.align == 'right':
            self._alignment = TA_RIGHT
        elif self.align == 'justify':
            self._alignment = TA_JUSTIFY
        else:
            self._alignment = TA_LEFT
        return self._alignment

    def load_image(self, source=None):
        """Load an image from file or website; return None on failure."""
        img = None
        if source:
            try:
                img = ImageReader(source)
            except IOError:
                # BUGFIX: report the source actually attempted (was
                # self.source, which may differ from the argument)
                tools.feedback('Unable to find or open image: "%s"' % source)
        return img

    def process_template(self, _dict):
        """Set values for properties based on those defined in a dictionary."""
        if _dict.get('x'):
            self.x = _dict.get('x', 1)
        if _dict.get('y'):
            # BUGFIX: previously read the 'x' key here
            self.y = _dict.get('y', 1)
        if _dict.get('height'):
            self.height = _dict.get('height', 1)
        if _dict.get('width'):
            self.width = _dict.get('width', 1)
        if _dict.get('radius'):
            self.radius = _dict.get('radius', 1)
        if _dict.get('rounding'):
            self.rounding = _dict.get('rounding', None)

    def get_center(self):
        """Attempt to get centre (x,y) tuple for a shape; None if unknown."""
        # NOTE(review): truthiness means 0 is treated like "unset" here;
        # if 0 is a legal coordinate these tests should use "is not None".
        if self.cx and self.cy:
            return (self.cx, self.cy)
        if self.x and self.y and self.width and self.height:
            return (self.x + self.width / 2.0, self.y + self.height / 2.0)
        return None

    def get_edges(self):
        """Attempt to get edges of rectangle as a dict; empty if unknown."""
        if self.x and self.y and self.width and self.height:
            edges = {
                'left': self.x,
                'right': self.x + self.width,
                'bottom': self.y,
                'top': self.y + self.height
            }
            return edges
        return {}

    def textify(self, index=None, text=None):
        """Extract text from a list, or create string, based on index & type."""
        _text = text or self.text
        if not _text:
            return
        if hasattr(_text, 'lower'):  # a plain string: return as-is
            return _text
        try:
            return _text[index]
        except TypeError:  # e.g. index is None: fall back to whole value
            return _text

    def draw_multi_string(self, canvas, x, y, string, align=None):
        """Draw a string, split on newlines if needed, with an alignment."""
        if not string:
            return
        align = align or self.align
        mvy = copy.copy(y)
        for ln in string.split('\n'):
            if align in ('centre', 'center'):
                canvas.drawCentredString(x, mvy, ln)
            elif align == 'right':
                canvas.drawRightString(x, mvy, ln)
            else:
                canvas.drawString(x, mvy, ln)
            mvy -= canvas._leading  # move down one line

    def draw_label(self, canvas, x, y, string, align=None):
        """Draw a (possibly multi-line) label at (x, y)."""
        # BUGFIX: x was previously passed the value of y
        self.draw_multi_string(canvas=canvas, x=x, y=y, string=string,
                               align=align)

    def V(self, *args):
        """Value evaluator: look up column args[0] for the current shape's
        row in the dataset; returns '' (with feedback) when not found."""
        try:
            return self.dataset[self.shape_id].get(args[0], '')
        except Exception:  # was a bare except; narrowed
            # NOTE(review): a shape_id of 0 is reported as "No ID" here,
            # and the elif below can itself raise for an out-of-range id.
            if not self.shape_id:
                tools.feedback('No ID - unable to locate item!')
            elif self.dataset[self.shape_id]:
                tools.feedback('Unable to locate item #%s in dataset!' %
                               self.shape_id)
            else:
                tools.feedback('Unable to locate column %s!' % args[0])
            return ''

    def Q(self, *args):
        """Placeholder for query evaluator."""
        tools.feedback('TO DO! %s' % args)
class GroupBase(list):
    """A list subclass used to group drawable objects together."""

    def __init__(self, *args):
        super().__init__(*args)
| |
#!/usr/bin/env python
#
# Copyright (c) 2004-2009 Canonical
#
# Authors: Michael Vogt <michael.vogt@ubuntu.com>
# Sebastian Heinlein <glatzor@ubuntu.com>
# Julian Andres Klode <jak@debian.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
"""GObject-powered progress classes and a GTK+ status widget."""
from __future__ import print_function
import pygtk
pygtk.require('2.0')
import gtk
try:
import glib
glib # pyflakes
except ImportError:
import gobject as glib
import gobject
import pango
import time
import vte
import apt_pkg
from apt_pkg import gettext as _
from apt.progress import base
__all__ = ['GAcquireProgress', 'GInstallProgress', 'GOpProgress',
'GtkAptProgress']
def mksig(params=(), run=gobject.SIGNAL_RUN_FIRST, rettype=gobject.TYPE_NONE):
    """Build a gobject signal-definition tuple.

    Keeps signal declarations short: in most cases only ``params`` (a tuple
    of the argument types) needs to be given; the run mode and return type
    default to SIGNAL_RUN_FIRST / TYPE_NONE.
    """
    signature = (run, rettype, params)
    return signature
class GOpProgress(gobject.GObject, base.OpProgress):
    """Cache-operation progress reported through GObject signals.

    Signals:

        * status-changed(str: operation, int: percent)
        * status-started() - Not Implemented yet
        * status-finished()
    """

    __gsignals__ = {"status-started": mksig(),
                    "status-finished": mksig(),
                    "status-changed": mksig((str, int))}

    def __init__(self):
        base.OpProgress.__init__(self)
        gobject.GObject.__init__(self)
        self._context = glib.main_context_default()

    def update(self, percent=None):
        """Emit 'status-changed' and keep the main context serviced."""
        base.OpProgress.update(self, percent)
        self.emit("status-changed", self.op, self.percent)
        context = self._context
        while context.pending():
            context.iteration()

    def done(self):
        """Emit 'status-finished' once every operation has completed."""
        base.OpProgress.done(self)
        self.emit("status-finished")
class GInstallProgress(gobject.GObject, base.InstallProgress):
    """Installation progress with GObject signals.
    Signals:
        * status-changed(str: status, int: percent)
        * status-started()
        * status-finished()
        * status-timeout()
        * status-error()
        * status-conffile()
    """
    # Seconds until a maintainer script will be regarded as hanging
    INSTALL_TIMEOUT = 5 * 60
    __gsignals__ = {"status-changed": mksig((str, int)),
                    "status-started": mksig(),
                    "status-timeout": mksig(),
                    "status-error": mksig(),
                    "status-conffile": mksig(),
                    "status-finished": mksig()}
    def __init__(self, term):
        # term: a vte.Terminal in which dpkg's child process runs.
        base.InstallProgress.__init__(self)
        gobject.GObject.__init__(self)
        # finished/apt_status are updated by child_exited() when dpkg exits.
        self.finished = False
        self.apt_status = -1
        self.time_last_update = time.time()
        self.term = term
        self.term.connect("child-exited", self.child_exited)
        # NOTE(review): self.writefd presumably comes from
        # base.InstallProgress.__init__ above; VTE must keep that fd open in
        # the child so dpkg can report status back - confirm against the
        # apt.progress.base API.
        self.env = ["VTE_PTY_KEEP_FD=%s" % self.writefd,
                    "DEBIAN_FRONTEND=gnome",
                    "APT_LISTCHANGES_FRONTEND=gtk"]
        self._context = glib.main_context_default()
    def child_exited(self, term):
        """Called when a child process exits"""
        self.apt_status = term.get_child_exit_status()
        self.finished = True
    def error(self, pkg, errormsg):
        """Called when an error happens.
        Emits: status-error()
        """
        self.emit("status-error")
    def conffile(self, current, new):
        """Called during conffile.
        Emits: status-conffile()
        """
        self.emit("status-conffile")
    def start_update(self):
        """Called when the update starts.
        Emits: status-started()
        """
        self.emit("status-started")
    def run(self, obj):
        """Run the install; resets the finished flag first."""
        self.finished = False
        return base.InstallProgress.run(self, obj)
    def finish_update(self):
        """Called when the update finished.
        Emits: status-finished()
        """
        self.emit("status-finished")
    def processing(self, pkg, stage):
        """Called when entering a new stage in dpkg."""
        # We have no percentage or alike, send -1 to let the bar pulse.
        # NOTE(review): unlike other UI strings this one is not wrapped
        # in _() for translation - confirm whether that is intentional.
        self.emit("status-changed", ("Installing %s...") % pkg, -1)
    def status_change(self, pkg, percent, status):
        """Called when the status changed.
        Emits: status-changed(status, percent)
        """
        # Remember when we last made progress, for the hang timeout below.
        self.time_last_update = time.time()
        self.emit("status-changed", status, percent)
    def update_interface(self):
        """Called periodically to update the interface.
        Emits: status-timeout() [When a timeout happens]
        """
        base.InstallProgress.update_interface(self)
        # Keep the GUI responsive while dpkg is working.
        while self._context.pending():
            self._context.iteration()
        # No status change for INSTALL_TIMEOUT seconds: assume a hang.
        if self.time_last_update + self.INSTALL_TIMEOUT < time.time():
            self.emit("status-timeout")
    def fork(self):
        """Fork the dpkg child inside the VTE terminal's pty."""
        return self.term.forkpty(envv=self.env)
    def wait_child(self):
        """Wait for the child process to exit."""
        # Poll until child_exited() fires, servicing the UI meanwhile.
        while not self.finished:
            self.update_interface()
            time.sleep(0.02)
        return self.apt_status
# Backwards-compatible alias: dpkg installs use the same progress class.
GDpkgInstallProgress = GInstallProgress
class GAcquireProgress(gobject.GObject, base.AcquireProgress):
    """A Fetch Progress with GObject signals.

    Signals:

        * status-changed(str: description, int: percent)
        * status-started()
        * status-finished()

    DEPRECATED.
    """

    __gsignals__ = {"status-changed": mksig((str, int)),
                    "status-started": mksig(),
                    "status-finished": mksig()}

    def __init__(self):
        base.AcquireProgress.__init__(self)
        gobject.GObject.__init__(self)
        self._continue = True
        self._context = glib.main_context_default()

    def start(self):
        """Called when fetching starts.  Emits: status-started()"""
        base.AcquireProgress.start(self)
        self.emit("status-started")

    def stop(self):
        """Called when fetching is done.  Emits: status-finished()"""
        base.AcquireProgress.stop(self)
        self.emit("status-finished")

    def cancel(self):
        """Ask the running fetch to stop at the next pulse."""
        self._continue = False

    def pulse(self, owner):
        """Periodic progress callback.

        Emits status-changed(description, percent), keeps the UI
        responsive, and returns False once cancel() has been called
        (which aborts the fetch).
        """
        base.AcquireProgress.pulse(self, owner)
        current_item = self.current_items + 1
        if current_item > self.total_items:
            current_item = self.total_items
        if self.current_cps > 0:
            text = (_("Downloading file %(current)li of %(total)li with "
                      "%(speed)s/s") %
                    {"current": current_item,
                     "total": self.total_items,
                     "speed": apt_pkg.size_to_str(self.current_cps)})
        else:
            text = (_("Downloading file %(current)li of %(total)li") %
                    {"current": current_item,
                     "total": self.total_items})
        # BUGFIX: guard against ZeroDivisionError when there is nothing
        # to fetch (total_bytes and total_items can both be 0).
        total = float(self.total_bytes + self.total_items)
        if total:
            percent = ((self.current_bytes + self.current_items) * 100.0 /
                       total)
        else:
            percent = 0.0
        self.emit("status-changed", text, percent)
        while self._context.pending():
            self._context.iteration()
        return self._continue
class GtkAptProgress(gtk.VBox):
    """Graphical progress for installation/fetch/operations.

    This widget provides a progress bar, a terminal and a status bar for
    showing the progress of package manipulation tasks.
    """

    def __init__(self):
        gtk.VBox.__init__(self)
        self.set_spacing(6)
        # Setup some child widgets
        self._expander = gtk.Expander(_("Details"))
        self._terminal = vte.Terminal()
        self._expander.add(self._terminal)
        self._progressbar = gtk.ProgressBar()
        # Setup the always italic status label
        self._label = gtk.Label()
        attr_list = pango.AttrList()
        attr_list.insert(pango.AttrStyle(pango.STYLE_ITALIC, 0, -1))
        self._label.set_attributes(attr_list)
        self._label.set_ellipsize(pango.ELLIPSIZE_END)
        self._label.set_alignment(0, 0)
        # add child widgets
        self.pack_start(self._progressbar, False)
        self.pack_start(self._label, False)
        self.pack_start(self._expander, False)
        # Setup the internal progress handlers
        self._progress_open = GOpProgress()
        self._progress_open.connect("status-changed", self._on_status_changed)
        self._progress_open.connect("status-started", self._on_status_started)
        self._progress_open.connect("status-finished",
                                    self._on_status_finished)
        self._progress_acquire = GAcquireProgress()
        self._progress_acquire.connect("status-changed",
                                       self._on_status_changed)
        self._progress_acquire.connect("status-started",
                                       self._on_status_started)
        self._progress_acquire.connect("status-finished",
                                       self._on_status_finished)
        # Kept for backwards compatibility only; the acquire handler above
        # is the object actually used for downloads.
        self._progress_fetch = None
        self._progress_install = GInstallProgress(self._terminal)
        self._progress_install.connect("status-changed",
                                       self._on_status_changed)
        self._progress_install.connect("status-started",
                                       self._on_status_started)
        self._progress_install.connect("status-finished",
                                       self._on_status_finished)
        self._progress_install.connect("status-timeout",
                                       self._on_status_timeout)
        # Errors and conffile prompts expand the terminal like timeouts,
        # so the user can see/answer dpkg's output.
        self._progress_install.connect("status-error",
                                       self._on_status_timeout)
        self._progress_install.connect("status-conffile",
                                       self._on_status_timeout)

    def clear(self):
        """Reset all status information."""
        self._label.set_label("")
        self._progressbar.set_fraction(0)
        self._expander.set_expanded(False)

    @property
    def open(self):
        """Return the cache opening progress handler."""
        return self._progress_open

    @property
    def install(self):
        """Return the install progress handler."""
        return self._progress_install

    @property
    def dpkg_install(self):
        """Return the install progress handler for dpkg."""
        return self._progress_install

    @property
    def acquire(self):
        """Return the acquire progress handler."""
        return self._progress_acquire

    def _on_status_started(self, progress):
        """Called when something starts."""
        self._on_status_changed(progress, _("Starting..."), 0)
        while gtk.events_pending():
            gtk.main_iteration()

    def _on_status_finished(self, progress):
        """Called when something finished."""
        self._on_status_changed(progress, _("Complete"), 100)
        while gtk.events_pending():
            gtk.main_iteration()

    def _on_status_changed(self, progress, status, percent):
        """Called when the status changed."""
        self._label.set_text(status)
        if percent is None or percent == -1:
            # no usable percentage: let the bar pulse instead
            self._progressbar.pulse()
        else:
            self._progressbar.set_fraction(percent / 100.0)
        while gtk.events_pending():
            gtk.main_iteration()

    def _on_status_timeout(self, progress):
        """Called when timeout happens."""
        self._expander.set_expanded(True)
        while gtk.events_pending():
            gtk.main_iteration()

    def cancel_download(self):
        """Cancel a currently running download."""
        # BUGFIX: self._progress_fetch is never assigned (always None), so
        # cancelling it raised AttributeError; cancel the acquire handler,
        # which is the progress object actually driving downloads.
        self._progress_acquire.cancel()

    def show_terminal(self, expanded=False):
        """Show the expander for the terminal.

        Show an expander with a terminal widget which provides a way
        to interact with dpkg
        """
        self._expander.show()
        self._terminal.show()
        self._expander.set_expanded(expanded)
        while gtk.events_pending():
            gtk.main_iteration()

    def hide_terminal(self):
        """Hide the expander with the terminal widget."""
        self._expander.hide()
        while gtk.events_pending():
            gtk.main_iteration()

    def show(self):
        """Show the Box"""
        # BUGFIX: was gtk.HBox.show(self), although this class derives
        # from gtk.VBox; call the correct parent class.
        gtk.VBox.show(self)
        self._label.show()
        self._progressbar.show()
        while gtk.events_pending():
            gtk.main_iteration()
def _test():
    """Interactive demo: toggle xterm's install state with a progress UI."""
    import sys

    import apt
    from apt.debfile import DebPackage

    window = gtk.Window()
    progress = GtkAptProgress()
    window.set_title("GtkAptProgress Demo")
    window.add(progress)
    progress.show()
    window.show()
    cache = apt.cache.Cache(progress.open)
    pkg = cache["xterm"]
    if pkg.is_installed:
        pkg.mark_delete()
    else:
        pkg.mark_install()
    progress.show_terminal(True)
    try:
        cache.commit(progress.acquire, progress.install)
    except Exception as exc:
        print("Exception happened:", exc, file=sys.stderr)
    if len(sys.argv) > 1:
        deb = DebPackage(sys.argv[1], cache)
        deb.install(progress.dpkg_install)
    window.connect("destroy", gtk.main_quit)
    gtk.main()
# Run the interactive demo when executed as a script.
if __name__ == "__main__":
    _test()
# vim: ts=4 et sts=4
| |
# this is the data access layer
import pyes
import json
import uuid
import UserDict
import httplib
import urllib
from datetime import datetime
import hashlib
import logging
from werkzeug import generate_password_hash, check_password_hash
from flask.ext.login import UserMixin
from bibserver.config import config
import bibserver.util, bibserver.auth
log = logging.getLogger(__name__)
def make_id(data):
    """Derive a stable id for *data* from an md5 hash of its JSON form.

    An existing '_id' value is always honoured.  The volatile
    '_last_modified' and '_created' fields are excluded from the hash so
    re-saving unchanged content yields the same id.
    ##TODO Ignore ALL fields that startswith _
    """
    if '_id' in data:
        return data['_id']
    hashable = {k: v for k, v in data.items()
                if k not in ('_last_modified', '_created')}
    buf = json.dumps(hashable, sort_keys=True)
    # BUGFIX: hashlib requires bytes on Python 3; str.encode also exists
    # on Python 2, so this stays 2/3-compatible.
    return hashlib.md5(buf.encode('utf-8')).hexdigest()
def init_db():
    """Create the ElasticSearch index (if needed) and PUT missing mappings."""
    conn, db = get_conn()
    try:
        conn.create_index(db)
    except:
        # Best-effort: the index usually exists already; any error ignored.
        pass
    mappings = config["mappings"]
    for mapping in mappings:
        host = str(config['ELASTIC_SEARCH_HOST']).rstrip('/')
        db_name = config['ELASTIC_SEARCH_DB']
        fullpath = '/' + db_name + '/' + mapping + '/_mapping'
        c = httplib.HTTPConnection(host)
        c.request('GET', fullpath)
        result = c.getresponse()
        # Only PUT the mapping when ES reports it absent (404).
        if result.status == 404:
            print mapping
            c = httplib.HTTPConnection(host)
            c.request('PUT', fullpath, json.dumps(mappings[mapping]))
            res = c.getresponse()
            print res.read()
def get_conn():
    """Return a (pyes connection, index name) pair from the configuration."""
    es_host = str(config["ELASTIC_SEARCH_HOST"])
    index_name = config["ELASTIC_SEARCH_DB"]
    return pyes.ES([es_host]), index_name
class InvalidDAOIDException(Exception):
    """Raised when a DAO operation receives an invalid object id."""
class DomainObject(UserDict.IterableUserDict):
    """Dict-like base class mapping a domain object onto an ES doc type.

    Subclasses set ``__type__`` to the ElasticSearch type they represent.
    """
    # set __type__ on inheriting class to determine elasticsearch object
    __type__ = None
    def __init__(self, **kwargs):
        '''Initialize a domain object with key/value pairs of attributes.
        '''
        # IterableUserDict expects internal dictionary to be on data attribute
        if '_source' in kwargs:
            # built from a raw ES hit: keep _source as the data, the
            # remaining metadata (_version etc.) on self.meta
            self.data = dict(kwargs['_source'])
            self.meta = dict(kwargs)
            del self.meta['_source']
        else:
            self.data = dict(kwargs)
    @property
    def id(self):
        '''Get id of this object.'''
        return self.data.get('_id', None)
    @property
    def version(self):
        '''ES document version; only available when built from a raw hit.'''
        return self.meta.get('_version', None)
    def save(self):
        '''Save to backend storage.'''
        # TODO: refresh object with result of save
        return self.upsert(self.data)
    def delete(self):
        '''Delete this document from ES via a raw HTTP DELETE.'''
        url = str(config['ELASTIC_SEARCH_HOST'])
        loc = config['ELASTIC_SEARCH_DB'] + "/" + self.__type__ + "/" + self.id
        conn = httplib.HTTPConnection(url)
        conn.request('DELETE', loc)
        resp = conn.getresponse()
        return ''
    @classmethod
    def get(cls, id_):
        '''Retrieve object by id.'''
        if id_ is None:
            return None
        conn, db = get_conn()
        try:
            out = conn.get(db, cls.__type__, id_)
            return cls(**out)
        except pyes.exceptions.ElasticSearchException, inst:
            # 404 means "no such document"; anything else is a real error
            if inst.status == 404:
                return None
            else:
                raise
    @classmethod
    def get_mapping(cls):
        '''Return the ES mapping for this document type.'''
        conn, db = get_conn()
        return conn.get_mapping(cls.__type__, db)
    @classmethod
    def get_facets_from_mapping(cls,mapping=False,prefix=''):
        # return a sorted list of all the keys in the index
        # NOTE(review): has_key is Python-2 only, and the inner loop below
        # rebinds the outer loop variable 'item' - works, but fragile.
        if not mapping:
            mapping = cls.get_mapping()[cls.__type__]['properties']
        keys = []
        for item in mapping:
            if mapping[item].has_key('fields'):
                for item in mapping[item]['fields'].keys():
                    if item != 'exact' and not item.startswith('_'):
                        keys.append(prefix + item + config['facet_field'])
            else:
                # nested object: recurse with a dotted prefix
                keys = keys + cls.get_facets_from_mapping(mapping=mapping[item]['properties'],prefix=prefix+item+'.')
        keys.sort()
        return keys
    @classmethod
    def upsert(cls, data, state=None):
        '''Update backend object with a dictionary of data.
        If no id is supplied an uuid id will be created before saving.
        '''
        conn, db = get_conn()
        cls.bulk_upsert([data], state)
        conn.flush_bulk()
        # TODO: should we really do a cls.get() ?
        return cls(**data)
    @classmethod
    def bulk_upsert(cls, dataset, state=None):
        '''Bulk update backend object with a list of dicts of data.
        If no id is supplied an uuid id will be created before saving.'''
        # NOTE(review): 'state' is accepted but never used; despite the
        # docstring, ids come from make_id (an md5 hash), not uuid.
        conn, db = get_conn()
        for data in dataset:
            # silently skip anything that is not a dict
            if not type(data) is dict: continue
            if '_id' in data:
                id_ = data['_id'].strip()
            else:
                id_ = make_id(data)
                data['_id'] = id_
            if '_created' not in data:
                data['_created'] = datetime.now().strftime("%Y%m%d%H%M%S")
            data['_last_modified'] = datetime.now().strftime("%Y%m%d%H%M%S")
            index_result = conn.index(data, db, cls.__type__, urllib.quote_plus(id_), bulk=True)
        # refresh required after bulk index
        conn.refresh()
    @classmethod
    def delete_by_query(cls, query):
        '''Delete all docs of this type matching the Lucene query string;
        returns the raw ES response body.'''
        url = str(config['ELASTIC_SEARCH_HOST'])
        loc = config['ELASTIC_SEARCH_DB'] + "/" + cls.__type__ + "/_query?q=" + urllib.quote_plus(query)
        conn = httplib.HTTPConnection(url)
        conn.request('DELETE', loc)
        resp = conn.getresponse()
        return resp.read()
    @classmethod
    def query(cls, q='', terms=None, facet_fields=None, flt=False, default_operator='AND', **kwargs):
        '''Perform a query on backend.
        :param q: maps to query_string parameter.
        :param terms: dictionary of terms to filter on. values should be lists.
        :param facet_fields: list of dicts with 'key' (and optional
        'size'/'order') describing term facets to compute.
        :param flt: use a fuzzy-like-this query instead of a string query.
        :param kwargs: any keyword args as per
        http://www.elasticsearch.org/guide/reference/api/search/uri-request.html
        '''
        conn, db = get_conn()
        if not q:
            ourq = pyes.query.MatchAllQuery()
        else:
            if flt:
                ourq = pyes.query.FuzzyLikeThisQuery(like_text=q,**kwargs)
            else:
                ourq = pyes.query.StringQuery(q, default_operator=default_operator)
        if terms:
            # AND each term filter onto the base query
            for term in terms:
                if isinstance(terms[term],list):
                    for val in terms[term]:
                        termq = pyes.query.TermQuery(term, val)
                        ourq = pyes.query.BoolQuery(must=[ourq,termq])
                else:
                    termq = pyes.query.TermQuery(term, terms[term])
                    ourq = pyes.query.BoolQuery(must=[ourq,termq])
        ourq = ourq.search(**kwargs)
        if facet_fields:
            for item in facet_fields:
                ourq.facet.add_term_facet(item['key'], size=item.get('size',100), order=item.get('order',"count"))
        out = conn.search(ourq, db, cls.__type__)
        return out
    @classmethod
    def raw_query(self, query_string):
        '''Run query_string against the raw ES _search endpoint and return
        the unparsed response body.'''
        # NOTE(review): first parameter is named 'self' but this is a
        # classmethod, so it actually receives the class.
        host = str(config['ELASTIC_SEARCH_HOST']).rstrip('/')
        db_path = config['ELASTIC_SEARCH_DB']
        fullpath = '/' + db_path + '/' + self.__type__ + '/_search' + '?' + query_string
        c = httplib.HTTPConnection(host)
        c.request('GET', fullpath)
        result = c.getresponse()
        # pass through the result raw
        return result.read()
class Record(DomainObject):
    """A single bibliographic record document."""

    __type__ = 'record'
class Note(DomainObject):
    """A user note attached to a record."""

    __type__ = 'note'

    @classmethod
    def about(cls, id_):
        """Return the raw note dicts whose 'about' field is *id_*.

        Returns None when *id_* is None.
        """
        if id_ is None:
            return None
        _conn, _db = get_conn()
        matches = Note.query(terms={"about": id_})
        return [hit['_source'] for hit in matches['hits']['hits']]
class Collection(DomainObject):
    """A named collection of records belonging to one owner."""
    __type__ = 'collection'
    @property
    def records(self):
        '''All Record objects in this owner/collection pair.'''
        # first query learns the total, second re-queries with size=total
        size = Record.query(terms={'owner':self['owner'],'collection':self['collection']})['hits']['total']
        if size != 0:
            # NOTE(review): fetches each record individually (N+1 requests);
            # the _source from the size query could likely be used directly.
            res = [Record.get(i['_source']['_id']) for i in Record.query(terms={'owner':self['owner'],'collection':self['collection']},size=size)['hits']['hits']]
        else: res = []
        return res
    @classmethod
    def get_by_owner_coll(cls,owner,coll):
        '''Return the unique collection for (owner, coll), or None.'''
        res = cls.query(terms={'owner':owner,'collection':coll})
        if res['hits']['total'] == 1:
            return cls(**res['hits']['hits'][0]['_source'])
        else:
            return None
    def delete(self):
        '''Delete the collection document, then every record inside it.'''
        url = str(config['ELASTIC_SEARCH_HOST'])
        loc = config['ELASTIC_SEARCH_DB'] + "/" + self.__type__ + "/" + self.id
        # debug output left in deliberately? NOTE(review): consider logging
        print loc
        conn = httplib.HTTPConnection(url)
        conn.request('DELETE', loc)
        resp = conn.getresponse()
        print resp.read()
        for record in self.records:
            record.delete()
    def __len__(self):
        '''Number of records in this collection.'''
        res = Record.query(terms={'owner':self['owner'],'collection':self['collection']})
        return res['hits']['total']
class Account(DomainObject, UserMixin):
    """A user account document, usable as a Flask-Login user."""

    __type__ = 'account'

    def set_password(self, password):
        """Store a salted hash of *password* (never the plain text)."""
        self.data['password'] = generate_password_hash(password)

    def check_password(self, password):
        """Return True when *password* matches the stored hash."""
        return check_password_hash(self.data['password'], password)

    @property
    def is_super(self):
        """Whether this account has superuser rights."""
        return bibserver.auth.user.is_super(self)

    @property
    def collections(self):
        """Every Collection owned by this account."""
        hits = Collection.query(terms={'owner': [self.id]})['hits']['hits']
        return [Collection(**hit['_source']) for hit in hits]

    @property
    def notes(self):
        """Every Note owned by this account."""
        hits = Note.query(terms={'owner': [self.id]})['hits']['hits']
        return [Note(**hit['_source']) for hit in hits]

    def delete(self):
        """Delete the account document, then its collections and notes."""
        host = str(config['ELASTIC_SEARCH_HOST'])
        path = config['ELASTIC_SEARCH_DB'] + "/" + self.__type__ + "/" + self.id
        connection = httplib.HTTPConnection(host)
        connection.request('DELETE', path)
        connection.getresponse()
        for coll in self.collections:
            coll.delete()
        for note in self.notes:
            note.delete()
| |
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from feature_statistics_generator import FeatureStatisticsGenerator
import numpy as np
import tensorflow as tf
from tensorflow.python.platform import googletest
class FeatureStatisticsGeneratorTest(googletest.TestCase):
  def setUp(self):
    # A fresh generator per test so no state leaks between test cases.
    self.fs = FeatureStatisticsGenerator()
def testParseExampleInt(self):
# Tests parsing examples of integers
examples = []
for i in range(50):
example = tf.train.Example()
example.features.feature['num'].int64_list.value.append(i)
examples.append(example)
entries = {}
for i, example in enumerate(examples):
self.fs._ParseExample(example.features.feature, [], entries, i)
self.assertEqual(1, len(entries))
self.assertIn('num', entries)
info = entries['num']
self.assertEqual(0, info['missing'])
self.assertEqual(self.fs.fs_proto.INT, info['type'])
for i in range(len(examples)):
self.assertEqual(1, info['counts'][i])
self.assertEqual(i, info['vals'][i])
def testParseExampleMissingValueList(self):
# Tests parsing examples of integers
examples = []
example = tf.train.Example()
# pylint: disable=pointless-statement
example.features.feature['str']
# pylint: enable=pointless-statement
examples.append(example)
example = tf.train.Example()
example.features.feature['str'].bytes_list.value.append(b'test')
examples.append(example)
entries = {}
for i, example in enumerate(examples):
self.fs._ParseExample(example.features.feature, [], entries, i)
self.assertEqual(1, len(entries))
self.assertIn('str', entries)
info = entries['str']
self.assertEqual(1, info['missing'])
self.assertEqual(self.fs.fs_proto.STRING, info['type'])
self.assertEqual(0, info['counts'][0])
self.assertEqual(1, info['counts'][1])
def _check_sequence_example_entries(self,
entries,
n_examples,
n_features,
feat_len=None):
self.assertIn('num', entries)
info = entries['num']
self.assertEqual(0, info['missing'])
self.assertEqual(self.fs.fs_proto.INT, info['type'])
for i in range(n_examples):
self.assertEqual(n_features, info['counts'][i])
if feat_len is not None:
self.assertEqual(feat_len, info['feat_lens'][i])
for i in range(n_examples * n_features):
self.assertEqual(i, info['vals'][i])
if feat_len is None:
self.assertEqual(0, len(info['feat_lens']))
def testParseExampleSequenceContext(self):
# Tests parsing examples of integers in context field
examples = []
for i in range(50):
example = tf.train.SequenceExample()
example.context.feature['num'].int64_list.value.append(i)
examples.append(example)
entries = {}
for i, example in enumerate(examples):
self.fs._ParseExample(example.context.feature,
example.feature_lists.feature_list, entries, i)
self._check_sequence_example_entries(entries, 50, 1)
self.assertEqual(1, len(entries))
def testParseExampleSequenceFeatureList(self):
examples = []
for i in range(50):
example = tf.train.SequenceExample()
feat = example.feature_lists.feature_list['num'].feature.add()
feat.int64_list.value.append(i)
examples.append(example)
entries = {}
for i, example in enumerate(examples):
self.fs._ParseExample(example.context.feature,
example.feature_lists.feature_list, entries, i)
self._check_sequence_example_entries(entries, 50, 1, 1)
def testParseExampleSequenceFeatureListMultipleEntriesInner(self):
examples = []
for i in range(2):
example = tf.train.SequenceExample()
feat = example.feature_lists.feature_list['num'].feature.add()
for j in range(25):
feat.int64_list.value.append(i * 25 + j)
examples.append(example)
entries = {}
for i, example in enumerate(examples):
self.fs._ParseExample(example.context.feature,
example.feature_lists.feature_list, entries, i)
self._check_sequence_example_entries(entries, 2, 25, 1)
def testParseExampleSequenceFeatureListMultipleEntriesOuter(self):
# Tests parsing examples of integers in context field
examples = []
for i in range(2):
example = tf.train.SequenceExample()
for j in range(25):
feat = example.feature_lists.feature_list['num'].feature.add()
feat.int64_list.value.append(i * 25 + j)
examples.append(example)
entries = {}
for i, example in enumerate(examples):
self.fs._ParseExample(example.context.feature,
example.feature_lists.feature_list, entries, i)
self._check_sequence_example_entries(entries, 2, 25, 25)
def testVaryingCountsAndMissing(self):
# Tests parsing examples of when some examples have missing features
examples = []
for i in range(5):
example = tf.train.Example()
example.features.feature['other'].int64_list.value.append(0)
for _ in range(i):
example.features.feature['num'].int64_list.value.append(i)
examples.append(example)
example = tf.train.Example()
example.features.feature['other'].int64_list.value.append(0)
examples.append(example)
entries = {}
for i, example in enumerate(examples):
self.fs._ParseExample(example.features.feature, [], entries, i)
info = entries['num']
self.assertEqual(2, info['missing'])
self.assertEqual(4, len(info['counts']))
for i in range(4):
self.assertEqual(i + 1, info['counts'][i])
self.assertEqual(10, len(info['vals']))
def testParseExampleStringsAndFloats(self):
# Tests parsing examples of string and float features
examples = []
for i in range(50):
example = tf.train.Example()
example.features.feature['str'].bytes_list.value.append(b'hi')
example.features.feature['float'].float_list.value.append(i)
examples.append(example)
entries = {}
for i, example in enumerate(examples):
self.fs._ParseExample(example.features.feature, [], entries, i)
self.assertEqual(2, len(entries))
self.assertEqual(self.fs.fs_proto.FLOAT, entries['float']['type'])
self.assertEqual(self.fs.fs_proto.STRING, entries['str']['type'])
for i in range(len(examples)):
self.assertEqual(1, entries['str']['counts'][i])
self.assertEqual(1, entries['float']['counts'][i])
self.assertEqual(i, entries['float']['vals'][i])
self.assertEqual('hi', entries['str']['vals'][i].decode(
'UTF-8', 'strict'))
def testParseExamplesTypeMismatch(self):
examples = []
example = tf.train.Example()
example.features.feature['feat'].int64_list.value.append(0)
examples.append(example)
example = tf.train.Example()
example.features.feature['feat'].bytes_list.value.append(b'str')
examples.append(example)
entries = {}
self.fs._ParseExample(examples[0].features.feature, [], entries, 0)
with self.assertRaises(TypeError):
self.fs._ParseExample(examples[1].features.feature, [], entries, 1)
def testGetDatasetsProtoFromEntriesLists(self):
entries = {}
entries['testFeature'] = {
'vals': [1, 2, 3],
'counts': [1, 1, 1],
'missing': 0,
'type': self.fs.fs_proto.INT
}
datasets = [{'entries': entries, 'size': 3, 'name': 'testDataset'}]
p = self.fs.GetDatasetsProto(datasets)
self.assertEqual(1, len(p.datasets))
test_data = p.datasets[0]
self.assertEqual('testDataset', test_data.name)
self.assertEqual(3, test_data.num_examples)
self.assertEqual(1, len(test_data.features))
numfeat = test_data.features[0]
self.assertEqual('testFeature', numfeat.name)
self.assertEqual(self.fs.fs_proto.INT, numfeat.type)
self.assertEqual(1, numfeat.num_stats.min)
self.assertEqual(3, numfeat.num_stats.max)
def testGetProtoNums(self):
# Tests converting int examples into the feature stats proto
examples = []
for i in range(50):
example = tf.train.Example()
example.features.feature['num'].int64_list.value.append(i)
examples.append(example)
example = tf.train.Example()
example.features.feature['other'].int64_list.value.append(0)
examples.append(example)
entries = {}
for i, example in enumerate(examples):
self.fs._ParseExample(example.features.feature, [], entries, i)
datasets = [{'entries': entries, 'size': len(examples), 'name': 'test'}]
p = self.fs.GetDatasetsProto(datasets)
self.assertEqual(1, len(p.datasets))
test_data = p.datasets[0]
self.assertEqual('test', test_data.name)
self.assertEqual(51, test_data.num_examples)
numfeat = test_data.features[0] if (
test_data.features[0].name == 'num') else test_data.features[1]
self.assertEqual('num', numfeat.name)
self.assertEqual(self.fs.fs_proto.INT, numfeat.type)
self.assertEqual(0, numfeat.num_stats.min)
self.assertEqual(49, numfeat.num_stats.max)
self.assertEqual(24.5, numfeat.num_stats.mean)
self.assertEqual(24.5, numfeat.num_stats.median)
self.assertEqual(1, numfeat.num_stats.num_zeros)
self.assertAlmostEqual(14.430869689, numfeat.num_stats.std_dev, 4)
self.assertEqual(1, numfeat.num_stats.common_stats.num_missing)
self.assertEqual(50, numfeat.num_stats.common_stats.num_non_missing)
self.assertEqual(1, numfeat.num_stats.common_stats.min_num_values)
self.assertEqual(1, numfeat.num_stats.common_stats.max_num_values)
self.assertAlmostEqual(1, numfeat.num_stats.common_stats.avg_num_values, 4)
hist = numfeat.num_stats.common_stats.num_values_histogram
buckets = hist.buckets
self.assertEqual(self.fs.histogram_proto.QUANTILES, hist.type)
self.assertEqual(10, len(buckets))
self.assertEqual(1, buckets[0].low_value)
self.assertEqual(1, buckets[0].high_value)
self.assertEqual(5, buckets[0].sample_count)
self.assertEqual(1, buckets[9].low_value)
self.assertEqual(1, buckets[9].high_value)
self.assertEqual(5, buckets[9].sample_count)
self.assertEqual(2, len(numfeat.num_stats.histograms))
buckets = numfeat.num_stats.histograms[0].buckets
self.assertEqual(self.fs.histogram_proto.STANDARD,
numfeat.num_stats.histograms[0].type)
self.assertEqual(10, len(buckets))
self.assertEqual(0, buckets[0].low_value)
self.assertEqual(4.9, buckets[0].high_value)
self.assertEqual(5, buckets[0].sample_count)
self.assertAlmostEqual(44.1, buckets[9].low_value)
self.assertEqual(49, buckets[9].high_value)
self.assertEqual(5, buckets[9].sample_count)
buckets = numfeat.num_stats.histograms[1].buckets
self.assertEqual(self.fs.histogram_proto.QUANTILES,
numfeat.num_stats.histograms[1].type)
self.assertEqual(10, len(buckets))
self.assertEqual(0, buckets[0].low_value)
self.assertEqual(4.9, buckets[0].high_value)
self.assertEqual(5, buckets[0].sample_count)
self.assertAlmostEqual(44.1, buckets[9].low_value)
self.assertEqual(49, buckets[9].high_value)
self.assertEqual(5, buckets[9].sample_count)
def testQuantiles(self):
examples = []
for i in range(50):
example = tf.train.Example()
example.features.feature['num'].int64_list.value.append(i)
examples.append(example)
for i in range(50):
example = tf.train.Example()
example.features.feature['num'].int64_list.value.append(100)
examples.append(example)
entries = {}
for i, example in enumerate(examples):
self.fs._ParseExample(example.features.feature, [], entries, i)
datasets = [{'entries': entries, 'size': len(examples), 'name': 'test'}]
p = self.fs.GetDatasetsProto(datasets)
numfeat = p.datasets[0].features[0]
self.assertEqual(2, len(numfeat.num_stats.histograms))
self.assertEqual(self.fs.histogram_proto.QUANTILES,
numfeat.num_stats.histograms[1].type)
buckets = numfeat.num_stats.histograms[1].buckets
self.assertEqual(10, len(buckets))
self.assertEqual(0, buckets[0].low_value)
self.assertEqual(9.9, buckets[0].high_value)
self.assertEqual(10, buckets[0].sample_count)
self.assertEqual(100, buckets[9].low_value)
self.assertEqual(100, buckets[9].high_value)
self.assertEqual(10, buckets[9].sample_count)
def testInfinityAndNan(self):
examples = []
for i in range(50):
example = tf.train.Example()
example.features.feature['num'].float_list.value.append(i)
examples.append(example)
example = tf.train.Example()
example.features.feature['num'].float_list.value.append(float('inf'))
examples.append(example)
example = tf.train.Example()
example.features.feature['num'].float_list.value.append(float('-inf'))
examples.append(example)
example = tf.train.Example()
example.features.feature['num'].float_list.value.append(float('nan'))
examples.append(example)
entries = {}
for i, example in enumerate(examples):
self.fs._ParseExample(example.features.feature, [], entries, i)
datasets = [{'entries': entries, 'size': len(examples), 'name': 'test'}]
p = self.fs.GetDatasetsProto(datasets)
numfeat = p.datasets[0].features[0]
self.assertEqual('num', numfeat.name)
self.assertEqual(self.fs.fs_proto.FLOAT, numfeat.type)
self.assertTrue(np.isnan(numfeat.num_stats.min))
self.assertTrue(np.isnan(numfeat.num_stats.max))
self.assertTrue(np.isnan(numfeat.num_stats.mean))
self.assertTrue(np.isnan(numfeat.num_stats.median))
self.assertEqual(1, numfeat.num_stats.num_zeros)
self.assertTrue(np.isnan(numfeat.num_stats.std_dev))
self.assertEqual(53, numfeat.num_stats.common_stats.num_non_missing)
hist = buckets = numfeat.num_stats.histograms[0]
buckets = hist.buckets
self.assertEqual(self.fs.histogram_proto.STANDARD, hist.type)
self.assertEqual(1, hist.num_nan)
self.assertEqual(10, len(buckets))
self.assertEqual(float('-inf'), buckets[0].low_value)
self.assertEqual(4.9, buckets[0].high_value)
self.assertEqual(6, buckets[0].sample_count)
self.assertEqual(44.1, buckets[9].low_value)
self.assertEqual(float('inf'), buckets[9].high_value)
self.assertEqual(6, buckets[9].sample_count)
def testInfinitysOnly(self):
examples = []
example = tf.train.Example()
example.features.feature['num'].float_list.value.append(float('inf'))
examples.append(example)
example = tf.train.Example()
example.features.feature['num'].float_list.value.append(float('-inf'))
examples.append(example)
entries = {}
for i, example in enumerate(examples):
self.fs._ParseExample(example.features.feature, [], entries, i)
datasets = [{'entries': entries, 'size': len(examples), 'name': 'test'}]
p = self.fs.GetDatasetsProto(datasets)
numfeat = p.datasets[0].features[0]
hist = buckets = numfeat.num_stats.histograms[0]
buckets = hist.buckets
self.assertEqual(self.fs.histogram_proto.STANDARD, hist.type)
self.assertEqual(10, len(buckets))
self.assertEqual(float('-inf'), buckets[0].low_value)
self.assertEqual(0.1, buckets[0].high_value)
self.assertEqual(1, buckets[0].sample_count)
self.assertEqual(0.9, buckets[9].low_value)
self.assertEqual(float('inf'), buckets[9].high_value)
self.assertEqual(1, buckets[9].sample_count)
def testGetProtoStrings(self):
# Tests converting string examples into the feature stats proto
examples = []
for i in range(2):
example = tf.train.Example()
example.features.feature['str'].bytes_list.value.append(b'hello')
examples.append(example)
for i in range(3):
example = tf.train.Example()
example.features.feature['str'].bytes_list.value.append(b'hi')
examples.append(example)
example = tf.train.Example()
example.features.feature['str'].bytes_list.value.append(b'hey')
examples.append(example)
entries = {}
for i, example in enumerate(examples):
self.fs._ParseExample(example.features.feature, [], entries, i)
datasets = [{'entries': entries, 'size': len(examples), 'name': 'test'}]
p = self.fs.GetDatasetsProto(datasets)
self.assertEqual(1, len(p.datasets))
test_data = p.datasets[0]
self.assertEqual('test', test_data.name)
self.assertEqual(6, test_data.num_examples)
strfeat = test_data.features[0]
self.assertEqual('str', strfeat.name)
self.assertEqual(self.fs.fs_proto.STRING, strfeat.type)
self.assertEqual(3, strfeat.string_stats.unique)
self.assertAlmostEqual(19 / 6.0, strfeat.string_stats.avg_length, 4)
self.assertEqual(0, strfeat.string_stats.common_stats.num_missing)
self.assertEqual(6, strfeat.string_stats.common_stats.num_non_missing)
self.assertEqual(1, strfeat.string_stats.common_stats.min_num_values)
self.assertEqual(1, strfeat.string_stats.common_stats.max_num_values)
self.assertEqual(1, strfeat.string_stats.common_stats.avg_num_values)
hist = strfeat.string_stats.common_stats.num_values_histogram
buckets = hist.buckets
self.assertEqual(self.fs.histogram_proto.QUANTILES, hist.type)
self.assertEqual(10, len(buckets))
self.assertEqual(1, buckets[0].low_value)
self.assertEqual(1, buckets[0].high_value)
self.assertEqual(.6, buckets[0].sample_count)
self.assertEqual(1, buckets[9].low_value)
self.assertEqual(1, buckets[9].high_value)
self.assertEqual(.6, buckets[9].sample_count)
self.assertEqual(2, len(strfeat.string_stats.top_values))
self.assertEqual(3, strfeat.string_stats.top_values[0].frequency)
self.assertEqual('hi', strfeat.string_stats.top_values[0].value)
self.assertEqual(2, strfeat.string_stats.top_values[1].frequency)
self.assertEqual('hello', strfeat.string_stats.top_values[1].value)
buckets = strfeat.string_stats.rank_histogram.buckets
self.assertEqual(3, len(buckets))
self.assertEqual(0, buckets[0].low_rank)
self.assertEqual(0, buckets[0].high_rank)
self.assertEqual(3, buckets[0].sample_count)
self.assertEqual('hi', buckets[0].label)
self.assertEqual(2, buckets[2].low_rank)
self.assertEqual(2, buckets[2].high_rank)
self.assertEqual(1, buckets[2].sample_count)
self.assertEqual('hey', buckets[2].label)
def testGetProtoMultipleDatasets(self):
# Tests converting multiple datsets into the feature stats proto
# including ensuring feature order is consistent in the protos.
examples1 = []
for i in range(2):
example = tf.train.Example()
example.features.feature['str'].bytes_list.value.append(b'one')
example.features.feature['num'].int64_list.value.append(0)
examples1.append(example)
examples2 = []
example = tf.train.Example()
example.features.feature['num'].int64_list.value.append(1)
example.features.feature['str'].bytes_list.value.append(b'two')
examples2.append(example)
entries1 = {}
for i, example1 in enumerate(examples1):
self.fs._ParseExample(example1.features.feature, [], entries1, i)
entries2 = {}
for i, example2 in enumerate(examples2):
self.fs._ParseExample(example2.features.feature, [], entries2, i)
datasets = [{
'entries': entries1,
'size': len(examples1),
'name': 'test1'
}, {
'entries': entries2,
'size': len(examples2),
'name': 'test2'
}]
p = self.fs.GetDatasetsProto(datasets)
self.assertEqual(2, len(p.datasets))
test_data_1 = p.datasets[0]
self.assertEqual('test1', test_data_1.name)
self.assertEqual(2, test_data_1.num_examples)
num_feat_index = 0 if test_data_1.features[0].name == 'num' else 1
self.assertEqual(0, test_data_1.features[num_feat_index].num_stats.max)
test_data_2 = p.datasets[1]
self.assertEqual('test2', test_data_2.name)
self.assertEqual(1, test_data_2.num_examples)
self.assertEqual(1, test_data_2.features[num_feat_index].num_stats.max)
def testGetEntriesNoFiles(self):
features, num_examples = self.fs._GetEntries(['test'], 10,
lambda unused_path: [])
self.assertEqual(0, num_examples)
self.assertEqual({}, features)
@staticmethod
def get_example_iter():
def ex_iter(unused_filename):
examples = []
for i in range(50):
example = tf.train.Example()
example.features.feature['num'].int64_list.value.append(i)
examples.append(example.SerializeToString())
return examples
return ex_iter
def testGetEntries_one(self):
features, num_examples = self.fs._GetEntries(['test'], 1,
self.get_example_iter())
self.assertEqual(1, num_examples)
self.assertTrue('num' in features)
def testGetEntries_oneFile(self):
unused_features, num_examples = self.fs._GetEntries(['test'], 1000,
self.get_example_iter())
self.assertEqual(50, num_examples)
def testGetEntries_twoFiles(self):
unused_features, num_examples = self.fs._GetEntries(['test0', 'test1'],
1000,
self.get_example_iter())
self.assertEqual(100, num_examples)
def testGetEntries_stopInSecondFile(self):
unused_features, num_examples = self.fs._GetEntries([
'test@0', 'test@1', 'test@2', 'test@3', 'test@4', 'test@5', 'test@6',
'test@7', 'test@8', 'test@9'
], 75, self.get_example_iter())
self.assertEqual(75, num_examples)
# Run the tests when executed directly.
if __name__ == '__main__':
  googletest.main()
| |
import math
from OpenGL.GL import *
from OpenGL.GLUT import *
from OpenGL.GLU import *
from random import random
import numpy as np
import random
import pywavefront
from OpenGL.raw.GLU import gluLookAt
import scipy.optimize as so
import scipy.integrate as si
import copy
def create_shader(shader_type, source):
    """Compile ``source`` as a GL shader of ``shader_type``; return its handle."""
    handle = glCreateShader(shader_type)
    glShaderSource(handle, source)
    glCompileShader(handle)
    return handle
def draw():
    """Render one frame from the module-level ``pointdata``/``pointcolor`` arrays."""
    glMatrixMode(GL_PROJECTION)
    # Legacy fixed-function pipeline: client-side vertex and color arrays.
    glEnableClientState(GL_VERTEX_ARRAY)
    glEnableClientState(GL_COLOR_ARRAY)
    glVertexPointer(3, GL_FLOAT, 0, pointdata)
    glColorPointer(3, GL_FLOAT, 0, pointcolor)
    glDrawArrays(GL_TRIANGLES, 0,len(pointdata)*3)#10262)#2196)
    glDisableClientState(GL_VERTEX_ARRAY)
    glDisableClientState(GL_COLOR_ARRAY)
    glutSwapBuffers()
    # NOTE(review): clearing AFTER the swap relies on the next frame redrawing
    # everything; the conventional order is glClear before drawing — confirm.
    glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT)
# Last observed mouse position; used to compute per-event deltas in the
# motion handlers below.
lastx=0
lasty=0
def MouseMotion (x, y):
    """Pan the scene by the mouse delta since the last event, then redraw."""
    global lastx, lasty
    dx = (x - lastx) / 300
    dy = (y - lasty) / 300
    glTranslatef(dx, -dy, 0)
    lastx, lasty = x, y
    glutPostRedisplay ()
def MouseRotate (x, y):
    """Rotate the scene around the Y and X axes by the mouse delta, then redraw."""
    global lastx, lasty, pointdata
    dx = (x - lastx) / 3
    dy = (y - lasty) / 3
    glRotatef(dx, 0, 1, 0)
    glRotatef(dy, 1, 0, 0)
    lastx, lasty = x, y
    glutPostRedisplay ()
def specialkeys(key, x, y):
    """GLUT special-key handler.

    Arrow keys rotate the model; END randomizes the palette; F1 advances the
    heat simulation two steps, recoloring the mesh by temperature each step.
    """
    global pointcolor
    if key == GLUT_KEY_UP:
        glRotatef(5, 1, 0, 0)
    if key == GLUT_KEY_DOWN:
        glRotatef(-5, 1, 0, 0)
    if key == GLUT_KEY_LEFT:
        glRotatef(5, 0, 1, 0)
    if key == GLUT_KEY_RIGHT:
        glRotatef(-5, 0, 1, 0)
    if key == GLUT_KEY_END:
        pointcolor = [[random(), random(), random()], [random(), random(), random()], [random(), random(), random()]]
    if key == GLUT_KEY_F1:
        # FIX: the outer repeat counter used to be `j`, which was immediately
        # shadowed by the inner `for j in range(...)`; renamed to `_` since it
        # is only a two-iteration repeat counter.
        for _ in range(0, 2):
            for i in range(len(list_of_vel)):
                Col = Temp_to_color(temperature.T0[i])
                # Paint every triangle of object i with its temperature color.
                for j in range(diapazon[i], diapazon[i + 1]):
                    pointcolor[j] = [Col, Col, Col]
            #for j in range(diapazon[4], diapazon[5]):
            #    pointcolor[j] = [1, 1, 1]
            temperature.next_step()
            print(temperature.T0)
def parseFile(file_name):
    """Parse a Wavefront-style .obj file split into multiple '# object' parts.

    Args:
        file_name: path to the .obj file.

    Returns:
        A tuple ``(list_of_count_of_vel, list_of_triang, list_of_vel)`` with,
        per object: the number of 'f' (face) records, the face index triples
        (rebased so each object's vertices start at 0), and the vertex
        coordinate triples.
    """
    count = 0                 # faces seen in the current object
    start = 0                 # becomes 1 once the first object/face is seen
    list_of_count_of_vel = []
    list_of_vel = []
    list_of_triang = []
    index = 0
    lst_vel = []
    lst_f = []
    total = 1                 # .obj face indices are 1-based and file-global
    count_of_v = 0            # vertices seen in the current object
    # FIX: open the file with a context manager so the handle is always
    # closed (the original iterated a bare open() and never closed it).
    with open(file_name, 'r') as obj_file:
        for line in obj_file:
            values = line.split()
            if len(values) < 2:
                continue
            if values[0] == '#' and values[1] == 'object' and count != 0:
                # A new object starts: flush the one accumulated so far.
                list_of_count_of_vel.append(count)
                list_of_vel.append(lst_vel)
                list_of_triang.append(lst_f)
                index = index + 1
                total = total + count_of_v
                count_of_v = 0
                count = 0
                lst_vel = []
                lst_f = []
            if values[0] == '#' and values[1] == 'object' and count == 0:
                start = 1
            if values[0] == 'f' and count == 0:
                start = 1
            if start == 1 and values[0] == 'f':
                count = count + 1
                # Rebase the global 1-based indices to per-object 0-based ones.
                lst_f.append([float(values[1]) - total, float(values[2]) - total, float(values[3]) - total])
            if start == 1 and values[0] == 'v':
                lst_vel.append([float(values[1]), float(values[2]), float(values[3])])
                count_of_v = count_of_v + 1
    # Flush the final object.
    list_of_vel.append(lst_vel)
    list_of_triang.append(lst_f)
    list_of_count_of_vel.append(count)
    return list_of_count_of_vel, list_of_triang, list_of_vel
def Form_Triangles(list_of_vel, list_of_tri):
    """Build a flat array of vertex triangles plus per-object index ranges.

    Args:
        list_of_vel: per-object lists of [x, y, z] vertex coordinates.
        list_of_tri: per-object lists of [i, j, k] vertex-index triples.

    Returns:
        ``(triangles, diapazon)``: ``triangles`` is an (N, 3, 3) array of
        vertex coordinates; ``diapazon[i]:diapazon[i+1]`` is the slice of
        ``triangles`` belonging to object ``i``.
    """
    triangles = []
    # FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24; the plain
    # builtin `int` is the supported equivalent for this dtype.
    diapazon = np.zeros(len(list_of_vel) + 1, dtype=int)
    for i in range(len(list_of_vel)):
        diapazon[i + 1] = diapazon[i] + len(list_of_tri[i])
        for el in list_of_tri[i]:
            triangle = np.array([list_of_vel[i][int(el[0])], list_of_vel[i][int(el[1])], list_of_vel[i][int(el[2])]])
            triangles.append(triangle)
    return np.array(triangles), diapazon
def Temp_to_color(temp):
    """Map a temperature to an RGB triple using Gaussian color ramps.

    Below 50 degrees a blue-to-green ramp is used; at or above 50 a
    red-dominant ramp is used.
    """
    B = 5
    D = 5
    if temp < 50:
        red = np.exp(-(temp - 50) ** 2 / B)
        green = np.exp(-(temp) ** 2 / B)
        blue = np.exp(-(temp + 50) ** 2 / B)
        return [red, green, blue]
    return [np.exp(-(temp - 62) ** 2 / B), 0, np.exp(-(temp - 60) ** 2 / D)]
def SquareTr(triangle):
    """Area of a 3-D triangle (array of three vertices) via Heron's formula."""
    side_a = np.linalg.norm(triangle[0] - triangle[1])
    side_b = np.linalg.norm(triangle[0] - triangle[2])
    side_c = np.linalg.norm(triangle[2] - triangle[1])
    semi = (side_a + side_b + side_c) / 2
    return (semi * (semi - side_a) * (semi - side_b) * (semi - side_c)) ** 0.5
def SquaresOfParts(triangles, diapazon):
    """Total surface area of each object, summed over its triangle range."""
    return [
        sum(SquareTr(triangles[j]) for j in range(diapazon[i], diapazon[i + 1]))
        for i in range(len(diapazon) - 1)
    ]
if __name__ == '__main__':
    # --- GLUT / OpenGL window and callback setup -------------------------
    glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB)
    glutInitWindowSize(300, 300)
    glutInitWindowPosition(50, 50)
    glutInit(sys.argv)
    glutCreateWindow(b"Shaders!")
    glutDisplayFunc(draw)
    glutIdleFunc(draw)
    glutSpecialFunc(specialkeys)
    glutPassiveMotionFunc(MouseMotion)
    glutMotionFunc(MouseRotate)
    glClearColor(0.2, 0.2, 0.2, 1)
    glEnable(GL_DEPTH_TEST)
    glEnable(GL_MULTISAMPLE)
    # Minimal pass-through shader pair: forward the vertex color.
    vertex = create_shader(GL_VERTEX_SHADER, """
varying vec4 vertex_color;
void main() {
gl_Position = gl_ModelViewProjectionMatrix * gl_Vertex;
vertex_color = gl_Color;
}
""")
    fragment = create_shader(GL_FRAGMENT_SHADER, """
varying vec4 vertex_color;
void main() {
gl_FragColor = vertex_color;
}
""")
    program = glCreateProgram()
    glAttachShader(program, vertex)
    glAttachShader(program, fragment)
    glLinkProgram(program)
    glUseProgram(program)
    # --- Load geometry and build per-object triangle ranges --------------
    listOfDotsNumber, list_of_tri, list_of_vel = parseFile('model2.obj')
    NumberOfParts = len(listOfDotsNumber)
    print("M=",NumberOfParts)
    triangles, diapazon = Form_Triangles(list_of_vel, list_of_tri)
    print("triangles", triangles)
    print("range", diapazon)
    squareOfParts = SquaresOfParts(triangles, diapazon)
    print("squares", squareOfParts)
    # Normalize coordinates into roughly [-0.5, 0.5] for display.
    triangles /= (2 * triangles.max())
    pointcolorTri = np.zeros((diapazon[len(list_of_vel)], 3, 3))
    from random import random
    # Initial coloring: one random red/blue tint per object.
    for i in range(0, len(listOfDotsNumber)):
        m = random()
        k = random()
        for j in range(diapazon[i], diapazon[i + 1]):
            pointcolorTri[j] = [k, 0.0, m]
    #pointcolorTri[0] = [1, 1, 1]
    #print("triangles=",triangles, "len=", len(triangles), " x ",len(triangles[0]),"=",len(triangles)*len(triangles[0]))
    #print("diapazon=", diapazon)
    # Globals consumed by draw() / specialkeys().
    pointdata=triangles
    pointcolor=pointcolorTri

    def func_solve(T, t, lambada, Q_R, c, eps, S):
        """Right-hand side of the heat-balance ODE system.

        Per part i: conduction to every other part, radiation loss, plus an
        external source Q_R[i](t), scaled by the heat capacity c[i].
        """
        NumberOfParts = len(c)
        right_part = np.zeros(NumberOfParts)
        StephBolC = 5.67
        for i in range(NumberOfParts):
            for j in range(NumberOfParts):
                if i != j:
                    # Conductive exchange across the shared surface S[i, j].
                    right_part[i] -= lambada[i, j] * S[i, j] * (T[i] - T[j])
            # Radiative loss through the part's own surface S[i, i].
            right_part[i] -= eps[i] * S[i, i] * StephBolC * (T[i] / 100) ** 4
            right_part[i] += Q_R[i](t)
            right_part[i] /= c[i]
        return right_part

    class TempComputer:
        """Integrates the heat-balance system one tau-sized step at a time."""

        def __init__(self, lambada, Q_R, c, eps, S, tau, NumberOfParts):
            self.lambada = lambada
            self.Q_R = Q_R
            self.c = c
            self.eps = eps
            self.S = S
            self.counter = 0
            print("NumberOfParts=",NumberOfParts,"c=",c)
            # Initial temperatures: steady state of the system at t=0.
            # NOTE(review): the local alias T is unused after this line.
            T = self.T0 = so.fsolve(func_solve, np.zeros(NumberOfParts), args=(0, lambada, Q_R, c, eps, S,))
            self.T = copy.copy(self.T0)
            self.tau = tau
            self.NumberOfParts = NumberOfParts

        def next_step(self):
            """Advance the simulation by one tau interval; return new temps."""
            Tm = np.linspace((self.counter - 1) * self.tau, self.counter * self.tau, 2)
            print("Tm=",Tm)
            self.counter += 1
            self.T = si.odeint(func_solve, self.T0, Tm, args=(self.lambada, self.Q_R, self.c, self.eps, self.S,))
            self.T0 = copy.copy(self.T[1])
            return self.T[1]

    # Surface-coupling matrix between the 5 parts (diagonal = own surface).
    # NOTE(review): this first assignment is immediately overwritten below —
    # presumably kept as an alternative configuration; confirm which is intended.
    squareOfElem=np.array([[ 36.1672021, 4*np.pi , 0 , 0 , 0],
    [ 4*np.pi , 219.8340987, 4*np.pi , 0 , 0],
    [ 0. , 4*np.pi , 12.3660143, np.pi , 0],
    [ 0. , 0. , np.pi , 99.4076902, np.pi],
    [ 0. , 0. ,0. , np.pi , 268]])
    squareOfElem=np.array([[ 36.1672021, 4*np.pi , 0 , 0 , 0],
    [ 4*np.pi , 99.4076907, 0 , 4*np.pi, 0],
    [ 0. , 0 , 12.3660143, np.pi , np.pi],
    [ 0. , 4*np.pi , np.pi , 268 , 0],
    [ 0. , 0. , np.pi , 0 , 219.8341097]])
    print("square=", squareOfElem)
    #for i in range(NumberOfParts):
    #    for j in range(i + 1, NumberOfParts):
    #        temp = (squareOfElem[i, j] + squareOfElem[j, i]) / 2
    #        squareOfElem[i, j] = temp
    #        squareOfElem[j, i] = temp
    #print ("squareOfElem=",squareOfElem)
    # Physical parameters per part.
    # NOTE(review): these are hard-coded for 5 parts while NumberOfParts comes
    # from the parsed model file — confirm the model always has 5 objects.
    eps = [0.05, 0.05, 0.01, 0.1, 0.1]
    c = [520, 520, 840, 900, 900]
    # Conductivity between connected part pairs (symmetric).
    lambada = np.zeros((NumberOfParts, NumberOfParts))
    lambada[0, 1] = 20
    lambada[1, 0] = 20
    lambada[1, 3] = 130
    lambada[3, 1] = 130
    lambada[2, 3] = 10.5
    lambada[3, 2] = 10.5
    lambada[2, 4] = 119
    lambada[4, 2] = 119
    # External heat sources: zero everywhere except part 0 (periodic drive).
    Q_R = []
    for i in range(NumberOfParts):
        f = lambda t: [0]
        Q_R.append(f)
    A = 2
    Q_R[0] = lambda t: [A * (20 + 3 * np.cos(t / 4))]
    tau = 10 ** 2
    print("pointcolor[6]=",pointcolor[6])
    temperature = TempComputer(lambada, Q_R, c, eps, squareOfElem, tau, NumberOfParts)
    # Paint the initial temperatures onto the mesh.
    for i in range(len(list_of_vel)):
        Col = Temp_to_color(temperature.T0[i])
        print('len(list_of_vel) = ', len(list_of_vel))
        print("temperature.T0[i]=",temperature.T0[i])
        print("Col=",Col)
        for j in range(diapazon[i], diapazon[i + 1]):
            pointcolor[j] = [Col, Col, Col]
    print(3*12+361*2*3)
    print("len(pointdata)_all=",len(pointdata)*len(pointdata[0]))
    print("len(pointdata)=", len(pointdata))
    print("len(pointcolor)=", len(pointcolor))
    #global eye
    #eye = np.zeros(3)
    #global lat
    #lat = 0
    #global lon
    #lon = np.arctan2(0, -1)
    # Hand control to GLUT; never returns.
    glutMainLoop()
| |
###############################################################################
# Copyright 2014 OCLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
# to run this test from the command line: python -m tests.accesstoken_test
import unittest
from authliboclc import accesstoken, user, refreshtoken
class AccessTokenTests(unittest.TestCase):
""" Create a mock access token. """
def setUp(self):
    """Build the mock refresh token, options, and AccessToken under test."""
    self._my_refresh_token = refreshtoken.RefreshToken(
        tokenValue='rt_25fXauhJC09E4kwFxcf4TREkTnaRYWHgJA0W',
        expires_in=1199,
        expires_at='2014-03-13 15:44:59Z'
    )
    # Options cover every key in valid_options so each grant type can be tested.
    self._options = {'scope': ['WMS_NCIP', 'WMS_ACQ'],
                     'authenticating_institution_id': '128807',
                     'context_institution_id': '128808',
                     'redirect_uri': 'ncip://testapp',
                     'refresh_token': self._my_refresh_token,
                     'code': 'unknown'}
    self._authorization_server = 'https://authn.sd00.worldcat.org/oauth2'
    self._my_access_token = accesstoken.AccessToken(self._authorization_server,
                                                    'authorization_code',
                                                    self._options)
def testAuthorizationServer(self):
    """The authorization server URL is stored verbatim on the token."""
    expected = 'https://authn.sd00.worldcat.org/oauth2'
    self.assertEqual(expected, self._my_access_token.authorization_server)
""" Make sure only the correct valid access token options are listed. """
def testValidOptions(self):
options = accesstoken.AccessToken.valid_options
valid_options = [
'scope',
'authenticating_institution_id',
'context_institution_id',
'redirect_uri',
'code',
'refresh_token'
]
self.assertEqual(options, valid_options,
'Options must be scope, authenticating_institution_id, context_institution_id, redirect_uri, '
'code and refresh_token')
""" Make sure the list of valid grant types is correct. """
def testValidGrantTypes(self):
grant_types = accesstoken.AccessToken.validGrantTypes
valid_grant_types = [
'authorization_code',
'refresh_token',
'client_credentials'
]
self.assertEqual(grant_types, valid_grant_types, 'Grant types must be authorization_code, refresh_token, '
'client_credentials')
""" Check that attempts to create Access Tokens work, and incorrect parameters raise exceptions. """
def testCreateAccessToken(self):
self.assertEqual(self._my_access_token.scope, ['WMS_NCIP', 'WMS_ACQ'])
self.assertEqual(self._my_access_token.authenticating_institution_id, '128807')
self.assertEqual(self._my_access_token.context_institution_id, '128808')
self.assertEqual(self._my_access_token.redirect_uri, 'ncip://testapp')
self.assertEqual(self._my_access_token.refresh_token.refresh_token, 'rt_25fXauhJC09E4kwFxcf4TREkTnaRYWHgJA0W')
self.assertEqual(self._my_access_token.code, 'unknown')
with self.assertRaises(accesstoken.InvalidGrantType):
accesstoken.AccessToken(authorization_server=self._authorization_server)
# Tests to make sure there are no missing parameters for authorization_code
with self.assertRaises(accesstoken.NoOptionsPassed):
accesstoken.AccessToken(authorization_server=self._authorization_server,
grant_type='authorization_code',
options={})
with self.assertRaises(accesstoken.RequiredOptionsMissing):
accesstoken.AccessToken(authorization_server=self._authorization_server,
grant_type='authorization_code',
options={'authenticating_institution_id': '', 'context_institution_id': ''})
with self.assertRaises(accesstoken.RequiredOptionsMissing):
accesstoken.AccessToken(authorization_server=self._authorization_server,
grant_type='authorization_code',
options={'code': '', 'context_institution_id': ''})
with self.assertRaises(accesstoken.RequiredOptionsMissing):
accesstoken.AccessToken(authorization_server=self._authorization_server,
grant_type='authorization_code',
options={'code': '', 'authenticating_institution_id': ''})
# Tests to make sure there are no missing parameters for client_credentials
with self.assertRaises(accesstoken.RequiredOptionsMissing):
accesstoken.AccessToken(authorization_server=self._authorization_server,
grant_type='client_credentials',
options={'refresh_token': '',
'context_institution_id': '',
'scope': ''})
with self.assertRaises(accesstoken.RequiredOptionsMissing):
accesstoken.AccessToken(authorization_server=self._authorization_server,
grant_type='refresh_token',
options={'client_credentials': '',
'authenticating_institution_id': '',
'scope': ''})
with self.assertRaises(accesstoken.RequiredOptionsMissing):
accesstoken.AccessToken(authorization_server=self._authorization_server,
grant_type='client_credentials',
options={'refresh_token': '',
'authenticating_institution_id': '',
'context_institution_id': ''})
# Tests to make sure there are no missing parameters for refresh_token
with self.assertRaises(accesstoken.RequiredOptionsMissing):
accesstoken.AccessToken(authorization_server=self._authorization_server,
grant_type='refresh_token',
options={'authenticating_institution_id': '',
'context_institution_id': '',
'scope': ''})
# Test that scope must be a list of scopes, not a string
with self.assertRaises(accesstoken.RequiredOptionsMissing):
accesstoken.AccessToken(authorization_server=self._authorization_server,
grant_type='authorization_code',
options={'code': '',
'redirect_uri': '',
'authenticating_institution_id': '',
'context_institution_id': '',
'scope': 'WMS_ACQ'})
""" Make sure an expired token is calculated properly. """
def testIsExpired(self):
self._my_access_token.expires_at = '2014-01-01 12:00:00Z'
self.assertTrue(self._my_access_token.is_expired())
self._my_access_token.expires_at = '2099-01-01 12:00:00Z'
self.assertFalse(self._my_access_token.is_expired())
""" Test creation of an access token for authorization_code. """
def testGetAccessTokenURLforAuthorizationCode(self):
sample_access_token = accesstoken.AccessToken(self._authorization_server,
'authorization_code',
self._options)
self.assertEqual(sample_access_token.get_access_token_url(), (
'https://authn.sd00.worldcat.org/oauth2/accessToken?' +
'grant_type=authorization_code' +
'&code=unknown' +
'&authenticatingInstitutionId=128807' +
'&contextInstitutionId=128808' +
'&redirect_uri=ncip%3A%2F%2Ftestapp')
)
""" Test creation of an access token for client_credentials. """
def testGetAccessTokenURLforClientCredentials(self):
sample_access_token = accesstoken.AccessToken(self._authorization_server,
'client_credentials',
self._options)
self.assertEqual(sample_access_token.get_access_token_url(), (
'https://authn.sd00.worldcat.org/oauth2/accessToken?' +
'grant_type=client_credentials&' +
'authenticatingInstitutionId=128807&' +
'contextInstitutionId=128808&' +
'scope=WMS_NCIP%20WMS_ACQ')
)
""" Test creation of an access token for refresh_token. """
def testGetAccessTokenURLforRefreshToken(self):
sample_access_token = accesstoken.AccessToken(self._authorization_server,
'refresh_token',
self._options)
self.assertEqual(sample_access_token.get_access_token_url(), (
'https://authn.sd00.worldcat.org/oauth2/accessToken?' +
'grant_type=refresh_token' +
'&refresh_token=rt_25fXauhJC09E4kwFxcf4TREkTnaRYWHgJA0W'))
""" Create a mock token response and verify parsing is corrent. """
def testParseTokenResponse(self):
sample_access_token = accesstoken.AccessToken(self._authorization_server,
'authorization_code',
self._options)
sample_access_token.parse_token_response(
'{' +
'"expires_at":"2014-03-13 15:44:59Z",' +
'"principalIDNS":"urn:oclc:platform:128807",' +
'"principalID":"2334dd24-b27e-49bd-8fea-7cc8de670f8d",' +
'"error_code":"trouble",' +
'"expires_in":1199,' +
'"token_type":"bearer",' +
'"context_institution_id":"128807",' +
'"access_token":"tk_25fXauhJC09E5kwFxcf4TRXkTnaRYWHgJA0W",' +
'"refresh_token":"rt_25fXauhJC09E5kwFxcf4TRXkTnaRYWHgJA0W",' +
'"refresh_token_expires_in":1900,' +
'"refresh_token_expires_at":"2014-03-13 15:44:59Z"' +
'}'
)
expected_user = user.User(
authenticating_institution_id='128807',
principal_id='2334dd24-b27e-49bd-8fea-7cc8de670f8d',
principal_idns='urn:oclc:platform:128807'
)
expected_refresh_token = refreshtoken.RefreshToken(
tokenValue='rt_25fXauhJC09E5kwFxcf4TRXkTnaRYWHgJA0W',
expires_in=1900,
expires_at='2014-03-13 15:44:59Z'
)
self.assertEqual(sample_access_token.access_token_string, 'tk_25fXauhJC09E5kwFxcf4TRXkTnaRYWHgJA0W')
self.assertEqual(sample_access_token.type, 'bearer')
self.assertEqual(sample_access_token.expires_at, '2014-03-13 15:44:59Z')
self.assertEqual(sample_access_token.expires_in, 1199)
self.assertEqual(sample_access_token.error_code, 'trouble')
self.assertEqual(sample_access_token.context_institution_id, '128807')
self.assertEqual(user.User, type(sample_access_token.user))
self.assertEqual(expected_user.authenticating_institution_id,
sample_access_token.user.authenticating_institution_id)
self.assertEqual(expected_user.principal_id, sample_access_token.user.principal_id)
self.assertEqual(expected_user.principal_idns, sample_access_token.user.principal_idns)
self.assertEqual(refreshtoken.RefreshToken, type(sample_access_token.refresh_token))
self.assertEqual(expected_refresh_token.refresh_token, sample_access_token.refresh_token.refresh_token)
self.assertEqual(expected_refresh_token.expires_in, sample_access_token.refresh_token.expires_in)
self.assertEqual(expected_refresh_token.expires_at, sample_access_token.refresh_token.expires_at)
"""Test that the string representation of the class is complete."""
def testStringRepresenationOfClass(self):
"""Create a new access token which hasn't been authenticated yet."""
sample_access_token = accesstoken.AccessToken(
self._authorization_server,
grant_type='authorization_code',
options={'scope': ['WMS_NCIP', 'WMS_ACQ'],
'authenticating_institution_id': '128807',
'context_institution_id': '128808',
'redirect_uri': 'https://localhost:8000/auth/',
'code': 'unknown'
}
)
"""Assume authentication has occured and these parameters are now filled in."""
sample_access_token.expires_at = '2014-04-08 13:38:29Z'
sample_access_token.expires_in = 1198
sample_access_token.access_token_string = 'tk_TBHrsDbSrWW1oS7d3gZr7NJb7PokyOFlf0pr'
sample_access_token.type = 'bearer'
sample_access_token.error_code = 404
sample_access_token.error_message = 'No reply at all.'
sample_access_token.error_url = 'http://www.noreply.oclc.org/auth/'
self.assertEqual(str(sample_access_token), (
"\n" +
"access_token_url: https://authn.sd00.worldcat.org/oauth2/accessToken?\n" +
" grant_type=authorization_code\n" +
" &code=unknown\n" +
" &authenticatingInstitutionId=128807\n" +
" &contextInstitutionId=128808\n" +
" &redirect_uri=https%3A%2F%2Flocalhost%3A8000%2Fauth%2F\n" +
"\n" +
"access_token_string tk_TBHrsDbSrWW1oS7d3gZr7NJb7PokyOFlf0pr\n" +
"authenticating_institution_id: 128807\n" +
"authorization_server: https://authn.sd00.worldcat.org/oauth2\n" +
"code: unknown\n" +
"context_institution_id: 128808\n" +
"error_code: 404\n" +
"error_message: No reply at all.\n" +
"error_url: http://www.noreply.oclc.org/auth/\n" +
"expires_at: 2014-04-08 13:38:29Z\n" +
"expires_in: 1198\n" +
"grant_type: authorization_code\n" +
"options: None\n" +
"redirect_uri: https://localhost:8000/auth/\n" +
"refresh_token:\n" +
"None\n" +
"scope: ['WMS_NCIP', 'WMS_ACQ']\n" +
"type: bearer\n" +
"user:\n" +
"None\n" +
"wskey:\n" +
"None")
)
def main():
    """Entry point: discover and run every TestCase in this module."""
    unittest.main()

if __name__ == '__main__':
    main()
| |
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import time
import traceback
from app_yaml_helper import AppYamlHelper
from appengine_wrappers import IsDeadlineExceededError, logservice, taskqueue
from branch_utility import BranchUtility
from compiled_file_system import CompiledFileSystem
from custom_logger import CustomLogger
from data_source_registry import CreateDataSources
from environment import GetAppVersion
from gcs_file_system_provider import CloudStorageFileSystemProvider
from github_file_system_provider import GithubFileSystemProvider
from host_file_system_provider import HostFileSystemProvider
from object_store_creator import ObjectStoreCreator
from refresh_tracker import RefreshTracker
from render_refresher import RenderRefresher
from server_instance import ServerInstance
from servlet import Servlet, Request, Response
from timer import Timer
# Module-level logger. Per the comment in CronServlet.Get, CustomLogger
# flushes the log on every use so messages survive deadline kills.
_log = CustomLogger('cron')
class CronServlet(Servlet):
  '''Servlet which runs a cron job.
  '''
  def __init__(self, request, delegate_for_test=None):
    # |delegate_for_test| lets tests substitute the runtime dependencies
    # declared on Delegate below.
    Servlet.__init__(self, request)
    self._delegate = delegate_for_test or CronServlet.Delegate()

  class Delegate(object):
    '''CronServlet's runtime dependencies. Override for testing.
    '''
    def CreateBranchUtility(self, object_store_creator):
      return BranchUtility.Create(object_store_creator)

    def CreateHostFileSystemProvider(self,
                                     object_store_creator,
                                     pinned_commit=None):
      # |pinned_commit| pins the file system to a specific commit rather
      # than HEAD (see _CreateServerInstance).
      return HostFileSystemProvider(object_store_creator,
                                    pinned_commit=pinned_commit)

    def CreateGithubFileSystemProvider(self, object_store_creator):
      return GithubFileSystemProvider(object_store_creator)

    def CreateGCSFileSystemProvider(self, object_store_creator):
      return CloudStorageFileSystemProvider(object_store_creator)

    def GetAppVersion(self):
      return GetAppVersion()

  def Get(self):
    '''Runs the cron job and returns its Response, guaranteeing that logs
    are flushed even if the request hits the App Engine deadline.
    '''
    # Refreshes may time out, and if they do we need to make sure to flush the
    # logs before the process gets killed (Python gives us a couple of
    # seconds).
    #
    # So, manually flush logs at the end of the cron run. However, sometimes
    # even that isn't enough, which is why in this file we use _log and
    # make it flush the log every time its used.
    logservice.AUTOFLUSH_ENABLED = False
    try:
      return self._GetImpl()
    except BaseException:
      # NOTE(review): the exception is swallowed after logging, so the
      # caller receives None (not an error Response) on this path.
      _log.error('Caught top-level exception! %s', traceback.format_exc())
    finally:
      logservice.flush()

  def _GetImpl(self):
    '''Enumerates all refresh targets and enqueues one taskqueue task per
    refresh path, all keyed on the master commit being refreshed.
    '''
    # Cron strategy:
    #
    # Collect all DataSources, the PlatformBundle, the ContentProviders, and
    # any other statically rendered contents (e.g. examples content),
    # and spin up taskqueue tasks which will refresh any cached data relevant
    # to these assets.
    #
    # TODO(rockot/kalman): At the moment examples are not actually refreshed
    # because they're too slow.

    _log.info('starting')

    server_instance = self._GetSafeServerInstance()
    master_fs = server_instance.host_file_system_provider.GetMaster()
    # Refresh the commit named in the request if one was supplied,
    # otherwise the current commit at master.
    if 'commit' in self._request.arguments:
      master_commit = self._request.arguments['commit']
    else:
      master_commit = master_fs.GetCommitID().Get()

    # This is the guy that would be responsible for refreshing the cache of
    # examples. Here for posterity, hopefully it will be added to the targets
    # below someday.
    render_refresher = RenderRefresher(server_instance, self._request)

    # Used to register a new refresh cycle keyed on |master_commit|.
    refresh_tracker = RefreshTracker(server_instance.object_store_creator)

    # Get the default taskqueue
    queue = taskqueue.Queue()

    # GAE documentation specifies that it's bad to add tasks to a queue
    # within one second of purging. We wait 2 seconds, because we like
    # to go the extra mile.
    queue.purge()
    time.sleep(2)

    success = True
    try:
      data_sources = CreateDataSources(server_instance)
      targets = (data_sources.items() +
                 [('content_providers', server_instance.content_providers),
                  ('platform_bundle', server_instance.platform_bundle)])
      title = 'initializing %s parallel targets' % len(targets)
      _log.info(title)
      timer = Timer()
      tasks = []
      # Flatten every target's refresh paths into '<name>/<path>' task urls.
      for name, target in targets:
        refresh_paths = target.GetRefreshPaths()
        tasks += [('%s/%s' % (name, path)).strip('/') for path in refresh_paths]

      # Start a new refresh cycle. In order to detect the completion of a full
      # cache refresh, the RefreshServlet (which handles individual refresh
      # tasks) will mark each task complete and check the set of completed tasks
      # against the set registered here.
      refresh_tracker.StartRefresh(master_commit, tasks).Get()

      for task in tasks:
        queue.add(taskqueue.Task(url='/_refresh/%s' % task,
                                 params={'commit': master_commit}))

      _log.info('%s took %s' % (title, timer.Stop().FormatElapsed()))
    except:
      # This should never actually happen (each cron step does its own
      # conservative error checking), so re-raise no matter what it is.
      _log.error('uncaught error: %s' % traceback.format_exc())
      success = False
      raise
    finally:
      _log.info('finished (%s)', 'success' if success else 'FAILED')
      # NOTE(review): returning from a finally clause suppresses the
      # re-raise above, so the failure Response is what actually escapes.
      return (Response.Ok('Success') if success else
              Response.InternalError('Failure'))

  def _GetSafeServerInstance(self):
    '''Returns a ServerInstance with a host file system at a safe commit,
    meaning the last commit that the current running version of the server
    existed.
    '''
    delegate = self._delegate

    # IMPORTANT: Get a ServerInstance pinned to the most recent commit, not
    # HEAD. These cron jobs take a while and run very frequently such that
    # there is usually one running at any given time, and eventually a file
    # that we're dealing with will change underneath it, putting the server in
    # an undefined state.
    server_instance_near_head = self._CreateServerInstance(
        self._GetMostRecentCommit())

    app_yaml_handler = AppYamlHelper(
        server_instance_near_head.object_store_creator,
        server_instance_near_head.host_file_system_provider)

    if app_yaml_handler.IsUpToDate(delegate.GetAppVersion()):
      return server_instance_near_head

    # The version in app.yaml is greater than the currently running app's.
    # The safe version is the one before it changed.
    safe_revision = app_yaml_handler.GetFirstRevisionGreaterThan(
        delegate.GetAppVersion()) - 1

    _log.info('app version %s is out of date, safe is %s',
              delegate.GetAppVersion(), safe_revision)

    return self._CreateServerInstance(safe_revision)

  def _GetMostRecentCommit(self):
    '''Gets the commit of the most recent patch submitted to the host file
    system. This is similar to HEAD but it's a concrete commit so won't
    change as the cron runs.
    '''
    head_fs = (
        self._CreateServerInstance(None).host_file_system_provider.GetMaster())
    return head_fs.GetCommitID().Get()

  def _CreateServerInstance(self, commit):
    '''Creates a ServerInstance pinned to |commit|, or HEAD if None.
    NOTE: If passed None it's likely that during the cron run patches will be
    submitted at HEAD, which may change data underneath the cron run.
    '''
    # start_empty=True: the cron always repopulates object store caches.
    object_store_creator = ObjectStoreCreator(start_empty=True)
    branch_utility = self._delegate.CreateBranchUtility(object_store_creator)
    host_file_system_provider = self._delegate.CreateHostFileSystemProvider(
        object_store_creator, pinned_commit=commit)
    github_file_system_provider = self._delegate.CreateGithubFileSystemProvider(
        object_store_creator)
    gcs_file_system_provider = self._delegate.CreateGCSFileSystemProvider(
        object_store_creator)
    return ServerInstance(object_store_creator,
                          CompiledFileSystem.Factory(object_store_creator),
                          branch_utility,
                          host_file_system_provider,
                          github_file_system_provider,
                          gcs_file_system_provider)
| |
# Copyright (c) 2015 Snowplow Analytics Ltd. All rights reserved.
#
# This program is licensed to you under the Apache License Version 2.0,
# and you may not use this file except in compliance with the Apache License Version 2.0.
# You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the Apache License Version 2.0 is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
import datetime, json, uuid, time
from functools import partial
from random import choice
from invoke import run, task
import boto
from boto import kinesis
import boto.dynamodb2
from boto.dynamodb2.fields import HashKey, RangeKey, KeysOnlyIndex, GlobalAllIndex
from boto.dynamodb2.table import Table
from boto.dynamodb2.types import NUMBER
import boto.cloudformation
import time
import math, os
from filechunkio import FileChunkIO
# setting for AWS Lambda and Lambda-Exec-Role
REGION = "us-east-1"
IAM_ROLE_ARN = ""
IAM_ROLE = ""
POLICY = """{
"Statement":[{
"Effect":"Allow",
"Action":["*"],
"Resource":["*"]}]}"""
POLICY_NAME = "AdministratorAccess"
STACK_NAME = "LambdaStack"
TEMPLATE_URL = "https://snowplow-hosted-assets.s3.amazonaws.com/third-party/aws-lambda/lambda-admin.template"
CAPABILITIES = ["CAPABILITY_IAM"]
JARFILE = "./target/scala-2.11/aws-lambda-scala-example-project-0.2.0.jar"
S3_BUCKET = "aws_scala_lambda_bucket"
S3_KEY = os.path.basename(JARFILE)
FUNCTION_NAME = "ProcessingKinesisLambdaDynamoDB"
# Selection of EventType values
COLORS = ['Red','Orange','Yellow','Green','Blue']
# DynamoDB settings
THROUGHPUT_READ = 20
THROUGHPUT_WRITE = 20
# AWS Kinesis Data Generator
def picker(seq):
    """
    Build and return a zero-argument callable; each invocation picks
    and returns a random element of *seq*.
    """
    pick_one = partial(choice, seq)
    return pick_one
def create_event():
    """
    Build one simulated event. Returns (event_id, payload) where the
    payload dict carries the id, an ISO-8601 timestamp and a randomly
    chosen eventType colour.
    """
    new_id = str(uuid.uuid4())
    payload = {
        "id": new_id,
        "timestamp": datetime.datetime.now().isoformat(),
        "eventType": picker(COLORS)()
    }
    return (new_id, payload)
def write_event(conn, stream_name):
    """
    Generate one event, put it onto the named Kinesis stream, and return
    the JSON string that was sent.
    """
    event_id, event_payload = create_event()
    serialized = json.dumps(event_payload)
    conn.put_record(stream_name, serialized, event_id)
    return serialized
@task
def upload_s3():
    """
    Upload the assembled jar to S3 via a multipart upload.

    Fix: the original opened two separate S3 connections and re-imported
    boto inside the function; a single connection serves both the bucket
    creation and the upload.
    """
    source_path = JARFILE
    source_size = os.stat(source_path).st_size
    # One S3 connection for both creating the bucket and uploading.
    conn = boto.connect_s3()
    bucket = conn.create_bucket(S3_BUCKET)
    # Create a multipart upload request
    mp = bucket.initiate_multipart_upload(os.path.basename(source_path))
    # Use a chunk size of 5 MiB (the S3 minimum for non-final parts)
    chunk_size = 5242880
    chunk_count = int(math.ceil(source_size / float(chunk_size)))
    # Send the file parts, using FileChunkIO to create a file-like object
    # that points to a certain byte range within the original file. We
    # set bytes to never exceed the original file size.
    for i in range(chunk_count):
        offset = chunk_size * i
        bytes = min(chunk_size, source_size - offset)
        with FileChunkIO(source_path, 'r', offset=offset,
                         bytes=bytes) as fp:
            mp.upload_part_from_file(fp, part_num=i + 1)
    # Finish the upload
    mp.complete_upload()
    print("Jar uploaded to S3 bucket " + S3_BUCKET)
@task
def create_role():
    """
    Creates IAM role using CloudFormation for AWS Lambda service
    """
    client_cf = boto.cloudformation.connect_to_region(REGION)
    response = client_cf.create_stack(
        stack_name=STACK_NAME,
        template_url=TEMPLATE_URL,
        capabilities=CAPABILITIES
    )
    print response
    # Stack creation is asynchronous; these fixed sleeps give
    # CloudFormation time to materialise the role before it is looked up.
    time.sleep(7)
    print "Creating roles"
    time.sleep(7)
    print "Still creating"
    time.sleep(7)
    print "Giving Lambda proper permissions"
    # get name of LambdaExecRole
    client_iam = boto.connect_iam()
    roles = client_iam.list_roles()
    list_roles = roles['list_roles_response']['list_roles_result']['roles']
    for i in range(len(list_roles)):
        if STACK_NAME+"-LambdaExecRole" in list_roles[i].arn:
            # NOTE(review): this binds a function-local IAM_ROLE, not the
            # module-level constant of the same name; if no role ever
            # matches, put_role_policy below raises UnboundLocalError.
            IAM_ROLE = list_roles[i].role_name
    print "Trying..."
    # grants Admin access to LambdaExecRole to access Cloudwatch, DynamoDB, Kinesis
    client_iam.put_role_policy(IAM_ROLE, POLICY_NAME, POLICY)
    print "Created role"
@task
def generate_events(profile, region, stream):
    """
    load demo data with python generator script for SimpleEvents
    """
    conn = kinesis.connect_to_region(region, profile_name=profile)
    # Infinite producer loop: emits one random event per iteration until
    # the process is interrupted (e.g. Ctrl-C).
    while True:
        event_json = write_event(conn, stream)
        print "Event sent to Kinesis: {}".format(event_json)
@task
def create_lambda():
    """
    Create aws-lambda-scala-example-project AWS Lambda service
    """
    # TODO: switch to use all boto
    IAM_ROLE_ARN = get_iam_role_arn()
    print("Creating AWS Lambda function.")
    # Shells out to the AWS CLI; the jar must already have been uploaded
    # to S3 (see upload_s3) and the exec role created (see create_role).
    run("aws lambda create-function --region {} \
        --function-name {} \
        --code S3Bucket={},S3Key={} \
        --role {} \
        --handler com.snowplowanalytics.awslambda.LambdaFunction::recordHandler \
        --runtime java8 --timeout 60 --memory-size 1024".format(REGION, FUNCTION_NAME, S3_BUCKET, S3_KEY, IAM_ROLE_ARN), pty=True)
def get_iam_role_arn():
    """
    Return the ARN of the LambdaExecRole created by the CloudFormation
    stack, or the module-level default ("") when no matching role exists.

    Fix: the original assigned IAM_ROLE_ARN only inside the loop, making
    the name function-local; when no role matched, the final return
    raised UnboundLocalError. The default is now explicit, and the
    last matching role still wins as before.
    """
    client_iam = boto.connect_iam()
    roles = client_iam.list_roles()
    role_list = roles['list_roles_response']['list_roles_result']['roles']
    arn = IAM_ROLE_ARN  # module-level default ("") if nothing matches
    for role in role_list:
        if STACK_NAME + "-LambdaExecRole" in role.arn:
            arn = role.arn
    return arn
@task
def configure_lambda(stream):
    """
    Wire the Kinesis stream up as the event source of the Lambda
    function, polling until the event source mapping becomes active.

    Fix: the poll delay called bare sleep(5), which raised NameError --
    only the time module is imported, so it must be time.sleep(5).
    """
    print("Configured AWS Lambda service")
    IAM_ROLE_ARN = get_iam_role_arn()
    aws_lambda = boto.connect_awslambda()
    event_source = kinesis_stream(stream)
    response_add_event_source = aws_lambda.add_event_source(event_source,
                                                            FUNCTION_NAME,
                                                            IAM_ROLE_ARN,
                                                            batch_size=100,
                                                            parameters=None)
    event_source_id = response_add_event_source['UUID']
    # Poll until AWS reports the event source mapping as active.
    while response_add_event_source['IsActive'] != 'true':
        print('Waiting for the event source to become active')
        time.sleep(5)  # was: sleep(5) -- NameError
        response_add_event_source = aws_lambda.get_event_source(event_source_id)
    print('Added Kinesis as event source for Lambda function')
@task
def create_dynamodb_table(profile, region, table):
    """
    Create the DynamoDB aggregate table in the given region/profile,
    keyed on BucketStart (hash) and EventType (range).
    """
    connection = boto.dynamodb2.connect_to_region(region, profile_name=profile)
    key_schema = [
        HashKey("BucketStart"),
        RangeKey("EventType"),
    ]
    capacity = {
        'read': THROUGHPUT_READ,
        'write': THROUGHPUT_WRITE
    }
    aggregate = Table.create(table,
                             schema=key_schema,
                             throughput=capacity,
                             connection=connection)
@task
def create_kinesis_stream(stream):
    """
    Create a single-shard Kinesis stream and block until it is ACTIVE.
    """
    conn = boto.connect_kinesis()
    conn.create_stream(stream, 1)
    pause_until_kinesis_active(stream)
    print("Kinesis successfully created")
def pause_until_kinesis_active(stream):
    """Poll every five seconds until the stream's status is ACTIVE."""
    conn = boto.connect_kinesis()
    while True:
        status = conn.describe_stream(stream)['StreamDescription']['StreamStatus']
        if status == 'ACTIVE':
            break
        print('Kinesis stream [' + stream + '] not active yet')
        time.sleep(5)
def kinesis_stream(stream):
    """Return the ARN of the named Kinesis stream."""
    description = boto.connect_kinesis().describe_stream(stream)
    return description['StreamDescription']['StreamARN']
@task
def describe_kinesis_stream(stream):
    """
    Print the ARN of the named Kinesis stream.
    """
    arn = kinesis_stream(stream)
    print("Created: ")
    print(arn)
| |
#! /usr/bin/env python
#----------------------------------------------------------------
# Author: Jason Gors <jasonDOTgorsATgmail>
# Creation Date: 07-30-2013
# Purpose: this specifies what types of packages can be handled: currently: git repos from github;
# git & hg repos from bitbucket; git, hg & bzr local repos.
#----------------------------------------------------------------
import os
from os.path import join
from site import USER_BASE as user_base
import shutil
import subprocess
import sys
import glob
import itertools
import locale # needed in py3 for decoding output from subprocess pipes
from six.moves.urllib.request import urlopen
from Bep.languages import languages
from Bep.core.release_info import name
from Bep.core import utils
#from Bep.core.utils_db import (handle_db_after_an_install, handle_db_for_removal,
#handle_db_for_branch_renaming,
#get_lang_cmd_branch_was_installed_with)
class Package(object):
    """Base class for an installable package type (e.g. github/bitbucket
    repos, local git/hg/bzr repos -- see subclasses elsewhere in this file).

    Resolves the language handler for *lang_arg* and lays out the install
    and log directory paths used by the package-type subclasses.
    """
    def __init__(self, lang_arg, pkg_type, install_dirs, args, **kwargs):
        # Map the requested language onto its languages.* handler; only
        # Python is currently supported.
        if 'python' in lang_arg:
            self.lang_using = languages.Python() # this is a class instance to have it's methods accessed throughout
        #elif 'otherlanguage' in args.language: # for other languages
            #self.lang_using = languages.OtherLanguage()
        else:
            print("\nError: {0} currently not supported.".format(lang_arg))
            raise SystemExit
        self.lang_cmd = self.lang_using.get_lang_cmd(lang_arg)
        self.pkg_type = pkg_type
        # Top-level install/log roots supplied by the caller...
        self.installed_pkgs_dir = install_dirs['installed_pkgs_dir']
        self.install_logs_dir = install_dirs['install_logs_dir']
        # ...specialised per language command...
        self.lang_install_dir = join(self.installed_pkgs_dir, self.lang_cmd)
        self.lang_logs_dir = join(self.install_logs_dir, self.lang_cmd)
        # ...and per package type.
        self.pkg_type_install_dir = join(self.lang_install_dir, self.pkg_type)
        self.pkg_type_logs_dir = join(self.lang_logs_dir, self.pkg_type)

        ################## try to add in ######################
        ##### TODO add this stuff in so they don't have to be defined repeatedly below
        #self.pkg_name_install_dir = join(self.pkg_type_install_dir, pkg_to_install_name)
        #self.pkg_name_logs_dir = join(self.pkg_type_logs_dir, pkg_to_install_name)
        #branch_install_dir = join(self.pkg_name_install_dir, branch_to_install)
        #branch_logs_dir = join(self.pkg_name_logs_dir, branch_to_install)
        ########################################################
    def __cmd_output(self, cmd, verbose):
        """Run *cmd* (a whitespace-separated command string) and return its
        exit status.

        verbose=True streams combined stdout/stderr live to the console;
        verbose=False runs quietly, capturing the output into self.out /
        self.err so callers can show it only on failure.
        """
        # NOTE(review): getdefaultlocale() can return (None, None) under
        # the C locale, which would make decode() fail -- confirm the
        # deployment environment always sets a locale.
        encoding = locale.getdefaultlocale()[1] # py3 stuff b/c this is encoded as b('...')
        if verbose: # shows all the output when running cmd -- sometimes lots of stuff
            print(cmd)
            cmd = cmd.split(' ') # to use w/o needing to set shell=True
            p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
            # Stream each line as it arrives, stderr merged into stdout.
            for line in p.stdout:
                line = line.decode(encoding)
                print(line.rstrip())
            return_val = p.wait()
        else: # for a quiet mode -- only shows the output when the cmd fails
            cmd = cmd.split(' ') # to use w/o needing to set shell=True
            #print('cmd:'); print(cmd)
            p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
            out, err = p.communicate()
            self.out = out.decode(encoding)
            self.err = err.decode(encoding)
            return_val = p.returncode
        return return_val
def parse_pkg_to_install_name(self, pkg_to_install):
def strip_end_of_str(str_to_strip):
if str_to_strip.endswith('/'):
return str_to_strip.rstrip('/') # b/c noticed problem with local_repo installs
elif str_to_strip.endswith('.git'):
return str_to_strip.rstrip('.git') # would be a problem if git repos have this at the end
else:
return str_to_strip # if nothing wrong, then just give back the string feed into this here.
# this bit looks to see if a branch/version is specified for installing; if not, then it gets master.
#pkg_branch_test = pkg_to_install.split('^')
#assert len(pkg_branch_test) <= 2, "Only allowed to specify one branch/version per package listing for installation."
#if len(pkg_branch_test) == 2:
#pkg_to_install, branch = pkg_branch_test
#pkg_to_install = strip_end_of_str(pkg_to_install)
#if len(pkg_to_install.split('/')) == 1:
#if len(pkg_to_install.split('/')) != 2:
#utils.how_to_specify_installation(pkg_to_install)
#raise SystemExit
#download_url = self.download_url.format(pkg_to_install=pkg_to_install)
#download_info = self.download_url_cmd.format(branch=branch, download_url=download_url)
#elif len(pkg_branch_test) == 1:
#branch = 'master'
#pkg_to_install = pkg_branch_test[0]
#pkg_to_install = strip_end_of_str(pkg_to_install)
#download_info = self.download_url.format(pkg_to_install=pkg_to_install)
# if a repo branch name is specified with a nested path, then change its name (in
# order to make sure that all branches installed have flattened names in the pkg_dir).
# (Eg. numpy on github has branches like this, where they are listed as as nested paths)
#branch = args.branch.split('/')
#if len(branch) == 1:
#branch = branch[0]
#elif len(branch) >= 1:
#branch = '_'.join(branch)
pkg_to_install = strip_end_of_str(pkg_to_install)
#if args.branch == 'master':
#download_info = self.download_url.format(pkg_to_install=pkg_to_install)
#else:
#download_info = self.download_url_cmd.format(branch=args.branch, download_url=self.download_url)
pkg_to_install_name = os.path.basename(pkg_to_install)
#self.install_download_cmd = self.install_download_cmd.format(download_info=download_info, branch=args.branch)
return pkg_to_install_name
    def _download_pkg(self, pkg_to_install_name, branch_flattened_name, args, noise):
        '''Download/clone the specified package branch for installation.

        Returns True on success, None on any failure -- after removing
        whatever partial download directories may have been left behind.
        '''
        # tests whether the vc application is installed ('git', 'hg', 'bzr', etc)
        app_check_cmd = self.application_check_cmd # not sure if this is platform neutral
        app_type = app_check_cmd.split(' ')[0]
        #app_type = args.repo_type
        return_val = self.__cmd_output(app_check_cmd, verbose=False)
        if return_val: # it would be zero if the program is installed (if anything other than zero, then it's not installed)
            print("\nError: COULD NOT INSTALL {0} packages; are you sure {1} is installed?".format(args.pkg_type, app_type))
            return None

        # make the initial pkg_name_dir
        pkg_name_dir = join(self.pkg_type_install_dir, pkg_to_install_name)
        if not os.path.isdir(pkg_name_dir):
            os.makedirs(pkg_name_dir)

        utils.when_not_quiet_mode('Downloading {0} [{1}]'.format(pkg_to_install_name, branch_flattened_name), noise.quiet)
        # download the branch to the pkg_name_dir
        # NOTE(review): chdir changes the process-wide cwd and is never
        # restored here -- presumably callers tolerate that; confirm.
        os.chdir(pkg_name_dir)
        return_val = self.__cmd_output(self.install_download_cmd, verbose=noise.verbose)
        if return_val != 0:
            print("Could not properly download {0} [{1}] with {2}\n".format(pkg_to_install_name, branch_flattened_name, app_type))#args.repo_type))
            ### remove whatever may have been downloaded that didn't get successfully downloaded
            something_downloaded_or_already_in_pkg_name_dir = os.listdir(pkg_name_dir)
            if not something_downloaded_or_already_in_pkg_name_dir: # if nothing was downloaded or is already in the pkg_name_dir
                shutil.rmtree(pkg_name_dir) # if the install didn't work, then remove the pkg_name_dir
                if not os.listdir(self.pkg_type_install_dir): # if the pkg type install dir is empty, remove that
                    os.rmdir(self.pkg_type_install_dir)
                    if not os.listdir(self.lang_install_dir): # finally, if the lang install dir is empty, remove that
                        shutil.rmtree(self.lang_install_dir)
            # if the pkg_name_dir is not empty, then just remove the specific branch_flattened_name that was attempted to
            # be downloaded, and leave everything else in there alone.
            else:
                try:
                    shutil.rmtree(branch_flattened_name) #NOTE should probably use absolute dirs instead of this
                # need this b/c maybe it didn't even create the branch_to_install_name dir; or, git automatically
                # cleans up after itself, so the branch_to_install_name might not even exist b/c git deleted it already
                except: pass
            return None
        elif return_val == 0: # the download was sucessfull
            return True
def _installation_check(self, pkg_type, pkg_to_install_name, branch_name, everything_already_installed):
'''
To make sure that only one version of any given package can be turned on (/active) at any given
time for a specific version of the lang. If a package branch with the same name as an already
installed (turned off) pkg branch is attempting to be installed under the same pkg_type, then do
not allow this; however, do allow it for a pkg branch with the same name as an existing package
branch, but under a diff pkg type.
'''
pkg_name = pkg_to_install_name
all_branches_installed_for_pkgs_lang_ver = utils.branches_installed_for_given_pkgs_lang_ver(
self.lang_cmd,
pkg_to_install_name, everything_already_installed)
# make a list of any branch that is currently turned on for this package, regardless of pkg type;
# should only be one branch on at any given time!
any_package_branch_on = [branch for branch in all_branches_installed_for_pkgs_lang_ver if not branch.startswith('.__')]
# make a list of all branches (hidden renamed) currently installed for this package
#all_branches_already_installed = [branch.lstrip('.__') if branch.startswith('.__') else branch for branch in all_branches_installed_for_pkg]
#print('everything_already_installed:')
#print(everything_already_installed)
if self.lang_cmd in everything_already_installed:
lang_installed = self.lang_cmd
pkg_types_dict = everything_already_installed[self.lang_cmd]
else:
# if there are no packages installed (like when using for the first time for a language)
return True
for installed_pkg_type, pkgs_dict in pkg_types_dict.items():
for installed_pkg_name, branches_list in pkgs_dict.items():
# make a list of any branch that is currently turned on for this pkg name via this package type;
# should only be one branch on at a time for the package name across all pkg types.
pkg_branch_names_on_for_pkg_type = [branch for branch in branches_list if not branch.startswith('.__')]
# make a list of any branch that is currently turned off for this pkg name via package type
pkg_branch_names_off_for_pkg_type = [branch.lstrip('.__') for branch in branches_list if branch.startswith('.__')]
pkg_branch_names_all_for_pkg_type = pkg_branch_names_on_for_pkg_type + pkg_branch_names_off_for_pkg_type
# FIXME this part works, but refractor it so it isn't so nested
if any_package_branch_on:
#(ipy) (ipy)
if pkg_name == installed_pkg_name:
#(Github) (Github)
if pkg_type == installed_pkg_type:
#(ipy_master) (ipy_master)
if branch_name in pkg_branch_names_on_for_pkg_type:
print("Already installed & turned on.")
return False
#(ipy_master) (.__ipy_master renamed to ipy_master)
if branch_name in pkg_branch_names_off_for_pkg_type:
print("Already installed & turned off.")
return False
# these two statements below mean the same thing (i think); b/c if the branch is not
# in the installed branches for this pkg & pkg_type, then the branch would have to
# be (turned on) in a different pkg_type (under the same pkg name of course).
#(ipy_master) (anything but ipy_master)
if branch_name not in pkg_branch_names_all_for_pkg_type:
print("A branch of {0} is already turned on for {1}".format(pkg_name, lang_installed))
return False # b/c there is already a branch on somewhere
#(ipy_master) (in the list of any branch turned on for this pkg, regardless of pkg_type)
if branch_name in any_package_branch_on:
print("A branch of {0} is already turned on for {1}".format(pkg_name, lang_installed))
return False # b/c there is already a branch on somewhere
#(Github) (Local Repo)
elif pkg_type != installed_pkg_type:
#(ipy_master) (ipy_master)
if branch_name in pkg_branch_names_on_for_pkg_type:
#print("A branch of {} is already turned on under {} packages.".format(pkg_name, installed_pkg_type))
print("A branch of {0} is already turned on for {1}".format(pkg_name, lang_installed))
return False # b/c there is already a pkg branch turned on somewhere
#(ipy_master) (anything not ipy_master)
if branch_name not in pkg_branch_names_all_for_pkg_type:
print("A branch of {0} is already turned on for {1}".format(pkg_name, lang_installed))
return False # b/c there is already a pkg branch turned on somewhere
#if branch_name in any_package_branch_on:
#print("A branch of {} is already turned on.".format(pkg_name))
#return False # b/c there is already a branch on somewhere
### don't care about these tests
##(ipy) (sklearn)
#elif pkg_name != installed_pkg_name:
#(Github) (Github)
#if pkg_type == installed_pkg_type:
##(master) (master)
#if branch_name in installed_branch_names_raw:
#return False # b/c this doesn't matter
##(master) (anything not master)
#elif branch_name not in installed_branch_names_raw:
#return False # # b/c this doesn't matter
##(Github) (Local Repo)
#elif pkg_type != installed_pkg_type:
##(master) (master)
#if branch_name in installed_branch_names_raw:
#return False # b/c this doesn't matter
##(master) (anything but master)
#elif branch_name not in installed_branch_names_raw:
#return False # b/c this doesn't matter
elif not any_package_branch_on: # if no branch is turned on for this package
#(ipy) (ipy)
if pkg_name == installed_pkg_name:
#(Github) (Github)
if pkg_type == installed_pkg_type:
#(ipy_master) (ipy_master renamed from .__ipy_master)
if branch_name in pkg_branch_names_off_for_pkg_type:
print("Already installed & turned off.")
return False # b/c the branch is already installed for this pkg_name under this pkg_type but turned off.
#(ipy_master) (anything but ipy_master)
elif branch_name not in pkg_branch_names_off_for_pkg_type:
return True # b/c there are not any pkg branches turned on and this branch isn't already installed for this pkg_type
#(Github) (Local Repo)
#elif pkg_type != installed_pkg_type:
##(ipy_master) (ipy_master)
#if branch_name in pkg_branch_names_off_for_pkg_type:
# b/c it doesn't matter what branches are installed under a different pkg_type (so long as no branches are on)
#return True
##(ipy_master) (anything but ipy_master)
#elif branch_name not in pkg_branch_names_off_for_pkg_type:
# b/c it doesn't matter what branches are installed under a different pkg_type (so long as no branches are on)
#return True
### don't care about these tests
##(ipy) (sklearn)
#elif pkg_name != installed_pkg_name:
##(Github) (Github)
#if pkg_type == installed_pkg_type:
##(master) (master)
#if branch_name in installed_branch_names_renamed:
#return False # b/c this doesn't matter
##(master) (anything but master)
#elif branch_name not in installed_branch_names_renamed:
#return False # b/c this doesn't matter
##(Github) (Local Repo)
#elif pkg_type != installed_pkg_type:
##(master) (master)
#if branch_name in installed_branch_names_renamed:
#return False # b/c this doesn't matter
##(master) (anything but master)
#elif branch_name not in installed_branch_names_renamed:
#return False # b/c this doesn't matter
else:
# True means that the branch can be installed b/c it wasn't caught in one of the false returns above.
return True
# need to put pkg_to_install back in as an arg
    def install(self, pkg_to_install, args, noise, download_pkg=True, everything_already_installed=None):
        ''' installs the specified package's branch

        With download_pkg=True (the default) this performs a fresh install:
        resolve the package name & branch from args, verify the download url
        responds, download the repo, then build & install it.  With
        download_pkg=False it re-installs an already-downloaded branch -- the
        path used by turn_on(), which sets self.branch_to_turn_on_renamed
        beforehand.  everything_already_installed feeds the pre-install check.
        '''
        def do_install(pkg_to_install_name, branch_to_install):
            # build & install a branch that is already on disk; on a failed
            # install the partially-created dirs/logs are cleaned up again.
            pkg_install_dir = join(self.pkg_type_install_dir, pkg_to_install_name)
            pkg_logs_dir = join(self.pkg_type_logs_dir, pkg_to_install_name)
            branch_install_dir = join(pkg_install_dir, branch_to_install)
            branch_logs_dir = join(pkg_logs_dir, branch_to_install)
            # go into the branch install dir and try to install the branch
            os.chdir(branch_install_dir)
            record_file = self.lang_using._create_record_log_file(self.pkg_type_logs_dir, pkg_to_install_name, branch_to_install)
            install_cmd = self.lang_using.get_install_cmd(pkg_to_install_name, branch_to_install, self.lang_cmd, record_file)
            contents_of_pkg_branch_dir = os.listdir(branch_install_dir)
            # can only build when the language's setup file (e.g. setup.py) is present
            if self.lang_using.setup_file in contents_of_pkg_branch_dir:
                if download_pkg:
                    utils.when_not_quiet_mode('Building & Installing {0} [{1}]'.format(
                        pkg_to_install_name, branch_to_install), noise.quiet)
                else: # for turning branch back on
                    if noise.verbose:
                        print('Reinstalling {0} [{1}]'.format(pkg_to_install_name, branch_to_install))
                # make the log files directory for this pkg (need this for the record file)
                if not os.path.isdir(branch_logs_dir):
                    os.makedirs(branch_logs_dir)
                # see if the newly cloned dir installs properly (exit status 0 == success)
                return_val = self.__cmd_output(install_cmd, verbose=noise.verbose)
                if return_val == 0:
                    if download_pkg:
                        print('Successfully installed {0} [{1}]'.format(pkg_to_install_name, branch_to_install))
                    else:
                        if noise.verbose:
                            print('Successfully reinstalled {0} [{1}]'.format(pkg_to_install_name, branch_to_install))
                    # if the pkg installed properly, then add the cmd that performed the installation to the database
                    #handle_db_after_an_install(self.pkg_type, pkg_to_install_name, branch_to_install,
                                               #lang_cmd_for_install=self.lang_cmd,
                                               #db_pname=installation_db_path)
                else: # if it isn't 0, then it failed to install
                    # show output with a failed install in normal output mode (in verbose mode it will
                    # be printed out anyways from when trying to do the download and build).
                    if not noise.verbose:
                        try:
                            print('{0} {1}'.format(self.out.rstrip(), self.err.rstrip()))
                            print("\n\tCOULD NOT INSTALL {0} [{1}]".format(pkg_to_install_name, branch_to_install))
                            print("\tA likely cause is a dependency issue.")
                            print("\t...see Traceback for information.")
                        except UnicodeEncodeError:
                            # the captured output couldn't be encoded for this console; skip it
                            print("\n\tCOULD NOT INSTALL {0} [{1}]".format(pkg_to_install_name, branch_to_install))
                            print("\tA likely cause is a dependency issue.")
                    if not download_pkg:
                        # remove the stuff for both download_pkg or not, but only print this stuff when trying to turn a package
                        # back on (b/c when doing an install for the first time, the pkg won't have ever been used anyways).
                        print('Removing {0} [{1}]'.format(pkg_to_install_name, branch_to_install))
                        print("Reinstall a fresh install to use the package.")
                    # remove stuff with failed install
                    self._remove_install_dirs(pkg_to_install_name, branch_to_install, pkg_install_dir, branch_install_dir, noise)
                    # likewise, remove the pkg branch log dir
                    self._remove_log_dirs(pkg_to_install_name, branch_to_install, pkg_logs_dir, branch_logs_dir, noise)
            else:
                print("\n\tCANNOT INSTALL {0} [{1}]".format(pkg_to_install_name, branch_to_install))
                print("\tThere is no {0} in this repo.".format(self.lang_using.setup_file))
                if not download_pkg:
                    print('Removing {0} [{1}]'.format(pkg_to_install_name, branch_to_install))
                    print("Reinstall a fresh install to use the package.")
                # if no setup file, then remove the branch dir that was attempted to be downloaded & installed
                self._remove_install_dirs(pkg_to_install_name, branch_to_install, pkg_install_dir, branch_install_dir, noise)
        ######################### End of embedded function #########################
        if download_pkg: # this is for the initial installation
            #pkg_to_install_name, branch_to_install = self.parse_pkg_to_install_name(pkg_to_install)
            pkg_to_install_name = self.parse_pkg_to_install_name(args.pkg_to_install) # this is just the pkg_to_install's basename
            download_url = self.download_url.format(pkg_to_install=args.pkg_to_install)
            # check to see if the download url actually exists
            error_msg = "Error: could not get package {} from\n{}".format(pkg_to_install_name, download_url)
            if self.__class__.__name__ != 'LocalRepo':
                # NOTE(review): this bare `except:` also swallows KeyboardInterrupt/SystemExit
                # raised during the url check; consider narrowing it to `except Exception:`.
                try:
                    resp = urlopen(download_url)
                    if resp.getcode() != 200: # will be 200 if website exists
                        raise Exception
                except:
                    raise SystemExit(error_msg)
            if args.branch in {'master', 'default'}:
                # default branch: clone straight from the plain url
                #download_info = self.download_url.format(pkg_to_install=args.pkg_to_install)
                download_info = download_url
            else:
                # non-default branch: the repo-type specific cmd template selects the branch
                download_info = self.download_url_cmd.format(branch=args.branch, download_url=self.download_url)
            # flatten the branch name so it is safe to use as a directory name
            branch_flattened_name = utils.branch_name_flattener(args.branch)
            self.install_download_cmd = self.install_download_cmd.format(download_info=download_info, branch=branch_flattened_name)
            print('\n--> {0} [{1}]'.format(pkg_to_install_name, branch_flattened_name))
            # refuse to install when this branch (or another branch of this pkg) is already on
            should_it_be_installed = self._installation_check(args.pkg_type, pkg_to_install_name, branch_flattened_name,
                                                              everything_already_installed)
            if should_it_be_installed:
                if not os.path.isdir(self.pkg_type_install_dir):
                    os.makedirs(self.pkg_type_install_dir)
                if not os.path.isdir(self.pkg_type_logs_dir):
                    os.makedirs(self.pkg_type_logs_dir)
                download_success = self._download_pkg(pkg_to_install_name, branch_flattened_name, args, noise)
                # if the download fails, it is taken care of inside self._download_pkg
                if download_success:
                    do_install(pkg_to_install_name, branch_flattened_name)
        else: # when don't have to download pkg first -- this is for turning pkg back on (from being turned off)
            # pkg_to_install is passed to the install func from the turn_on method & self.branch_to_turn_on_renamed is from turn_on
            pkg_to_install_name = pkg_to_install
            branch_to_install = self.branch_to_turn_on_renamed
            do_install(pkg_to_install_name, branch_to_install)
def update(self, lang_to_update, pkg_to_update_name, branch_to_update, noise):
''' updates the specified package's branch '''
pkg_update_dir = join(self.pkg_type_install_dir, pkg_to_update_name)
branch_update_dir = join(pkg_update_dir, branch_to_update)
os.chdir(branch_update_dir)
print('\n--> {0} [{1}]'.format(pkg_to_update_name, branch_to_update))
utils.when_not_quiet_mode('Checking for updates', noise.quiet)
return_val = self.__cmd_output(self.update_cmd, verbose=False)
# return_val = self.__cmd_output(self.update_cmd, verbose=noise.verbose)
if return_val != 0:
try:
print('{0} {1}'.format(self.out.rstrip(), self.err.rstrip()))
except UnicodeEncodeError:
pass
print("\nCould not properly update {0} [{1}]".format(pkg_to_update_name, branch_to_update))
print("Likely a network connection error. Try again in a moment.")
return
output = self.out
# do this b/c hg & bzr repos have multi-line output and the last item is all we want; for git
# it doesn't matter, there is only a one item output, so it will be what we want regardless.
output_end = output.splitlines()[-1]
if self.up_to_date_output in output_end: # this is if it's already up to date.
#print(output_end) # this is different for the different repo_types, so just print out a common thing:
# print('Already up to date.')
print(self.up_to_date_output)
return
else: # this is if it's not up to date (and if not, then update it here)
if noise.verbose:
print(output.rstrip()) # prints all output from the pull
# see if the package installs from the update correctly
# use the same version of the language that was used to install the package to also update the package.
record_file = self.lang_using._create_record_log_file(self.pkg_type_logs_dir, pkg_to_update_name, branch_to_update)
# could also use the json db to get the lang for updating as well
update_install_cmd = self.lang_using.get_install_cmd(pkg_to_update_name, branch_to_update, lang_to_update, record_file)
contents_of_pkg_branch_dir = os.listdir(branch_update_dir)
if self.lang_using.setup_file in contents_of_pkg_branch_dir:
return_val = self.__cmd_output(update_install_cmd, verbose=noise.verbose)
else:
print("UPDATE FAILED for {0} [{1}]".format(pkg_to_update_name, branch_to_update))
print("There is no longer a {0} to use for installing the package.".format(self.lang_using.setup_file))
print("Try removing the package & then reinstalling it.")
return
if return_val == 0:
### combine the new log file just produced from the update with the other log file that
# already exists from the initial install (or a previous update); there will only be
# two log files at this point -- one older, perhaps already a combination of previous log
# files, and this newly created log file. And when done with this, there will only be
# one log file again -- that which is a combination of all previous log files.
pkg_logs_dir = join(self.pkg_type_logs_dir, pkg_to_update_name)
branch_logs_dir = join(pkg_logs_dir, branch_to_update)
record_fnames = glob.glob(join(branch_logs_dir, 'log_*.txt'))
record_files = [open(rec_file, 'r').readlines() for rec_file in record_fnames]
record_files_combined = list(set([rf for rf in itertools.chain.from_iterable(record_files)]))
# create a new combined log file
record_file = self.lang_using._create_record_log_file(self.pkg_type_logs_dir, pkg_to_update_name, branch_to_update)
with open(record_file, 'w') as f:
for i in record_files_combined:
f.write(i)
# delete all old logfiles, so that there is only the one combined log file that remains
for rf in record_fnames:
os.remove(rf)
print('Successfully updated {0} [{1}]'.format(pkg_to_update_name, branch_to_update))
else: # if not 0, then it failed to install properly
if not noise.verbose: # when in quiet mode, show output with a failed update installation to see the Traceback
try:
print('{0} {1}'.format(self.out.rstrip(), self.err.rstrip()))
print(utils.status("\tUPDATE FAILED for {0} [{1}]".format(pkg_to_update_name, branch_to_update)))
print("\tA likely cause is a dependency issue, eg. needing a (newer) dependency.")
print("\t...see Traceback for information.")
except UnicodeEncodeError:
print(utils.status("\tUPDATE FAILED for {0} [{1}]".format(pkg_to_update_name, branch_to_update)))
print("\tA likely cause is a dependency issue, eg. needing a (newer) dependency.")
# TODO FIXME maybe something else needs to be done here -- like removing the update since it failed to install?
def _remove_installed_files(self, pkg_to_remove_name, branch_to_remove_name, branch_installation_log_files, noise):
''' remove the files installed in the userbase by using the install log file '''
for branch_install_log in branch_installation_log_files:
# platform neutral way
with open(branch_install_log, 'r') as install_log:
for ln in install_log:
ln = ln.rstrip()
# not completely sure about this, but i think it works fine.
if os.path.exists(ln):
try:
if os.path.isfile(ln):
os.remove(ln)
elif os.path.isdir(ln):
shutil.rmtree(ln)#, ignore_errors=True)
except:
if noise.verbose:
print("Error: Exception in removing {0} [{1}] {2}".format(pkg_to_remove_name,
branch_to_remove_name,
str(sys.exc_info())))
#else:
#print("Not found: {0}".format(ln))
def _remove_empty_dirs_recursively(self, starting_dir, noise):
''' recursively removes all empty dirs under the starting_dir '''
for root, dirs, files in os.walk(starting_dir, topdown=False):
for dir_name in dirs:
d_path = join(root, dir_name)
if not os.listdir(d_path): #to check wither the dir is empty
if noise.verbose:
print("Deleting empty dir: {0}".format(d_path))
os.rmdir(d_path)
def _remove_log_dirs(self, pkg_to_remove_name, branch_to_remove_name, pkg_logs_dir, branch_logs_dir, noise):
if noise.verbose:
print('Removing installation log files for {0} [{1}]'.format(pkg_to_remove_name, branch_to_remove_name))
if os.path.isdir(branch_logs_dir): # need this check for turned off branches (b/c if turned off, then this dir won't exist anyways)
shutil.rmtree(branch_logs_dir)
def remove_dir(dir_to_remove, str_out):
if os.path.isdir(dir_to_remove):
if not os.listdir(dir_to_remove):
if noise.verbose:
print(str_out)
shutil.rmtree(dir_to_remove)
# checks whether a pkg_logs_dir is completely empty (meaning, there are no branches for the pkg),
# and if empty, then this removes the empty pkg_logs_dir
str_out = "Removing the package logs dir {0} because there are no branches in it...".format(pkg_to_remove_name)
remove_dir(pkg_logs_dir, str_out)
# likewise, this checks whether a pkg_type_logs_dir is completely empty (meaning, there are no packages
# for the pkg type), and if empty, then this removes the empty pkg_type_log_dir.
str_out = "Removing the package type logs dir {0} because there are no packages in it...".format(self.pkg_type_logs_dir)
remove_dir(self.pkg_type_logs_dir, str_out)
# and the same goes for the lang_logs_dir
str_out = "Removing the language logs dir {0} because there are no package types in it...".format(self.lang_logs_dir)
remove_dir(self.lang_logs_dir, str_out)
def _remove_install_dirs(self, pkg_to_remove_name, branch_to_remove_name, pkg_dir, branch_dir, noise):
if noise.verbose:
print('Removing the downloaded package contents for {0} [{1}]'.format(pkg_to_remove_name, branch_to_remove_name))
shutil.rmtree(branch_dir)
def remove_dir(dir_to_remove, str_out):
if not os.listdir(dir_to_remove):
if noise.verbose:
print(str_out)
shutil.rmtree(dir_to_remove)
### checks whether a pkg_dir is completely empty (meaning, there are no installed branches for the pkg), and
# if it is empty, then this removes the empty pkg_dir (its corresponding pkg_logs_dir is removed above).
str_out = 'Removing the package dir {0} because there are no branches installed in it...'.format(pkg_to_remove_name)
remove_dir(pkg_dir, str_out)
### likewise, this checks whether a pkg_type_install_dir is completely empty (meaning, there are no installed pkgs for the pkg type),
# and if it is empty, then this removes the empty pkg_type_install_dir (its corresponding pkg_type_logs_dir is removed above).
str_out = 'Removing the package type dir {0} because there are no packages installed in it...'.format(self.pkg_type_install_dir)
remove_dir(self.pkg_type_install_dir, str_out)
### and the same goes for the lang_install_dir as well.
str_out = 'Removing the language install dir {0} because there are no packages installed in it...'.format(self.lang_install_dir)
remove_dir(self.lang_install_dir, str_out)
def remove(self, pkg_to_remove_name, branch_to_remove_name, noise):
''' removes/uninstalls the specified package's branch, and if the last branch is removed from a package dir,
then the package dir is removed as well. Likewise, if the last package is removed from a package type, then
the package type dir is removed. Likewise for the language dir. And the same procedure also goes for the
install_logs dirs; meaning, if they are empty, then they get removed too '''
if branch_to_remove_name.startswith('.__'):
# for a branch that is turned off, need to make sure that the branches's dir (with ".__") is what is removed.
actual_dir_name_for_branch_to_remove = branch_to_remove_name
branch_to_remove_name = branch_to_remove_name.lstrip('.__')
else:
# for a branch that is turned on, this will be the same thing
actual_dir_name_for_branch_to_remove = branch_to_remove_name
utils.when_not_quiet_mode('\nRemoving {0} [{1}]'.format(pkg_to_remove_name, branch_to_remove_name), noise.quiet)
# remove the installed branch from the installation area (the area produced from using --user)
if noise.verbose:
print('Removing build & installation files for {0} [{1}]'.format(pkg_to_remove_name, branch_to_remove_name))
pkg_logs_dir = join(self.pkg_type_logs_dir, pkg_to_remove_name)
branch_logs_dir = join(pkg_logs_dir, branch_to_remove_name)
branch_installation_log_files = glob.glob(join(branch_logs_dir, 'log_*.txt')) # there should only be one (combined) logfile at any time.
# uninstall all stuff installed for this branch, as indicated is installed via its install log file
self._remove_installed_files(pkg_to_remove_name, branch_to_remove_name, branch_installation_log_files, noise)
# remove the installation log files (and the subsequent empty dirs) just used.
self._remove_log_dirs(pkg_to_remove_name, branch_to_remove_name, pkg_logs_dir, branch_logs_dir, noise)
# remove the downloaded branch from the pkg_dir (and the subsequent empty dirs).
pkg_dir = join(self.pkg_type_install_dir, pkg_to_remove_name)
branch_dir = join(pkg_dir, actual_dir_name_for_branch_to_remove)
self._remove_install_dirs(pkg_to_remove_name, branch_to_remove_name, pkg_dir, branch_dir, noise)
# look recursively to see if the dirs in userbase are empty and remove those empty dirs.
self._remove_empty_dirs_recursively(user_base, noise)
# remove the branch listing from the installation_db
#if noise.verbose:
#print('Removing the {0} [{1}] listing from {2}'.format(pkg_to_remove_name, branch_to_remove_name, installation_db))
#handle_db_for_removal(self.pkg_type, pkg_to_remove_name, branch_to_remove_name, installation_db_path)
print('Successfully uninstalled {0} [{1}]'.format(pkg_to_remove_name, branch_to_remove_name))
#when_not_quiet_mode("Don't forget to remove {0}^{1} from your {2} file.".format(pkg_to_remove_name, branch_to_remove_name, packages_file), noise.quiet)
def turn_off(self, pkg_to_turn_off_name, branch_to_turn_off_name, noise):
''' this makes the package inactive, so that other versions of the same package can be turned on or so
that a system level package of the same name (if there is one) can be used. By being inactive, it hides
the installed pkg (by renaming it as, '.__branch_name'), so that it doesn't need to be re-downloaded &
re-built if turned back on; note however, it does actually remove the stuff put into userbase to
remove the branches's files that were installed into the path. This is nice b/c the downloads and builds
are what take so long for most package installations.'''
utils.when_not_quiet_mode('\nTurning off {0} [{1}]'.format(pkg_to_turn_off_name, branch_to_turn_off_name), noise.quiet)
# remove stuff from userbase with the log file
if noise.verbose:
print('Removing built & installed files for {0} [{1}]'.format(pkg_to_turn_off_name, branch_to_turn_off_name))
pkg_logs_dir = join(self.pkg_type_logs_dir, pkg_to_turn_off_name)
branch_logs_dir = join(pkg_logs_dir, branch_to_turn_off_name)
branch_installation_log_files = glob.glob(join(branch_logs_dir, 'log_*.txt')) # there should only be one (combined) logfile at any time.
# uninstall all stuff installed for this branch, as indicated is installed via its install log file
self._remove_installed_files(pkg_to_turn_off_name, branch_to_turn_off_name, branch_installation_log_files, noise)
# remove the installation log files (and subsequent empty dirs) just used
self._remove_log_dirs(pkg_to_turn_off_name, branch_to_turn_off_name, pkg_logs_dir, branch_logs_dir, noise)
# look recursively to see if the dirs in userbase are empty and removes those empty dirs.
self._remove_empty_dirs_recursively(user_base, noise)
# rename the branch dir name (in the pkg_dir)
if noise.verbose:
print('Renaming the downloaded package {0} [{1}]'.format(pkg_to_turn_off_name, branch_to_turn_off_name))
pkg_dir = join(self.pkg_type_install_dir, pkg_to_turn_off_name)
branch_dir = join(pkg_dir, branch_to_turn_off_name)
branch_to_turn_off_renamed = '.__{0}'.format(branch_to_turn_off_name)
branch_to_turn_off_renamed_dir = join(pkg_dir, branch_to_turn_off_renamed)
os.rename(branch_dir, branch_to_turn_off_renamed_dir)
# rename the branch in the installation_db
#if noise.verbose:
#print('Renaming the package {0} [{1}] in the {2} file.'.format(
#pkg_to_turn_off_name, branch_to_turn_off_name, installation_db))
#handle_db_for_branch_renaming(self.pkg_type, pkg_to_turn_off_name, branch_to_turn_off_name,
#branch_to_turn_off_renamed, db_pname=installation_db_path)
print('Successfully turned off {0} [{1}]'.format(pkg_to_turn_off_name, branch_to_turn_off_name))
def _turn_on_check(self, pkg_type, pkg_to_turn_on_name, branch_to_turn_on, everything_already_installed, noise):
all_branches_installed_for_pkgs_lang_ver = utils.branches_installed_for_given_pkgs_lang_ver(
self.lang_cmd, pkg_to_turn_on_name, everything_already_installed)
any_package_branch_on = [branch for branch in all_branches_installed_for_pkgs_lang_ver if not branch.startswith('.__')]
# NOTE something about this seems very wrong, but just keeping incase I'm missing something.
#if self.lang_cmd in everything_already_installed:
#pkg_types_dict = everything_already_installed[self.lang_cmd]
#for installed_pkg_type, pkgs_dict in pkg_types_dict.items():
#for installed_pkg_name, branches_list in pkgs_dict.items():
#if any_package_branch_on:
#print("Cannot turn on {0} {1} [{2}] {3} because".format(pkg_type, pkg_to_turn_on_name, branch_to_turn_on, self.lang_cmd))
#utils.when_not_quiet_mode("a version of {0} is already turned on for {1}".format(pkg_to_turn_on_name, self.lang_cmd), noise.quiet)
#utils.when_not_quiet_mode("[Execute `{} list` to see currently turned on packages]".format(name), noise.quiet)
#return False
#else:
#return True
if any_package_branch_on:
print("Cannot turn on {0} {1} [{2}] {3} because".format(pkg_type, pkg_to_turn_on_name, branch_to_turn_on, self.lang_cmd))
utils.when_not_quiet_mode("a version of {0} is already turned on for {1}".format(pkg_to_turn_on_name, self.lang_cmd), noise.quiet)
utils.when_not_quiet_mode("[Execute `{} list` to see currently turned on packages]".format(name), noise.quiet)
return False
else:
return True
def turn_on(self, pkg_to_turn_on_name, branch_to_turn_on_name, args, everything_already_installed, noise):
self.branch_to_turn_on_renamed = branch_to_turn_on_renamed = branch_to_turn_on_name.lstrip('.__')
utils.when_not_quiet_mode('\nAttempting to turn on {0} [{1}]'.format(pkg_to_turn_on_name, branch_to_turn_on_renamed), noise.quiet)
should_turn_back_on = self._turn_on_check(self.pkg_type, pkg_to_turn_on_name, branch_to_turn_on_renamed, everything_already_installed, noise)
if should_turn_back_on:
# rename the branch dir name back to it's original name
if noise.verbose:
print('Renaming {0} {1}'.format(pkg_to_turn_on_name, branch_to_turn_on_renamed))
pkg_dir = join(self.pkg_type_install_dir, pkg_to_turn_on_name)
branch_dir_raw_name = join(pkg_dir, branch_to_turn_on_name)
branch_dir_renamed = join(pkg_dir, branch_to_turn_on_renamed)
os.rename(branch_dir_raw_name, branch_dir_renamed)
# rename the branch back to it's original name in the installation_db
#if noise.verbose:
#print('Renaming the package {0} [{1}] in the {2} file.'.format(
#pkg_to_turn_on_name, branch_to_turn_on_name, installation_db))
#handle_db_for_branch_renaming(self.pkg_type, pkg_to_turn_on_name, branch_to_turn_on_name,
#branch_to_turn_on_renamed, db_pname=installation_db_path)
# reinstall the branch files from the branch installation dir back into userbase
if noise.verbose:
print('Reinstalling {0} {1}'.format(pkg_to_turn_on_name, branch_dir_renamed))
Package.install(self, pkg_to_turn_on_name, args, noise, download_pkg=False)
print('Successfully turned on {0} [{1}]'.format(pkg_to_turn_on_name, branch_to_turn_on_renamed))
class Git(Package):
    ''' package type backed by a git repository '''
    def __init__(self, lang_arg, pkg_type, install_dirs, args):
        self.repo_type = 'git'
        # used to verify that the git executable is available
        self.application_check_cmd = '{} --version'.format(self.repo_type)
        super(Git, self).__init__(lang_arg, pkg_type, install_dirs, args)
    def install(self, pkg_to_install, args, noise, **kwargs):
        ''' set the git-specific download command templates, then defer to Package '''
        # a non-default branch is selected with git's -b flag
        self.install_download_cmd = 'git clone {download_info} {branch}'
        self.download_url_cmd = '-b {branch} {download_url}'
        Package.install(self, pkg_to_install, args, noise, **kwargs)
    def update(self, lang_to_update, pkg_to_update, branch_to_update, noise):
        ''' set the git-specific pull command & its "nothing new" marker, then defer to Package '''
        self.up_to_date_output = 'Current branch {} is up to date.'.format(branch_to_update)
        self.update_cmd = 'git pull'
        Package.update(self, lang_to_update, pkg_to_update, branch_to_update, noise)
class Mercurial(Package):
    ''' package type backed by a mercurial (hg) repository '''
    def __init__(self, lang_arg, pkg_type, install_dirs, args):
        self.repo_type = 'hg'
        # used to verify that the hg executable is available
        self.application_check_cmd = '{} --version'.format(self.repo_type)
        super(Mercurial, self).__init__(lang_arg, pkg_type, install_dirs, args)
    def install(self, pkg_to_install, args, noise, **kwargs):
        ''' set the hg-specific download command templates, then defer to Package '''
        # a non-default branch is selected with hg's -b flag
        self.install_download_cmd = 'hg clone {download_info} {branch}'
        self.download_url_cmd = '-b {branch} {download_url}'
        Package.install(self, pkg_to_install, args, noise, **kwargs)
    def update(self, lang_to_update, pkg_to_update, branch_to_update, noise):
        ''' set the hg-specific pull command & its "nothing new" marker, then defer to Package '''
        # -u also updates the working dir in the same step
        self.up_to_date_output = 'no changes found'
        self.update_cmd = 'hg pull -u'
        Package.update(self, lang_to_update, pkg_to_update, branch_to_update, noise)
class Bazaar(Package):
    ''' package type backed by a bazaar (bzr) repository '''
    def __init__(self, lang_arg, pkg_type, install_dirs, args):
        self.repo_type = 'bzr'
        # used to verify that the bzr executable is available
        self.application_check_cmd = '{} --version'.format(self.repo_type)
        super(Bazaar, self).__init__(lang_arg, pkg_type, install_dirs, args)
    def install(self, pkg_to_install, args, noise, **kwargs):
        ''' set the bzr-specific download command templates, then defer to Package '''
        # bzr says 'branch' where git/hg say 'clone'
        self.install_download_cmd = 'bzr branch {download_info} {branch}'
        self.download_url_cmd = '{branch} {download_url}'
        Package.install(self, pkg_to_install, args, noise, **kwargs)
    def update(self, lang_to_update, pkg_to_update, branch_to_update, noise):
        ''' set the bzr-specific pull command & its "nothing new" marker, then defer to Package '''
        self.up_to_date_output = 'No revisions or tags to pull.'
        self.update_cmd = 'bzr pull'
        Package.update(self, lang_to_update, pkg_to_update, branch_to_update, noise)
class RepoTypeCheck(Git, Mercurial, Bazaar):
    ''' dispatches install/update to the right repo-type base class: installs go
    by self.repo_type, updates by which VCS metadata dir exists on disk '''
    def install(self, pkg_to_install, args, noise, **kwargs):
        dispatch = {'git': Git, 'hg': Mercurial, 'bzr': Bazaar}
        repo_cls = dispatch.get(self.repo_type)
        if repo_cls is not None:
            repo_cls.install(self, pkg_to_install, args, noise, **kwargs)
    def update(self, lang_to_update, pkg_to_update, branch_to_update, noise):
        branch_install_dir = join(join(self.pkg_type_install_dir, pkg_to_update), branch_to_update)
        dir_contents = os.listdir(branch_install_dir)
        # the VCS metadata dir present in the checkout tells us which tool made it
        for marker, repo_cls in (('.git', Git), ('.hg', Mercurial), ('.bzr', Bazaar)):
            if marker in dir_contents:
                repo_cls.update(self, lang_to_update, pkg_to_update, branch_to_update, noise)
                break
class Github(Git):
    ''' package type for repos hosted on github.com (always git) '''
    def install(self, pkg_to_install, args, noise, **kwargs):
        self.repo_type = 'git'
        # pkg_to_install is the "user/repo" portion of the github url
        self.download_url = 'https://github.com/{pkg_to_install}'.format(pkg_to_install=pkg_to_install)
        Git.install(self, pkg_to_install, args, noise, **kwargs)
class Bitbucket(RepoTypeCheck):
    ''' package type for repos hosted on bitbucket.org (git or hg) '''
    def install(self, pkg_to_install, args, noise, **kwargs):
        self.repo_type = args.repo_type
        # the very same url works for both supported repo types
        if self.repo_type in ('hg', 'git'):
            self.download_url = 'https://bitbucket.org/{pkg_to_install}'.format(pkg_to_install=pkg_to_install)
        RepoTypeCheck.install(self, pkg_to_install, args, noise, **kwargs)
class LocalRepo(RepoTypeCheck):
    ''' package type for repos that already live on the local filesystem '''
    def install(self, pkg_to_install, args, noise, **kwargs):
        self.repo_type = args.repo_type
        # a local filesystem path plays the role of the "download url"
        self.download_url = pkg_to_install
        RepoTypeCheck.install(self, pkg_to_install, args, noise, **kwargs)
# TODO to add in ability to use urls for ssh access and the like.
#class RemoteRepo(RepoTypeCheck):
    #def install(self, pkg_to_install, noise):
        #pass
# NOTE(review): everything inside the string literal below is disabled scratch
# code for a future "Stable" (PyPI-based) package type; it is never executed.
'''
class Stable(Package):
    def __init__(self, args, install_dirs):
        self.repo_type = 'stable'
        self.info_url = 'https://pypi.python.org/pypi/{pkg_name}'
        #self.application_check_cmd = 'git --version' # need to do a check to see if pkg exists in the first place
        super(Stable, self).__init__(args, install_dirs)
    def install(self, args, noise, **kwargs):
        #self.download_url = 'https://pypi.python.org/pypi/{pkg_name}/json'
        #url_data = urllib.urlopen(self.download_url)
        #data = json.loads(url_data.read())
        ## # latest pkg version from pypi
        #self.pkg_version = data['info']['version'] # this is the name that the dir gets (instead of a branch name)
        self.download_url_cmd = '-b {branch} {download_url}'
        self.install_download_cmd = 'git clone (download_info} {branch}' #(dir name)
        Package.install(self, args, noise, **kwargs)
    def update(self):
        if pkg_version < current_pkg_version:
            #then update pkg
            pass
try: #### for python2 # FIXME this is too hacky
    from xmlrpclib import ServerProxy
    from urllib import urlopen
except ImportError: #### for python3
    from xmlrpc.client import ServerProxy
    from urllib.request import urlopen
pkg_name = 'ipython' # to be passed into the script
client = ServerProxy('http://pypi.python.org/pypi')
# see if it can be downloaded
all_packages_available = client.list_packages()
if pkg_name not in all_packages_available:
    raise SystemExit("{} not available for download")
pkg_version = client.package_releases(pkg_name)[0] # a list of 1 item
pkg_info = client.release_urls(pkg_name, pkg_version) # will be a list of dicts
download_urls = [d['url'] for d in pkg_info if 'url' in d] # could be .tar.gz/.zip/etc.
download_url = download_urls[0] # how to decide whether to pick the .zip/.tar.gz/etc?
download_url_basename = os.path.basename(download_url)
#urllib.urlretrieve(download_urls[0], '/tmp/{}'.format(download_url_basename))
with open('/tmp/{}'.format(download_url_basename), 'wb') as f:
    f.write(urlopen(download_url).read())
'''
def create_pkg_inst(lang_arg, pkg_type, install_dirs, args=None, packages_file=None):
    """Instantiate the installer class matching *pkg_type*.

    ``install_dirs`` is a dict with the installed_pkgs_dir and the
    install_logs_dir.  ``args`` is kept for possible future use.

    Raises SystemExit with a descriptive message when *pkg_type* is not a
    recognized package type.
    """
    # for future pkg_types, just add them to this dict
    supported_pkg_types = dict(github=Github, bitbucket=Bitbucket,
                               local=LocalRepo
                               #remote=RemoteRepo # TODO
                               #stable=Stable # TODO
                               )
    try:
        # Keep the try narrow: only the dict lookup may raise KeyError here,
        # so a KeyError raised inside an installer's constructor is not
        # misreported as an unknown package type.
        pkg_type_cls = supported_pkg_types[pkg_type]
    except KeyError:
        if packages_file:  # installs from the pkgs file are the only thing that get this argument
            where = ' in your {0}'.format(packages_file)
        else:
            where = ''
        raise SystemExit('\nError: {0}{1} is an unrecognized package type.\n'.format(pkg_type, where))
    # make a class instance of the relevant type
    return pkg_type_cls(lang_arg, pkg_type, install_dirs, args)
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Flask + Requests-OAuthlib example.
This example demonstrates how to integrate a server application with
Authentiq Connect, using standard OAuth 2.0. It uses the popular
requests-oauthlib module to make this trivial in Flask.
As with all plain OAuth 2.0 integrations, we use the UserInfo endpoint to
retrieve the user profile after authorization. Check out our native
AuthentiqJS snippet or an OpenID Connect library to optimise this.
"""
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals
)
import os
import oauthlib.oauth2.rfc6749.errors as oauth2_errors
import requests
from flask import (
Flask,
abort,
g,
redirect,
render_template,
request,
session,
url_for
)
from requests_oauthlib import OAuth2Session
class Config(object):
    """
    Flask configuration container.
    """
    # Development settings; DEBUG must be off in production.
    DEBUG = True
    TESTING = False
    # Session-signing key; replace with a random secret outside of demos.
    SECRET_KEY = "wu8EiPh2LeeChaikoh3doo2n"
# Authentiq Connect provider endpoints (standard OAuth 2.0 URLs).
AUTHENTIQ_BASE = "https://connect.authentiq.io/"
AUTHORIZE_URL = AUTHENTIQ_BASE + "authorize"
TOKEN_URL = AUTHENTIQ_BASE + "token"
USERINFO_URL = AUTHENTIQ_BASE + "userinfo"

# The following app is registered at Authentiq Connect.
CLIENT_ID = os.environ.get("CLIENT_ID", "examples-flask-native")
CLIENT_SECRET = os.environ.get("CLIENT_SECRET", "ed25519")

# Personal details requested from the user. See the "scopes_supported" key in
# the following JSON document for an up to date list of supported scopes:
#
#   https://connect.authentiq.io/.well-known/openid-configuration
#
REQUESTED_SCOPES = ["openid", "aq:name", "email", "aq:push"]

# Redirect target after authorization; must match the URL registered
# with the client above, hence the localhost requirement.
PORT = 8000
REDIRECT_URL = "http://localhost:%d/authorized" % PORT
DISPLAY = "modal"

app = Flask(__name__)
app.config.from_object(Config)
@app.before_request
def requests_session():
    """Populate ``g.user``/``g.userinfo`` from the session's OAuth2 token.

    Runs before every request: when a token is present in the session it is
    used to fetch the OpenID Connect UserInfo document; otherwise the
    request proceeds anonymously with both globals left as None.
    """
    g.user = g.userinfo = None

    # TODO: all this stuff is plain oauth2, for oidc it's much simpler
    # should support both flows here for demo purposes
    if "token" not in session:
        return

    # requests-oauthlib places the stored access_token in the
    # Authorization header for us.
    oauth = OAuth2Session(CLIENT_ID, token=session.get("token"))

    # Retrieve an OpenID Connect compatible UserInfo structure from the
    # provider. This round-trip can be optimized out when using an OIDC
    # or native Authentiq Connect client.
    try:
        profile = oauth.get(USERINFO_URL).json()
    except ValueError:
        # Response body was not valid JSON: treat as signed out.
        app.logger.warning("No user is signed in")
        g.user = g.userinfo = None
    except oauth2_errors.OAuth2Error as e:
        status = e.status_code or 400
        detail = "Provider returned: " + (e.description or e.error)
        app.logger.error("%d: %s" % (status, detail))
        g.user = g.userinfo = None
    except requests.exceptions.HTTPError as e:
        # The HTTP request to the UserInfo endpoint failed outright.
        abort(502, description="Request to userinfo endpoint failed: " +
              e.response.reason)
    else:
        # Store user and userinfo as Flask globals.
        g.user = profile["sub"]
        g.userinfo = profile
        print("User {} is signed in".format(g.user))
@app.route("/")
def index():
state = None
# if user is not logged in, then create a new session
if g.user is None:
# Check if redirect_uri matches with the one registered with the
# example client.
assert url_for("authorized", _external=True) == REDIRECT_URL, (
"For this demo to work correctly, please make sure it is "
"hosted on localhost, so that the redirect URL is exactly " +
REDIRECT_URL + ".")
# Initialise an authentication session. Here we pass in scope and
# redirect_uri explicitly, though when omitted defaults will be taken
# from the registered client.
authentiq = OAuth2Session(
client_id=CLIENT_ID,
scope=REQUESTED_SCOPES,
redirect_uri=url_for("authorized", _external=True),
)
# Build the authorization URL and retrieve some client state.
authorization_url, state = authentiq.authorization_url(AUTHORIZE_URL)
# Save state to match it in the response.
session["state"] = state
# Redirect to the Authentiq Connect authentication endpoint.
return render_template("index.html",
provider_uri=AUTHENTIQ_BASE,
client_id=CLIENT_ID,
scope=" ".join(REQUESTED_SCOPES),
redirect_uri=REDIRECT_URL,
state=state,
display=DISPLAY,
logout_uri=url_for(".logout"))
@app.route("/authorized")
def authorized():
"""
OAuth 2.0 redirection point.
"""
# Pass in our client side crypto state; requests-oauthlib will
# take care of matching it in the OAuth2 response.
authentiq = OAuth2Session(CLIENT_ID, state=session.get("state"))
try:
error = request.args["error"]
oauth2_errors.raise_from_error(error, request.args)
except KeyError:
pass
except oauth2_errors.OAuth2Error as e:
code = e.status_code or 400
description = "Provider returned: " + (e.description or e.error)
app.logger.error("%d: %s" % (code, description))
# Redirect to the Authentiq Connect authentication endpoint.
return render_template("authorized.html",
provider_uri=AUTHENTIQ_BASE,
client_id=CLIENT_ID,
redirect_uri=REDIRECT_URL,
state=session.get("state"),
display=DISPLAY,
redirect_to=url_for(".index"))
try:
# Use our client_secret to exchange the authorization code for a
# token. Requests-oauthlib parses the redirected URL for us.
# The token will contain the access_token, a refresh_token, and the
# scope the end-user consented to.
token = authentiq.fetch_token(TOKEN_URL,
client_secret=CLIENT_SECRET,
authorization_response=request.url)
session["token"] = token
app.logger.info("Received token: %s" % token)
# The incoming request looks flaky, let's not handle it further.
except oauth2_errors.OAuth2Error as e:
description = "Request to token endpoint failed: " + \
(e.description or e.error)
abort(e.status_code or 400, description=description)
# The HTTP request to the token endpoint failed.
except requests.exceptions.HTTPError as e:
code = e.response.status_code or 502
description = "Request to token endpoint failed: " + e.response.reason
abort(code, description=description)
# Display the structure, use userinfo["sub"] as the user's UUID.
# return jsonify(userinfo)
# Redirect to the Authentiq Connect authentication endpoint.
return render_template("authorized.html",
provider_uri=AUTHENTIQ_BASE,
client_id=CLIENT_ID,
redirect_uri=REDIRECT_URL,
state=session.get("state"),
display=DISPLAY,
redirect_to=url_for(".index"))
@app.route("/logout")
def logout():
for key in {"user", "token"}:
try:
del session[key]
except KeyError:
pass
return redirect(url_for(".index"))
if __name__ == "__main__":
if app.debug:
import os
# Allow insecure oauth2 when debugging
os.environ["OAUTHLIB_INSECURE_TRANSPORT"] = "1"
# Explicitly set `host=localhost` in order to get the correct redirect_uri.
app.run(host="localhost", port=PORT)
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 OpenStack Foundation
# Copyright 2012 Nebula, Inc.
# Copyright (c) 2012 X.commerce, a business unit of eBay Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
import logging
from django.conf import settings
from django.utils.functional import cached_property # noqa
from django.utils.translation import ugettext_lazy as _
from novaclient.v1_1 import client as nova_client
from novaclient.v1_1.contrib import list_extensions as nova_list_extensions
from novaclient.v1_1 import security_group_rules as nova_rules
from novaclient.v1_1 import security_groups as nova_security_groups
from novaclient.v1_1 import servers as nova_servers
from horizon import conf
from horizon.utils import functions as utils
from horizon.utils.memoized import memoized # noqa
from openstack_dashboard.api import base
from openstack_dashboard.api import network_base
LOG = logging.getLogger(__name__)

# API static values
INSTANCE_ACTIVE_STATE = 'ACTIVE'  # nova server status for a running instance
VOLUME_STATE_AVAILABLE = "available"  # volume state meaning "attachable"
DEFAULT_QUOTA_NAME = 'default'
class VNCConsole(base.APIDictWrapper):
    """Wrapper for the "console" dictionary returned by the
    novaclient.servers.get_vnc_console method.
    """
    # Only these keys of the console dict are exposed.
    _attrs = ['url', 'type']
class SPICEConsole(base.APIDictWrapper):
    """Wrapper for the "console" dictionary returned by the
    novaclient.servers.get_spice_console method.
    """
    # Only these keys of the console dict are exposed.
    _attrs = ['url', 'type']
class RDPConsole(base.APIDictWrapper):
    """Wrapper for the "console" dictionary returned by the
    novaclient.servers.get_rdp_console method.
    """
    # Only these keys of the console dict are exposed.
    _attrs = ['url', 'type']
class Server(base.APIResourceWrapper):
    """Simple wrapper around novaclient.server.Server

    Preserves the request info so image name can later be retrieved
    """
    _attrs = ['addresses', 'attrs', 'id', 'image', 'links',
              'metadata', 'name', 'private_ip', 'public_ip', 'status', 'uuid',
              'image_name', 'VirtualInterfaces', 'flavor', 'key_name', 'fault',
              'tenant_id', 'user_id', 'created', 'OS-EXT-STS:power_state',
              'OS-EXT-STS:task_state', 'OS-EXT-SRV-ATTR:instance_name',
              'OS-EXT-SRV-ATTR:host', 'OS-EXT-AZ:availability_zone',
              'OS-DCF:diskConfig']

    def __init__(self, apiresource, request):
        super(Server, self).__init__(apiresource)
        # Kept so image_name can call back into Glance later.
        self.request = request

    # TODO(gabriel): deprecate making a call to Glance as a fallback.
    @property
    def image_name(self):
        """Name of the server's image, with a Glance lookup as fallback.

        Returns "(not found)" when the image reference is missing or the
        Glance lookup fails.
        """
        import glanceclient.exc as glance_exceptions
        from openstack_dashboard.api import glance
        if not self.image:
            return "(not found)"
        if hasattr(self.image, 'name'):
            return self.image.name
        if 'name' in self.image:
            return self.image['name']
        else:
            # Only an image id is available: resolve the name via Glance.
            try:
                image = glance.image_get(self.request, self.image['id'])
                return image.name
            except glance_exceptions.ClientException:
                return "(not found)"

    @property
    def internal_name(self):
        # Nova's internal instance name ('OS-EXT-SRV-ATTR:instance_name'),
        # or "" when the extension attribute is absent.
        return getattr(self, 'OS-EXT-SRV-ATTR:instance_name', "")

    @property
    def availability_zone(self):
        # "" when the availability-zone extension attribute is absent.
        return getattr(self, 'OS-EXT-AZ:availability_zone', "")
class NovaUsage(base.APIResourceWrapper):
    """Simple wrapper around contrib/simple_usage.py."""
    _attrs = ['start', 'server_usages', 'stop', 'tenant_id',
              'total_local_gb_usage', 'total_memory_mb_usage',
              'total_vcpus_usage', 'total_hours']

    def get_summary(self):
        """Condense the usage record into a flat dict of headline numbers."""
        return {
            'instances': self.total_active_instances,
            'memory_mb': self.memory_mb,
            'vcpus': getattr(self, "total_vcpus_usage", 0),
            'vcpu_hours': self.vcpu_hours,
            'local_gb': self.local_gb,
            'disk_gb_hours': self.disk_gb_hours,
        }

    @property
    def total_active_instances(self):
        # Usage entries without an end timestamp belong to running servers.
        return len([u for u in self.server_usages if u['ended_at'] is None])

    @property
    def vcpus(self):
        return sum(u['vcpus'] for u in self.server_usages
                   if u['ended_at'] is None)

    @property
    def vcpu_hours(self):
        return getattr(self, "total_hours", 0)

    @property
    def local_gb(self):
        return sum(u['local_gb'] for u in self.server_usages
                   if u['ended_at'] is None)

    @property
    def memory_mb(self):
        return sum(u['memory_mb'] for u in self.server_usages
                   if u['ended_at'] is None)

    @property
    def disk_gb_hours(self):
        return getattr(self, "total_local_gb_usage", 0)
class SecurityGroup(base.APIResourceWrapper):
    """Wrapper around novaclient.security_groups.SecurityGroup which wraps its
    rules in SecurityGroupRule objects and allows access to them.
    """
    _attrs = ['id', 'name', 'description', 'tenant_id']

    @cached_property
    def rules(self):
        """Wraps transmitted rule info in the novaclient rule class."""
        # Rules arrive as plain dicts; rebuild novaclient rule objects
        # first, then wrap those in our own SecurityGroupRule. Computed
        # once per instance thanks to @cached_property.
        manager = nova_rules.SecurityGroupRuleManager(None)
        rule_objs = [nova_rules.SecurityGroupRule(manager, rule)
                     for rule in self._apiresource.rules]
        return [SecurityGroupRule(rule) for rule in rule_objs]
class SecurityGroupRule(base.APIResourceWrapper):
    """Wrapper for individual rules in a SecurityGroup."""
    _attrs = ['id', 'ip_protocol', 'from_port', 'to_port', 'ip_range', 'group']

    def __unicode__(self):
        # Render either a group-based or a CIDR-based rule summary,
        # depending on which source the rule references.
        if 'name' in self.group:
            vals = {'from': self.from_port,
                    'to': self.to_port,
                    'group': self.group['name']}
            return _('ALLOW %(from)s:%(to)s from %(group)s') % vals
        else:
            vals = {'from': self.from_port,
                    'to': self.to_port,
                    'cidr': self.ip_range['cidr']}
            return _('ALLOW %(from)s:%(to)s from %(cidr)s') % vals

    # The following attributes are defined to keep compatibility with Neutron
    @property
    def ethertype(self):
        # Nova rules carry no ethertype information.
        return None

    @property
    def direction(self):
        # Nova security group rules are always treated as ingress.
        return 'ingress'
class SecurityGroupManager(network_base.SecurityGroupManager):
    """Security group operations backed by the nova (compute) API."""
    backend = 'nova'

    def __init__(self, request):
        self.request = request
        self.client = novaclient(request)

    def list(self):
        """Return all security groups, wrapped in SecurityGroup objects."""
        return [SecurityGroup(g) for g
                in self.client.security_groups.list()]

    def get(self, sg_id):
        """Fetch a single security group by id."""
        return SecurityGroup(self.client.security_groups.get(sg_id))

    def create(self, name, desc):
        """Create a security group with the given name and description."""
        return SecurityGroup(self.client.security_groups.create(name, desc))

    def update(self, sg_id, name, desc):
        """Update the name/description of an existing security group."""
        return SecurityGroup(self.client.security_groups.update(sg_id,
                                                                name, desc))

    def delete(self, security_group_id):
        """Delete the security group; returns nothing."""
        self.client.security_groups.delete(security_group_id)

    def rule_create(self, parent_group_id,
                    direction=None, ethertype=None,
                    ip_protocol=None, from_port=None, to_port=None,
                    cidr=None, group_id=None):
        """Add a rule to *parent_group_id*.

        direction/ethertype are accepted for Neutron signature parity only.
        """
        # Nova Security Group API does not use direction and ethertype fields.
        sg = self.client.security_group_rules.create(parent_group_id,
                                                     ip_protocol,
                                                     from_port,
                                                     to_port,
                                                     cidr,
                                                     group_id)
        return SecurityGroupRule(sg)

    def rule_delete(self, security_group_rule_id):
        """Remove a single security group rule."""
        self.client.security_group_rules.delete(security_group_rule_id)

    def list_by_instance(self, instance_id):
        """Gets security groups of an instance."""
        # TODO(gabriel): This needs to be moved up to novaclient, and should
        # be removed once novaclient supports this call.
        security_groups = []
        nclient = self.client
        resp, body = nclient.client.get('/servers/%s/os-security-groups'
                                        % instance_id)
        if body:
            # Wrap data in SG objects as novaclient would.
            sg_objs = [
                nova_security_groups.SecurityGroup(
                    nclient.security_groups, sg, loaded=True)
                for sg in body.get('security_groups', [])]
            # Then wrap novaclient's object with our own. Yes, sadly wrapping
            # with two layers of objects is necessary.
            security_groups = [SecurityGroup(sg) for sg in sg_objs]
        return security_groups

    def update_instance_security_group(self, instance_id,
                                       new_security_group_ids):
        """Reconcile the instance's groups with *new_security_group_ids*.

        Diffs by group name and applies only the additions/removals
        actually needed. Returns True on success; raises Exception with a
        translated message on any failure.
        """
        try:
            all_groups = self.list()
        except Exception:
            raise Exception(_("Couldn't get security group list."))
        wanted_groups = set([sg.name for sg in all_groups
                             if sg.id in new_security_group_ids])
        try:
            current_groups = self.list_by_instance(instance_id)
        except Exception:
            raise Exception(_("Couldn't get current security group "
                              "list for instance %s.")
                            % instance_id)
        current_group_names = set([sg.name for sg in current_groups])
        groups_to_add = wanted_groups - current_group_names
        groups_to_remove = current_group_names - wanted_groups
        num_groups_to_modify = len(groups_to_add | groups_to_remove)
        try:
            for group in groups_to_add:
                self.client.servers.add_security_group(instance_id, group)
                num_groups_to_modify -= 1
            for group in groups_to_remove:
                self.client.servers.remove_security_group(instance_id, group)
                num_groups_to_modify -= 1
        except Exception:
            # num_groups_to_modify counts the changes still unapplied when
            # the failure happened, which is what the message reports.
            raise Exception(_('Failed to modify %d instance security groups.')
                            % num_groups_to_modify)
        return True
class FlavorExtraSpec(object):
    """A single flavor extra-spec entry; ``id`` aliases ``key``."""

    def __init__(self, flavor_id, key, val):
        self.flavor_id = flavor_id
        # The spec key doubles as the entry's identifier.
        self.id = self.key = key
        self.value = val
class FloatingIp(base.APIResourceWrapper):
    """Nova floating IP, exposing ``port_id`` for Neutron API parity."""
    _attrs = ['id', 'ip', 'fixed_ip', 'port_id', 'instance_id', 'pool']

    def __init__(self, fip):
        # Nova has no ports, so mirror the instance id as port_id to keep
        # the interface compatible with the Neutron implementation.
        # Plain assignment instead of the explicit __setattr__() dunder call.
        fip.port_id = fip.instance_id
        super(FloatingIp, self).__init__(fip)
class FloatingIpPool(base.APIDictWrapper):
    """Floating IP pool; uses the pool name as its id."""

    def __init__(self, pool):
        # Pools have no separate id in nova; reuse the name.
        pool_dict = {'id': pool.name,
                     'name': pool.name}
        super(FloatingIpPool, self).__init__(pool_dict)
class FloatingIpTarget(base.APIDictWrapper):
    """Association target for a floating IP, labelled "name (id)"."""

    def __init__(self, server):
        server_dict = {'name': '%s (%s)' % (server.name, server.id),
                       'id': server.id}
        super(FloatingIpTarget, self).__init__(server_dict)
class FloatingIpManager(network_base.FloatingIpManager):
    """Floating IP operations backed by the nova (compute) API."""

    def __init__(self, request):
        self.request = request
        self.client = novaclient(request)

    def list_pools(self):
        """List the pools floating IPs can be allocated from."""
        return [FloatingIpPool(pool)
                for pool in self.client.floating_ip_pools.list()]

    def list(self):
        """List the project's floating IPs."""
        return [FloatingIp(fip)
                for fip in self.client.floating_ips.list()]

    def get(self, floating_ip_id):
        """Fetch a single floating IP by id."""
        return FloatingIp(self.client.floating_ips.get(floating_ip_id))

    def allocate(self, pool):
        """Allocate a new floating IP from *pool*."""
        return FloatingIp(self.client.floating_ips.create(pool=pool))

    def release(self, floating_ip_id):
        """Release (delete) the floating IP."""
        self.client.floating_ips.delete(floating_ip_id)

    def associate(self, floating_ip_id, port_id):
        # In Nova implied port_id is instance_id
        server = self.client.servers.get(port_id)
        fip = self.client.floating_ips.get(floating_ip_id)
        self.client.servers.add_floating_ip(server.id, fip.ip)

    def disassociate(self, floating_ip_id, port_id):
        """Detach the floating IP from the server currently holding it."""
        fip = self.client.floating_ips.get(floating_ip_id)
        server = self.client.servers.get(fip.instance_id)
        self.client.servers.remove_floating_ip(server.id, fip.ip)

    def list_targets(self):
        """Servers a floating IP may be associated with."""
        return [FloatingIpTarget(s) for s in self.client.servers.list()]

    def get_target_id_by_instance(self, instance_id):
        # With nova-network the association target is the instance itself.
        return instance_id

    def list_target_id_by_instance(self, instance_id):
        return [instance_id, ]

    def is_simple_associate_supported(self):
        # Controlled by the 'simple_ip_management' Horizon setting.
        return conf.HORIZON_CONFIG["simple_ip_management"]
def novaclient(request):
    """Build a nova API client authenticated as *request*'s user/tenant.

    SSL verification honors the OPENSTACK_SSL_NO_VERIFY and
    OPENSTACK_SSL_CACERT settings.
    """
    insecure = getattr(settings, 'OPENSTACK_SSL_NO_VERIFY', False)
    cacert = getattr(settings, 'OPENSTACK_SSL_CACERT', None)
    # SECURITY: never write the auth token itself into the logs; log only
    # the endpoint URL the client is created against.
    LOG.debug('novaclient connection created using url "%s"' %
              base.url_for(request, 'compute'))
    c = nova_client.Client(request.user.username,
                           request.user.token.id,
                           project_id=request.user.tenant_id,
                           auth_url=base.url_for(request, 'compute'),
                           insecure=insecure,
                           cacert=cacert,
                           http_log_debug=settings.DEBUG)
    # Inject the already-scoped token and endpoint so the client does not
    # re-authenticate against keystone.
    c.client.auth_token = request.user.token.id
    c.client.management_url = base.url_for(request, 'compute')
    return c
def server_vnc_console(request, instance_id, console_type='novnc'):
    """Fetch a VNC console descriptor (url/type) for the instance."""
    return VNCConsole(novaclient(request).servers.get_vnc_console(instance_id,
                      console_type)['console'])


def server_spice_console(request, instance_id, console_type='spice-html5'):
    """Fetch a SPICE console descriptor (url/type) for the instance."""
    return SPICEConsole(novaclient(request).servers.get_spice_console(
        instance_id, console_type)['console'])


def server_rdp_console(request, instance_id, console_type='rdp-html5'):
    """Fetch an RDP console descriptor (url/type) for the instance."""
    return RDPConsole(novaclient(request).servers.get_rdp_console(
        instance_id, console_type)['console'])
def flavor_create(request, name, memory, vcpu, disk, flavorid='auto',
                  ephemeral=0, swap=0, metadata=None, is_public=True):
    """Create a nova flavor and, when given, attach its extra specs."""
    client = novaclient(request)
    flavor = client.flavors.create(name, memory, vcpu, disk,
                                   flavorid=flavorid,
                                   ephemeral=ephemeral,
                                   swap=swap, is_public=is_public)
    # Extra specs require a separate call once the flavor exists.
    if metadata:
        flavor_extra_set(request, flavor.id, metadata)
    return flavor
def flavor_delete(request, flavor_id):
    """Delete the flavor identified by *flavor_id*."""
    novaclient(request).flavors.delete(flavor_id)


def flavor_get(request, flavor_id):
    """Fetch a single flavor by id."""
    return novaclient(request).flavors.get(flavor_id)


@memoized
def flavor_list(request, is_public=True):
    """Get the list of available instance sizes (flavors)."""
    return novaclient(request).flavors.list(is_public=is_public)


@memoized
def flavor_access_list(request, flavor=None):
    """Get the list of access instance sizes (flavors)."""
    return novaclient(request).flavor_access.list(flavor=flavor)


def add_tenant_to_flavor(request, flavor, tenant):
    """Add a tenant to the given flavor access list."""
    return novaclient(request).flavor_access.add_tenant_access(
        flavor=flavor, tenant=tenant)


def remove_tenant_from_flavor(request, flavor, tenant):
    """Remove a tenant from the given flavor access list."""
    return novaclient(request).flavor_access.remove_tenant_access(
        flavor=flavor, tenant=tenant)


def flavor_get_extras(request, flavor_id, raw=False):
    """Get flavor extra specs."""
    flavor = novaclient(request).flavors.get(flavor_id)
    extras = flavor.get_keys()
    if raw:
        # Caller wants the plain mapping returned by novaclient.
        return extras
    return [FlavorExtraSpec(flavor_id, key, value) for
            key, value in extras.items()]


def flavor_extra_delete(request, flavor_id, keys):
    """Unset the flavor extra spec keys."""
    flavor = novaclient(request).flavors.get(flavor_id)
    return flavor.unset_keys(keys)


def flavor_extra_set(request, flavor_id, metadata):
    """Set the flavor extra spec keys."""
    flavor = novaclient(request).flavors.get(flavor_id)
    if (not metadata):  # not a way to delete keys
        return None
    return flavor.set_keys(metadata)


def snapshot_create(request, instance_id, name):
    """Snapshot the instance into a new image named *name*."""
    return novaclient(request).servers.create_image(instance_id, name)


def keypair_create(request, name):
    """Create a new keypair named *name*."""
    return novaclient(request).keypairs.create(name)


def keypair_import(request, name, public_key):
    """Register an existing public key as a keypair named *name*."""
    return novaclient(request).keypairs.create(name, public_key)


def keypair_delete(request, keypair_id):
    """Delete the keypair."""
    novaclient(request).keypairs.delete(keypair_id)


def keypair_list(request):
    """List the user's keypairs."""
    return novaclient(request).keypairs.list()
def server_create(request, name, image, flavor, key_name, user_data,
                  security_groups, block_device_mapping=None,
                  block_device_mapping_v2=None, nics=None,
                  availability_zone=None, instance_count=1, admin_pass=None,
                  disk_config=None):
    """Boot one or more servers; returns the created server wrapped
    in the dashboard's Server class.
    """
    return Server(novaclient(request).servers.create(
        name, image, flavor, userdata=user_data,
        security_groups=security_groups,
        key_name=key_name, block_device_mapping=block_device_mapping,
        block_device_mapping_v2=block_device_mapping_v2,
        nics=nics, availability_zone=availability_zone,
        min_count=instance_count, admin_pass=admin_pass,
        disk_config=disk_config), request)


def server_delete(request, instance):
    """Terminate the given server."""
    novaclient(request).servers.delete(instance)


def server_get(request, instance_id):
    """Fetch a single server, wrapped for the dashboard."""
    return Server(novaclient(request).servers.get(instance_id), request)
def server_list(request, search_opts=None, all_tenants=False):
    """List servers, returning ``(servers, has_more_data)``.

    When ``search_opts`` contains ``paginate=True`` one extra record is
    requested so the presence of a following page can be detected.
    """
    page_size = utils.get_page_size(request)
    client = novaclient(request)

    opts = {} if search_opts is None else search_opts
    # pop() keeps the caller-visible dict free of the pagination flag,
    # exactly as before.
    paginate = opts.pop('paginate') if 'paginate' in opts else False
    if paginate:
        opts['limit'] = page_size + 1

    if all_tenants:
        opts['all_tenants'] = True
    else:
        opts['project_id'] = request.user.tenant_id

    servers = [Server(s, request)
               for s in client.servers.list(True, opts)]

    has_more_data = False
    if paginate:
        if len(servers) > page_size:
            # Drop the sentinel record used only to detect a next page.
            servers.pop(-1)
            has_more_data = True
        elif len(servers) == getattr(settings, 'API_RESULT_LIMIT', 1000):
            has_more_data = True
    return (servers, has_more_data)
def server_console_output(request, instance_id, tail_length=None):
    """Gets console output of an instance."""
    return novaclient(request).servers.get_console_output(instance_id,
                                                          length=tail_length)


def server_pause(request, instance_id):
    """Pause the instance."""
    novaclient(request).servers.pause(instance_id)


def server_unpause(request, instance_id):
    """Unpause a paused instance."""
    novaclient(request).servers.unpause(instance_id)


def server_suspend(request, instance_id):
    """Suspend the instance."""
    novaclient(request).servers.suspend(instance_id)


def server_resume(request, instance_id):
    """Resume a suspended instance."""
    novaclient(request).servers.resume(instance_id)


def server_reboot(request, instance_id, soft_reboot=False):
    """Reboot the instance; hard reboot unless *soft_reboot* is set."""
    hardness = nova_servers.REBOOT_HARD
    if soft_reboot:
        hardness = nova_servers.REBOOT_SOFT
    novaclient(request).servers.reboot(instance_id, hardness)


def server_rebuild(request, instance_id, image_id, password=None,
                   disk_config=None):
    """Rebuild the instance from *image_id*."""
    return novaclient(request).servers.rebuild(instance_id, image_id,
                                               password, disk_config)


def server_update(request, instance_id, name):
    """Rename the instance."""
    return novaclient(request).servers.update(instance_id, name=name)


def server_migrate(request, instance_id):
    """Migrate the instance (nova 'migrate' action)."""
    novaclient(request).servers.migrate(instance_id)


def server_live_migrate(request, instance_id, host, block_migration=False,
                        disk_over_commit=False):
    """Live-migrate the instance to *host*."""
    novaclient(request).servers.live_migrate(instance_id, host,
                                             block_migration,
                                             disk_over_commit)


def server_resize(request, instance_id, flavor, disk_config=None, **kwargs):
    """Start resizing the instance to *flavor*."""
    novaclient(request).servers.resize(instance_id, flavor,
                                       disk_config, **kwargs)


def server_confirm_resize(request, instance_id):
    """Confirm (finalize) a pending resize."""
    novaclient(request).servers.confirm_resize(instance_id)


def server_revert_resize(request, instance_id):
    """Revert a pending resize."""
    novaclient(request).servers.revert_resize(instance_id)


def server_start(request, instance_id):
    """Start a stopped instance."""
    novaclient(request).servers.start(instance_id)


def server_stop(request, instance_id):
    """Stop the instance."""
    novaclient(request).servers.stop(instance_id)
def tenant_quota_get(request, tenant_id):
    """Fetch the tenant's nova quota set."""
    return base.QuotaSet(novaclient(request).quotas.get(tenant_id))


def tenant_quota_update(request, tenant_id, **kwargs):
    """Update individual quota values for the tenant."""
    novaclient(request).quotas.update(tenant_id, **kwargs)


def default_quota_get(request, tenant_id):
    """Fetch the default quota set."""
    return base.QuotaSet(novaclient(request).quotas.defaults(tenant_id))


def usage_get(request, tenant_id, start, end):
    """Usage summary for one tenant over [start, end]."""
    return NovaUsage(novaclient(request).usage.get(tenant_id, start, end))


def usage_list(request, start, end):
    """Usage summaries for all tenants over [start, end]."""
    return [NovaUsage(u) for u in
            novaclient(request).usage.list(start, end, True)]


def virtual_interfaces_list(request, instance_id):
    """List the instance's virtual interfaces."""
    return novaclient(request).virtual_interfaces.list(instance_id)


def get_x509_credentials(request):
    """Create x509 credentials via nova's certs API."""
    return novaclient(request).certs.create()


def get_x509_root_certificate(request):
    """Fetch the root x509 certificate."""
    return novaclient(request).certs.get()


def get_password(request, instance_id, private_key=None):
    """Retrieve the instance's admin password; *private_key* is passed
    through to novaclient.
    """
    return novaclient(request).servers.get_password(instance_id, private_key)
def instance_volume_attach(request, volume_id, instance_id, device):
    """Attach *volume_id* to the instance at *device*."""
    return novaclient(request).volumes.create_server_volume(instance_id,
                                                            volume_id,
                                                            device)


def instance_volume_detach(request, instance_id, att_id):
    """Detach the volume attachment *att_id* from the instance."""
    return novaclient(request).volumes.delete_server_volume(instance_id,
                                                            att_id)


def instance_volumes_list(request, instance_id):
    """List volumes attached to the instance, annotated with names.

    Display names come from Cinder — one extra API call per volume.
    """
    from openstack_dashboard.api.cinder import cinderclient  # noqa

    volumes = novaclient(request).volumes.get_server_volumes(instance_id)

    for volume in volumes:
        volume_data = cinderclient(request).volumes.get(volume.id)
        volume.name = volume_data.display_name

    return volumes
def hypervisor_list(request):
    """List all hypervisors."""
    return novaclient(request).hypervisors.list()


def hypervisor_stats(request):
    """Aggregate statistics over all hypervisors."""
    return novaclient(request).hypervisors.statistics()


def hypervisor_search(request, query, servers=True):
    """Search hypervisors matching *query*; optionally include servers."""
    return novaclient(request).hypervisors.search(query, servers)
def tenant_absolute_limits(request, reserved=False):
    """Return the tenant's absolute limits as a name -> value mapping.

    Nova reports unlimited quotas as -1; those are translated to
    ``float("inf")`` so callers can compare values uniformly.
    """
    limits = novaclient(request).limits.get(reserved=reserved).absolute
    return dict(
        (limit.name, float("inf") if limit.value == -1 else limit.value)
        for limit in limits)
def availability_zone_list(request, detailed=False):
    """List availability zones; *detailed* requests the verbose form."""
    return novaclient(request).availability_zones.list(detailed=detailed)


def service_list(request):
    """List nova services."""
    return novaclient(request).services.list()


def aggregate_details_list(request):
    """List all host aggregates with full details.

    Costs one extra get_details call per aggregate.
    """
    result = []
    c = novaclient(request)
    for aggregate in c.aggregates.list():
        result.append(c.aggregates.get_details(aggregate.id))
    return result


def aggregate_create(request, name, availability_zone=None):
    """Create a host aggregate, optionally bound to an availability zone."""
    return novaclient(request).aggregates.create(name, availability_zone)


def aggregate_delete(request, aggregate_id):
    """Delete the host aggregate."""
    return novaclient(request).aggregates.delete(aggregate_id)


def aggregate_get(request, aggregate_id):
    """Fetch one host aggregate."""
    return novaclient(request).aggregates.get(aggregate_id)


def aggregate_update(request, aggregate_id, values):
    """Update aggregate attributes from the *values* mapping."""
    return novaclient(request).aggregates.update(aggregate_id, values)


def host_list(request):
    """List compute hosts."""
    return novaclient(request).hosts.list()


def add_host_to_aggregate(request, aggregate_id, host):
    """Add *host* to the aggregate."""
    return novaclient(request).aggregates.add_host(aggregate_id, host)


def remove_host_from_aggregate(request, aggregate_id, host):
    """Remove *host* from the aggregate."""
    return novaclient(request).aggregates.remove_host(aggregate_id, host)
@memoized
def list_extensions(request):
    """List the API extensions the nova endpoint advertises (memoized)."""
    return nova_list_extensions.ListExtManager(novaclient(request)).show_all()
@memoized
def extension_supported(extension_name, request):
    """Whether nova advertises the extension named *extension_name*.

    Example values for the extension_name include AdminActions,
    ConsoleOutput, etc.
    """
    return any(extension.name == extension_name
               for extension in list_extensions(request))
def can_set_server_password():
    """Whether the deployment enables setting instance admin passwords.

    Read from the 'can_set_password' flag of the
    OPENSTACK_HYPERVISOR_FEATURES setting; defaults to False.
    """
    hypervisor_features = getattr(
        settings, 'OPENSTACK_HYPERVISOR_FEATURES', {})
    return hypervisor_features.get('can_set_password', False)
| |
#!/usr/bin/env python

# Command-line front end for the DEXSeq exon counting script: parse the
# options, validate the positional arguments, and import the optional
# third-party dependencies before any real work starts.
import sys, itertools, optparse, warnings

optParser = optparse.OptionParser(
    usage = "python %prog [options] <flattened_gff_file> <alignment_file> <output_file>",
    description=
        "This script counts how many reads in <alignment_file> fall onto each exonic " +
        "part given in <flattened_gff_file> and outputs a list of counts in " +
        "<output_file>, for further analysis with the DEXSeq Bioconductor package. " +
        "Notes: Use dexseq_prepare_annotation.py to produce <flattened_gff_file>. " +
        "<alignment_file> may be '-' to indicate standard input.",
    epilog =
        "Written by Simon Anders (sanders@fs.tum.de) and Alejandro Reyes (reyes@embl.de), " +
        "European Molecular Biology Laboratory (EMBL). (c) 2010-2013. Released under the " +
        " terms of the GNU General Public License v3. Part of the 'DEXSeq' package." )

optParser.add_option( "-p", "--paired", type="choice", dest="paired",
    choices = ( "no", "yes" ), default = "no",
    help = "'yes' or 'no'. Indicates whether the data is paired-end (default: no)" )

optParser.add_option( "-s", "--stranded", type="choice", dest="stranded",
    choices = ( "yes", "no", "reverse" ), default = "yes",
    help = "'yes', 'no', or 'reverse'. Indicates whether the data is " +
        "from a strand-specific assay (default: yes ). " +
        "Be sure to switch to 'no' if you use a non strand-specific RNA-Seq library " +
        "preparation protocol. 'reverse' inverts strands and is needed for certain " +
        "protocols, e.g. paired-end with circularization." )

optParser.add_option( "-a", "--minaqual", type="int", dest="minaqual",
    default = 10,
    help = "skip all reads with alignment quality lower than the given " +
        "minimum value (default: 10)" )

optParser.add_option( "-f", "--format", type="choice", dest="alignment",
    choices=("sam", "bam"), default="sam",
    help = "'sam' or 'bam'. Format of <alignment file> (default: sam)" )

optParser.add_option( "-r", "--order", type="choice", dest="order",
    choices=("pos", "name"), default="name",
    help = "'pos' or 'name'. Sorting order of <alignment_file> (default: name). Paired-end sequencing " +
        "data must be sorted either by position or by read name, and the sorting order " +
        "must be specified. Ignored for single-end data." )

# No arguments at all: print usage and bail out.
if len( sys.argv ) == 1:
    optParser.print_help()
    sys.exit(1)

(opts, args) = optParser.parse_args()

if len( args ) != 3:
    sys.stderr.write( sys.argv[0] + ": Error: Please provide three arguments.\n" )
    sys.stderr.write( " Call with '-h' to get usage information.\n" )
    sys.exit( 1 )

# HTSeq is required for everything below; fail with a pointer to its homepage.
try:
    import HTSeq
except ImportError:
    sys.stderr.write( "Could not import HTSeq. Please install the HTSeq Python framework\n" )
    sys.stderr.write( "available from http://www-huber.embl.de/users/anders/HTSeq\n" )
    sys.exit(1)

# Positional arguments and normalized option values used as globals by the
# helper functions defined further down.
gff_file = args[0]
sam_file = args[1]
out_file = args[2]
stranded = opts.stranded == "yes" or opts.stranded == "reverse"
reverse = opts.stranded == "reverse"
is_PE = opts.paired == "yes"
alignment = opts.alignment
minaqual = opts.minaqual
order = opts.order

# pysam is only needed for BAM input, so import it lazily.
if alignment == "bam":
    try:
        import pysam
    except ImportError:
        sys.stderr.write( "Could not import pysam, which is needed to process BAM file (though\n" )
        sys.stderr.write( "not to process text SAM files). Please install the 'pysam' library from\n" )
        sys.stderr.write( "https://code.google.com/p/pysam/\n" )
        sys.exit(1)

# '-' means read the alignments from standard input.
if sam_file == "-":
    sam_file = sys.stdin
# Step 1: Read in the GFF file as generated by aggregate_genes.py
# and put everything into a GenomicArrayOfSets
features = HTSeq.GenomicArrayOfSets( "auto", stranded=stranded )
for f in HTSeq.GFF_Reader( gff_file ):
    if f.type == "exonic_part":
        # Feature ids have the form "<gene_id>:<exonic_part_number>".
        f.name = f.attr['gene_id'] + ":" + f.attr['exonic_part_number']
        features[f.iv] += f

# initialise counters; keys starting with '_' are special bins for reads
# that could not be assigned to a feature
num_reads = 0
counts = {}
counts[ '_empty' ] = 0
counts[ '_ambiguous' ] = 0
counts[ '_lowaqual' ] = 0
counts[ '_notaligned' ] = 0
counts['_ambiguous_readpair_position'] = 0

# put a zero for each feature ID
for iv, s in features.steps():
    for f in s:
        counts[ f.name ] = 0

# We need this little helper below:
def reverse_strand( s ):
    """Return the complementary strand symbol: '+' <-> '-'."""
    if s == "-":
        return "+"
    if s == "+":
        return "-"
    raise SystemError("illegal strand")
def update_count_vector( counts, rs ):
    """Increment counters for one read (pair).

    `rs` is either a special counter key (a str such as '_empty') or an
    iterable of features carrying a `name` attribute. Returns the mutated
    `counts` dict.
    """
    # isinstance is the idiomatic check (was: type(rs) == str, which would
    # reject str subclasses).
    if isinstance(rs, str):
        counts[ rs ] += 1
    else:
        for f in rs:
            counts[ f.name ] += 1
    return counts
def map_read_pair(af, ar):
    """Assign a read pair to exonic-part features.

    Returns one of the special counter keys ('_notaligned', '_lowaqual',
    '_empty', '_ambiguous') or the set of features overlapped by the pair.
    Relies on the script-level globals `minaqual`, `features` and `reverse`.
    """
    rs = set()
    # The pair is discarded only when BOTH mates fail a criterion; a single
    # good mate below still contributes matches on its own.
    if af and ar and not af.aligned and not ar.aligned:
        return '_notaligned'
    # BUG FIX: this previously read
    #     not af.aQual < minaqual and ar.aQual < minaqual
    # where 'not' bound only to the first comparison, so pairs whose FIRST
    # mate had good quality were thrown away. Mirror the '_notaligned'
    # check: drop the pair only when both mates are below the threshold.
    if af and ar and af.aQual < minaqual and ar.aQual < minaqual:
        return '_lowaqual'
    # Collect features under each aligned, good-quality mate's M cigar ops.
    if af and af.aligned and af.aQual >= minaqual and af.iv.chrom in features.chrom_vectors.keys():
        for cigop in af.cigar:
            if cigop.type != "M":
                continue
            if reverse:
                cigop.ref_iv.strand = reverse_strand( cigop.ref_iv.strand )
            for iv, s in features[cigop.ref_iv].steps():
                rs = rs.union( s )
    if ar and ar.aligned and ar.aQual >= minaqual and ar.iv.chrom in features.chrom_vectors.keys():
        for cigop in ar.cigar:
            if cigop.type != "M":
                continue
            # The second mate is on the opposite strand of the fragment.
            if not reverse:
                cigop.ref_iv.strand = reverse_strand( cigop.ref_iv.strand )
            for iv, s in features[cigop.ref_iv].steps():
                rs = rs.union( s )
    # A pair must map within exactly one gene to be counted.
    set_of_gene_names = set( [ f.name.split(":")[0] for f in rs ] )
    if len( set_of_gene_names ) == 0:
        return '_empty'
    elif len( set_of_gene_names ) > 1:
        return '_ambiguous'
    else:
        return rs
def clean_read_queue( queue, current_position ):
    """Return a copy of *queue* without reads whose recorded mate position
    lies before current_position; those mates can no longer turn up."""
    survivors = {}
    for name, read in queue.items():
        if read.mate_start.pos < current_position:
            warnings.warn( "Read "+ name + " claims to have an aligned mate that could not be found in the same chromosome." )
        else:
            survivors[name] = read
    return survivors
# Step 2: pick the parser class for the alignment file format.
if alignment == "sam":
    reader = HTSeq.SAM_Reader
else:
    # BAM support was added to HTSeq in 0.5.4p4.
    if HTSeq.__version__ < '0.5.4p4':
        raise SystemError, "If you are using alignment files in a bam format, please update your HTSeq to 0.5.4p4 or higher"
    reader = HTSeq.BAM_Reader

# Now go through the aligned reads
num_reads = 0

if not is_PE:
    # Single-end: every alignment record is counted independently.
    for a in reader( sam_file ):
        if not a.aligned:
            counts[ '_notaligned' ] += 1
            continue
        # Skip multi-mapping reads (NH = number of reported hits).
        if a.optional_field("NH") > 1:
            continue
        if a.aQual < minaqual:
            counts[ '_lowaqual' ] += 1
            continue
        # Collect all features under the read's M cigar operations.
        rs = set()
        for cigop in a.cigar:
            if cigop.type != "M":
                continue
            if reverse:
                cigop.ref_iv.strand = reverse_strand( cigop.ref_iv.strand )
            for iv, s in features[cigop.ref_iv].steps( ):
                rs = rs.union( s )
        # Count only reads that fall within exactly one gene.
        set_of_gene_names = set( [ f.name.split(":")[0] for f in rs ] )
        if len( set_of_gene_names ) == 0:
            counts[ '_empty' ] += 1
        elif len( set_of_gene_names ) > 1:
            counts[ '_ambiguous' ] +=1
        else:
            for f in rs:
                counts[ f.name ] += 1
        num_reads += 1
        if num_reads % 100000 == 0:
            sys.stderr.write( "%d reads processed.\n" % num_reads )

else: # paired-end
    alignments = dict()
    if order == "name":
        # Name-sorted input: HTSeq pairs consecutive records for us.
        for af, ar in HTSeq.pair_SAM_alignments( reader( sam_file ) ):
            if af == None or ar == None:
                continue
            if not ar.aligned:
                continue
            if not af.aligned:
                continue
            elif ar.optional_field("NH") > 1 or af.optional_field("NH") > 1:
                continue
            elif af.iv.chrom != ar.iv.chrom:
                # Mates on different chromosomes cannot be assigned.
                counts['_ambiguous_readpair_position'] += 1
                continue
            else:
                rs = map_read_pair( af, ar )
                counts = update_count_vector( counts, rs )
            num_reads += 1
            if num_reads % 100000 == 0:
                sys.stderr.write( "%d reads processed.\n" % num_reads )
    else:
        # Position-sorted input: buffer each read until its mate arrives.
        processed_chromosomes = dict()
        num_reads = 0
        current_chromosome=''
        current_position=''
        for a in reader( sam_file ):
            if not a.aligned:
                continue
            if a.optional_field("NH") > 1:
                continue
            # Chromosome change: the queue for the finished chromosome can
            # never be matched any more, so warn about and drop leftovers.
            if current_chromosome != a.iv.chrom:
                if current_chromosome in processed_chromosomes:
                    raise SystemError, "A chromosome that had finished to be processed before was found again in the alignment file, is your alignment file properly sorted by position?"
                processed_chromosomes[current_chromosome] = 1
                alignments = clean_read_queue( alignments, current_position )
                del alignments
                alignments = dict()
            # Sanity check on the position sort order within a chromosome.
            if current_chromosome == a.iv.chrom and a.iv.start < current_position:
                raise SystemError, "Current read position is smaller than previous reads, is your alignment file properly sorted by position?"
            current_chromosome = a.iv.chrom
            current_position = a.iv.start
            if a.read.name and a.mate_aligned:
                if a.read.name in alignments:
                    # Second mate arrived: order the pair and count it.
                    b = alignments[ a.read.name ]
                    if a.pe_which == "first" and b.pe_which == "second":
                        af=a
                        ar=b
                    else:
                        af=b
                        ar=a
                    rs = map_read_pair(af, ar)
                    del alignments[ a.read.name ]
                    counts = update_count_vector(counts, rs)
                else:
                    if a.mate_start.chrom != a.iv.chrom:
                        counts['_ambiguous_readpair_position'] += 1
                        continue
                    else:
                        # First mate seen: queue it until the mate shows up.
                        alignments[ a.read.name ] = a
            else:
                continue
            num_reads += 1
            if num_reads % 200000 == 0:
                # Periodically purge queued reads whose mate is overdue.
                alignments = clean_read_queue( alignments, current_position )
                sys.stderr.write( "%d reads processed.\n" % (num_reads / 2) )

# Step 3: Write out the results
fout = open( out_file, "w" )
for fn in sorted( counts.keys() ):
    fout.write( "%s\t%d\n" % ( fn, counts[fn] ) )
fout.close()
| |
import json
import re
import datetime
from django.core.urlresolvers import reverse
from django.http import HttpResponse
from django.shortcuts import render_to_response
from django.template import RequestContext
try:
from mongoengine.base import ValidationError
except ImportError:
from mongoengine.errors import ValidationError
from crits.core import form_consts
from crits.core.class_mapper import class_from_id, class_from_value
from crits.core.crits_mongoengine import EmbeddedSource, EmbeddedCampaign
from crits.core.crits_mongoengine import json_handler, create_embedded_source
from crits.core.handsontable_tools import convert_handsontable_to_rows, parse_bulk_upload
from crits.core.handlers import build_jtable, jtable_ajax_list, jtable_ajax_delete
from crits.core.data_tools import convert_string_to_bool
from crits.core.handlers import csv_export
from crits.core.user_tools import user_sources, is_user_favorite
from crits.core.user_tools import is_user_subscribed
from crits.domains.domain import Domain, TLD
from crits.domains.forms import AddDomainForm
from crits.ips.ip import IP
from crits.ips.handlers import validate_and_normalize_ip
from crits.notifications.handlers import remove_user_from_notification
from crits.objects.handlers import object_array_to_dict, validate_and_add_new_handler_object
from crits.relationships.handlers import forge_relationship
from crits.services.handlers import run_triage, get_supported_services
from crits.vocabulary.relationships import RelationshipTypes
def get_valid_root_domain(domain):
    """
    Validate the given domain and TLD, and if valid, parse out the root domain.

    :param domain: the domain to validate and parse
    :type domain: str
    :returns: tuple: (Valid root domain, Valid FQDN, Error message)
    """
    root = fqdn = error = ""
    black_list = "/:@\ "
    domain = domain.strip()
    if any(c in black_list for c in domain):
        error = 'Domain cannot contain space or characters %s' % (black_list)
        return (root, fqdn, error)
    global tld_parser
    root = tld_parser.parse(domain)
    if root == "no_tld_found_error":
        # The TLD table may have changed since the parser was built;
        # rebuild it once and retry before giving up.
        tld_parser = etld()
        root = tld_parser.parse(domain)
    if root == "no_tld_found_error":
        error = 'No valid TLD found'
        root = ""
    else:
        fqdn = domain.lower()
    return (root, fqdn, error)
def get_domain_details(domain, analyst):
    """
    Generate the data to render the Domain details template.

    :param domain: The name of the Domain to get details for.
    :type domain: str
    :param analyst: The user requesting this information.
    :type analyst: str
    :returns: template (str), arguments (dict)
    """
    template = None
    allowed_sources = user_sources(analyst)
    # Only return the domain when the analyst may see at least one source.
    dmain = Domain.objects(domain=domain,
                           source__name__in=allowed_sources).first()
    if not dmain:
        error = ("Either no data exists for this domain"
                 " or you do not have permission to view it.")
        template = "error.html"
        args = {'error': error}
        return template, args
    # Strip source instances the analyst is not allowed to view.
    dmain.sanitize_sources(username="%s" % analyst,
                           sources=allowed_sources)

    # remove pending notifications for user
    remove_user_from_notification("%s" % analyst, dmain.id, 'Domain')

    # subscription state for the subscribe/unsubscribe widget
    subscription = {
        'type': 'Domain',
        'id': dmain.id,
        'subscribed': is_user_subscribed("%s" % analyst,
                                         'Domain',
                                         dmain.id),
    }

    # objects attached to the domain
    objects = dmain.sort_objects()

    # relationships (meta=True includes per-relationship metadata)
    relationships = dmain.sort_relationships("%s" % analyst, meta=True)

    # relationship descriptor for the "forge relationship" widget
    relationship = {
        'type': 'Domain',
        'value': dmain.id
    }

    # comments
    comments = {'comments': dmain.get_comments(),
                'url_key': dmain.domain}

    # screenshots
    screenshots = dmain.get_screenshots(analyst)

    # favorites
    favorite = is_user_favorite("%s" % analyst, 'Domain', dmain.id)

    # services that can run against a Domain
    service_list = get_supported_services('Domain')

    # analysis results
    service_results = dmain.get_analysis_results()

    # template stays None so the caller renders the default details template
    args = {'objects': objects,
            'relationships': relationships,
            'comments': comments,
            'favorite': favorite,
            'relationship': relationship,
            'subscription': subscription,
            'screenshots': screenshots,
            'domain': dmain,
            'service_list': service_list,
            'service_results': service_results}

    return template, args
def generate_domain_csv(request):
    """
    Generate a CSV file of the Domain information.

    :param request: The request for this CSV.
    :type request: :class:`django.http.HttpRequest`
    :returns: :class:`django.http.HttpResponse`
    """
    return csv_export(request, Domain)
def generate_domain_jtable(request, option):
    """
    Generate the jtable data for rendering in the list template.

    :param request: The request for this jtable.
    :type request: :class:`django.http.HttpRequest`
    :param option: Action to take.
    :type option: str of either 'jtlist', 'jtdelete', or 'inline'.
    :returns: :class:`django.http.HttpResponse`
    """
    obj_type = Domain
    type_ = "domain"
    # jtable configuration declared on the Domain document class.
    mapper = obj_type._meta['jtable_opts']
    if option == "jtlist":
        # AJAX listing: return the rows as JSON.
        details_url = mapper['details_url']
        details_url_key = mapper['details_url_key']
        fields = mapper['fields']
        response = jtable_ajax_list(obj_type,
                                    details_url,
                                    details_url_key,
                                    request,
                                    includes=fields)
        return HttpResponse(json.dumps(response,
                                       default=json_handler),
                            content_type="application/json")
    if option == "jtdelete":
        # AJAX delete: report OK only when the delete succeeded.
        response = {"Result": "ERROR"}
        if jtable_ajax_delete(obj_type,request):
            response = {"Result": "OK"}
        return HttpResponse(json.dumps(response,
                                       default=json_handler),
                            content_type="application/json")
    # Full page render: build the jtable configuration.
    jtopts = {
        'title': "Domains",
        'default_sort': mapper['default_sort'],
        'listurl': reverse('crits.%ss.views.%ss_listing' % (type_, type_),
                           args=('jtlist',)),
        'deleteurl': reverse('crits.%ss.views.%ss_listing' % (type_, type_),
                             args=('jtdelete',)),
        'searchurl': reverse(mapper['searchurl']),
        'fields': mapper['jtopts_fields'],
        'hidden_fields': mapper['hidden_fields'],
        'linked_fields': mapper['linked_fields'],
        'details_link': mapper['details_link']
    }
    jtable = build_jtable(jtopts,request)
    # Toolbar buttons; values are JavaScript snippets evaluated client-side,
    # hence the nested quoting.
    jtable['toolbar'] = [
        {
            'tooltip': "'All Domains'",
            'text': "'All'",
            'click': "function () {$('#domain_listing').jtable('load', {'refresh': 'yes'});}",
            'cssClass': "'jtable-toolbar-center'",
        },
        {
            'tooltip': "'New Domains'",
            'text': "'New'",
            'click': "function () {$('#domain_listing').jtable('load', {'refresh': 'yes', 'status': 'New'});}",
            'cssClass': "'jtable-toolbar-center'",
        },
        {
            'tooltip': "'In Progress Domains'",
            'text': "'In Progress'",
            'click': "function () {$('#domain_listing').jtable('load', {'refresh': 'yes', 'status': 'In Progress'});}",
            'cssClass': "'jtable-toolbar-center'",
        },
        {
            'tooltip': "'Analyzed Domains'",
            'text': "'Analyzed'",
            'click': "function () {$('#domain_listing').jtable('load', {'refresh': 'yes', 'status': 'Analyzed'});}",
            'cssClass': "'jtable-toolbar-center'",
        },
        {
            'tooltip': "'Deprecated Domains'",
            'text': "'Deprecated'",
            'click': "function () {$('#domain_listing').jtable('load', {'refresh': 'yes', 'status': 'Deprecated'});}",
            'cssClass': "'jtable-toolbar-center'",
        },
        {
            'tooltip': "'Add Domain'",
            'text': "'Add Domain'",
            'click': "function () {$('#new-domain').click()}",
        },
    ]
    if option == "inline":
        # Render just the table fragment for embedding in another page.
        return render_to_response("jtable.html",
                                  {'jtable': jtable,
                                   'jtid': '%s_listing' % type_,
                                   'button' : '%ss_tab' % type_},
                                  RequestContext(request))
    else:
        return render_to_response("%s_listing.html" % type_,
                                  {'jtable': jtable,
                                   'jtid': '%s_listing' % type_},
                                  RequestContext(request))
def add_new_domain_via_bulk(data, rowData, request, errors,
                            is_validate_only=False, cache={}):
    """
    Wrapper for add_new_domain to pass in rowData.

    :param data: The data about the domain.
    :type data: dict
    :param rowData: Any objects that need to be added to the domain.
    :type rowData: dict
    :param request: The Django request.
    :type request: :class:`django.http.HttpRequest`
    :param errors: A list of current errors to append to.
    :type errors: list
    :param is_validate_only: Only validate the data and return any errors.
    :type is_validate_only: boolean
    :param cache: Cached data, typically for performance enhancements
                  during bulk operations.
    :type cache: dict
    :returns: tuple
    """
    return add_new_domain(data, request, errors,
                          rowData=rowData,
                          is_validate_only=is_validate_only,
                          cache=cache)
def retrieve_domain(domain, cache):
    """
    Retrieve a domain, checking the bulk-operation cache before
    falling back to a mongo query.

    :param domain: The domain name.
    :type domain: str
    :param cache: Cached data, typically for performance enhancements
                  during bulk operations.
    :type cache: dict
    :returns: :class:`crits.domains.domain.Domain`
    """
    cached_results = cache.get(form_consts.Domain.CACHED_RESULTS)
    hit = cached_results.get(domain.lower()) if cached_results else None
    if hit:
        return hit
    return Domain.objects(domain__iexact=domain).first()
def add_new_domain(data, request, errors, rowData=None, is_validate_only=False, cache={}):
    """
    Add a new domain to CRITs.

    :param data: The data about the domain.
    :type data: dict
    :param request: The Django request.
    :type request: :class:`django.http.HttpRequest`
    :param errors: A list of current errors to append to.
    :type errors: list
    :param rowData: Any objects that need to be added to the domain.
    :type rowData: dict
    :param is_validate_only: Only validate the data and return any errors.
    :type is_validate_only: boolean
    :param cache: Cached data, typically for performance enhancements
                  during bulk operations.
    :type cache: dict
    :returns: tuple (<result>, <errors>, <retVal>)
    """
    result = False
    retVal = {}
    domain = data['domain']
    add_ip = data.get('add_ip')
    ip = data.get('ip')
    ip_type = data.get('ip_type')
    # When an IP accompanies the domain, validate it up front.
    if add_ip:
        error = validate_and_normalize_ip(ip, ip_type)[1]
        if error:
            errors.append(error)
    if is_validate_only:
        # Validation pass (bulk upload preview): collect errors, touch nothing.
        error = get_valid_root_domain(domain)[2]
        if error:
            errors.append(error)
        # check for duplicate domains
        fqdn_domain = retrieve_domain(domain, cache)
        if fqdn_domain:
            if isinstance(fqdn_domain, Domain):
                resp_url = reverse('crits.domains.views.domain_detail', args=[domain])
                message = ('Warning: Domain already exists: '
                           '<a href="%s">%s</a>' % (resp_url, domain))
                retVal['message'] = message
                retVal['status'] = form_consts.Status.DUPLICATE
                retVal['warning'] = message
        else:
            # Remember this domain so later rows in the same bulk run are
            # flagged as duplicates.
            result_cache = cache.get(form_consts.Domain.CACHED_RESULTS);
            result_cache[domain.lower()] = True
    elif not errors:
        # Real add path: build the embedded source and optional campaign.
        username = request.user.username
        reference = data.get('domain_reference')
        source_name = data.get('domain_source')
        method = data.get('domain_method')
        source = [create_embedded_source(source_name, reference=reference,
                                         method=method, analyst=username)]
        bucket_list = data.get(form_consts.Common.BUCKET_LIST_VARIABLE_NAME)
        ticket = data.get(form_consts.Common.TICKET_VARIABLE_NAME)
        related_id = data.get('related_id')
        related_type = data.get('related_type')
        relationship_type = data.get('relationship_type')
        if data.get('campaign') and data.get('confidence'):
            campaign = [EmbeddedCampaign(name=data.get('campaign'),
                                         confidence=data.get('confidence'),
                                         analyst=username)]
        else:
            campaign = []
        retVal = upsert_domain(domain, source, username, campaign,
                               bucket_list=bucket_list, ticket=ticket, cache=cache, related_id=related_id, related_type=related_type, relationship_type=relationship_type)
        if not retVal['success']:
            errors.append(retVal.get('message'))
            retVal['message'] = ""
        else:
            new_domain = retVal['object']
            ip_result = {}
            if add_ip:
                # The IP may share the domain's source or carry its own.
                if data.get('same_source'):
                    ip_source = source_name
                    ip_method = method
                    ip_reference = reference
                else:
                    ip_source = data.get('ip_source')
                    ip_method = data.get('ip_method')
                    ip_reference = data.get('ip_reference')
                # Local import to avoid a circular import with ips.handlers.
                from crits.ips.handlers import ip_add_update
                ip_result = ip_add_update(ip,
                                          ip_type,
                                          ip_source,
                                          ip_method,
                                          ip_reference,
                                          campaign=campaign,
                                          analyst=username,
                                          bucket_list=bucket_list,
                                          ticket=ticket,
                                          cache=cache)
                if not ip_result['success']:
                    errors.append(ip_result['message'])
                else:
                    # add a relationship with the new IP address
                    new_ip = ip_result['object']
                    if new_domain and new_ip:
                        new_domain.add_relationship(new_ip,
                                                    RelationshipTypes.RESOLVED_TO,
                                                    analyst=username,
                                                    get_rels=False)
                        new_domain.save(username=username)
            # set the URL for viewing the new data
            resp_url = reverse('crits.domains.views.domain_detail', args=[domain])
            if retVal['is_domain_new'] == True:
                retVal['message'] = ('Success! Click here to view the new domain: '
                                     '<a href="%s">%s</a>' % (resp_url, domain))
            else:
                message = ('Updated existing domain: <a href="%s">%s</a>' % (resp_url, domain))
                retVal['message'] = message
                retVal[form_consts.Status.STATUS_FIELD] = form_consts.Status.DUPLICATE
                retVal['warning'] = message
            # add indicators
            if data.get('add_indicators'):
                from crits.indicators.handlers import create_indicator_from_tlo
                # If we have an IP object, add an indicator for that.
                if ip_result.get('success'):
                    ip = ip_result['object']
                    result = create_indicator_from_tlo('IP',
                                                       ip,
                                                       username,
                                                       ip_source,
                                                       add_domain=False)
                    ip_ind = result.get('indicator')
                    if not result['success']:
                        errors.append(result['message'])
                # Add an indicator for the domain.
                result = create_indicator_from_tlo('Domain',
                                                   new_domain,
                                                   username,
                                                   source_name,
                                                   add_domain=False)
                if not result['success']:
                    errors.append(result['message'])
                elif ip_result.get('success') and ip_ind:
                    # Link the two indicators just like the TLOs themselves.
                    forge_relationship(class_=result['indicator'],
                                       right_class=ip_ind,
                                       rel_type=RelationshipTypes.RESOLVED_TO,
                                       user=username)
            result = True
    # This block validates, and may also add, objects to the Domain
    if retVal.get('success') or is_validate_only == True:
        if rowData:
            objectsData = rowData.get(form_consts.Common.OBJECTS_DATA)
            # add new objects if they exist
            if objectsData:
                objectsData = json.loads(objectsData)
                current_domain = retrieve_domain(domain, cache)
                for object_row_counter, objectData in enumerate(objectsData, 1):
                    if current_domain != None:
                        # if the domain exists then try to add objects to it
                        if isinstance(current_domain, Domain) == True:
                            objectDict = object_array_to_dict(objectData,
                                                              "Domain",
                                                              current_domain.id)
                        else:
                            objectDict = object_array_to_dict(objectData,
                                                              "Domain",
                                                              "")
                            current_domain = None;
                    else:
                        objectDict = object_array_to_dict(objectData,
                                                          "Domain",
                                                          "")
                    (obj_result,
                     errors,
                     obj_retVal) = validate_and_add_new_handler_object(
                        None, objectDict, request, errors, object_row_counter,
                        is_validate_only=is_validate_only,
                        cache=cache, obj=current_domain)
                    if not obj_result:
                        retVal['success'] = False
    return result, errors, retVal
def edit_domain_name(domain, new_domain, analyst):
    """
    Edit domain name for an entry.

    :param domain: The domain name to edit.
    :type domain: str
    :param new_domain: The new domain name.
    :type new_domain: str
    :param analyst: The user editing the domain name.
    :type analyst: str
    :returns: boolean
    """
    # The replacement name must itself be a valid domain.
    (root, validated_domain, error) = get_valid_root_domain(new_domain)
    if error:
        return False
    domain_obj = Domain.objects(domain=domain).first()
    if not domain_obj:
        return False
    try:
        domain_obj.domain = validated_domain
        domain_obj.save(username=analyst)
        return True
    except ValidationError:
        return False
def upsert_domain(domain, source, username=None, campaign=None,
                  confidence=None, bucket_list=None, ticket=None, cache={}, related_id=None, related_type=None, relationship_type=None):
    """
    Add or update a domain/FQDN. Campaign is assumed to be a list of campaign
    dictionary objects.

    :param domain: The domain to add/update.
    :type domain: str
    :param source: The name of the source.
    :type source: str
    :param username: The user adding/updating the domain.
    :type username: str
    :param campaign: The campaign to attribute to this domain.
    :type campaign: list, str
    :param confidence: Confidence for the campaign attribution.
    :type confidence: str
    :param bucket_list: List of buckets to add to this domain.
    :type bucket_list: list, str
    :param ticket: The ticket for this domain.
    :type ticket: str
    :param cache: Cached data, typically for performance enhancements
                  during bulk operations.
    :type cache: dict
    :param related_id: ID of object to create relationship with
    :type related_id: str
    :param related_type: Type of object to create relationship with
    :type related_type: str
    :param relationship_type: Type of relationship to create.
    :type relationship_type: str
    :returns: dict with keys:
              "success" (boolean),
              "object" the domain that was added,
              "is_domain_new" (boolean)
    """
    # validate domain and grab root domain
    (root, domain, error) = get_valid_root_domain(domain)
    if error:
        return {'success': False, 'message': error}

    is_fqdn_domain_new = False
    is_root_domain_new = False

    if not campaign:
        campaign = []
    # assume it's a list, but check if it's a string
    elif isinstance(campaign, basestring):
        c = EmbeddedCampaign(name=campaign, confidence=confidence, analyst=username)
        campaign = [c]

    # assume it's a list, but check if it's a string
    if isinstance(source, basestring):
        s = EmbeddedSource()
        s.name = source
        instance = EmbeddedSource.SourceInstance()
        instance.reference = ''
        instance.method = ''
        instance.analyst = username
        instance.date = datetime.datetime.now()
        s.instances = [instance]
        source = [s]

    fqdn_domain = None
    root_domain = None

    # During bulk operations previously-seen documents live in the cache;
    # otherwise fall back to mongo lookups.
    cached_results = cache.get(form_consts.Domain.CACHED_RESULTS)
    if cached_results != None:
        if domain != root:
            fqdn_domain = cached_results.get(domain)
            root_domain = cached_results.get(root)
        else:
            root_domain = cached_results.get(root)
    else:
        # first find the domain(s) if it/they already exist
        root_domain = Domain.objects(domain=root).first()
        if domain != root:
            fqdn_domain = Domain.objects(domain=domain).first()

    # if they don't exist, create them
    if not root_domain:
        root_domain = Domain()
        root_domain.domain = root
        root_domain.source = []
        root_domain.record_type = 'A'
        is_root_domain_new = True
        if cached_results != None:
            cached_results[root] = root_domain
    if domain != root and not fqdn_domain:
        fqdn_domain = Domain()
        fqdn_domain.domain = domain
        fqdn_domain.source = []
        fqdn_domain.record_type = 'A'
        is_fqdn_domain_new = True
        if cached_results != None:
            cached_results[domain] = fqdn_domain

    # if new or found, append the new source(s)
    for s in source:
        if root_domain:
            root_domain.add_source(s)
        if fqdn_domain:
            fqdn_domain.add_source(s)

    # campaigns: both root and fqdn get campaigns updated
    for c in campaign:
        if root_domain:
            root_domain.add_campaign(c)
        if fqdn_domain:
            fqdn_domain.add_campaign(c)

    if username:
        if root_domain:
            root_domain.analyst = username
        if fqdn_domain:
            fqdn_domain.analyst = username

    if bucket_list:
        if root_domain:
            root_domain.add_bucket_list(bucket_list, username)
        if fqdn_domain:
            fqdn_domain.add_bucket_list(bucket_list, username)

    if ticket:
        if root_domain:
            root_domain.add_ticket(ticket, username)
        if fqdn_domain:
            fqdn_domain.add_ticket(ticket, username)

    related_obj = None
    if related_id:
        related_obj = class_from_id(related_type, related_id)
        if not related_obj:
            # BUG FIX: this branch previously did "retVal['success'] = False"
            # but no retVal exists in this function, so it raised a NameError
            # instead of reporting the failure. Return the error dict the
            # callers expect.
            return {'success': False, 'message': 'Related Object not found.'}

    # save
    try:
        if root_domain:
            root_domain.save(username=username)
        if fqdn_domain:
            fqdn_domain.save(username=username)
    except Exception as e:
        return {'success': False, 'message': e}

    # Add relationships between fqdn, root
    if fqdn_domain and root_domain:
        root_domain.add_relationship(fqdn_domain,
                                     RelationshipTypes.SUPRA_DOMAIN_OF,
                                     analyst=username,
                                     get_rels=False)
        root_domain.save(username=username)
        fqdn_domain.save(username=username)

    # Add relationships from object domain is being added from
    if related_obj and relationship_type:
        relationship_type = RelationshipTypes.inverse(relationship=relationship_type)
        if fqdn_domain and (related_obj != fqdn_domain):
            fqdn_domain.add_relationship(related_obj,
                                         relationship_type,
                                         analyst=username,
                                         get_rels=False)
            fqdn_domain.save(username=username)
        if root_domain and (related_obj != root_domain):
            root_domain.add_relationship(related_obj,
                                         relationship_type,
                                         analyst=username,
                                         get_rels=False)
            root_domain.save(username=username)

    # run domain triage, but only on documents that were just created
    if is_fqdn_domain_new:
        fqdn_domain.reload()
        run_triage(fqdn_domain, username)
    if is_root_domain_new:
        root_domain.reload()
        run_triage(root_domain, username)

    # return fqdn if they added an fqdn, or root if they added a root
    if fqdn_domain:
        return {'success': True, 'object': fqdn_domain, 'is_domain_new': is_fqdn_domain_new}
    else:
        return {'success': True, 'object': root_domain, 'is_domain_new': is_root_domain_new}
def update_tlds(data=None):
    """
    Update the TLD list in the database.

    :param data: The TLD data.
    :type data: file handle.
    :returns: dict with key "success" (boolean)
    """
    if not data:
        return {'success': False}
    line = data.readline()
    while line:
        line = line.rstrip()
        # Skip blank lines and '//' comment lines.
        if line and not line.startswith('//'):
            TLD.objects(tld=line).update_one(set__tld=line, upsert=True)
        line = data.readline()

    # Update the module-level tld_parser with the new domain info.
    # BUG FIX: without the 'global' declaration the assignment below only
    # created an unused local, so the shared parser kept serving stale TLD
    # rules until some lookup happened to fail and rebuilt it.
    global tld_parser
    tld_parser = etld()

    return {'success': True}
class etld(object):
    """
    TLD class to assist with extracting root domains.

    Builds one compiled regex per TLD entry from the TLD collection, keyed
    by the entry's last label; parse() matches a reversed hostname against
    the rules for its last label.
    """
    def __init__(self):
        # Map: last label of the TLD entry -> list of compiled rules.
        self.rules = {}
        etlds = TLD.objects()
        for etld in etlds:
            tld = etld.tld.split('.')[-1]
            self.rules.setdefault(tld, [])
            self.rules[tld].append(re.compile(self.regexpize(etld.tld)))

    def regexpize(self, etld):
        """
        Generate regex for this TLD.

        :param etld: The TLD to generate regex for.
        :returns: str
        """
        # Reverse the TLD (parse() matches reversed hostnames), escape dots,
        # expand '*' wildcards and drop '!' exception markers.
        etld = etld[::-1].replace('.',
                                  '\\.').replace('*',
                                                 '[^\\.]*').replace('!',
                                                                    '')
        return '^(%s)\.(.*)$' % etld

    def parse(self, hostname):
        """
        Parse the domain.

        :param hostname: The domain to parse.
        :returns: str -- "<registered label>.<tld>" or "no_tld_found_error"
        """
        try:
            hostname = hostname.lower()
            tld = hostname.split('.')[-1]
            hostname = hostname[::-1]
            etld = ''
            for rule in self.rules[tld]:
                m = rule.match(hostname)
                # NOTE(review): 'etld' is never reassigned, so this test only
                # requires group(1) to be non-empty and every matching rule
                # overwrites mytld — looks like an unfinished longest-match
                # loop; confirm intended rule priority before changing.
                if m and m.group(1) > etld:
                    mytld = "%s.%s" % ( m.group(2)[::-1].split(".")[-1],
                                        m.group(1)[::-1])
            # NOTE(review): when no rule matched, 'mytld' is unbound here and
            # the resulting NameError is swallowed below, which also yields
            # "no_tld_found_error" — presumably relied upon; verify.
            if not mytld:
                return ("no_tld_found_error")
            return (mytld)
        except Exception:
            # Unknown last label (KeyError), non-string input, or the
            # unbound-mytld case above all end up here.
            return ("no_tld_found_error")
def parse_row_to_bound_domain_form(request, rowData, cache):
    """
    Parse a row in bulk upload into form data that can be used to add a Domain.

    :param request: Django request.
    :type request: :class:`django.http.HttpRequest`
    :param rowData: The objects to add for the Domain.
    :type rowData: dict
    :param cache: Cached data, typically for performance enhancements
                  during bulk operations.
    :type cache: dict
    :returns: :class:`crits.domains.forms.AddDomainForm`
    """
    bound_domain_form = None

    # TODO Add common method to convert data to string
    domain_name = rowData.get(form_consts.Domain.DOMAIN_NAME, "").strip();
    campaign = rowData.get(form_consts.Domain.CAMPAIGN, "")
    confidence = rowData.get(form_consts.Domain.CAMPAIGN_CONFIDENCE, "")
    domain_source = rowData.get(form_consts.Domain.DOMAIN_SOURCE, "")
    domain_method = rowData.get(form_consts.Domain.DOMAIN_METHOD, "")
    domain_reference = rowData.get(form_consts.Domain.DOMAIN_REFERENCE, "")
    #is_add_ip = convert_string_to_bool(rowData.get(form_consts.Domain.ADD_IP_ADDRESS, ""))
    is_add_ip = False
    ip = rowData.get(form_consts.Domain.IP_ADDRESS, "")
    ip_type = rowData.get(form_consts.Domain.IP_TYPE, "")
    created = rowData.get(form_consts.Domain.IP_DATE, "")
    #is_same_source = convert_string_to_bool(rowData.get(form_consts.Domain.SAME_SOURCE, "False"))
    is_same_source = False
    ip_source = rowData.get(form_consts.Domain.IP_SOURCE, "")
    ip_method = rowData.get(form_consts.Domain.IP_METHOD, "")
    ip_reference = rowData.get(form_consts.Domain.IP_REFERENCE, "")
    is_add_indicators = convert_string_to_bool(rowData.get(form_consts.Domain.ADD_INDICATORS, "False"))
    bucket_list = rowData.get(form_consts.Common.BUCKET_LIST, "")
    ticket = rowData.get(form_consts.Common.TICKET, "")

    # Presence of any IP-related column means the row carries IP data.
    if(ip or created or ip_source or ip_method or ip_reference):
        is_add_ip = True

    if is_add_ip == True:
        data = {'domain': domain_name,
                'campaign': campaign,
                'confidence': confidence,
                'domain_source': domain_source,
                'domain_method': domain_method,
                'domain_reference': domain_reference,
                'add_ip': is_add_ip,
                'ip': ip,
                'ip_type': ip_type,
                'created': created,
                'same_source': is_same_source,
                'ip_source': ip_source,
                'ip_method': ip_method,
                'ip_reference': ip_reference,
                'add_indicators': is_add_indicators,
                'bucket_list': bucket_list,
                'ticket': ticket}
        # Reuse a single bound form per bulk run; rebinding .data avoids
        # re-constructing the form object for every row.
        bound_domain_form = cache.get("domain_ip_form")
        if bound_domain_form == None:
            bound_domain_form = AddDomainForm(request.user, data)
            cache['domain_ip_form'] = bound_domain_form
        else:
            bound_domain_form.data = data
    else:
        data = {'domain': domain_name,
                'campaign': campaign,
                'confidence': confidence,
                'domain_source': domain_source,
                'domain_method': domain_method,
                'domain_reference': domain_reference,
                'add_ip': is_add_ip,
                'bucket_list': bucket_list,
                'ticket': ticket}
        bound_domain_form = cache.get("domain_form")
        if bound_domain_form == None:
            bound_domain_form = AddDomainForm(request.user, data)
            cache['domain_form'] = bound_domain_form
        else:
            bound_domain_form.data = data

    # Re-run validation since the reused form's data may have been replaced.
    if bound_domain_form != None:
        bound_domain_form.full_clean()

    return bound_domain_form
def process_bulk_add_domain(request, formdict):
    """
    Performs the bulk add of domains by parsing the request data. Batches
    some data into a cache object for performance by reducing large
    amounts of single database queries.

    :param request: Django request.
    :type request: :class:`django.http.HttpRequest`
    :param formdict: The form representing the bulk uploaded data.
    :type formdict: dict
    :returns: :class:`django.http.HttpResponse`
    """
    domain_names = []
    ip_addresses = []
    cleanedRowsData = convert_handsontable_to_rows(request)

    # Collect every domain/IP referenced by the upload so they can be
    # fetched in two bulk queries below instead of one query per row.
    for rowData in cleanedRowsData:
        if rowData is None:
            continue
        if rowData.get(form_consts.Domain.DOMAIN_NAME) is not None:
            domain = rowData.get(form_consts.Domain.DOMAIN_NAME).strip().lower()
            (root_domain, full_domain, error) = get_valid_root_domain(domain)
            domain_names.append(full_domain)
            # Also look up the root domain when the row is a subdomain.
            if domain != root_domain:
                domain_names.append(root_domain)
        if rowData.get(form_consts.Domain.IP_ADDRESS) is not None:
            ip_addr = rowData.get(form_consts.Domain.IP_ADDRESS)
            ip_type = rowData.get(form_consts.Domain.IP_TYPE)
            (ip_addr, error) = validate_and_normalize_ip(ip_addr, ip_type)
            ip_addresses.append(ip_addr)

    cached_domain_results = {}
    cached_ip_results = {}
    for domain_result in Domain.objects(domain__in=domain_names):
        cached_domain_results[domain_result.domain] = domain_result
    for ip_result in IP.objects(ip__in=ip_addresses):
        cached_ip_results[ip_result.ip] = ip_result

    cache = {form_consts.Domain.CACHED_RESULTS: cached_domain_results,
             form_consts.IP.CACHED_RESULTS: cached_ip_results,
             'cleaned_rows_data': cleanedRowsData}
    return parse_bulk_upload(request, parse_row_to_bound_domain_form,
                             add_new_domain_via_bulk, formdict, cache)
# Global definition of the TLD parser -- etld.
# This is a workaround to use a global instance because the __init__ method takes ~0.5 seconds to
# initialize. Was causing performance problems (high CPU usage) with bulk uploading of domains since
# each domain needed to create the etld() class.
# TODO investigate if updating of TLDs causes this global instance to become stale.
# NOTE(review): this runs at import time, so importing the module pays the
# ~0.5s initialization cost once up front.
tld_parser = etld()
| |
from itertools import chain
from decimal import Decimal as D
import hashlib
from django.conf import settings
from django.db import models
from django.db.models import Sum
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from django.utils.datastructures import SortedDict
from oscar.core.compat import AUTH_USER_MODEL
from oscar.models.fields import AutoSlugField
from . import exceptions
class AbstractOrder(models.Model):
    """
    The main order model
    """
    number = models.CharField(_("Order number"), max_length=128, db_index=True)

    # We track the site that each order is placed within
    site = models.ForeignKey('sites.Site', verbose_name=_("Site"), null=True,
                             on_delete=models.SET_NULL)
    basket = models.ForeignKey(
        'basket.Basket', verbose_name=_("Basket"),
        null=True, blank=True, on_delete=models.SET_NULL)

    # Orders can be placed without the user authenticating so we don't always
    # have a customer ID.
    user = models.ForeignKey(
        AUTH_USER_MODEL, related_name='orders', null=True, blank=True,
        verbose_name=_("User"), on_delete=models.SET_NULL)

    # Billing address is not always required (eg paying by gift card)
    billing_address = models.ForeignKey(
        'order.BillingAddress', null=True, blank=True,
        verbose_name=_("Billing Address"),
        on_delete=models.SET_NULL)

    # Total price looks like it could be calculated by adding up the
    # prices of the associated lines, but in some circumstances extra
    # order-level charges are added and so we need to store it separately
    currency = models.CharField(
        _("Currency"), max_length=12, default=settings.OSCAR_DEFAULT_CURRENCY)
    total_incl_tax = models.DecimalField(
        _("Order total (inc. tax)"), decimal_places=2, max_digits=12)
    total_excl_tax = models.DecimalField(
        _("Order total (excl. tax)"), decimal_places=2, max_digits=12)

    # Shipping charges
    shipping_incl_tax = models.DecimalField(
        _("Shipping charge (inc. tax)"), decimal_places=2, max_digits=12,
        default=0)
    shipping_excl_tax = models.DecimalField(
        _("Shipping charge (excl. tax)"), decimal_places=2, max_digits=12,
        default=0)

    # Not all lines are actually shipped (such as downloads), hence shipping
    # address is not mandatory.
    shipping_address = models.ForeignKey(
        'order.ShippingAddress', null=True, blank=True,
        verbose_name=_("Shipping Address"),
        on_delete=models.SET_NULL)
    shipping_method = models.CharField(
        _("Shipping method"), max_length=128, blank=True)

    # Identifies shipping code
    shipping_code = models.CharField(blank=True, max_length=128, default="")

    # Use this field to indicate that an order is on hold / awaiting payment
    status = models.CharField(_("Status"), max_length=100, blank=True)
    guest_email = models.EmailField(_("Guest email address"), blank=True)

    # Index added to this field for reporting
    date_placed = models.DateTimeField(auto_now_add=True, db_index=True)

    #: Order status pipeline. This should be a dict where each (key, value)
    #: corresponds to a status and a list of possible statuses that can follow
    #: that one.
    pipeline = getattr(settings, 'OSCAR_ORDER_STATUS_PIPELINE', {})

    #: Order status cascade pipeline. This should be a dict where each (key,
    #: value) pair corresponds to an *order* status and the corresponding
    #: *line* status that needs to be set when the order is set to the new
    #: status
    cascade = getattr(settings, 'OSCAR_ORDER_STATUS_CASCADE', {})

    @classmethod
    def all_statuses(cls):
        """
        Return all possible statuses for an order
        """
        return cls.pipeline.keys()

    def available_statuses(self):
        """
        Return all possible statuses that this order can move to
        """
        return self.pipeline.get(self.status, ())

    def set_status(self, new_status):
        """
        Set a new status for this order.

        If the requested status is not valid, then ``InvalidOrderStatus`` is
        raised.
        """
        if new_status == self.status:
            return
        if new_status not in self.available_statuses():
            raise exceptions.InvalidOrderStatus(
                _("'%(new_status)s' is not a valid status for order %(number)s"
                  " (current status: '%(status)s')")
                % {'new_status': new_status,
                   'number': self.number,
                   'status': self.status})
        self.status = new_status
        # Cascade the new order status down to the lines where configured.
        if new_status in self.cascade:
            for line in self.lines.all():
                line.status = self.cascade[self.status]
                line.save()
        self.save()
    set_status.alters_data = True

    @property
    def is_anonymous(self):
        # It's possible for an order to be placed by a customer who then
        # deletes their profile. Hence, we need to check that a guest email is
        # set.
        return self.user is None and bool(self.guest_email)

    @property
    def basket_total_before_discounts_incl_tax(self):
        """
        Return basket total including tax but before discounts are applied
        """
        total = D('0.00')
        for line in self.lines.all():
            total += line.line_price_before_discounts_incl_tax
        return total

    @property
    def basket_total_before_discounts_excl_tax(self):
        """
        Return basket total excluding tax but before discounts are applied
        """
        total = D('0.00')
        for line in self.lines.all():
            total += line.line_price_before_discounts_excl_tax
        return total

    @property
    def basket_total_incl_tax(self):
        """
        Return basket total including tax
        """
        return self.total_incl_tax - self.shipping_incl_tax

    @property
    def basket_total_excl_tax(self):
        """
        Return basket total excluding tax
        """
        return self.total_excl_tax - self.shipping_excl_tax

    @property
    def total_before_discounts_incl_tax(self):
        """
        Return order total (inc. tax) before discounts were applied
        """
        return (self.basket_total_before_discounts_incl_tax +
                self.shipping_incl_tax)

    @property
    def total_before_discounts_excl_tax(self):
        """
        Return order total (excl. tax) before discounts were applied
        """
        return (self.basket_total_before_discounts_excl_tax +
                self.shipping_excl_tax)

    @property
    def total_discount_incl_tax(self):
        """
        The amount of discount this order received
        """
        discount = D('0.00')
        for line in self.lines.all():
            discount += line.discount_incl_tax
        return discount

    @property
    def total_discount_excl_tax(self):
        """
        The amount of discount (excl. tax) this order received
        """
        discount = D('0.00')
        for line in self.lines.all():
            discount += line.discount_excl_tax
        return discount

    @property
    def total_tax(self):
        return self.total_incl_tax - self.total_excl_tax

    @property
    def num_lines(self):
        return self.lines.count()

    @property
    def num_items(self):
        """
        Returns the number of items in this order.
        """
        num_items = 0
        for line in self.lines.all():
            num_items += line.quantity
        return num_items

    @property
    def shipping_tax(self):
        return self.shipping_incl_tax - self.shipping_excl_tax

    @property
    def shipping_status(self):
        """
        Return the name of the last event type that covers every item in
        the order, or "In progress" when none does.
        """
        events = self.shipping_events.all()
        if not events:
            return ''

        # Collect all events by event-type (renamed from ``map`` to avoid
        # shadowing the builtin).
        event_map = {}
        for event in events:
            event_name = event.event_type.name
            event_map.setdefault(event_name, [])
            event_map[event_name] = list(chain(event_map[event_name],
                                               event.line_quantities.all()))

        # Determine last complete event
        status = _("In progress")
        for event_name, event_line_quantities in event_map.items():
            if self._is_event_complete(event_line_quantities):
                status = event_name
        return status

    @property
    def has_shipping_discounts(self):
        return len(self.shipping_discounts) > 0

    @property
    def shipping_before_discounts_incl_tax(self):
        # We can construct what shipping would have been before discounts by
        # adding the discounts back onto the final shipping charge.
        total = D('0.00')
        for discount in self.shipping_discounts:
            total += discount.amount
        return self.shipping_incl_tax + total

    def _is_event_complete(self, event_quantities):
        """
        Test whether the passed event quantities cover the full quantity
        of every line in the order.
        """
        # Form map of line to quantity
        line_quantities = {}
        for event_quantity in event_quantities:
            line_id = event_quantity.line_id
            line_quantities.setdefault(line_id, 0)
            line_quantities[line_id] += event_quantity.quantity
        for line in self.lines.all():
            # Default to 0 so a line with no quantities for this event
            # counts as incomplete instead of raising a KeyError (the
            # original used ``map[line.id]`` and would crash here).
            if line_quantities.get(line.id, 0) != line.quantity:
                return False
        return True

    class Meta:
        abstract = True
        ordering = ['-date_placed']
        verbose_name = _("Order")
        verbose_name_plural = _("Orders")

    def __unicode__(self):
        return u"#%s" % (self.number,)

    def verification_hash(self):
        """
        Return an MD5 digest of the order number salted with SECRET_KEY.
        """
        key = '%s%s' % (self.number, settings.SECRET_KEY)
        return hashlib.md5(key.encode('utf8')).hexdigest()

    @property
    def email(self):
        if not self.user:
            return self.guest_email
        return self.user.email

    @property
    def basket_discounts(self):
        # This includes both offer- and voucher- discounts. For orders we
        # don't need to treat them differently like we do for baskets.
        return self.discounts.filter(
            category=AbstractOrderDiscount.BASKET)

    @property
    def shipping_discounts(self):
        return self.discounts.filter(
            category=AbstractOrderDiscount.SHIPPING)

    @property
    def post_order_actions(self):
        return self.discounts.filter(
            category=AbstractOrderDiscount.DEFERRED)
class AbstractOrderNote(models.Model):
    """
    A note against an order.

    This are often used for audit purposes too. IE, whenever an admin
    makes a change to an order, we create a note to record what happened.
    """
    order = models.ForeignKey('order.Order', related_name="notes",
                              verbose_name=_("Order"))

    # These are sometimes programatically generated so don't need a
    # user everytime
    user = models.ForeignKey(AUTH_USER_MODEL, null=True,
                             verbose_name=_("User"))

    # We allow notes to be classified although this isn't always needed
    INFO, WARNING, ERROR, SYSTEM = 'Info', 'Warning', 'Error', 'System'
    note_type = models.CharField(_("Note Type"), max_length=128, blank=True)

    message = models.TextField(_("Message"))
    date_created = models.DateTimeField(_("Date Created"), auto_now_add=True)
    date_updated = models.DateTimeField(_("Date Updated"), auto_now=True)

    # Notes can only be edited for 5 minutes after being created
    editable_lifetime = 300

    class Meta:
        abstract = True
        verbose_name = _("Order Note")
        verbose_name_plural = _("Order Notes")

    def __unicode__(self):
        return u"'%s' (%s)" % (self.message[0:50], self.user)

    def is_editable(self):
        """
        Test whether this note can still be edited.

        System notes are never editable; other notes are editable for
        ``editable_lifetime`` seconds after their last update.
        """
        if self.note_type == self.SYSTEM:
            return False
        delta = timezone.now() - self.date_updated
        # total_seconds(): ``timedelta.seconds`` only holds the seconds
        # *component* (it wraps every 24 hours), so a note last updated
        # e.g. 1 day and 1 minute ago would wrongly appear editable.
        return delta.total_seconds() < self.editable_lifetime
class AbstractCommunicationEvent(models.Model):
    """
    An order-level event involving a communication to the customer, such
    as an confirmation email being sent.
    """
    order = models.ForeignKey(
        'order.Order', verbose_name=_("Order"),
        related_name="communication_events")
    event_type = models.ForeignKey(
        'customer.CommunicationEventType', verbose_name=_("Event Type"))
    date_created = models.DateTimeField(_("Date"), auto_now_add=True)

    class Meta:
        abstract = True
        verbose_name = _("Communication Event")
        verbose_name_plural = _("Communication Events")
        ordering = ['-date_created']

    def __unicode__(self):
        details = {'type': self.event_type.name,
                   'number': self.order.number}
        return _("'%(type)s' event for order #%(number)s") % details
# LINES
class AbstractLine(models.Model):
    """
    A order line (basically a product and a quantity)

    Not using a line model as it's difficult to capture and payment
    information when it splits across a line.
    """
    order = models.ForeignKey(
        'order.Order', related_name='lines', verbose_name=_("Order"))

    #: We keep a link to the stockrecord used for this line which allows us to
    #: update stocklevels when it ships
    stockrecord = models.ForeignKey(
        'partner.StockRecord', on_delete=models.SET_NULL, blank=True,
        null=True, verbose_name=_("Stock record"))

    # We store the partner, their SKU and the title for cases where the product
    # has been deleted from the catalogue. We also store the partner name in
    # case the partner gets deleted at a later date.
    partner = models.ForeignKey(
        'partner.Partner', related_name='order_lines', blank=True, null=True,
        on_delete=models.SET_NULL, verbose_name=_("Partner"))
    partner_name = models.CharField(
        _("Partner name"), max_length=128, blank=True)
    partner_sku = models.CharField(_("Partner SKU"), max_length=128)

    title = models.CharField(_("Title"), max_length=255)
    # UPC can be null because it's usually set as the product's UPC, and that
    # can be null as well
    upc = models.CharField(_("UPC"), max_length=128, blank=True, null=True)

    # We don't want any hard links between orders and the products table so we
    # allow this link to be NULLable.
    product = models.ForeignKey(
        'catalogue.Product', on_delete=models.SET_NULL, blank=True, null=True,
        verbose_name=_("Product"))
    quantity = models.PositiveIntegerField(_("Quantity"), default=1)

    # Price information (these fields are actually redundant as the information
    # can be calculated from the LinePrice models
    line_price_incl_tax = models.DecimalField(
        _("Price (inc. tax)"), decimal_places=2, max_digits=12)
    line_price_excl_tax = models.DecimalField(
        _("Price (excl. tax)"), decimal_places=2, max_digits=12)

    # Price information before discounts are applied
    line_price_before_discounts_incl_tax = models.DecimalField(
        _("Price before discounts (inc. tax)"),
        decimal_places=2, max_digits=12)
    line_price_before_discounts_excl_tax = models.DecimalField(
        _("Price before discounts (excl. tax)"),
        decimal_places=2, max_digits=12)

    # REPORTING FIELDS
    # Cost price (the price charged by the fulfilment partner for this
    # product).
    unit_cost_price = models.DecimalField(
        _("Unit Cost Price"), decimal_places=2, max_digits=12, blank=True,
        null=True)
    # Normal site price for item (without discounts)
    unit_price_incl_tax = models.DecimalField(
        _("Unit Price (inc. tax)"), decimal_places=2, max_digits=12,
        blank=True, null=True)
    unit_price_excl_tax = models.DecimalField(
        _("Unit Price (excl. tax)"), decimal_places=2, max_digits=12,
        blank=True, null=True)
    # Retail price at time of purchase
    unit_retail_price = models.DecimalField(
        _("Unit Retail Price"), decimal_places=2, max_digits=12,
        blank=True, null=True)

    # Partner information
    partner_line_reference = models.CharField(
        _("Partner reference"), max_length=128, blank=True,
        help_text=_("This is the item number that the partner uses "
                    "within their system"))
    partner_line_notes = models.TextField(
        _("Partner Notes"), blank=True)

    # Partners often want to assign some status to each line to help with their
    # own business processes.
    status = models.CharField(_("Status"), max_length=255, blank=True)

    # Estimated dispatch date - should be set at order time
    est_dispatch_date = models.DateField(
        _("Estimated Dispatch Date"), blank=True, null=True)

    #: Order status pipeline. This should be a dict where each (key, value)
    #: corresponds to a status and the possible statuses that can follow that
    #: one.
    pipeline = getattr(settings, 'OSCAR_LINE_STATUS_PIPELINE', {})

    class Meta:
        abstract = True
        verbose_name = _("Order Line")
        verbose_name_plural = _("Order Lines")

    def __unicode__(self):
        """Return a short, human-readable summary of the line."""
        if self.product:
            title = self.product.title
        else:
            # Product may have been deleted from the catalogue since the
            # order was placed (the FK is nullable with SET_NULL).
            title = _('<missing product>')
        return _("Product '%(name)s', quantity '%(qty)s'") % {
            'name': title, 'qty': self.quantity}

    @classmethod
    def all_statuses(cls):
        """
        Return all possible statuses for an order line
        """
        return cls.pipeline.keys()

    def available_statuses(self):
        """
        Return all possible statuses that this order line can move to
        """
        return self.pipeline.get(self.status, ())

    def set_status(self, new_status):
        """
        Set a new status for this line

        If the requested status is not valid, then ``InvalidLineStatus`` is
        raised.
        """
        if new_status == self.status:
            return
        if new_status not in self.available_statuses():
            raise exceptions.InvalidLineStatus(
                _("'%(new_status)s' is not a valid status (current status:"
                  " '%(status)s')")
                % {'new_status': new_status, 'status': self.status})
        self.status = new_status
        self.save()
    set_status.alters_data = True

    @property
    def category(self):
        """
        Used by Google analytics tracking
        """
        return None

    @property
    def description(self):
        """
        Returns a description of this line including details of any
        line attributes.
        """
        desc = self.title
        ops = []
        for attribute in self.attributes.all():
            ops.append("%s = '%s'" % (attribute.type, attribute.value))
        if ops:
            desc = "%s (%s)" % (desc, ", ".join(ops))
        return desc

    @property
    def discount_incl_tax(self):
        # Discount is the gap between the pre-discount and final price.
        return self.line_price_before_discounts_incl_tax \
            - self.line_price_incl_tax

    @property
    def discount_excl_tax(self):
        return self.line_price_before_discounts_excl_tax \
            - self.line_price_excl_tax

    @property
    def line_price_tax(self):
        return self.line_price_incl_tax - self.line_price_excl_tax

    @property
    def unit_price_tax(self):
        return self.unit_price_incl_tax - self.unit_price_excl_tax

    # Shipping status helpers

    @property
    def shipping_status(self):
        """
        Returns a string summary of the shipping status of this line
        """
        status_map = self.shipping_event_breakdown
        if not status_map:
            return ''

        events = []
        last_complete_event_name = None
        # Walk the events newest-first; events covering the full quantity
        # are "complete", others are reported with an x/y item count.
        for event_dict in reversed(list(status_map.values())):
            if event_dict['quantity'] == self.quantity:
                events.append(event_dict['name'])
                last_complete_event_name = event_dict['name']
            else:
                events.append("%s (%d/%d items)" % (
                    event_dict['name'], event_dict['quantity'],
                    self.quantity))

        # If the most recent event (first in insertion order) is complete,
        # its name alone summarises the line's status.
        if last_complete_event_name == list(status_map.values())[0]['name']:
            return last_complete_event_name

        return ', '.join(events)

    def is_shipping_event_permitted(self, event_type, quantity):
        """
        Test whether a shipping event with the given quantity is permitted

        This method should normally be overriden to ensure that the
        prerequisite shipping events have been passed for this line.
        """
        # Note, this calculation is simplistic - normally, you will also need
        # to check if previous shipping events have occurred.  Eg, you can't
        # return lines until they have been shipped.
        current_qty = self.shipping_event_quantity(event_type)
        return (current_qty + quantity) <= self.quantity

    def shipping_event_quantity(self, event_type):
        """
        Return the quantity of this line that has been involved in a shipping
        event of the passed type.
        """
        result = self.shipping_event_quantities.filter(
            event__event_type=event_type).aggregate(Sum('quantity'))
        if result['quantity__sum'] is None:
            # No events of this type yet.
            return 0
        else:
            return result['quantity__sum']

    def has_shipping_event_occurred(self, event_type, quantity=None):
        """
        Test whether this line has passed a given shipping event
        """
        if not quantity:
            # Default to testing the line's full quantity.
            quantity = self.quantity
        return self.shipping_event_quantity(event_type) == quantity

    @property
    def shipping_event_breakdown(self):
        """
        Returns a dict of shipping events that this line has been through
        """
        # SortedDict preserves insertion order, which shipping_status
        # relies on to find the most recent event.
        status_map = SortedDict()
        for event in self.shipping_events.all():
            event_type = event.event_type
            event_name = event_type.name
            event_quantity = event.line_quantities.get(line=self).quantity
            if event_name in status_map:
                status_map[event_name]['quantity'] += event_quantity
            else:
                status_map[event_name] = {
                    'event_type': event_type,
                    'name': event_name,
                    'quantity': event_quantity
                }
        return status_map

    # Payment event helpers

    def is_payment_event_permitted(self, event_type, quantity):
        """
        Test whether a payment event with the given quantity is permitted
        """
        current_qty = self.payment_event_quantity(event_type)
        return (current_qty + quantity) <= self.quantity

    def payment_event_quantity(self, event_type):
        """
        Return the quantity of this line that has been involved in a payment
        event of the passed type.
        """
        result = self.payment_event_quantities.filter(
            event__event_type=event_type).aggregate(Sum('quantity'))
        if result['quantity__sum'] is None:
            return 0
        else:
            return result['quantity__sum']

    @property
    def is_product_deleted(self):
        return self.product is None

    def is_available_to_reorder(self, basket, strategy):
        """
        Test if this line can be re-ordered using the passed strategy and
        basket
        """
        if not self.product:
            return False, (_("'%(title)s' is no longer available") %
                           {'title': self.title})

        try:
            basket_line = basket.lines.get(product=self.product)
        except basket.lines.model.DoesNotExist:
            desired_qty = self.quantity
        else:
            # Account for what is already in the basket.
            desired_qty = basket_line.quantity + self.quantity

        result = strategy.fetch_for_product(self.product)
        is_available, reason = result.availability.is_purchase_permitted(
            quantity=desired_qty)
        if not is_available:
            return False, reason
        return True, None
class AbstractLineAttribute(models.Model):
    """
    An attribute of a line
    """
    line = models.ForeignKey(
        'order.Line', verbose_name=_("Line"),
        related_name='attributes')
    option = models.ForeignKey(
        'catalogue.Option', verbose_name=_("Option"),
        related_name="line_attributes", null=True,
        on_delete=models.SET_NULL)
    type = models.CharField(_("Type"), max_length=128)
    value = models.CharField(_("Value"), max_length=255)

    class Meta:
        abstract = True
        verbose_name = _("Line Attribute")
        verbose_name_plural = _("Line Attributes")

    def __unicode__(self):
        return "%s = %s" % (self.type, self.value)
class AbstractLinePrice(models.Model):
    """
    For tracking the prices paid for each unit within a line.

    This is necessary as offers can lead to units within a line
    having different prices. For example, one product may be sold at
    50% off as it's part of an offer while the remainder are full price.
    """
    # NOTE(review): verbose_name _("Option") looks like a copy-paste slip
    # (every other order FK is labelled "Order") -- confirm before changing
    # the translatable label.
    order = models.ForeignKey(
        'order.Order', related_name='line_prices', verbose_name=_("Option"))
    line = models.ForeignKey(
        'order.Line', related_name='prices', verbose_name=_("Line"))
    quantity = models.PositiveIntegerField(_("Quantity"), default=1)
    price_incl_tax = models.DecimalField(
        _("Price (inc. tax)"), decimal_places=2, max_digits=12)
    price_excl_tax = models.DecimalField(
        _("Price (excl. tax)"), decimal_places=2, max_digits=12)
    # "Shiping" typo in the user-facing label fixed below.
    shipping_incl_tax = models.DecimalField(
        _("Shipping (inc. tax)"), decimal_places=2, max_digits=12, default=0)
    shipping_excl_tax = models.DecimalField(
        _("Shipping (excl. tax)"), decimal_places=2, max_digits=12, default=0)

    class Meta:
        abstract = True
        ordering = ('id',)
        verbose_name = _("Line Price")
        verbose_name_plural = _("Line Prices")

    def __unicode__(self):
        return _("Line '%(number)s' (quantity %(qty)d) price %(price)s") % {
            'number': self.line,
            'qty': self.quantity,
            'price': self.price_incl_tax}
# PAYMENT EVENTS
class AbstractPaymentEventType(models.Model):
    """
    Payment event types are things like 'Paid', 'Failed', 'Refunded'.

    These are effectively the transaction types.
    """
    name = models.CharField(_("Name"), max_length=128, unique=True)
    # Slug derived from the name, used for programmatic lookups.
    code = AutoSlugField(_("Code"), populate_from='name',
                         max_length=128, unique=True)

    class Meta:
        abstract = True
        verbose_name = _("Payment Event Type")
        verbose_name_plural = _("Payment Event Types")
        ordering = ('name', )

    def __unicode__(self):
        return self.name
class AbstractPaymentEvent(models.Model):
    """
    A payment event for an order

    For example:

    * All lines have been paid for
    * 2 lines have been refunded
    """
    order = models.ForeignKey(
        'order.Order', related_name='payment_events',
        verbose_name=_("Order"))
    amount = models.DecimalField(
        _("Amount"), decimal_places=2, max_digits=12)
    # The reference should refer to the transaction ID of the payment gateway
    # that was used for this event.
    reference = models.CharField(
        _("Reference"), max_length=128, blank=True)
    lines = models.ManyToManyField(
        'order.Line', through='PaymentEventQuantity',
        verbose_name=_("Lines"))
    event_type = models.ForeignKey(
        'order.PaymentEventType', verbose_name=_("Event Type"))
    # Allow payment events to be linked to shipping events. Often a shipping
    # event will trigger a payment event and so we can use this FK to capture
    # the relationship.
    shipping_event = models.ForeignKey(
        'order.ShippingEvent', related_name='payment_events',
        null=True)
    date_created = models.DateTimeField(_("Date created"), auto_now_add=True)

    class Meta:
        abstract = True
        verbose_name = _("Payment Event")
        verbose_name_plural = _("Payment Events")
        ordering = ['-date_created']

    def __unicode__(self):
        return _("Payment event for order %s") % self.order

    def num_affected_lines(self):
        """
        Return the number of order lines affected by this event.
        """
        # count() directly on the m2m manager -- the intermediate .all()
        # was redundant; now consistent with
        # AbstractShippingEvent.num_affected_lines.
        return self.lines.count()
class PaymentEventQuantity(models.Model):
    """
    A "through" model linking lines to payment events
    """
    event = models.ForeignKey(
        'order.PaymentEvent', verbose_name=_("Event"),
        related_name='line_quantities')
    line = models.ForeignKey(
        'order.Line', verbose_name=_("Line"),
        related_name="payment_event_quantities")
    quantity = models.PositiveIntegerField(_("Quantity"))

    class Meta:
        verbose_name = _("Payment Event Quantity")
        verbose_name_plural = _("Payment Event Quantities")
# SHIPPING EVENTS
class AbstractShippingEvent(models.Model):
    """
    An event is something which happens to a group of lines such as
    1 item being dispatched.
    """
    order = models.ForeignKey(
        'order.Order', verbose_name=_("Order"),
        related_name='shipping_events')
    lines = models.ManyToManyField(
        'order.Line', through='ShippingEventQuantity',
        related_name='shipping_events', verbose_name=_("Lines"))
    event_type = models.ForeignKey(
        'order.ShippingEventType', verbose_name=_("Event Type"))
    notes = models.TextField(
        _("Event notes"), blank=True,
        help_text=_("This could be the dispatch reference, or a "
                    "tracking number"))
    date_created = models.DateTimeField(_("Date Created"), auto_now_add=True)

    class Meta:
        abstract = True
        verbose_name = _("Shipping Event")
        verbose_name_plural = _("Shipping Events")
        ordering = ['-date_created']

    def __unicode__(self):
        details = {'number': self.order.number,
                   'type': self.event_type}
        return _("Order #%(number)s, type %(type)s") % details

    def num_affected_lines(self):
        """Return the number of order lines this event touches."""
        return self.lines.count()
class ShippingEventQuantity(models.Model):
    """
    A "through" model linking lines to shipping events.

    This exists to track the quantity of a line that is involved in a
    particular shipping event.
    """
    event = models.ForeignKey(
        'order.ShippingEvent', verbose_name=_("Event"),
        related_name='line_quantities')
    line = models.ForeignKey(
        'order.Line', verbose_name=_("Line"),
        related_name="shipping_event_quantities")
    quantity = models.PositiveIntegerField(_("Quantity"))

    class Meta:
        verbose_name = _("Shipping Event Quantity")
        verbose_name_plural = _("Shipping Event Quantities")

    def save(self, *args, **kwargs):
        # An unset/zero quantity defaults to the full quantity of the line.
        if not self.quantity:
            self.quantity = self.line.quantity
        # Refuse to save an event that would exceed the line's quantity.
        permitted = self.line.is_shipping_event_permitted(
            self.event.event_type, self.quantity)
        if not permitted:
            raise exceptions.InvalidShippingEvent
        super(ShippingEventQuantity, self).save(*args, **kwargs)

    def __unicode__(self):
        details = {'product': self.line.product,
                   'qty': self.quantity}
        return _("%(product)s - quantity %(qty)d") % details
class AbstractShippingEventType(models.Model):
    """
    A type of shipping/fulfillment event

    Eg: 'Shipped', 'Cancelled', 'Returned'
    """
    # Name is the friendly description of an event
    name = models.CharField(_("Name"), max_length=255, unique=True)
    # Code is used in forms
    code = AutoSlugField(_("Code"), populate_from='name',
                         max_length=128, unique=True)

    class Meta:
        abstract = True
        verbose_name = _("Shipping Event Type")
        verbose_name_plural = _("Shipping Event Types")
        ordering = ('name', )

    def __unicode__(self):
        return self.name
# DISCOUNTS
class AbstractOrderDiscount(models.Model):
"""
A discount against an order.
Normally only used for display purposes so an order can be listed with
discounts displayed separately even though in reality, the discounts are
applied at the line level.
This has evolved to be a slightly misleading class name as this really
track benefit applications which aren't necessarily discounts.
"""
order = models.ForeignKey(
'order.Order', related_name="discounts", verbose_name=_("Order"))
# We need to distinguish between basket discounts, shipping discounts and
# 'deferred' discounts.
BASKET, SHIPPING, DEFERRED = "Basket", "Shipping", "Deferred"
CATEGORY_CHOICES = (
(BASKET, _(BASKET)),
(SHIPPING, _(SHIPPING)),
(DEFERRED, _(DEFERRED)),
)
category = models.CharField(
_("Discount category"), default=BASKET, max_length=64,
choices=CATEGORY_CHOICES)
offer_id = models.PositiveIntegerField(
_("Offer ID"), blank=True, null=True)
offer_name = models.CharField(
_("Offer name"), max_length=128, db_index=True, blank=True)
voucher_id = models.PositiveIntegerField(
_("Voucher ID"), blank=True, null=True)
voucher_code = models.CharField(
_("Code"), max_length=128, db_index=True, blank=True)
frequency = models.PositiveIntegerField(_("Frequency"), null=True)
amount = models.DecimalField(
_("Amount"), decimal_places=2, max_digits=12, default=0)
# Post-order offer applications can return a message to indicate what
# action was taken after the order was placed.
message = models.TextField(blank=True)
@property
def is_basket_discount(self):
return self.category == self.BASKET
@property
def is_shipping_discount(self):
return self.category == self.SHIPPING
@property
def is_post_order_action(self):
return self.category == self.DEFERRED
class Meta:
abstract = True
verbose_name = _("Order Discount")
verbose_name_plural = _("Order Discounts")
def save(self, **kwargs):
if self.offer_id and not self.offer_name:
offer = self.offer
if offer:
self.offer_name = offer.name
if self.voucher_id and not self.voucher_code:
voucher = self.voucher
if voucher:
self.voucher_code = voucher.code
super(AbstractOrderDiscount, self).save(**kwargs)
    def __unicode__(self):
        # Python 2 unicode representation; shows the amount and the order.
        return _("Discount of %(amount)r from order %(order)s") % {
            'amount': self.amount, 'order': self.order}
    @property
    def offer(self):
        """Return the originating ConditionalOffer, or None if it no longer
        exists (or ``offer_id`` is not set)."""
        Offer = models.get_model('offer', 'ConditionalOffer')
        try:
            return Offer.objects.get(id=self.offer_id)
        except Offer.DoesNotExist:
            return None
    @property
    def voucher(self):
        """Return the originating Voucher, or None if it no longer exists
        (or ``voucher_id`` is not set)."""
        Voucher = models.get_model('voucher', 'Voucher')
        try:
            return Voucher.objects.get(id=self.voucher_id)
        except Voucher.DoesNotExist:
            return None
def description(self):
if self.voucher_code:
return self.voucher_code
return self.offer_name or u""
| |
# This file is part of the Indico plugins.
# Copyright (C) 2020 - 2022 CERN and ENEA
#
# The Indico plugins are free software; you can redistribute
# them and/or modify them under the terms of the MIT License;
# see the LICENSE file for more details.
import time
from datetime import timezone

import jwt
from pytz import utc
from requests import Session
from requests.exceptions import HTTPError
def format_iso_dt(d):
    """Convert a datetime object to a UTC-based ISO-8601 string.

    :param d: The :class:`datetime.datetime` to convert to a string
    :returns: The string representation of the date (``YYYY-MM-DDTHH:MM:SSZ``)
    """
    # Use the stdlib `timezone.utc` instead of the deprecated pytz `utc`
    # object; `astimezone` produces the identical UTC conversion either way.
    return d.astimezone(timezone.utc).strftime('%Y-%m-%dT%H:%M:%SZ')
def _handle_response(resp, expected_code=200, expects_json=True):
try:
resp.raise_for_status()
if resp.status_code != expected_code:
raise HTTPError(f'Unexpected status code {resp.status_code}', response=resp)
except HTTPError:
from indico_vc_zoom.plugin import ZoomPlugin
ZoomPlugin.logger.error('Error in API call to %s: %s', resp.url, resp.content)
raise
return resp.json() if expects_json else resp
class APIException(Exception):
    """Generic error raised for Zoom API failures."""
    pass
class BaseComponent:
    """Base class for the Zoom API resource wrappers.

    Stores the connection settings shared by all components and builds
    an authenticated ``requests`` session on demand.
    """

    def __init__(self, base_uri, config, timeout):
        self.base_uri = base_uri
        self.config = config
        self.timeout = timeout

    @property
    def token(self):
        """Generate a short-lived (1 hour) JWT from the configured key/secret."""
        jwt_header = {'alg': 'HS256', 'typ': 'JWT'}
        jwt_payload = {'iss': self.config['api_key'], 'exp': int(time.time() + 3600)}
        return jwt.encode(jwt_payload, self.config['api_secret'], algorithm='HS256', headers=jwt_header)

    @property
    def session(self):
        """Return a fresh session sending JSON with a newly minted bearer token."""
        http = Session()
        http.headers = {
            'Content-Type': 'application/json',
            'Authorization': f'Bearer {self.token}'
        }
        return http
class MeetingComponent(BaseComponent):
    """Wrapper for the Zoom ``/meetings`` endpoints."""

    def list(self, user_id, **kwargs):
        """List a user's meetings; extra kwargs become query parameters."""
        # Bug fix: this previously called `self.get(...)`, whose signature is
        # `get(meeting_id)`, so the full listing URL was interpolated into
        # `/meetings/<url>` instead of being requested directly.
        return self.session.get(
            f'{self.base_uri}/users/{user_id}/meetings', params=kwargs
        )

    def create(self, user_id, **kwargs):
        """Create a meeting for the given user from keyword attributes."""
        if kwargs.get('start_time'):
            kwargs['start_time'] = format_iso_dt(kwargs['start_time'])
        return self.session.post(
            f'{self.base_uri}/users/{user_id}/meetings',
            json=kwargs
        )

    def get(self, meeting_id, **kwargs):
        """Retrieve a single meeting by its ID."""
        return self.session.get(f'{self.base_uri}/meetings/{meeting_id}', json=kwargs)

    def update(self, meeting_id, **kwargs):
        """Update a meeting's attributes."""
        if kwargs.get('start_time'):
            kwargs['start_time'] = format_iso_dt(kwargs['start_time'])
        return self.session.patch(
            f'{self.base_uri}/meetings/{meeting_id}', json=kwargs
        )

    def delete(self, meeting_id, **kwargs):
        """Delete a meeting by its ID."""
        return self.session.delete(
            f'{self.base_uri}/meetings/{meeting_id}', json=kwargs
        )
class WebinarComponent(BaseComponent):
    """Wrapper for the Zoom ``/webinars`` endpoints."""

    def list(self, user_id, **kwargs):
        """List a user's webinars; extra kwargs become query parameters."""
        # Bug fix: this previously called `self.get(...)`, whose signature is
        # `get(meeting_id)`, so the full listing URL was interpolated into
        # `/webinars/<url>` instead of being requested directly.
        return self.session.get(
            f'{self.base_uri}/users/{user_id}/webinars', params=kwargs
        )

    def create(self, user_id, **kwargs):
        """Create a webinar for the given user from keyword attributes."""
        if kwargs.get('start_time'):
            kwargs['start_time'] = format_iso_dt(kwargs['start_time'])
        return self.session.post(
            f'{self.base_uri}/users/{user_id}/webinars',
            json=kwargs
        )

    def get(self, meeting_id, **kwargs):
        """Retrieve a single webinar by its ID."""
        return self.session.get(f'{self.base_uri}/webinars/{meeting_id}', json=kwargs)

    def update(self, meeting_id, **kwargs):
        """Update a webinar's attributes."""
        if kwargs.get('start_time'):
            kwargs['start_time'] = format_iso_dt(kwargs['start_time'])
        return self.session.patch(
            f'{self.base_uri}/webinars/{meeting_id}', json=kwargs
        )

    def delete(self, meeting_id, **kwargs):
        """Delete a webinar by its ID."""
        return self.session.delete(
            f'{self.base_uri}/webinars/{meeting_id}', json=kwargs
        )
class UserComponent(BaseComponent):
    """Wrapper for the Zoom ``/users`` endpoints."""

    def me(self):
        """Retrieve the user owning the API credentials (Zoom's 'me' alias)."""
        return self.get('me')

    def list(self, **kwargs):
        """List users; extra kwargs become query parameters."""
        return self.session.get(f'{self.base_uri}/users', params=kwargs)

    def create(self, **kwargs):
        # NOTE(review): this sends the attributes as query params, unlike the
        # meeting/webinar components which POST a JSON body -- confirm this is
        # intentional for the Zoom users endpoint.
        return self.session.post(f'{self.base_uri}/users', params=kwargs)

    def update(self, user_id, **kwargs):
        """Update a user's attributes (sent as query params)."""
        return self.session.patch(f'{self.base_uri}/users/{user_id}', params=kwargs)

    def delete(self, user_id, **kwargs):
        """Delete a user by ID."""
        return self.session.delete(f'{self.base_uri}/users/{user_id}', params=kwargs)

    def get(self, user_id, **kwargs):
        """Retrieve a single user by ID (or the literal 'me')."""
        return self.session.get(f'{self.base_uri}/users/{user_id}', params=kwargs)
class ZoomClient:
    """Zoom REST API Python Client.

    Instantiates one wrapper per Zoom resource type (user, meeting,
    webinar), all sharing the same base URI, credentials and timeout.
    """

    BASE_URI = 'https://api.zoom.us/v2'

    _components = {
        'user': UserComponent,
        'meeting': MeetingComponent,
        'webinar': WebinarComponent
    }

    def __init__(self, api_key, api_secret, timeout=15):
        """Create a new Zoom client.

        :param api_key: the Zoom JWT API key
        :param api_secret: the Zoom JWT API Secret
        :param timeout: the time out to use for API requests
        """
        credentials = {
            'api_key': api_key,
            'api_secret': api_secret
        }
        # Build one instance of each registered component class.
        self.components = {}
        for name, component_cls in self._components.items():
            self.components[name] = component_cls(
                base_uri=self.BASE_URI, config=credentials, timeout=timeout)

    @property
    def meeting(self):
        """Get the meeting component."""
        return self.components['meeting']

    @property
    def user(self):
        """Get the user component."""
        return self.components['user']

    @property
    def webinar(self):
        """Get the webinar component."""
        return self.components['webinar']
class ZoomIndicoClient:
    """High-level Zoom client used by the Indico plugin.

    Wraps :class:`ZoomClient` with the credentials taken from the plugin
    settings and funnels every response through ``_handle_response`` so
    errors are logged and raised uniformly.
    """

    def __init__(self):
        # Imported lazily to avoid a circular import with the plugin module.
        from indico_vc_zoom.plugin import ZoomPlugin
        self.client = ZoomClient(
            ZoomPlugin.settings.get('api_key'),
            ZoomPlugin.settings.get('api_secret')
        )

    def create_meeting(self, user_id, **kwargs):
        # Zoom returns 201 Created with the meeting data on success.
        return _handle_response(self.client.meeting.create(user_id, **kwargs), 201)

    def get_meeting(self, meeting_id):
        return _handle_response(self.client.meeting.get(meeting_id))

    def update_meeting(self, meeting_id, data):
        # 204 No Content -> nothing to JSON-decode.
        return _handle_response(self.client.meeting.update(meeting_id, **data), 204, expects_json=False)

    def delete_meeting(self, meeting_id):
        return _handle_response(self.client.meeting.delete(meeting_id), 204, expects_json=False)

    def create_webinar(self, user_id, **kwargs):
        return _handle_response(self.client.webinar.create(user_id, **kwargs), 201)

    def get_webinar(self, webinar_id):
        return _handle_response(self.client.webinar.get(webinar_id))

    def update_webinar(self, webinar_id, data):
        return _handle_response(self.client.webinar.update(webinar_id, **data), 204, expects_json=False)

    def delete_webinar(self, webinar_id):
        return _handle_response(self.client.webinar.delete(webinar_id), 204, expects_json=False)

    def get_user(self, user_id, silent=False):
        """Fetch a user; with ``silent=True`` a 404 yields None instead of raising."""
        resp = self.client.user.get(user_id)
        if resp.status_code == 404 and silent:
            return None
        return _handle_response(resp)
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Red Hat, Inc.
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
gettext for openstack-common modules.
Usual usage in an openstack.common module:
from dragon.openstack.common.gettextutils import _
"""
import copy
import gettext
import logging
import os
import re
try:
import UserString as _userString
except ImportError:
import collections as _userString
from babel import localedata
import six
# Locale directory override for the 'dragon' domain (DRAGON_LOCALEDIR) and
# the eagerly-created translation object used by the non-lazy _() path.
_localedir = os.environ.get('dragon'.upper() + '_LOCALEDIR')
_t = gettext.translation('dragon', localedir=_localedir, fallback=True)
# Cache of per-domain language lists, filled by get_available_languages().
_AVAILABLE_LANGUAGES = {}
# Module-level flag toggled by enable_lazy(); controls _() behaviour.
USE_LAZY = False
def enable_lazy():
    """Convenience function for configuring _() to use lazy gettext

    Call this at the start of execution to enable the gettextutils._
    function to use lazy gettext functionality. This is useful if
    your project is importing _ directly instead of using the
    gettextutils.install() way of importing the _ function.
    """
    # Flip the module-level switch read by _() on every call.
    global USE_LAZY
    USE_LAZY = True
def _(msg):
    # Translate *msg* immediately, or wrap it in a lazily-translated Message
    # (see enable_lazy()) so the locale can be chosen later.
    if USE_LAZY:
        return Message(msg, 'dragon')
    else:
        if six.PY3:
            return _t.gettext(msg)
        # Python 2: ugettext returns unicode rather than encoded bytes.
        return _t.ugettext(msg)
def install(domain, lazy=False):
    """Install a _() function using the given translation domain.

    Given a translation domain, install a _() function using gettext's
    install() function.
    The main difference from gettext.install() is that we allow
    overriding the default localedir (e.g. /usr/share/locale) using
    a translation-domain-specific environment variable (e.g.
    NOVA_LOCALEDIR).
    :param domain: the translation domain
    :param lazy: indicates whether or not to install the lazy _() function.
                 The lazy _() introduces a way to do deferred translation
                 of messages by installing a _ that builds Message objects,
                 instead of strings, which can then be lazily translated into
                 any available locale.
    """
    if lazy:
        # NOTE(mrodden): Lazy gettext functionality.
        #
        # The following introduces a deferred way to do translations on
        # messages in OpenStack. We override the standard _() function
        # and % (format string) operation to build Message objects that can
        # later be translated when we have more information.
        #
        # Also included below is an example LocaleHandler that translates
        # Messages to an associated locale, effectively allowing many logs,
        # each with their own locale.
        def _lazy_gettext(msg):
            """Create and return a Message object.

            Lazy gettext function for a given domain, it is a factory method
            for a project/module to get a lazy gettext function for its own
            translation domain (i.e. nova, glance, cinder, etc.)
            Message encapsulates a string so that we can translate
            it later when needed.
            """
            return Message(msg, domain)
        # Install _() into builtins so every module sees the lazy version.
        from six import moves
        moves.builtins.__dict__['_'] = _lazy_gettext
    else:
        # Non-lazy path: delegate to gettext.install(), honouring the
        # domain-specific localedir override (e.g. NOVA_LOCALEDIR).
        localedir = '%s_LOCALEDIR' % domain.upper()
        if six.PY3:
            gettext.install(domain,
                            localedir=os.environ.get(localedir))
        else:
            # Python 2 only: request unicode strings from gettext.
            gettext.install(domain,
                            localedir=os.environ.get(localedir),
                            unicode=True)
class Message(_userString.UserString, object):
    """Class used to encapsulate translatable messages.

    A Message stores the original gettext msgid plus any concatenated
    text and substitution parameters, so the final string can be rendered
    (translated) later, once a locale is known.
    """
    def __init__(self, msg, domain):
        # _msg is the gettext msgid and should never change
        self._msg = msg
        # Text concatenated via +/+ = kept outside the translatable msgid.
        self._left_extra_msg = ''
        self._right_extra_msg = ''
        # Locale to translate into; None means "use the system locale".
        self._locale = None
        # Parameters captured from a later `%` operation (see __mod__).
        self.params = None
        self.domain = domain

    @property
    def data(self):
        # NOTE(mrodden): this should always resolve to a unicode string
        # that best represents the state of the message currently
        localedir = os.environ.get(self.domain.upper() + '_LOCALEDIR')
        if self.locale:
            lang = gettext.translation(self.domain,
                                       localedir=localedir,
                                       languages=[self.locale],
                                       fallback=True)
        else:
            # use system locale for translations
            lang = gettext.translation(self.domain,
                                       localedir=localedir,
                                       fallback=True)

        if six.PY3:
            ugettext = lang.gettext
        else:
            ugettext = lang.ugettext

        # Translate only the original msgid; re-attach concatenated text.
        full_msg = (self._left_extra_msg +
                    ugettext(self._msg) +
                    self._right_extra_msg)

        # Apply any stored % substitution parameters after translation.
        if self.params is not None:
            full_msg = full_msg % self.params

        return six.text_type(full_msg)

    @property
    def locale(self):
        return self._locale

    @locale.setter
    def locale(self, value):
        self._locale = value
        if not self.params:
            return

        # This Message object may have been constructed with one or more
        # Message objects as substitution parameters, given as a single
        # Message, or a tuple or Map containing some, so when setting the
        # locale for this Message we need to set it for those Messages too.
        if isinstance(self.params, Message):
            self.params.locale = value
            return
        if isinstance(self.params, tuple):
            for param in self.params:
                if isinstance(param, Message):
                    param.locale = value
            return
        if isinstance(self.params, dict):
            for param in self.params.values():
                if isinstance(param, Message):
                    param.locale = value

    def _save_dictionary_parameter(self, dict_param):
        # Keep only the dict entries the format string actually references,
        # so unrelated (possibly large/uncopyable) values are not retained.
        full_msg = self.data
        # look for %(blah) fields in string;
        # ignore %% and deal with the
        # case where % is first character on the line
        keys = re.findall('(?:[^%]|^)?%\((\w*)\)[a-z]', full_msg)

        # if we don't find any %(blah) blocks but have a %s
        if not keys and re.findall('(?:[^%]|^)%[a-z]', full_msg):
            # apparently the full dictionary is the parameter
            params = copy.deepcopy(dict_param)
        else:
            params = {}
            for key in keys:
                try:
                    params[key] = copy.deepcopy(dict_param[key])
                except TypeError:
                    # cast uncopyable thing to unicode string
                    params[key] = six.text_type(dict_param[key])

        return params

    def _save_parameters(self, other):
        # we check for None later to see if
        # we actually have parameters to inject,
        # so encapsulate if our parameter is actually None
        if other is None:
            self.params = (other, )
        elif isinstance(other, dict):
            self.params = self._save_dictionary_parameter(other)
        else:
            # fallback to casting to unicode,
            # this will handle the problematic python code-like
            # objects that cannot be deep-copied
            try:
                self.params = copy.deepcopy(other)
            except TypeError:
                self.params = six.text_type(other)

        return self

    # overrides to be more string-like
    def __unicode__(self):
        return self.data

    def __str__(self):
        if six.PY3:
            return self.__unicode__()
        return self.data.encode('utf-8')

    def __getstate__(self):
        # Pickle support: deep-copy only the attributes that define the
        # message so it can be reconstructed (and translated) elsewhere.
        to_copy = ['_msg', '_right_extra_msg', '_left_extra_msg',
                   'domain', 'params', '_locale']
        new_dict = self.__dict__.fromkeys(to_copy)
        for attr in to_copy:
            new_dict[attr] = copy.deepcopy(self.__dict__[attr])

        return new_dict

    def __setstate__(self, state):
        for (k, v) in state.items():
            setattr(self, k, v)

    # operator overloads
    def __add__(self, other):
        # Concatenation keeps the msgid intact; extra text is stored aside.
        copied = copy.deepcopy(self)
        copied._right_extra_msg += other.__str__()
        return copied

    def __radd__(self, other):
        copied = copy.deepcopy(self)
        copied._left_extra_msg += other.__str__()
        return copied

    def __mod__(self, other):
        # do a format string to catch and raise
        # any possible KeyErrors from missing parameters
        self.data % other
        copied = copy.deepcopy(self)
        return copied._save_parameters(other)

    def __mul__(self, other):
        return self.data * other

    def __rmul__(self, other):
        return other * self.data

    def __getitem__(self, key):
        return self.data[key]

    def __getslice__(self, start, end):
        return self.data.__getslice__(start, end)

    def __getattribute__(self, name):
        # NOTE(mrodden): handle lossy operations that we can't deal with yet
        # These override the UserString implementation, since UserString
        # uses our __class__ attribute to try and build a new message
        # after running the inner data string through the operation.
        # At that point, we have lost the gettext message id and can just
        # safely resolve to a string instead.
        ops = ['capitalize', 'center', 'decode', 'encode',
               'expandtabs', 'ljust', 'lstrip', 'replace', 'rjust', 'rstrip',
               'strip', 'swapcase', 'title', 'translate', 'upper', 'zfill']
        if name in ops:
            return getattr(self.data, name)
        else:
            return _userString.UserString.__getattribute__(self, name)
def get_available_languages(domain):
    """Lists the available languages for the given translation domain.

    :param domain: the domain to get languages for
    :returns: a list of language codes, always starting with 'en_US'
    """
    # Serve from the module-level cache when possible; copies are returned
    # so callers cannot mutate the cached list.
    if domain in _AVAILABLE_LANGUAGES:
        return copy.copy(_AVAILABLE_LANGUAGES[domain])

    localedir = '%s_LOCALEDIR' % domain.upper()
    find = lambda x: gettext.find(domain,
                                  localedir=os.environ.get(localedir),
                                  languages=[x])

    # NOTE(mrodden): en_US should always be available (and first in case
    # order matters) since our in-line message strings are en_US
    language_list = ['en_US']
    # NOTE(luisg): Babel <1.0 used a function called list(), which was
    # renamed to locale_identifiers() in >=1.0, the requirements master list
    # requires >=0.9.6, uncapped, so defensively work with both. We can remove
    # this check when the master list updates to >=1.0, and all projects udpate
    list_identifiers = (getattr(localedata, 'list', None) or
                        getattr(localedata, 'locale_identifiers'))
    locale_identifiers = list_identifiers()
    # Keep only the locales for which a translation catalog actually exists.
    for i in locale_identifiers:
        if find(i) is not None:
            language_list.append(i)
    _AVAILABLE_LANGUAGES[domain] = language_list
    return copy.copy(language_list)
def get_localized_message(message, user_locale):
    """Gets a localized version of the given message in the given locale.

    Non-Message inputs are returned unchanged; a falsy locale leaves the
    Message's current locale in place.
    """
    if not isinstance(message, Message):
        return message
    if user_locale:
        message.locale = user_locale
    return six.text_type(message)
class LocaleHandler(logging.Handler):
    """Handler that can have a locale associated to translate Messages.

    A quick example of how to utilize the Message class above.
    LocaleHandler takes a locale and a target logging.Handler object
    to forward LogRecord objects to after translating the internal Message.
    """

    def __init__(self, locale, target):
        """Initialize a LocaleHandler

        :param locale: locale to use for translating messages
        :param target: logging.Handler object to forward
                       LogRecord objects to after translation
        """
        logging.Handler.__init__(self)
        self.locale = locale
        self.target = target

    def emit(self, record):
        # Only Message instances carry a locale; plain strings pass through.
        if isinstance(record.msg, Message):
            # set the locale and resolve to a string
            record.msg.locale = self.locale

        self.target.emit(record)
| |
import logging
import os
import ssl
from json import dumps as json_dumps
from json import loads as json_loads
from urllib.parse import urlparse
import pytest
from sanic import Sanic
from sanic import Blueprint
from sanic.exceptions import ServerError
from sanic.request import DEFAULT_HTTP_CONTENT_TYPE
from sanic.response import json, text
from sanic.testing import HOST, PORT
# ------------------------------------------------------------ #
# GET
# ------------------------------------------------------------ #
def test_sync(app):
    # A plain (non-async) handler is served correctly.
    @app.route("/")
    def handler(request):
        return text("Hello")

    request, response = app.test_client.get("/")
    assert response.text == "Hello"
def test_remote_address(app):
    # request.ip reflects the test client's loopback address.
    @app.route("/")
    def handler(request):
        return text("{}".format(request.ip))

    request, response = app.test_client.get("/")
    assert response.text == "127.0.0.1"
def test_text(app):
    # An async handler returning text() is served correctly.
    @app.route("/")
    async def handler(request):
        return text("Hello")

    request, response = app.test_client.get("/")
    assert response.text == "Hello"
def test_headers(app):
    # Custom response headers are passed through to the client.
    @app.route("/")
    async def handler(request):
        headers = {"spam": "great"}
        return text("Hello", headers=headers)

    request, response = app.test_client.get("/")
    assert response.headers.get("spam") == "great"
def test_non_str_headers(app):
    # Non-string header values are coerced to strings on the wire.
    @app.route("/")
    async def handler(request):
        headers = {"answer": 42}
        return text("Hello", headers=headers)

    request, response = app.test_client.get("/")
    assert response.headers.get("answer") == "42"
def test_invalid_response(app):
    # Returning a non-HTTPResponse object raises ServerError, which is
    # routed through the registered exception handler.
    @app.exception(ServerError)
    def handler_exception(request, exception):
        return text("Internal Server Error.", 500)

    @app.route("/")
    async def handler(request):
        return "This should fail"

    request, response = app.test_client.get("/")
    assert response.status == 500
    assert response.text == "Internal Server Error."
def test_json(app):
    # json() responses round-trip through JSON encoding.
    @app.route("/")
    async def handler(request):
        return json({"test": True})

    request, response = app.test_client.get("/")

    results = json_loads(response.text)

    assert results.get("test") is True
def test_empty_json(app):
    # A request without a body has request.json == None, serialised as "null".
    @app.route("/")
    async def handler(request):
        assert request.json is None
        return json(request.json)

    request, response = app.test_client.get("/")
    assert response.status == 200
    assert response.text == "null"
def test_invalid_json(app):
    # Accessing request.json on a non-JSON body yields a 400 response.
    @app.route("/")
    async def handler(request):
        return json(request.json)

    data = "I am not json"
    request, response = app.test_client.get("/", data=data)

    assert response.status == 400
def test_query_string(app):
    # request.args.get returns the FIRST value for a repeated parameter.
    @app.route("/")
    async def handler(request):
        return text("OK")

    request, response = app.test_client.get(
        "/", params=[("test1", "1"), ("test2", "false"), ("test2", "true")]
    )

    assert request.args.get("test1") == "1"
    assert request.args.get("test2") == "false"
def test_uri_template(app):
    # request.uri_template exposes the original route pattern, not the match.
    @app.route("/foo/<id:int>/bar/<name:[A-z]+>")
    async def handler(request, id, name):
        return text("OK")

    request, response = app.test_client.get("/foo/123/bar/baz")
    assert request.uri_template == "/foo/<id:int>/bar/<name:[A-z]+>"
def test_token(app):
    # request.token strips the optional "Token "/"Bearer " prefix from the
    # Authorization header, and is None when the header is absent.
    @app.route("/")
    async def handler(request):
        return text("OK")

    # uuid4 generated token.
    token = "a1d895e0-553a-421a-8e22-5ff8ecb48cbf"
    headers = {
        "content-type": "application/json",
        "Authorization": "{}".format(token),
    }

    request, response = app.test_client.get("/", headers=headers)

    assert request.token == token

    token = "a1d895e0-553a-421a-8e22-5ff8ecb48cbf"
    headers = {
        "content-type": "application/json",
        "Authorization": "Token {}".format(token),
    }

    request, response = app.test_client.get("/", headers=headers)

    assert request.token == token

    token = "a1d895e0-553a-421a-8e22-5ff8ecb48cbf"
    headers = {
        "content-type": "application/json",
        "Authorization": "Bearer {}".format(token),
    }

    request, response = app.test_client.get("/", headers=headers)

    assert request.token == token

    # no Authorization headers
    headers = {"content-type": "application/json"}

    request, response = app.test_client.get("/", headers=headers)

    assert request.token is None
def test_content_type(app):
    # request.content_type falls back to the default when not supplied.
    @app.route("/")
    async def handler(request):
        return text(request.content_type)

    request, response = app.test_client.get("/")
    assert request.content_type == DEFAULT_HTTP_CONTENT_TYPE
    assert response.text == DEFAULT_HTTP_CONTENT_TYPE

    headers = {"content-type": "application/json"}
    request, response = app.test_client.get("/", headers=headers)
    assert request.content_type == "application/json"
    assert response.text == "application/json"
def test_remote_addr(app):
    # request.remote_addr is the first non-empty X-Forwarded-For entry,
    # or the empty string when the header is missing.
    @app.route("/")
    async def handler(request):
        return text(request.remote_addr)

    headers = {"X-Forwarded-For": "127.0.0.1, 127.0.1.2"}
    request, response = app.test_client.get("/", headers=headers)
    assert request.remote_addr == "127.0.0.1"
    assert response.text == "127.0.0.1"

    request, response = app.test_client.get("/")
    assert request.remote_addr == ""
    assert response.text == ""

    # Empty entries in the forwarded chain are skipped.
    headers = {"X-Forwarded-For": "127.0.0.1, , ,,127.0.1.2"}
    request, response = app.test_client.get("/", headers=headers)
    assert request.remote_addr == "127.0.0.1"
    assert response.text == "127.0.0.1"
def test_match_info(app):
    # request.match_info maps route placeholder names to matched values.
    @app.route("/api/v1/user/<user_id>/")
    async def handler(request, user_id):
        return json(request.match_info)

    request, response = app.test_client.get("/api/v1/user/sanic_user/")
    assert request.match_info == {"user_id": "sanic_user"}
    assert json_loads(response.text) == {"user_id": "sanic_user"}
# ------------------------------------------------------------ #
# POST
# ------------------------------------------------------------ #
def test_post_json(app):
    # A JSON POST body is parsed once and cached on the request.
    @app.route("/", methods=["POST"])
    async def handler(request):
        return text("OK")

    payload = {"test": "OK"}
    headers = {"content-type": "application/json"}

    request, response = app.test_client.post(
        "/", data=json_dumps(payload), headers=headers
    )

    assert request.json.get("test") == "OK"
    assert request.json.get("test") == "OK"  # for request.parsed_json
    assert response.text == "OK"
def test_post_form_urlencoded(app):
    # A urlencoded form body is parsed once and cached on the request.
    @app.route("/", methods=["POST"])
    async def handler(request):
        return text("OK")

    payload = "test=OK"
    headers = {"content-type": "application/x-www-form-urlencoded"}

    request, response = app.test_client.post(
        "/", data=payload, headers=headers
    )

    assert request.form.get("test") == "OK"
    assert request.form.get("test") == "OK"  # For request.parsed_form
# The Content-Disposition header must be parsed case-insensitively.
@pytest.mark.parametrize(
    "payload",
    [
        "------sanic\r\n"
        'Content-Disposition: form-data; name="test"\r\n'
        "\r\n"
        "OK\r\n"
        "------sanic--\r\n",
        "------sanic\r\n"
        'content-disposition: form-data; name="test"\r\n'
        "\r\n"
        "OK\r\n"
        "------sanic--\r\n",
    ],
)
def test_post_form_multipart_form_data(app, payload):
    @app.route("/", methods=["POST"])
    async def handler(request):
        return text("OK")

    headers = {"content-type": "multipart/form-data; boundary=----sanic"}

    request, response = app.test_client.post(data=payload, headers=headers)

    assert request.form.get("test") == "OK"
# request.url and its components are consistent for plain-HTTP requests.
@pytest.mark.parametrize(
    "path,query,expected_url",
    [
        ("/foo", "", "http://{}:{}/foo"),
        ("/bar/baz", "", "http://{}:{}/bar/baz"),
        ("/moo/boo", "arg1=val1", "http://{}:{}/moo/boo?arg1=val1"),
    ],
)
def test_url_attributes_no_ssl(app, path, query, expected_url):
    async def handler(request):
        return text("OK")

    app.add_route(handler, path)

    request, response = app.test_client.get(path + "?{}".format(query))
    assert request.url == expected_url.format(HOST, PORT)

    parsed = urlparse(request.url)

    assert parsed.scheme == request.scheme
    assert parsed.path == request.path
    assert parsed.query == request.query_string
    assert parsed.netloc == request.host
# Same URL-component consistency, with TLS configured via an SSLContext.
@pytest.mark.parametrize(
    "path,query,expected_url",
    [
        ("/foo", "", "https://{}:{}/foo"),
        ("/bar/baz", "", "https://{}:{}/bar/baz"),
        ("/moo/boo", "arg1=val1", "https://{}:{}/moo/boo?arg1=val1"),
    ],
)
def test_url_attributes_with_ssl_context(app, path, query, expected_url):
    current_dir = os.path.dirname(os.path.realpath(__file__))
    context = ssl.create_default_context(purpose=ssl.Purpose.CLIENT_AUTH)
    context.load_cert_chain(
        os.path.join(current_dir, "certs/selfsigned.cert"),
        keyfile=os.path.join(current_dir, "certs/selfsigned.key"),
    )

    async def handler(request):
        return text("OK")

    app.add_route(handler, path)

    request, response = app.test_client.get(
        "https://{}:{}".format(HOST, PORT) + path + "?{}".format(query),
        server_kwargs={"ssl": context},
    )
    assert request.url == expected_url.format(HOST, PORT)

    parsed = urlparse(request.url)

    assert parsed.scheme == request.scheme
    assert parsed.path == request.path
    assert parsed.query == request.query_string
    assert parsed.netloc == request.host
# Same URL-component consistency, with TLS configured via a cert/key dict.
@pytest.mark.parametrize(
    "path,query,expected_url",
    [
        ("/foo", "", "https://{}:{}/foo"),
        ("/bar/baz", "", "https://{}:{}/bar/baz"),
        ("/moo/boo", "arg1=val1", "https://{}:{}/moo/boo?arg1=val1"),
    ],
)
def test_url_attributes_with_ssl_dict(app, path, query, expected_url):
    current_dir = os.path.dirname(os.path.realpath(__file__))
    ssl_cert = os.path.join(current_dir, "certs/selfsigned.cert")
    ssl_key = os.path.join(current_dir, "certs/selfsigned.key")

    ssl_dict = {"cert": ssl_cert, "key": ssl_key}

    async def handler(request):
        return text("OK")

    app.add_route(handler, path)

    request, response = app.test_client.get(
        "https://{}:{}".format(HOST, PORT) + path + "?{}".format(query),
        server_kwargs={"ssl": ssl_dict},
    )
    assert request.url == expected_url.format(HOST, PORT)

    parsed = urlparse(request.url)

    assert parsed.scheme == request.scheme
    assert parsed.path == request.path
    assert parsed.query == request.query_string
    assert parsed.netloc == request.host
def test_invalid_ssl_dict(app):
    # An ssl dict with missing cert/key raises a descriptive ValueError.
    @app.get("/test")
    async def handler(request):
        return text("ssl test")

    ssl_dict = {"cert": None, "key": None}

    with pytest.raises(ValueError) as excinfo:
        request, response = app.test_client.get(
            "/test", server_kwargs={"ssl": ssl_dict}
        )

    assert str(excinfo.value) == "SSLContext or certificate and key required."
def test_form_with_multiple_values(app):
    # getlist() preserves every value of a repeated form field, in order.
    @app.route("/", methods=["POST"])
    async def handler(request):
        return text("OK")

    payload = "selectedItems=v1&selectedItems=v2&selectedItems=v3"

    headers = {"content-type": "application/x-www-form-urlencoded"}

    request, response = app.test_client.post(
        "/", data=payload, headers=headers
    )

    assert request.form.getlist("selectedItems") == ["v1", "v2", "v3"]
def test_request_string_representation(app):
    # repr(request) shows the HTTP method and path.
    @app.route("/", methods=["GET"])
    async def get(request):
        return text("OK")

    request, _ = app.test_client.get("/")
    assert repr(request) == "<Request: GET />"
# File uploads: the filename is taken from `filename=` (possibly empty) or
# decoded from the RFC 5987 `filename*=` form; header name matching is
# case-insensitive.
@pytest.mark.parametrize(
    "payload,filename",
    [
        ("------sanic\r\n"
         'Content-Disposition: form-data; filename="filename"; name="test"\r\n'
         "\r\n"
         "OK\r\n"
         "------sanic--\r\n", "filename"),
        ("------sanic\r\n"
         'content-disposition: form-data; filename="filename"; name="test"\r\n'
         "\r\n"
         'content-type: application/json; {"field": "value"}\r\n'
         "------sanic--\r\n", "filename"),
        ("------sanic\r\n"
         'Content-Disposition: form-data; filename=""; name="test"\r\n'
         "\r\n"
         "OK\r\n"
         "------sanic--\r\n", ""),
        ("------sanic\r\n"
         'content-disposition: form-data; filename=""; name="test"\r\n'
         "\r\n"
         'content-type: application/json; {"field": "value"}\r\n'
         "------sanic--\r\n", ""),
        ("------sanic\r\n"
         'Content-Disposition: form-data; filename*="utf-8\'\'filename_%C2%A0_test"; name="test"\r\n'
         "\r\n"
         "OK\r\n"
         "------sanic--\r\n", "filename_\u00A0_test"),
        ("------sanic\r\n"
         'content-disposition: form-data; filename*="utf-8\'\'filename_%C2%A0_test"; name="test"\r\n'
         "\r\n"
         'content-type: application/json; {"field": "value"}\r\n'
         "------sanic--\r\n", "filename_\u00A0_test"),
    ],
)
def test_request_multipart_files(app, payload, filename):
    @app.route("/", methods=["POST"])
    async def post(request):
        return text("OK")

    headers = {"content-type": "multipart/form-data; boundary=----sanic"}

    request, _ = app.test_client.post(data=payload, headers=headers)
    assert request.files.get("test").name == filename
def test_request_multipart_file_with_json_content_type(app):
    # The uploaded file's Content-Type is exposed via `.type`.
    @app.route("/", methods=["POST"])
    async def post(request):
        return text("OK")

    payload = (
        "------sanic\r\n"
        'Content-Disposition: form-data; name="file"; filename="test.json"\r\n'
        "Content-Type: application/json\r\n"
        "Content-Length: 0"
        "\r\n"
        "\r\n"
        "------sanic--"
    )

    headers = {"content-type": "multipart/form-data; boundary=------sanic"}

    request, _ = app.test_client.post(data=payload, headers=headers)
    assert request.files.get("file").type == "application/json"
def test_request_multipart_file_without_field_name(app, caplog):
    # A multipart part lacking a `name` parameter is skipped with a debug log.
    @app.route("/", methods=["POST"])
    async def post(request):
        return text("OK")

    payload = (
        '------sanic\r\nContent-Disposition: form-data; filename="test.json"'
        "\r\nContent-Type: application/json\r\n\r\n\r\n------sanic--"
    )

    headers = {"content-type": "multipart/form-data; boundary=------sanic"}

    request, _ = app.test_client.post(
        data=payload, headers=headers, debug=True
    )
    with caplog.at_level(logging.DEBUG):
        request.form
    assert caplog.record_tuples[-1] == (
        "sanic.root",
        logging.DEBUG,
        "Form-data field does not have a 'name' parameter "
        "in the Content-Disposition header",
    )
# NOTE(review): 'filed' in the test name looks like a typo for 'field';
# left unchanged to keep test IDs stable.
def test_request_multipart_file_duplicate_filed_name(app):
    # Multiple parts sharing one field name are all kept and order-preserved.
    @app.route("/", methods=["POST"])
    async def post(request):
        return text("OK")

    payload = (
        "--e73ffaa8b1b2472b8ec848de833cb05b\r\n"
        'Content-Disposition: form-data; name="file"\r\n'
        "Content-Type: application/octet-stream\r\n"
        "Content-Length: 15\r\n"
        "\r\n"
        '{"test":"json"}\r\n'
        "--e73ffaa8b1b2472b8ec848de833cb05b\r\n"
        'Content-Disposition: form-data; name="file"\r\n'
        "Content-Type: application/octet-stream\r\n"
        "Content-Length: 15\r\n"
        "\r\n"
        '{"test":"json2"}\r\n'
        "--e73ffaa8b1b2472b8ec848de833cb05b--\r\n"
    )

    headers = {
        "Content-Type": "multipart/form-data; boundary=e73ffaa8b1b2472b8ec848de833cb05b"
    }

    request, _ = app.test_client.post(
        data=payload, headers=headers, debug=True
    )
    assert request.form.getlist("file") == [
        '{"test":"json"}',
        '{"test":"json2"}',
    ]
def test_request_multipart_with_multiple_files_and_type(app):
    # Multiple uploads under one field keep their individual content types.
    @app.route("/", methods=["POST"])
    async def post(request):
        return text("OK")

    payload = (
        '------sanic\r\nContent-Disposition: form-data; name="file"; filename="test.json"'
        "\r\nContent-Type: application/json\r\n\r\n\r\n"
        '------sanic\r\nContent-Disposition: form-data; name="file"; filename="some_file.pdf"\r\n'
        "Content-Type: application/pdf\r\n\r\n\r\n------sanic--"
    )
    headers = {"content-type": "multipart/form-data; boundary=------sanic"}

    request, _ = app.test_client.post(data=payload, headers=headers)
    assert len(request.files.getlist("file")) == 2
    assert request.files.getlist("file")[0].type == "application/json"
    assert request.files.getlist("file")[1].type == "application/pdf"
def test_request_repr(app):
    # repr() degrades gracefully when the method attribute is cleared.
    @app.get("/")
    def handler(request):
        return text("pass")

    request, response = app.test_client.get("/")
    assert repr(request) == "<Request: GET />"

    request.method = None
    assert repr(request) == "<Request: None />"
def test_request_bool(app):
    # A request is truthy while its transport is set, falsy otherwise.
    @app.get("/")
    def handler(request):
        return text("pass")

    request, response = app.test_client.get("/")
    assert bool(request)

    request.transport = False
    assert not bool(request)
def test_request_parsing_form_failed(app, caplog):
    # A multipart content type without a boundary makes form parsing fail,
    # which is logged as an error rather than raised.
    @app.route("/", methods=["POST"])
    async def handler(request):
        return text("OK")

    payload = "test=OK"
    headers = {"content-type": "multipart/form-data"}

    request, response = app.test_client.post(
        "/", data=payload, headers=headers
    )

    with caplog.at_level(logging.ERROR):
        request.form
    assert caplog.record_tuples[-1] == (
        "sanic.error",
        logging.ERROR,
        "Failed when parsing form",
    )
def test_request_args_no_query_string(app):
    """Without a query string, ``request.args`` is an empty mapping."""

    @app.get("/")
    def handler(request):
        return text("pass")

    request, _ = app.test_client.get("/")
    assert request.args == {}
def test_request_raw_args(app):
    """``raw_args`` mirrors the query parameters sent by the client."""
    sent = {"test": "OK"}

    @app.get("/")
    def handler(request):
        return text("pass")

    request, _ = app.test_client.get("/", params=sent)
    assert request.raw_args == sent
def test_request_cookies(app):
    """Cookies sent by the client round-trip into ``request.cookies``."""
    sent = {"test": "OK"}

    @app.get("/")
    def handler(request):
        return text("OK")

    request, _ = app.test_client.get("/", cookies=sent)
    assert request.cookies == sent
    # Second access exercises the cached ``request._cookies`` path.
    assert request.cookies == sent
def test_request_cookies_without_cookies(app):
    """With no Cookie header, ``request.cookies`` is an empty dict."""

    @app.get("/")
    def handler(request):
        return text("OK")

    request, _ = app.test_client.get("/")
    assert request.cookies == {}
def test_request_port(app):
    """``request.port`` is an int and is re-derived after its cache is dropped."""

    @app.get("/")
    def handler(request):
        return text("OK")

    request, _ = app.test_client.get("/")
    assert isinstance(request.port, int)

    # Remove the cached values and confirm the property rebuilds them.
    delattr(request, "_socket")
    delattr(request, "_port")
    assert isinstance(request.port, int)
    assert hasattr(request, "_socket")
    assert hasattr(request, "_port")
def test_request_socket(app):
    """``request.socket`` is an (ip, port) tuple consistent with the request."""

    @app.get("/")
    def handler(request):
        return text("OK")

    request, _ = app.test_client.get("/")
    sock = request.socket  # avoid shadowing the stdlib ``socket`` name
    assert isinstance(sock, tuple)
    assert sock[0] == request.ip
    assert sock[1] == request.port

    # Removing the cache forces the property to rebuild it.
    delattr(request, "_socket")
    assert isinstance(request.socket, tuple)
    assert hasattr(request, "_socket")
def test_request_form_invalid_content_type(app):
    """A JSON body does not populate ``request.form``."""

    @app.route("/", methods=["POST"])
    async def post(request):
        return text("OK")

    request, _ = app.test_client.post("/", json={"test": "OK"})
    assert request.form == {}
def test_endpoint_basic():
    """An unnamed app prefixes endpoint names with the test module name."""
    app = Sanic()

    @app.route("/")
    def my_unique_handler(request):
        return text("Hello")

    request, _ = app.test_client.get("/")
    assert request.endpoint == "test_requests.my_unique_handler"
def test_endpoint_named_app():
    """A named app prefixes endpoint names with its own name."""
    app = Sanic("named")

    @app.route("/")
    def my_unique_handler(request):
        return text("Hello")

    request, _ = app.test_client.get("/")
    assert request.endpoint == "named.my_unique_handler"
def test_endpoint_blueprint():
    """Blueprint routes report ``<app>.<blueprint>.<handler>`` endpoints."""
    bp = Blueprint("my_blueprint", url_prefix="/bp")

    @bp.route("/")
    async def bp_root(request):
        return text("Hello")

    app = Sanic("named")
    app.blueprint(bp)

    request, _ = app.test_client.get("/bp")
    assert request.endpoint == "named.my_blueprint.bp_root"
| |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_serialization import jsonutils
from oslo_utils import uuidutils
from nova import context
from nova import exception
from nova import objects
from nova.objects import build_request
from nova import test
from nova.tests import fixtures
from nova.tests.unit import fake_build_request
from nova.tests.unit import fake_instance
class BuildRequestTestCase(test.NoDBTestCase):
    """CRUD round-trip tests for the BuildRequest object against the API DB."""
    USES_DB_SELF = True

    def setUp(self):
        super(BuildRequestTestCase, self).setUp()
        # NOTE: This means that we're using a database for this test suite
        # despite inheriting from NoDBTestCase
        self.useFixture(fixtures.Database(database='api'))
        self.context = context.RequestContext('fake-user', 'fake-project')
        self.build_req_obj = build_request.BuildRequest()
        self.instance_uuid = uuidutils.generate_uuid()
        self.project_id = 'fake-project'

    def _create_req(self):
        # Persist a fake build-request row (dropping the autoincrement id so
        # the DB assigns one) and hydrate it back through _from_db_object.
        args = fake_build_request.fake_db_req()
        args.pop('id', None)
        args['instance_uuid'] = self.instance_uuid
        args['project_id'] = self.project_id
        return build_request.BuildRequest._from_db_object(
            self.context,
            self.build_req_obj,
            self.build_req_obj._create_in_db(self.context, args))

    def test_get_by_instance_uuid_not_found(self):
        # Looking up a UUID that was never created raises.
        self.assertRaises(
            exception.BuildRequestNotFound,
            self.build_req_obj._get_by_instance_uuid_from_db, self.context,
            self.instance_uuid)

    def test_get_by_uuid(self):
        expected_req = self._create_req()
        req_obj = self.build_req_obj.get_by_instance_uuid(self.context,
                                                          self.instance_uuid)
        # Every declared field must survive the DB round trip.
        for key in self.build_req_obj.fields.keys():
            expected = getattr(expected_req, key)
            db_value = getattr(req_obj, key)
            if key == 'instance':
                self.assertTrue(objects.base.obj_equal_prims(expected,
                                                             db_value))
                continue
            elif key in ('block_device_mappings', 'tags'):
                self.assertEqual(1, len(db_value))
                # Can't compare list objects directly, just compare the single
                # item they contain.
                self.assertTrue(objects.base.obj_equal_prims(expected[0],
                                                             db_value[0]))
                continue
            self.assertEqual(expected, db_value)

    def test_destroy(self):
        self._create_req()
        db_req = self.build_req_obj.get_by_instance_uuid(self.context,
                                                         self.instance_uuid)
        db_req.destroy()
        self.assertRaises(
            exception.BuildRequestNotFound,
            self.build_req_obj._get_by_instance_uuid_from_db, self.context,
            self.instance_uuid)

    def test_destroy_twice_raises(self):
        self._create_req()
        db_req = self.build_req_obj.get_by_instance_uuid(self.context,
                                                         self.instance_uuid)
        db_req.destroy()
        # A second destroy must not silently succeed.
        self.assertRaises(exception.BuildRequestNotFound, db_req.destroy)

    def test_save(self):
        self._create_req()
        db_req = self.build_req_obj.get_by_instance_uuid(self.context,
                                                         self.instance_uuid)
        db_req.project_id = 'foobar'
        db_req.save()
        updated_req = self.build_req_obj.get_by_instance_uuid(
            self.context, self.instance_uuid)
        self.assertEqual('foobar', updated_req.project_id)

    def test_save_not_found(self):
        self._create_req()
        db_req = self.build_req_obj.get_by_instance_uuid(self.context,
                                                         self.instance_uuid)
        db_req.project_id = 'foobar'
        db_req.destroy()
        # Saving a destroyed request raises rather than re-creating it.
        self.assertRaises(exception.BuildRequestNotFound, db_req.save)
class BuildRequestListTestCase(test.NoDBTestCase):
    """Tests for BuildRequestList query/filter/sort/pagination behavior."""
    USES_DB_SELF = True

    def setUp(self):
        super(BuildRequestListTestCase, self).setUp()
        # NOTE: This means that we're using a database for this test suite
        # despite inheriting from NoDBTestCase
        self.useFixture(fixtures.Database(database='api'))
        self.project_id = 'fake-project'
        self.context = context.RequestContext('fake-user', self.project_id)

    def _create_req(self, project_id=None, instance=None):
        # Persist a fake build-request row, optionally embedding a specific
        # serialized instance and/or overriding the project id.
        kwargs = {}
        if instance:
            kwargs['instance'] = jsonutils.dumps(instance.obj_to_primitive())
        args = fake_build_request.fake_db_req(**kwargs)
        args.pop('id', None)
        args['instance_uuid'] = uuidutils.generate_uuid()
        args['project_id'] = self.project_id if not project_id else project_id
        return build_request.BuildRequest._from_db_object(
            self.context,
            build_request.BuildRequest(),
            build_request.BuildRequest._create_in_db(self.context, args))

    def test_get_all_empty(self):
        req_objs = build_request.BuildRequestList.get_all(self.context)
        self.assertEqual([], req_objs.objects)

    def test_get_all(self):
        reqs = [self._create_req(), self._create_req()]
        req_list = build_request.BuildRequestList.get_all(self.context)
        self.assertEqual(2, len(req_list))
        for i in range(len(req_list)):
            self.assertEqual(reqs[i].instance_uuid, req_list[i].instance_uuid)
            self.assertTrue(objects.base.obj_equal_prims(reqs[i].instance,
                                                         req_list[i].instance))

    def test_get_all_filter_by_project_id(self):
        # get_all scopes results to the context's project, so the request
        # created under the 'filter' project is excluded.
        reqs = [self._create_req(), self._create_req(project_id='filter')]
        req_list = build_request.BuildRequestList.get_all(self.context)
        self.assertEqual(1, len(req_list))
        self.assertEqual(reqs[0].project_id, req_list[0].project_id)
        self.assertEqual(reqs[0].instance_uuid, req_list[0].instance_uuid)
        self.assertTrue(objects.base.obj_equal_prims(reqs[0].instance,
                                                     req_list[0].instance))

    def test_get_all_bypass_project_id_filter_as_admin(self):
        # An elevated (admin) context sees requests from all projects.
        reqs = [self._create_req(), self._create_req(project_id='filter')]
        req_list = build_request.BuildRequestList.get_all(
            self.context.elevated())
        self.assertEqual(2, len(req_list))
        for i in range(len(req_list)):
            self.assertEqual(reqs[i].project_id, req_list[i].project_id)
            self.assertEqual(reqs[i].instance_uuid, req_list[i].instance_uuid)
            self.assertTrue(objects.base.obj_equal_prims(reqs[i].instance,
                                                         req_list[i].instance))

    def test_get_by_filters(self):
        reqs = [self._create_req(), self._create_req()]
        req_list = build_request.BuildRequestList.get_by_filters(
            self.context, {}, sort_keys=['id'], sort_dirs=['asc'])
        self.assertIsInstance(req_list, objects.BuildRequestList)
        self.assertEqual(2, len(req_list))
        for i in range(len(req_list)):
            self.assertEqual(reqs[i].instance_uuid, req_list[i].instance_uuid)
            self.assertTrue(objects.base.obj_equal_prims(reqs[i].instance,
                                                         req_list[i].instance))

    def test_get_by_filters_limit_0(self):
        self._create_req()
        req_list = build_request.BuildRequestList.get_by_filters(
            self.context, {}, limit=0)
        self.assertEqual([], req_list.objects)

    def test_get_by_filters_deleted(self):
        # Build requests have no 'deleted' notion, so this filter matches
        # nothing.
        self._create_req()
        req_list = build_request.BuildRequestList.get_by_filters(
            self.context, {'deleted': True})
        self.assertEqual([], req_list.objects)

    def test_get_by_filters_cleaned(self):
        self._create_req()
        req_list = build_request.BuildRequestList.get_by_filters(
            self.context, {'cleaned': True})
        self.assertEqual([], req_list.objects)

    def test_get_by_filters_exact_match(self):
        instance_find = fake_instance.fake_instance_obj(
            self.context, objects.Instance, uuid=uuidutils.generate_uuid(),
            host=None, image_ref='findme')
        instance_filter = fake_instance.fake_instance_obj(
            self.context, objects.Instance, uuid=uuidutils.generate_uuid(),
            host=None, image_ref='filterme')
        reqs = [self._create_req(instance=instance_filter),
                self._create_req(instance=instance_find)]
        req_list = build_request.BuildRequestList.get_by_filters(
            self.context, {'image_ref': 'findme'})
        self.assertIsInstance(req_list, objects.BuildRequestList)
        self.assertEqual(1, len(req_list))
        self.assertEqual(reqs[1].instance_uuid, req_list[0].instance_uuid)
        self.assertTrue(objects.base.obj_equal_prims(reqs[1].instance,
                                                     req_list[0].instance))

    def test_get_by_filters_exact_match_list(self):
        # A list filter value matches when any element matches exactly.
        instance_find = fake_instance.fake_instance_obj(
            self.context, objects.Instance, uuid=uuidutils.generate_uuid(),
            host=None, image_ref='findme')
        instance_filter = fake_instance.fake_instance_obj(
            self.context, objects.Instance, uuid=uuidutils.generate_uuid(),
            host=None, image_ref='filterme')
        reqs = [self._create_req(instance=instance_filter),
                self._create_req(instance=instance_find)]
        req_list = build_request.BuildRequestList.get_by_filters(
            self.context, {'image_ref': ['findme', 'fake']})
        self.assertIsInstance(req_list, objects.BuildRequestList)
        self.assertEqual(1, len(req_list))
        self.assertEqual(reqs[1].instance_uuid, req_list[0].instance_uuid)
        self.assertTrue(objects.base.obj_equal_prims(reqs[1].instance,
                                                     req_list[0].instance))

    def test_get_by_filters_exact_match_metadata(self):
        instance_find = fake_instance.fake_instance_obj(
            self.context, objects.Instance, uuid=uuidutils.generate_uuid(),
            host=None, metadata={'foo': 'bar'}, expected_attrs='metadata')
        instance_filter = fake_instance.fake_instance_obj(
            self.context, objects.Instance, uuid=uuidutils.generate_uuid(),
            host=None, metadata={'bar': 'baz'}, expected_attrs='metadata')
        reqs = [self._create_req(instance=instance_filter),
                self._create_req(instance=instance_find)]
        req_list = build_request.BuildRequestList.get_by_filters(
            self.context, {'metadata': {'foo': 'bar'}})
        self.assertIsInstance(req_list, objects.BuildRequestList)
        self.assertEqual(1, len(req_list))
        self.assertEqual(reqs[1].instance_uuid, req_list[0].instance_uuid)
        self.assertTrue(objects.base.obj_equal_prims(reqs[1].instance,
                                                     req_list[0].instance))

    def test_get_by_filters_exact_match_metadata_list(self):
        instance_find = fake_instance.fake_instance_obj(
            self.context, objects.Instance, uuid=uuidutils.generate_uuid(),
            host=None, metadata={'foo': 'bar', 'cat': 'meow'},
            expected_attrs='metadata')
        instance_filter = fake_instance.fake_instance_obj(
            self.context, objects.Instance, uuid=uuidutils.generate_uuid(),
            host=None, metadata={'bar': 'baz', 'cat': 'meow'},
            expected_attrs='metadata')
        reqs = [self._create_req(instance=instance_filter),
                self._create_req(instance=instance_find)]
        req_list = build_request.BuildRequestList.get_by_filters(
            self.context, {'metadata': [{'foo': 'bar'}, {'cat': 'meow'}]})
        self.assertIsInstance(req_list, objects.BuildRequestList)
        self.assertEqual(1, len(req_list))
        self.assertEqual(reqs[1].instance_uuid, req_list[0].instance_uuid)
        self.assertTrue(objects.base.obj_equal_prims(reqs[1].instance,
                                                     req_list[0].instance))

    def test_get_by_filters_regex_match_one(self):
        # display_name is matched as a substring/regex, not exactly.
        instance_find = fake_instance.fake_instance_obj(
            self.context, objects.Instance, uuid=uuidutils.generate_uuid(),
            host=None, display_name='find this one')
        instance_filter = fake_instance.fake_instance_obj(
            self.context, objects.Instance, uuid=uuidutils.generate_uuid(),
            host=None, display_name='filter this one')
        reqs = [self._create_req(instance=instance_filter),
                self._create_req(instance=instance_find)]
        req_list = build_request.BuildRequestList.get_by_filters(
            self.context, {'display_name': 'find'})
        self.assertIsInstance(req_list, objects.BuildRequestList)
        self.assertEqual(1, len(req_list))
        self.assertEqual(reqs[1].instance_uuid, req_list[0].instance_uuid)
        self.assertTrue(objects.base.obj_equal_prims(reqs[1].instance,
                                                     req_list[0].instance))

    def test_get_by_filters_regex_match_both(self):
        instance_find = fake_instance.fake_instance_obj(
            self.context, objects.Instance, uuid=uuidutils.generate_uuid(),
            host=None, display_name='find this one')
        instance_filter = fake_instance.fake_instance_obj(
            self.context, objects.Instance, uuid=uuidutils.generate_uuid(),
            host=None, display_name='filter this one')
        reqs = [self._create_req(instance=instance_filter),
                self._create_req(instance=instance_find)]
        req_list = build_request.BuildRequestList.get_by_filters(
            self.context, {'display_name': 'this'}, sort_keys=['id'],
            sort_dirs=['asc'])
        self.assertIsInstance(req_list, objects.BuildRequestList)
        self.assertEqual(2, len(req_list))
        for i in range(len(req_list)):
            self.assertEqual(reqs[i].instance_uuid, req_list[i].instance_uuid)
            self.assertTrue(objects.base.obj_equal_prims(reqs[i].instance,
                                                         req_list[i].instance))

    def test_get_by_filters_sort_asc(self):
        instance_1024 = fake_instance.fake_instance_obj(
            self.context, objects.Instance, uuid=uuidutils.generate_uuid(),
            host=None, root_gb=1024)
        instance_512 = fake_instance.fake_instance_obj(
            self.context, objects.Instance, uuid=uuidutils.generate_uuid(),
            host=None, root_gb=512)
        req_second = self._create_req(instance=instance_1024)
        req_first = self._create_req(instance=instance_512)
        req_list = build_request.BuildRequestList.get_by_filters(
            self.context, {}, sort_keys=['root_gb'], sort_dirs=['asc'])
        self.assertIsInstance(req_list, objects.BuildRequestList)
        self.assertEqual(2, len(req_list))
        self.assertEqual(req_first.instance_uuid, req_list[0].instance_uuid)
        self.assertTrue(objects.base.obj_equal_prims(req_first.instance,
                                                     req_list[0].instance))
        self.assertEqual(req_second.instance_uuid, req_list[1].instance_uuid)
        self.assertTrue(objects.base.obj_equal_prims(req_second.instance,
                                                     req_list[1].instance))

    def test_get_by_filters_sort_desc(self):
        instance_1024 = fake_instance.fake_instance_obj(
            self.context, objects.Instance, uuid=uuidutils.generate_uuid(),
            host=None, root_gb=1024)
        instance_512 = fake_instance.fake_instance_obj(
            self.context, objects.Instance, uuid=uuidutils.generate_uuid(),
            host=None, root_gb=512)
        req_second = self._create_req(instance=instance_512)
        req_first = self._create_req(instance=instance_1024)
        req_list = build_request.BuildRequestList.get_by_filters(
            self.context, {}, sort_keys=['root_gb'], sort_dirs=['desc'])
        self.assertIsInstance(req_list, objects.BuildRequestList)
        self.assertEqual(2, len(req_list))
        self.assertEqual(req_first.instance_uuid, req_list[0].instance_uuid)
        self.assertTrue(objects.base.obj_equal_prims(req_first.instance,
                                                     req_list[0].instance))
        self.assertEqual(req_second.instance_uuid, req_list[1].instance_uuid)
        self.assertTrue(objects.base.obj_equal_prims(req_second.instance,
                                                     req_list[1].instance))

    def test_get_by_filters_sort_build_req_id(self):
        # Create instance objects this way so that there is no 'id' set.
        # The 'id' will not be populated on a BuildRequest.instance so this
        # checks that sorting by 'id' uses the BuildRequest.id.
        instance_1 = objects.Instance(self.context, host=None,
                                      uuid=uuidutils.generate_uuid())
        instance_2 = objects.Instance(self.context, host=None,
                                      uuid=uuidutils.generate_uuid())
        req_first = self._create_req(instance=instance_2)
        req_second = self._create_req(instance=instance_1)
        req_list = build_request.BuildRequestList.get_by_filters(
            self.context, {}, sort_keys=['id'], sort_dirs=['asc'])
        self.assertIsInstance(req_list, objects.BuildRequestList)
        self.assertEqual(2, len(req_list))
        self.assertEqual(req_first.instance_uuid, req_list[0].instance_uuid)
        self.assertTrue(objects.base.obj_equal_prims(req_first.instance,
                                                     req_list[0].instance))
        self.assertEqual(req_second.instance_uuid, req_list[1].instance_uuid)
        self.assertTrue(objects.base.obj_equal_prims(req_second.instance,
                                                     req_list[1].instance))

    def test_get_by_filters_multiple_sort_keys(self):
        instance_first = fake_instance.fake_instance_obj(
            self.context, objects.Instance, uuid=uuidutils.generate_uuid(),
            host=None, root_gb=512, image_ref='ccc')
        instance_second = fake_instance.fake_instance_obj(
            self.context, objects.Instance, uuid=uuidutils.generate_uuid(),
            host=None, root_gb=512, image_ref='bbb')
        instance_third = fake_instance.fake_instance_obj(
            self.context, objects.Instance, uuid=uuidutils.generate_uuid(),
            host=None, root_gb=1024, image_ref='aaa')
        req_first = self._create_req(instance=instance_first)
        req_third = self._create_req(instance=instance_third)
        req_second = self._create_req(instance=instance_second)
        req_list = build_request.BuildRequestList.get_by_filters(
            self.context, {}, sort_keys=['root_gb', 'image_ref'],
            sort_dirs=['asc', 'desc'])
        self.assertIsInstance(req_list, objects.BuildRequestList)
        self.assertEqual(3, len(req_list))
        self.assertEqual(req_first.instance_uuid, req_list[0].instance_uuid)
        self.assertTrue(objects.base.obj_equal_prims(req_first.instance,
                                                     req_list[0].instance))
        self.assertEqual(req_second.instance_uuid, req_list[1].instance_uuid)
        self.assertTrue(objects.base.obj_equal_prims(req_second.instance,
                                                     req_list[1].instance))
        self.assertEqual(req_third.instance_uuid, req_list[2].instance_uuid)
        self.assertTrue(objects.base.obj_equal_prims(req_third.instance,
                                                     req_list[2].instance))

    def test_get_by_filters_marker(self):
        instance = fake_instance.fake_instance_obj(
            self.context, objects.Instance, uuid=uuidutils.generate_uuid(),
            host=None)
        reqs = [self._create_req(),
                self._create_req(instance=instance),
                self._create_req()]
        req_list = build_request.BuildRequestList.get_by_filters(
            self.context, {}, marker=instance.uuid, sort_keys=['id'],
            sort_dirs=['asc'])
        self.assertIsInstance(req_list, objects.BuildRequestList)
        self.assertEqual(1, len(req_list))
        req = req_list[0]
        expected_req = reqs[2]
        # The returned build request should be the last one in the reqs list
        # since the marker is the 2nd item in the list (of 3).
        self.assertEqual(expected_req.instance_uuid, req.instance_uuid)
        self.assertTrue(objects.base.obj_equal_prims(expected_req.instance,
                                                     req.instance))

    def test_get_by_filters_marker_not_found(self):
        self._create_req()
        self.assertRaises(exception.MarkerNotFound,
                          build_request.BuildRequestList.get_by_filters,
                          self.context, {}, marker=uuidutils.generate_uuid(),
                          sort_keys=['id'], sort_dirs=['asc'])

    def test_get_by_filters_limit(self):
        reqs = [self._create_req(),
                self._create_req(),
                self._create_req()]
        req_list = build_request.BuildRequestList.get_by_filters(
            self.context, {}, limit=2, sort_keys=['id'],
            sort_dirs=['asc'])
        self.assertIsInstance(req_list, objects.BuildRequestList)
        self.assertEqual(2, len(req_list))
        for i, req in enumerate(reqs[:2]):
            self.assertEqual(req.instance_uuid, req_list[i].instance_uuid)
            self.assertTrue(objects.base.obj_equal_prims(req.instance,
                                                         req_list[i].instance))

    def test_get_by_filters_marker_limit(self):
        instance = fake_instance.fake_instance_obj(
            self.context, objects.Instance, uuid=uuidutils.generate_uuid(),
            host=None)
        reqs = [self._create_req(),
                self._create_req(instance=instance),
                self._create_req(),
                self._create_req()]
        req_list = build_request.BuildRequestList.get_by_filters(
            self.context, {}, marker=instance.uuid, limit=2,
            sort_keys=['id'], sort_dirs=['asc'])
        self.assertIsInstance(req_list, objects.BuildRequestList)
        self.assertEqual(2, len(req_list))
        for i, req in enumerate(reqs[2:]):
            self.assertEqual(req.instance_uuid, req_list[i].instance_uuid)
            self.assertTrue(objects.base.obj_equal_prims(req.instance,
                                                         req_list[i].instance))

    def test_get_by_filters_marker_overlimit(self):
        # Only two rows follow the marker, so a limit of 4 still returns two.
        instance = fake_instance.fake_instance_obj(
            self.context, objects.Instance, uuid=uuidutils.generate_uuid(),
            host=None)
        reqs = [self._create_req(),
                self._create_req(instance=instance),
                self._create_req(),
                self._create_req()]
        req_list = build_request.BuildRequestList.get_by_filters(
            self.context, {}, marker=instance.uuid, limit=4,
            sort_keys=['id'], sort_dirs=['asc'])
        self.assertIsInstance(req_list, objects.BuildRequestList)
        self.assertEqual(2, len(req_list))
        for i, req in enumerate(reqs[2:]):
            self.assertEqual(req.instance_uuid, req_list[i].instance_uuid)
            self.assertTrue(objects.base.obj_equal_prims(req.instance,
                                                         req_list[i].instance))

    def test_get_by_filters_bails_on_empty_list_check(self):
        # An empty list filter value short-circuits to an empty result.
        instance1 = fake_instance.fake_instance_obj(
            self.context, objects.Instance, uuid=uuidutils.generate_uuid(),
            host=None, image_ref='')
        instance2 = fake_instance.fake_instance_obj(
            self.context, objects.Instance, uuid=uuidutils.generate_uuid(),
            host=None, image_ref='')
        self._create_req(instance=instance1)
        self._create_req(instance=instance2)
        req_list = build_request.BuildRequestList.get_by_filters(
            self.context, {'image_ref': []})
        self.assertIsInstance(req_list, objects.BuildRequestList)
        self.assertEqual(0, len(req_list))
| |
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import errno
import platform
import socket
import sys
from oslo.config import cfg
from nova.compute import flavors
import nova.context
import nova.db
from nova import exception
from nova.image import glance
from nova.network import minidns
from nova.network import model as network_model
from nova import objects
import nova.utils
CONF = cfg.CONF
CONF.import_opt('use_ipv6', 'nova.netconf')
def get_test_admin_context():
    """Return a fresh admin RequestContext for use in tests."""
    return nova.context.get_admin_context()
def get_test_image_info(context, instance_ref):
    """Return the Glance image metadata for *instance_ref*'s image_ref."""
    if not context:
        context = get_test_admin_context()
    image_service, image_id = glance.get_remote_image_service(
        context, instance_ref['image_ref'])
    return image_service.show(context, image_id)
def get_test_flavor(context=None, options=None):
    """Create the standard 'kinda.big' test flavor, updated with *options*.

    If the flavor was already created by an earlier call, the existing
    row is fetched and returned instead.
    """
    overrides = options or {}
    if not context:
        context = get_test_admin_context()
    test_flavor = {
        'name': 'kinda.big',
        'flavorid': 'someid',
        'memory_mb': 2048,
        'vcpus': 4,
        'root_gb': 40,
        'ephemeral_gb': 80,
        'swap': 1024,
    }
    test_flavor.update(overrides)
    try:
        return nova.db.flavor_create(context, test_flavor)
    except (exception.FlavorExists, exception.FlavorIdExists):
        # Another test created it already; reuse the existing flavor.
        return nova.db.flavor_get_by_name(context, 'kinda.big')
def get_test_instance(context=None, flavor=None, obj=False):
    """Create and persist a test instance.

    Returns an ``objects.Instance`` when *obj* is true, otherwise the raw
    ``nova.db`` record. A default admin context and test flavor are used
    when none are supplied.
    """
    if not context:
        context = get_test_admin_context()
    if not flavor:
        flavor = get_test_flavor(context)

    metadata = {}
    flavors.save_flavor_info(metadata, flavor, '')

    test_instance = {
        'memory_kb': '2048000',
        'basepath': '/some/path',
        'bridge_name': 'br100',
        'vcpus': 4,
        'root_gb': 40,
        'bridge': 'br101',
        'image_ref': 'cedef40a-ed67-4d10-800e-17455edce175',
        'instance_type_id': '5',
        'system_metadata': metadata,
        'extra_specs': {},
        'user_id': context.user_id,
        'project_id': context.project_id,
    }

    if obj:
        instance = objects.Instance(context, **test_instance)
        instance.create()
        return instance
    return nova.db.instance_create(context, test_instance)
def get_test_network_info(count=1):
    """Return a NetworkInfo containing *count* copies of a canned bridge VIF.

    An IPv6 subnet is appended only when ``CONF.use_ipv6`` is set.
    """
    ipv6 = CONF.use_ipv6
    fake = 'fake'
    fake_ip = '0.0.0.0'
    fake_vlan = 100
    fake_bridge_interface = 'eth0'

    def current():
        # Build one VIF attached to a bridge network with an IPv4 subnet
        # (plus an IPv6 subnet when enabled). All addresses are dummies.
        subnet_4 = network_model.Subnet(cidr=fake_ip,
                                        dns=[network_model.IP(fake_ip),
                                             network_model.IP(fake_ip)],
                                        gateway=network_model.IP(fake_ip),
                                        ips=[network_model.IP(fake_ip),
                                             network_model.IP(fake_ip)],
                                        routes=None,
                                        dhcp_server=fake_ip)
        subnet_6 = network_model.Subnet(cidr=fake_ip,
                                        gateway=network_model.IP(fake_ip),
                                        ips=[network_model.IP(fake_ip),
                                             network_model.IP(fake_ip),
                                             network_model.IP(fake_ip)],
                                        routes=None,
                                        version=6)
        subnets = [subnet_4]
        if ipv6:
            subnets.append(subnet_6)
        network = network_model.Network(id=None,
                                        bridge=fake,
                                        label=None,
                                        subnets=subnets,
                                        vlan=fake_vlan,
                                        bridge_interface=fake_bridge_interface,
                                        injected=False)
        vif = network_model.VIF(id='vif-xxx-yyy-zzz',
                                address=fake,
                                network=network,
                                type=network_model.VIF_TYPE_BRIDGE,
                                devname=None,
                                ovs_interfaceid=None)
        return vif

    # NOTE: xrange — this module targets Python 2.
    return network_model.NetworkInfo([current() for x in xrange(0, count)])
def is_osx():
    """Return True when running on macOS (per ``platform.mac_ver``)."""
    return bool(platform.mac_ver()[0])
def coreutils_readlink_available():
    """Return True when GNU coreutils ``readlink -nm`` works on this host."""
    _output, error = nova.utils.trycmd('readlink', '-nm', '/')
    return not error
# Registry of MiniDNS managers handed out during a test run, so their
# backing files can be removed by cleanup_dns_managers().
test_dns_managers = []


def dns_manager():
    """Create a MiniDNS manager and record it for later cleanup."""
    global test_dns_managers
    manager = minidns.MiniDNS()
    test_dns_managers.append(manager)
    return manager


def cleanup_dns_managers():
    """Delete the DNS files of every manager created via dns_manager()."""
    global test_dns_managers
    for manager in test_dns_managers:
        manager.delete_dns_file()
    test_dns_managers = []
def killer_xml_body():
    """Return a "billion laughs" style XML payload for DoS-protection tests.

    Each entity expands to ten copies of the previous one, so naive entity
    expansion of the 9999 ``&c;`` references grows exponentially.
    """
    return (("""<!DOCTYPE x [
            <!ENTITY a "%(a)s">
            <!ENTITY b "%(b)s">
            <!ENTITY c "%(c)s">]>
        <foo>
            <bar>
                <v1>%(d)s</v1>
            </bar>
        </foo>""") % {
        'a': 'A' * 10,
        'b': '&a;' * 10,
        'c': '&b;' * 10,
        'd': '&c;' * 9999,
    }).strip()
def is_ipv6_supported():
    """Best-effort check for usable IPv6 support on this host."""
    supported = socket.has_ipv6
    # Creating an AF_INET6 socket fails with EAFNOSUPPORT when the kernel
    # lacks IPv6 support; any other socket error is unexpected and re-raised.
    try:
        socket.socket(socket.AF_INET6, socket.SOCK_STREAM).close()
    except socket.error as e:
        if e.errno != errno.EAFNOSUPPORT:
            raise
        supported = False
    # On Linux, additionally require at least one interface with an IPv6
    # address (an empty /proc/net/if_inet6 means none are configured).
    if supported and sys.platform.startswith('linux'):
        try:
            with open('/proc/net/if_inet6') as f:
                if not f.read():
                    supported = False
        except IOError:
            supported = False
    return supported
def get_api_version(request):
    """Extract a single-digit API version from a path like ``/v2/...``.

    Returns the version as an int, or None (implicitly) when the third
    character of the path is not a digit.
    """
    version_char = request.path[2:3]
    if version_char.isdigit():
        return int(version_char)
| |
from __future__ import absolute_import, print_function, division
__author__ = 'Alistair Miles <alimanfoo@googlemail.com>'
# standard library dependencies
import json
from json.encoder import JSONEncoder
# internal dependencies
from petl.util import data, RowContainer
from petl.util import dicts as asdicts
from petl.io.sources import read_source_from_arg, write_source_from_arg
def fromjson(source, *args, **kwargs):
    """
    Extract data from a JSON file containing a top-level array of objects.

    Each member of the array is treated as a row; the header is the union
    of the keys seen across all objects (or the ``header`` keyword, if
    given). Remaining positional and keyword arguments are passed through
    to :func:`json.load`.

    If your JSON file does not fit this structure, you will need to parse it
    via :func:`json.load` and select the array to treat as the data, see also
    :func:`fromdicts`.

    Supports transparent reading from URLs, ``.gz`` and ``.bz2`` files.

    .. versionadded:: 0.5

    """

    return JsonView(read_source_from_arg(source), *args, **kwargs)
class JsonView(RowContainer):
    """Row container over a JSON file containing a top-level array of objects.

    petl-specific options ``missing`` (value substituted for absent fields)
    and ``header`` (explicit field list) are popped out of ``kwargs``; the
    rest is forwarded to :func:`json.load`.
    """

    def __init__(self, source, *args, **kwargs):
        self.source = source
        self.args = args
        self.kwargs = kwargs
        # Popping here also removes these keys from self.kwargs (same dict),
        # so json.load() never sees them.
        self.missing = kwargs.pop('missing', None)
        self.header = kwargs.pop('header', None)

    def __iter__(self):
        # The whole document is loaded into memory before rows are emitted.
        with self.source.open_('rb') as f:
            result = json.load(f, *self.args, **self.kwargs)
            if self.header is None:
                # determine fields: union of keys across all objects, in
                # first-seen order
                header = list()
                for o in result:
                    if hasattr(o, 'keys'):
                        header.extend(k for k in o.keys() if k not in header)
            else:
                header = self.header
            yield tuple(header)
            # output data rows, substituting the configured missing value
            # for absent fields (previously a hard-coded None, which made
            # the 'missing' option a silent no-op)
            for o in result:
                row = tuple(o[f] if f in o else self.missing for f in header)
                yield row
def fromdicts(dicts, header=None):
    """
    View a sequence of Python :class:`dict` as a table.

    The header is the union of keys seen across all dicts in first-seen
    order, unless an explicit *header* is supplied. Missing fields are
    filled with ``None``.

    See also :func:`fromjson`.

    .. versionadded:: 0.5

    """

    return DictsView(dicts, header=header)
class DictsView(RowContainer):
    """Row container over a sequence of dicts (see :func:`fromdicts`)."""

    def __init__(self, dicts, header=None):
        self.dicts = dicts
        self.header = header

    def __iter__(self):
        result = self.dicts
        if self.header is None:
            # Two passes over the data are needed (one to discover fields,
            # one to emit rows), so materialize one-shot iterators first —
            # previously a generator input produced a header but no rows.
            if not isinstance(result, (list, tuple)):
                result = list(result)
            # determine fields: union of keys across all dicts, in
            # first-seen order
            header = list()
            for o in result:
                if hasattr(o, 'keys'):
                    header.extend(k for k in o.keys() if k not in header)
        else:
            header = self.header
        yield tuple(header)
        # output data rows, filling absent fields with None
        for o in result:
            row = tuple(o[f] if f in o else None for f in header)
            yield row
def tojson(table, source=None, prefix=None, suffix=None, *args, **kwargs):
    """
    Write *table* in JSON format, with rows output as JSON objects.

    Extra positional and keyword arguments are passed to
    :class:`json.encoder.JSONEncoder`. Optional *prefix* and *suffix*
    strings are written around the JSON payload.

    Note that this is currently not streaming, all data is loaded into
    memory before being written to the file.

    Supports transparent writing to ``.gz`` and ``.bz2`` files.

    .. versionadded:: 0.5

    """

    target = write_source_from_arg(source)
    encoder = JSONEncoder(*args, **kwargs)
    with target.open_('wb') as f:
        if prefix is not None:
            f.write(prefix)
        # iterencode streams the serialization, but the row dicts are
        # materialized up front
        for chunk in encoder.iterencode(list(asdicts(table))):
            f.write(chunk)
        if suffix is not None:
            f.write(suffix)
def tojsonarrays(table, source=None, prefix=None, suffix=None,
                 output_header=False, *args, **kwargs):
    """
    Write *table* in JSON format, with rows output as JSON arrays.

    When *output_header* is true the header row is included as the first
    array. Extra positional and keyword arguments are passed to
    :class:`json.encoder.JSONEncoder`; optional *prefix* and *suffix*
    strings are written around the JSON payload.

    Note that this is currently not streaming, all data is loaded into
    memory before being written to the file.

    Supports transparent writing to ``.gz`` and ``.bz2`` files.

    .. versionadded:: 0.11

    """

    encoder = JSONEncoder(*args, **kwargs)
    target = write_source_from_arg(source)
    if output_header:
        obj = list(table)
    else:
        obj = list(data(table))
    with target.open_('wb') as f:
        if prefix is not None:
            f.write(prefix)
        for chunk in encoder.iterencode(obj):
            f.write(chunk)
        if suffix is not None:
            f.write(suffix)
| |
"""
This module provides a pool manager that uses Google App Engine's
`URLFetch Service <https://cloud.google.com/appengine/docs/python/urlfetch>`_.
Example usage::
from urllib3 import PoolManager
from urllib3.contrib.appengine import AppEngineManager, is_appengine_sandbox
if is_appengine_sandbox():
# AppEngineManager uses AppEngine's URLFetch API behind the scenes
http = AppEngineManager()
else:
# PoolManager uses a socket-level API behind the scenes
http = PoolManager()
r = http.request('GET', 'https://google.com/')
There are `limitations <https://cloud.google.com/appengine/docs/python/\
urlfetch/#Python_Quotas_and_limits>`_ to the URLFetch service and it may not be
the best choice for your application. There are three options for using
urllib3 on Google App Engine:
1. You can use :class:`AppEngineManager` with URLFetch. URLFetch is
cost-effective in many circumstances as long as your usage is within the
limitations.
2. You can use a normal :class:`~urllib3.PoolManager` by enabling sockets.
Sockets also have `limitations and restrictions
<https://cloud.google.com/appengine/docs/python/sockets/\
#limitations-and-restrictions>`_ and have a lower free quota than URLFetch.
To use sockets, be sure to specify the following in your ``app.yaml``::
env_variables:
GAE_USE_SOCKETS_HTTPLIB : 'true'
3. If you are using `App Engine Flexible
<https://cloud.google.com/appengine/docs/flexible/>`_, you can use the standard
:class:`PoolManager` without any configuration or special environment variables.
"""
from __future__ import absolute_import
import logging
import os
import warnings
from urlparse import urljoin
from ..exceptions import (
HTTPError,
HTTPWarning,
MaxRetryError,
ProtocolError,
TimeoutError,
SSLError
)
from ..packages.six import BytesIO
from ..request import RequestMethods
from ..response import HTTPResponse
from ..util.timeout import Timeout
from ..util.retry import Retry
# urlfetch is only importable inside the App Engine runtime; elsewhere we
# leave the name bound to None and AppEngineManager refuses to instantiate.
try:
    from google.appengine.api import urlfetch
except ImportError:
    urlfetch = None
log = logging.getLogger(__name__)
class AppEnginePlatformWarning(HTTPWarning):
    """Warning category for App Engine platform-specific caveats."""
    pass
class AppEnginePlatformError(HTTPError):
    """Error raised when a request exceeds an App Engine platform limitation."""
    pass
class AppEngineManager(RequestMethods):
    """
    Connection manager for Google App Engine sandbox applications.

    This manager uses the URLFetch service directly instead of using the
    emulated httplib, and is subject to URLFetch limitations as described in
    the App Engine documentation `here
    <https://cloud.google.com/appengine/docs/python/urlfetch>`_.

    Notably it will raise an :class:`AppEnginePlatformError` if:

    * URLFetch is not available.
    * If you attempt to use this on App Engine Flexible, as full socket
      support is available.
    * If a request size is more than 10 megabytes.
    * If a response size is more than 32 megabytes.
    * If you use an unsupported request method such as OPTIONS.

    Beyond those cases, it will raise normal urllib3 errors.
    """

    def __init__(self, headers=None, retries=None, validate_certificate=True,
                 urlfetch_retries=True):
        # Refuse to construct outside of an App Engine sandbox, where the
        # urlfetch module could not be imported (see module-level guard).
        if not urlfetch:
            raise AppEnginePlatformError(
                "URLFetch is not available in this environment.")
        if is_prod_appengine_mvms():
            # NOTE(review): the two adjacent literals below concatenate
            # without a space ("...AppEngineManager" + "on Managed VMs...")
            raise AppEnginePlatformError(
                "Use normal urllib3.PoolManager instead of AppEngineManager"
                "on Managed VMs, as using URLFetch is not necessary in "
                "this environment.")
        warnings.warn(
            "urllib3 is using URLFetch on Google App Engine sandbox instead "
            "of sockets. To use sockets directly instead of URLFetch see "
            "https://urllib3.readthedocs.io/en/latest/contrib.html.",
            AppEnginePlatformWarning)
        RequestMethods.__init__(self, headers)
        self.validate_certificate = validate_certificate
        # When True, URLFetch follows redirects itself; when False, we
        # handle redirects manually in urlopen() below.
        self.urlfetch_retries = urlfetch_retries
        self.retries = retries or Retry.DEFAULT

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Return False to re-raise any potential exceptions
        return False

    def urlopen(self, method, url, body=None, headers=None,
                retries=None, redirect=True, timeout=Timeout.DEFAULT_TIMEOUT,
                **response_kw):
        """Issue a request via URLFetch, translating its exceptions into
        urllib3 exceptions and handling redirects/retries as configured."""
        retries = self._get_retries(retries, redirect)
        try:
            # Only let URLFetch follow redirects when redirects are wanted
            # and there is redirect/total budget remaining.
            follow_redirects = (
                redirect and
                retries.redirect != 0 and
                retries.total)
            response = urlfetch.fetch(
                url,
                payload=body,
                method=method,
                headers=headers or {},
                allow_truncated=False,
                follow_redirects=self.urlfetch_retries and follow_redirects,
                deadline=self._get_absolute_timeout(timeout),
                validate_certificate=self.validate_certificate,
            )
        except urlfetch.DeadlineExceededError as e:
            raise TimeoutError(self, e)
        except urlfetch.InvalidURLError as e:
            if 'too large' in str(e):
                raise AppEnginePlatformError(
                    "URLFetch request too large, URLFetch only "
                    "supports requests up to 10mb in size.", e)
            raise ProtocolError(e)
        except urlfetch.DownloadError as e:
            if 'Too many redirects' in str(e):
                raise MaxRetryError(self, url, reason=e)
            raise ProtocolError(e)
        except urlfetch.ResponseTooLargeError as e:
            # NOTE(review): the two literals below also join without a space
            raise AppEnginePlatformError(
                "URLFetch response too large, URLFetch only supports"
                "responses up to 32mb in size.", e)
        except urlfetch.SSLCertificateError as e:
            raise SSLError(e)
        except urlfetch.InvalidMethodError as e:
            raise AppEnginePlatformError(
                "URLFetch does not support method: %s" % method, e)
        http_response = self._urlfetch_response_to_http_response(
            response, retries=retries, **response_kw)
        # Handle redirect?
        redirect_location = redirect and http_response.get_redirect_location()
        if redirect_location:
            # Check for redirect response
            if (self.urlfetch_retries and retries.raise_on_redirect):
                # URLFetch already exhausted its own redirect following,
                # so reaching here means too many redirects occurred.
                raise MaxRetryError(self, url, "too many redirects")
            else:
                if http_response.status == 303:
                    # 303 See Other mandates a GET on the redirected URL
                    method = 'GET'
                try:
                    retries = retries.increment(method, url, response=http_response, _pool=self)
                except MaxRetryError:
                    if retries.raise_on_redirect:
                        raise MaxRetryError(self, url, "too many redirects")
                    return http_response
                retries.sleep_for_retry(http_response)
                log.debug("Redirecting %s -> %s", url, redirect_location)
                redirect_url = urljoin(url, redirect_location)
                return self.urlopen(
                    method, redirect_url, body, headers,
                    retries=retries, redirect=redirect,
                    timeout=timeout, **response_kw)
        # Check if we should retry the HTTP response.
        has_retry_after = bool(http_response.getheader('Retry-After'))
        if retries.is_retry(method, http_response.status, has_retry_after):
            retries = retries.increment(
                method, url, response=http_response, _pool=self)
            log.debug("Retry: %s", url)
            retries.sleep(http_response)
            return self.urlopen(
                method, url,
                body=body, headers=headers,
                retries=retries, redirect=redirect,
                timeout=timeout, **response_kw)
        return http_response

    def _urlfetch_response_to_http_response(self, urlfetch_resp, **response_kw):
        """Adapt a URLFetch response object into a urllib3 HTTPResponse."""
        if is_prod_appengine():
            # Production GAE handles deflate encoding automatically, but does
            # not remove the encoding header.
            content_encoding = urlfetch_resp.headers.get('content-encoding')
            if content_encoding == 'deflate':
                del urlfetch_resp.headers['content-encoding']
        transfer_encoding = urlfetch_resp.headers.get('transfer-encoding')
        # We have a full response's content,
        # so let's make sure we don't report ourselves as chunked data.
        if transfer_encoding == 'chunked':
            encodings = transfer_encoding.split(",")
            encodings.remove('chunked')
            urlfetch_resp.headers['transfer-encoding'] = ','.join(encodings)
        return HTTPResponse(
            # In order for decoding to work, we must present the content as
            # a file-like object.
            body=BytesIO(urlfetch_resp.content),
            headers=urlfetch_resp.headers,
            status=urlfetch_resp.status_code,
            **response_kw
        )

    def _get_absolute_timeout(self, timeout):
        """Collapse a urllib3 Timeout into the single deadline URLFetch supports."""
        if timeout is Timeout.DEFAULT_TIMEOUT:
            return None  # Defer to URLFetch's default.
        if isinstance(timeout, Timeout):
            if timeout._read is not None or timeout._connect is not None:
                warnings.warn(
                    "URLFetch does not support granular timeout settings, "
                    "reverting to total or default URLFetch timeout.",
                    AppEnginePlatformWarning)
            return timeout.total
        return timeout

    def _get_retries(self, retries, redirect):
        """Normalize `retries` to a Retry object, warning about options
        URLFetch cannot honor."""
        if not isinstance(retries, Retry):
            retries = Retry.from_int(
                retries, redirect=redirect, default=self.retries)
        if retries.connect or retries.read or retries.redirect:
            warnings.warn(
                "URLFetch only supports total retries and does not "
                "recognize connect, read, or redirect retry parameters.",
                AppEnginePlatformWarning)
        return retries
def is_appengine():
    """Return True when running on any App Engine flavor (dev server,
    production sandbox, or Managed VM)."""
    probes = (is_local_appengine, is_prod_appengine, is_prod_appengine_mvms)
    return any(probe() for probe in probes)
def is_appengine_sandbox():
    """Return True only inside the sandboxed (non-Managed-VM) runtime."""
    if not is_appengine():
        return False
    return not is_prod_appengine_mvms()
def is_local_appengine():
    """Return True when running under the local App Engine dev server.

    ``SERVER_SOFTWARE`` is read with ``os.environ.get`` so this probe cannot
    raise ``KeyError`` in environments that set ``APPENGINE_RUNTIME`` without
    also setting ``SERVER_SOFTWARE`` (the original indexed the variable
    directly and would crash).
    """
    return ('APPENGINE_RUNTIME' in os.environ and
            'Development/' in os.environ.get('SERVER_SOFTWARE', ''))
def is_prod_appengine():
    """Return True on production (non-Managed-VM) App Engine.

    ``SERVER_SOFTWARE`` is read with ``os.environ.get`` so this probe cannot
    raise ``KeyError`` when ``APPENGINE_RUNTIME`` is set without
    ``SERVER_SOFTWARE`` (the original indexed the variable directly).
    """
    return ('APPENGINE_RUNTIME' in os.environ and
            'Google App Engine/' in os.environ.get('SERVER_SOFTWARE', '') and
            not is_prod_appengine_mvms())
def is_prod_appengine_mvms():
    """Return True on an App Engine Managed VM (GAE_VM flag set to 'true')."""
    gae_vm_flag = os.environ.get('GAE_VM', False)
    return gae_vm_flag == 'true'
| |
import os
import time
import datetime
import errno
from supervisor.options import readFile
from supervisor.options import tailFile
from supervisor.options import NotExecutable
from supervisor.options import NotFound
from supervisor.options import NoPermission
from supervisor.options import make_namespec
from supervisor.options import split_namespec
from supervisor.options import VERSION
from supervisor.events import notify
from supervisor.events import RemoteCommunicationEvent
from supervisor.http import NOT_DONE_YET
from supervisor.xmlrpc import Faults
from supervisor.xmlrpc import RPCError
from supervisor.states import SupervisorStates
from supervisor.states import getSupervisorStateDescription
from supervisor.states import ProcessStates
from supervisor.states import getProcessStateDescription
from supervisor.states import RUNNING_STATES
API_VERSION = '3.0'  # version string reported by getAPIVersion()
class SupervisorNamespaceRPCInterface:
    """Main XML-RPC namespace for controlling supervisord: querying state,
    starting/stopping processes, and reading or clearing logs.

    Methods that return a callable return a supervisor "deferred": the
    XML-RPC machinery calls it repeatedly until it stops returning
    NOT_DONE_YET.

    Note: exception handling uses ``except X as e`` (valid since
    Python 2.6 and required on Python 3) instead of the Python 2-only
    ``except X, e`` form, and list building uses ``sorted()`` instead of
    ``dict.keys()``/``.sort()`` so the code is not tied to Python 2 dict
    views.  Behavior is unchanged.
    """

    def __init__(self, supervisord):
        self.supervisord = supervisord

    def _update(self, text):
        # remember the last-called API method name (for unit tests, mainly)
        self.update_text = text
        # refuse service once supervisord has begun shutting down
        if self.supervisord.options.mood < SupervisorStates.RUNNING:
            raise RPCError(Faults.SHUTDOWN_STATE)

    # RPC API methods

    def getAPIVersion(self):
        """ Return the version of the RPC API used by supervisord
        @return string version version id
        """
        self._update('getAPIVersion')
        return API_VERSION

    getVersion = getAPIVersion  # b/w compatibility with releases before 3.0

    def getSupervisorVersion(self):
        """ Return the version of the supervisor package in use by supervisord
        @return string version version id
        """
        self._update('getSupervisorVersion')
        return VERSION

    def getIdentification(self):
        """ Return identifying string of supervisord
        @return string identifier identifying string
        """
        self._update('getIdentification')
        return self.supervisord.options.identifier

    def getState(self):
        """ Return current state of supervisord as a struct
        @return struct A struct with keys int statecode, string statename
        """
        self._update('getState')
        state = self.supervisord.options.mood
        statename = getSupervisorStateDescription(state)
        data = {
            'statecode': state,
            'statename': statename,
            }
        return data

    def getPID(self):
        """ Return the PID of supervisord
        @return int PID
        """
        self._update('getPID')
        return self.supervisord.options.get_pid()

    def readLog(self, offset, length):
        """ Read length bytes from the main log starting at offset
        @param int offset offset to start reading from.
        @param int length number of bytes to read from the log.
        @return string result Bytes of log
        """
        self._update('readLog')
        logfile = self.supervisord.options.logfile
        if logfile is None or not os.path.exists(logfile):
            raise RPCError(Faults.NO_FILE, logfile)
        try:
            return readFile(logfile, int(offset), int(length))
        except ValueError as inst:
            # readFile signals bad offsets via ValueError whose args[0]
            # names the corresponding Faults attribute
            why = inst.args[0]
            raise RPCError(getattr(Faults, why))

    readMainLog = readLog  # b/w compatibility with releases before 2.1

    def clearLog(self):
        """ Clear the main log.
        @return boolean result always returns True unless error
        """
        self._update('clearLog')
        logfile = self.supervisord.options.logfile
        if logfile is None or not self.supervisord.options.exists(logfile):
            raise RPCError(Faults.NO_FILE)
        # there is a race condition here, but ignore it.
        try:
            self.supervisord.options.remove(logfile)
        except (OSError, IOError):
            raise RPCError(Faults.FAILED)
        for handler in self.supervisord.options.logger.handlers:
            if hasattr(handler, 'reopen'):
                self.supervisord.options.logger.info('reopening log file')
                handler.reopen()
        return True

    def shutdown(self):
        """ Shut down the supervisor process
        @return boolean result always returns True unless error
        """
        self._update('shutdown')
        self.supervisord.options.mood = SupervisorStates.SHUTDOWN
        return True

    def restart(self):
        """ Restart the supervisor process
        @return boolean result always return True unless error
        """
        self._update('restart')
        self.supervisord.options.mood = SupervisorStates.RESTARTING
        return True

    def reloadConfig(self):
        """
        Reload configuration
        @return boolean result always return True unless error
        """
        self._update('reloadConfig')
        try:
            self.supervisord.options.process_config_file(do_usage=False)
        except ValueError as msg:
            raise RPCError(Faults.CANT_REREAD, msg)
        added, changed, removed = self.supervisord.diff_to_active()
        added = [group.name for group in added]
        changed = [group.name for group in changed]
        removed = [group.name for group in removed]
        return [[added, changed, removed]]  # cannot return len > 1, apparently

    def addProcessGroup(self, name):
        """ Update the config for a running process from config file.
        @param string name name of process group to add
        @return boolean result true if successful
        """
        self._update('addProcessGroup')
        for config in self.supervisord.options.process_group_configs:
            if config.name == name:
                result = self.supervisord.add_process_group(config)
                if not result:
                    raise RPCError(Faults.ALREADY_ADDED, name)
                return True
        raise RPCError(Faults.BAD_NAME, name)

    def removeProcessGroup(self, name):
        """ Remove a stopped process from the active configuration.
        @param string name name of process group to remove
        @return boolean result Indicates whether the removal was successful
        """
        self._update('removeProcessGroup')
        if name not in self.supervisord.process_groups:
            raise RPCError(Faults.BAD_NAME, name)
        result = self.supervisord.remove_process_group(name)
        if not result:
            raise RPCError(Faults.STILL_RUNNING)
        return True

    def _getAllProcesses(self, lexical=False):
        # if lexical is true, return processes sorted in lexical order,
        # otherwise, sort in priority order
        all_processes = []
        if lexical:
            group_names = sorted(self.supervisord.process_groups.keys())
            for group_name in group_names:
                group = self.supervisord.process_groups[group_name]
                process_names = sorted(group.processes.keys())
                for process_name in process_names:
                    process = group.processes[process_name]
                    all_processes.append((group, process))
        else:
            groups = sorted(self.supervisord.process_groups.values())  # asc by priority
            for group in groups:
                processes = sorted(group.processes.values())  # asc by priority
                for process in processes:
                    all_processes.append((group, process))
        return all_processes

    def _getGroupAndProcess(self, name):
        # get process to start from name
        group_name, process_name = split_namespec(name)
        group = self.supervisord.process_groups.get(group_name)
        if group is None:
            raise RPCError(Faults.BAD_NAME, name)
        if process_name is None:
            return group, None
        process = group.processes.get(process_name)
        if process is None:
            raise RPCError(Faults.BAD_NAME, name)
        return group, process

    def startProcess(self, name, wait=True):
        """ Start a process
        @param string name Process name (or 'group:name', or 'group:*')
        @param boolean wait Wait for process to be fully started
        @return boolean result Always true unless error
        """
        self._update('startProcess')
        group, process = self._getGroupAndProcess(name)
        if process is None:
            group_name, process_name = split_namespec(name)
            return self.startProcessGroup(group_name, wait)
        # test filespec, don't bother trying to spawn if we know it will
        # eventually fail
        try:
            filename, argv = process.get_execv_args()
        except NotFound as why:
            raise RPCError(Faults.NO_FILE, why.args[0])
        except (NotExecutable, NoPermission) as why:
            raise RPCError(Faults.NOT_EXECUTABLE, why.args[0])
        started = []
        startsecs = process.config.startsecs

        def startit():
            if not started:
                if process.get_state() in RUNNING_STATES:
                    raise RPCError(Faults.ALREADY_STARTED, name)
                process.spawn()
                if process.spawnerr:
                    raise RPCError(Faults.SPAWN_ERROR, name)
                # we use a list here to fake out lexical scoping;
                # using a direct assignment to 'started' in the
                # function appears to not work (symptom: 2nd or 3rd
                # call through, it forgets about 'started', claiming
                # it's undeclared).
                started.append(time.time())
            if not wait or not startsecs:
                return True
            t = time.time()
            runtime = (t - started[0])
            state = process.get_state()
            if state not in (ProcessStates.STARTING, ProcessStates.RUNNING):
                raise RPCError(Faults.ABNORMAL_TERMINATION, name)
            if runtime < startsecs:
                return NOT_DONE_YET
            if state == ProcessStates.RUNNING:
                return True
            raise RPCError(Faults.ABNORMAL_TERMINATION, name)
        startit.delay = 0.05
        startit.rpcinterface = self
        return startit  # deferred

    def startProcessGroup(self, name, wait=True):
        """ Start all processes in the group named 'name'
        @param string name The group name
        @param boolean wait Wait for each process to be fully started
        @return struct result A structure containing start statuses
        """
        self._update('startProcessGroup')
        group = self.supervisord.process_groups.get(name)
        if group is None:
            raise RPCError(Faults.BAD_NAME, name)
        processes = sorted(group.processes.values())
        processes = [(group, process) for process in processes]
        startall = make_allfunc(processes, isNotRunning, self.startProcess,
                                wait=wait)
        startall.delay = 0.05
        startall.rpcinterface = self
        return startall  # deferred

    def startAllProcesses(self, wait=True):
        """ Start all processes listed in the configuration file
        @param boolean wait Wait for each process to be fully started
        @return struct result A structure containing start statuses
        """
        self._update('startAllProcesses')
        processes = self._getAllProcesses()
        startall = make_allfunc(processes, isNotRunning, self.startProcess,
                                wait=wait)
        startall.delay = 0.05
        startall.rpcinterface = self
        return startall  # deferred

    def stopProcess(self, name, wait=True):
        """ Stop a process named by name
        @param string name The name of the process to stop (or 'group:name')
        @param boolean wait Wait for the process to be fully stopped
        @return boolean result Always return True unless error
        """
        self._update('stopProcess')
        group, process = self._getGroupAndProcess(name)
        if process is None:
            group_name, process_name = split_namespec(name)
            return self.stopProcessGroup(group_name, wait)
        stopped = []
        called = []

        def killit():
            if not called:
                if process.get_state() not in RUNNING_STATES:
                    raise RPCError(Faults.NOT_RUNNING)
                # use a mutable for lexical scoping; see startProcess
                called.append(1)
            if not stopped:
                msg = process.stop()
                if msg is not None:
                    raise RPCError(Faults.FAILED, msg)
                stopped.append(1)
                if wait:
                    return NOT_DONE_YET
                else:
                    return True
            if process.get_state() not in (ProcessStates.STOPPED,
                                           ProcessStates.EXITED):
                return NOT_DONE_YET
            else:
                return True
        killit.delay = 0.2
        killit.rpcinterface = self
        return killit  # deferred

    def stopProcessGroup(self, name, wait=True):
        """ Stop all processes in the process group named 'name'
        @param string name The group name
        @param boolean wait Wait for each process to be fully stopped
        @return boolean result Always return true unless error.
        """
        self._update('stopProcessGroup')
        group = self.supervisord.process_groups.get(name)
        if group is None:
            raise RPCError(Faults.BAD_NAME, name)
        processes = sorted(group.processes.values())
        processes = [(group, process) for process in processes]
        killall = make_allfunc(processes, isRunning, self.stopProcess,
                               wait=wait)
        killall.delay = 0.05
        killall.rpcinterface = self
        return killall  # deferred

    def stopAllProcesses(self, wait=True):
        """ Stop all processes in the process list
        @param boolean wait Wait for each process to be fully stopped
        @return boolean result Always return true unless error.
        """
        self._update('stopAllProcesses')
        processes = self._getAllProcesses()
        killall = make_allfunc(processes, isRunning, self.stopProcess,
                               wait=wait)
        killall.delay = 0.05
        killall.rpcinterface = self
        return killall  # deferred

    def getAllConfigInfo(self):
        """ Get info about all available process configurations. Each record
        represents a single process (i.e. groups get flattened).
        @return array result An array of process config info records
        """
        self._update('getAllConfigInfo')
        configinfo = []
        for gconfig in self.supervisord.options.process_group_configs:
            inuse = gconfig.name in self.supervisord.process_groups
            for pconfig in gconfig.process_configs:
                configinfo.append(
                    {'name': pconfig.name,
                     'group': gconfig.name,
                     'inuse': inuse,
                     'autostart': pconfig.autostart,
                     'group_prio': gconfig.priority,
                     'process_prio': pconfig.priority})
        # NOTE: sorting a list of dicts relies on Python 2 comparison
        # semantics; kept as-is since changing it would alter output order
        configinfo.sort()
        return configinfo

    def _interpretProcessInfo(self, info):
        # build the human-readable 'description' field from a process info
        # struct produced by getProcessInfo
        state = info['state']
        if state == ProcessStates.RUNNING:
            start = info['start']
            now = info['now']
            start_dt = datetime.datetime(*time.gmtime(start)[:6])
            now_dt = datetime.datetime(*time.gmtime(now)[:6])
            uptime = now_dt - start_dt
            desc = 'pid %s, uptime %s' % (info['pid'], uptime)
            if info['resumed']:
                desc += ' [resumed]'
        elif state in (ProcessStates.FATAL, ProcessStates.BACKOFF):
            desc = info['spawnerr']
            if not desc:
                desc = 'unknown error (try "tail %s")' % info['name']
        elif state in (ProcessStates.STOPPED, ProcessStates.EXITED):
            if info['start']:
                stop = info['stop']
                # use [:6] (through seconds), consistent with the gmtime
                # calls above; [:7] would pass tm_wday as the datetime
                # microsecond argument (harmless for strftime, but wrong)
                stop_dt = datetime.datetime(*time.localtime(stop)[:6])
                desc = stop_dt.strftime('%b %d %I:%M %p')
            else:
                desc = 'Not started'
        else:
            desc = ''
        return desc

    def getProcessInfo(self, name):
        """ Get info about a process named name
        @param string name The name of the process (or 'group:name')
        @return struct result A structure containing data about the process
        """
        self._update('getProcessInfo')
        group, process = self._getGroupAndProcess(name)
        start = int(process.laststart)
        stop = int(process.laststop)
        now = int(time.time())
        state = process.get_state()
        spawnerr = process.spawnerr or ''
        exitstatus = process.exitstatus or 0
        stdout_logfile = process.config.stdout_logfile or ''
        stderr_logfile = process.config.stderr_logfile or ''
        info = {
            'name': process.config.name,
            'group': group.config.name,
            'start': start,
            'stop': stop,
            'now': now,
            'state': state,
            'statename': getProcessStateDescription(state),
            'spawnerr': spawnerr,
            'exitstatus': exitstatus,
            'logfile': stdout_logfile,  # b/c alias
            'stdout_logfile': stdout_logfile,
            'stderr_logfile': stderr_logfile,
            'pid': process.pid,
            'resumed': process.resumed,
            }
        description = self._interpretProcessInfo(info)
        info['description'] = description
        return info

    def getAllProcessInfo(self):
        """ Get info about all processes
        @return array result An array of process status results
        """
        self._update('getAllProcessInfo')
        all_processes = self._getAllProcesses(lexical=True)
        output = []
        for group, process in all_processes:
            name = make_namespec(group.config.name, process.config.name)
            output.append(self.getProcessInfo(name))
        return output

    def _readProcessLog(self, name, offset, length, channel):
        # shared implementation for readProcessStdoutLog/readProcessStderrLog
        group, process = self._getGroupAndProcess(name)
        logfile = getattr(process.config, '%s_logfile' % channel)
        if logfile is None or not os.path.exists(logfile):
            raise RPCError(Faults.NO_FILE, logfile)
        try:
            return readFile(logfile, int(offset), int(length))
        except ValueError as inst:
            why = inst.args[0]
            raise RPCError(getattr(Faults, why))

    def readProcessStdoutLog(self, name, offset, length):
        """ Read length bytes from name's stdout log starting at offset
        @param string name the name of the process (or 'group:name')
        @param int offset offset to start reading from.
        @param int length number of bytes to read from the log.
        @return string result Bytes of log
        """
        self._update('readProcessStdoutLog')
        return self._readProcessLog(name, offset, length, 'stdout')

    readProcessLog = readProcessStdoutLog  # b/c alias

    def readProcessStderrLog(self, name, offset, length):
        """ Read length bytes from name's stderr log starting at offset
        @param string name the name of the process (or 'group:name')
        @param int offset offset to start reading from.
        @param int length number of bytes to read from the log.
        @return string result Bytes of log
        """
        self._update('readProcessStderrLog')
        return self._readProcessLog(name, offset, length, 'stderr')

    def _tailProcessLog(self, name, offset, length, channel):
        # shared implementation for tailProcessStdoutLog/tailProcessStderrLog
        group, process = self._getGroupAndProcess(name)
        logfile = getattr(process.config, '%s_logfile' % channel)
        if logfile is None or not os.path.exists(logfile):
            return ['', 0, False]
        return tailFile(logfile, int(offset), int(length))

    def tailProcessStdoutLog(self, name, offset, length):
        """
        Provides a more efficient way to tail the (stdout) log than
        readProcessStdoutLog(). Use readProcessStdoutLog() to read
        chunks and tailProcessStdoutLog() to tail.

        Requests (length) bytes from the (name)'s log, starting at
        (offset). If the total log size is greater than (offset +
        length), the overflow flag is set and the (offset) is
        automatically increased to position the buffer at the end of
        the log. If less than (length) bytes are available, the
        maximum number of available bytes will be returned. (offset)
        returned is always the last offset in the log +1.

        @param string name the name of the process (or 'group:name')
        @param int offset offset to start reading from
        @param int length maximum number of bytes to return
        @return array result [string bytes, int offset, bool overflow]
        """
        self._update('tailProcessStdoutLog')
        return self._tailProcessLog(name, offset, length, 'stdout')

    tailProcessLog = tailProcessStdoutLog  # b/c alias

    def tailProcessStderrLog(self, name, offset, length):
        """
        Provides a more efficient way to tail the (stderr) log than
        readProcessStderrLog(). Use readProcessStderrLog() to read
        chunks and tailProcessStderrLog() to tail.

        Requests (length) bytes from the (name)'s log, starting at
        (offset). If the total log size is greater than (offset +
        length), the overflow flag is set and the (offset) is
        automatically increased to position the buffer at the end of
        the log. If less than (length) bytes are available, the
        maximum number of available bytes will be returned. (offset)
        returned is always the last offset in the log +1.

        @param string name the name of the process (or 'group:name')
        @param int offset offset to start reading from
        @param int length maximum number of bytes to return
        @return array result [string bytes, int offset, bool overflow]
        """
        self._update('tailProcessStderrLog')
        return self._tailProcessLog(name, offset, length, 'stderr')

    def clearProcessLogs(self, name):
        """ Clear the stdout and stderr logs for the named process and
        reopen them.
        @param string name The name of the process (or 'group:name')
        @return boolean result Always True unless error
        """
        self._update('clearProcessLogs')
        group, process = self._getGroupAndProcess(name)
        try:
            # implies a reopen
            process.removelogs()
        except (IOError, OSError):
            raise RPCError(Faults.FAILED, name)
        return True

    clearProcessLog = clearProcessLogs  # b/c alias

    def clearAllProcessLogs(self):
        """ Clear all process log files
        @return boolean result Always return true
        """
        self._update('clearAllProcessLogs')
        results = []
        callbacks = []
        all_processes = self._getAllProcesses()
        for group, process in all_processes:
            callbacks.append((group, process, self.clearProcessLog))

        def clearall():
            if not callbacks:
                return results
            group, process, callback = callbacks.pop(0)
            name = make_namespec(group.config.name, process.config.name)
            try:
                callback(name)
            except RPCError as e:
                results.append(
                    {'name': process.config.name,
                     'group': group.config.name,
                     'status': e.code,
                     'description': e.text})
            else:
                results.append(
                    {'name': process.config.name,
                     'group': group.config.name,
                     'status': Faults.SUCCESS,
                     'description': 'OK'}
                    )
            if callbacks:
                return NOT_DONE_YET
            return results
        clearall.delay = 0.05
        clearall.rpcinterface = self
        return clearall  # deferred

    def sendProcessStdin(self, name, chars):
        """ Send a string of chars to the stdin of the process name.
        If non-7-bit data is sent (unicode), it is encoded to utf-8
        before being sent to the process' stdin. If chars is not a
        string or is not unicode, raise INCORRECT_PARAMETERS. If the
        process is not running, raise NOT_RUNNING. If the process'
        stdin cannot accept input (e.g. it was closed by the child
        process), raise NO_FILE.
        @param string name The process name to send to (or 'group:name')
        @param string chars The character data to send to the process
        @return boolean result Always return True unless error
        """
        self._update('sendProcessStdin')
        # NOTE: unicode/basestring are Python 2 names; this module targets
        # Python 2 and they are kept deliberately
        if isinstance(chars, unicode):
            chars = chars.encode('utf-8')
        if not isinstance(chars, basestring):
            raise RPCError(Faults.INCORRECT_PARAMETERS, chars)
        group, process = self._getGroupAndProcess(name)
        if process is None:
            raise RPCError(Faults.BAD_NAME, name)
        if not process.pid or process.killing:
            raise RPCError(Faults.NOT_RUNNING, name)
        try:
            process.write(chars)
        except OSError as why:
            # .args[0] is equivalent to the old why[0] indexing on Python 2
            # and also works on Python 3
            if why.args[0] == errno.EPIPE:
                raise RPCError(Faults.NO_FILE, name)
            else:
                raise
        return True

    def sendRemoteCommEvent(self, type, data):
        """ Send an event that will be received by event listener
        subprocesses subscribing to the RemoteCommunicationEvent.
        @param string type String for the "type" key in the event header
        @param string data Data for the event body
        @return boolean Always return True unless error
        """
        if isinstance(type, unicode):
            type = type.encode('utf-8')
        if isinstance(data, unicode):
            data = data.encode('utf-8')
        notify(
            RemoteCommunicationEvent(type, data)
        )
        return True
def make_allfunc(processes, predicate, func, **extra_kwargs):
    """ Return a closure representing a function that calls a
    function for every process, and returns a result.

    The closure is a supervisor "deferred": it returns NOT_DONE_YET until
    every per-process callback (obtained by calling ``func(name, ...)`` for
    each process matching ``predicate``) has completed, then returns a list
    of status dicts, one per matched process.

    Uses ``except RPCError as e`` (Python 2.6+/3 compatible) instead of the
    Python 2-only comma form; behavior is unchanged.
    """
    callbacks = []
    results = []

    def allfunc(processes=processes, predicate=predicate, func=func,
                extra_kwargs=extra_kwargs, callbacks=callbacks,
                results=results):
        if not callbacks:
            # first call: obtain a deferred callback for every matching process
            for group, process in processes:
                name = make_namespec(group.config.name, process.config.name)
                if predicate(process):
                    try:
                        callback = func(name, **extra_kwargs)
                        callbacks.append((group, process, callback))
                    except RPCError as e:
                        results.append({'name': process.config.name,
                                        'group': group.config.name,
                                        'status': e.code,
                                        'description': e.text})
                        continue
        if not callbacks:
            return results
        # service one pending callback per invocation
        group, process, callback = callbacks.pop(0)
        try:
            value = callback()
        except RPCError as e:
            results.append(
                {'name': process.config.name,
                 'group': group.config.name,
                 'status': e.code,
                 'description': e.text})
            return NOT_DONE_YET
        if value is NOT_DONE_YET:
            # push it back into the queue; it will finish eventually
            callbacks.append((group, process, callback))
        else:
            results.append(
                {'name': process.config.name,
                 'group': group.config.name,
                 'status': Faults.SUCCESS,
                 'description': 'OK'}
                )
        if callbacks:
            return NOT_DONE_YET
        return results
    # XXX the above implementation has a weakness inasmuch as the
    # first call into each individual process callback will always
    # return NOT_DONE_YET, so they need to be called twice.  The
    # symptom of this is that calling this method causes the
    # client to block for much longer than it actually requires to
    # kill all of the running processes.  After the first call to
    # the killit callback, the process is actually dead, but the
    # above killall method processes the callbacks one at a time
    # during the select loop, which, because there is no output
    # from child processes after e.g. stopAllProcesses is called,
    # is not busy, so hits the timeout for each callback.  I
    # attempted to make this better, but the only way to make it
    # better assumes totally synchronous reaping of child
    # processes, which requires infrastructure changes to
    # supervisord that are scary at the moment as it could take a
    # while to pin down all of the platform differences and might
    # require a C extension to the Python signal module to allow
    # the setting of ignore flags to signals.
    return allfunc
def isRunning(process):
    """Return True if the process is in a running state, else False.

    The original returned an implicit None on the false path; returning an
    explicit bool is truthiness-compatible for all callers (make_allfunc
    predicates, isNotRunning) and clearer.
    """
    return process.get_state() in RUNNING_STATES
def isNotRunning(process):
    """Inverse predicate of isRunning, used by the start-all commands."""
    running = isRunning(process)
    return not running
# this is not used in code but referenced via an entry point in the conf file
def make_main_rpcinterface(supervisord):
    """Factory for the main RPC interface; wired up via an entry point
    referenced in the supervisord configuration file."""
    return SupervisorNamespaceRPCInterface(supervisord)
| |
from unittest.mock import Mock
import requests
from django.test import RequestFactory
from django_cas_ng.utils import get_redirect_url, get_service_url, get_cas_client
#
# get_service_url tests
#
def test_service_url_helper():
    """get_service_url builds the service URL with a ``next`` parameter."""
    request = RequestFactory().get('/login/')
    assert get_service_url(request) == 'http://testserver/login/?next=%2F'
def test_service_url_helper_as_https():
    """An HTTPS request yields an https:// service URL."""
    extra = {'secure': True, 'wsgi.url_scheme': 'https', 'SERVER_PORT': '443'}
    request = RequestFactory().get('/login/', **extra)
    assert get_service_url(request) == 'https://testserver/login/?next=%2F'
def test_service_url_helper_with_redirect():
    """An explicit redirect target is URL-encoded into ``next``."""
    request = RequestFactory().get('/login/')
    result = get_service_url(request, redirect_to='http://testserver/landing-page/')
    assert result == (
        'http://testserver/login/?next=http%3A%2F%2Ftestserver%2Flanding-page%2F'
    )
def test_service_url_preserves_query_parameters():
    """The encoded redirect target survives service-URL construction."""
    request = RequestFactory().get('/login/?foo=bar', secure=True)
    result = get_service_url(request, redirect_to='https://testserver/landing-page/')
    assert 'next=https%3A%2F%2Ftestserver%2Flanding-page%2F' in result
def test_service_url_avoids_next(settings):
    """With CAS_STORE_NEXT enabled, no ``next`` parameter is appended."""
    settings.CAS_STORE_NEXT = True
    request = RequestFactory().get('/login/')
    assert get_service_url(request, redirect_to='/admin/') == 'http://testserver/login/'
def test_service_url_root_proxied_as(settings):
    """CAS_ROOT_PROXIED_AS replaces scheme/host/port of the service URL."""
    settings.CAS_ROOT_PROXIED_AS = 'https://foo.bar:8443'
    request = RequestFactory().get('/login/')
    assert get_service_url(request) == 'https://foo.bar:8443/login/?next=%2F'
def test_service_url_root_proxied_as_empty_string(settings):
    """
    If the settings module has the attribute CAS_ROOT_PROXIED_AS but its
    value is an empty string (or another falsy value), the setting must be
    ignored while constructing the redirect url.
    """
    settings.CAS_ROOT_PROXIED_AS = ''
    request = RequestFactory().get('/login/')
    assert get_service_url(request) == 'http://testserver/login/?next=%2F'
def test_force_ssl_service_url(settings):
    """CAS_FORCE_SSL_SERVICE_URL upgrades the service URL scheme to https."""
    settings.CAS_FORCE_SSL_SERVICE_URL = True
    request = RequestFactory().get('/login/')
    assert get_service_url(request) == 'https://testserver/login/?next=%2F'
#
# get_redirect_url tests
#
def test_redirect_url_with_url_as_get_parameter():
    """The ``next`` query parameter is used as the redirect target."""
    request = RequestFactory().get('/login/', data={'next': '/landing-page/'})
    assert get_redirect_url(request) == '/landing-page/'
def test_redirect_url_falls_back_to_cas_redirect_url_setting(settings):
    """Without ``next`` or a referrer, CAS_REDIRECT_URL is used."""
    settings.CAS_IGNORE_REFERER = True
    settings.CAS_REDIRECT_URL = '/landing-page/'
    request = RequestFactory().get('/login/')
    assert get_redirect_url(request) == '/landing-page/'
def test_params_redirect_url_preceeds_settings_redirect_url(settings):
    """A ``next`` parameter takes precedence over CAS_REDIRECT_URL."""
    settings.CAS_IGNORE_REFERER = True
    settings.CAS_REDIRECT_URL = '/landing-page/'
    request = RequestFactory().get('/login/', data={'next': '/override/'})
    assert get_redirect_url(request) == '/override/'
def test_redirect_url_falls_back_to_http_referrer(settings):
    """When referrers are honoured, HTTP_REFERER beats CAS_REDIRECT_URL."""
    settings.CAS_IGNORE_REFERER = False
    settings.CAS_REDIRECT_URL = '/wrong-landing-page/'
    request = RequestFactory().get('/login/', HTTP_REFERER='/landing-page/')
    assert get_redirect_url(request) == '/landing-page/'
def test_redirect_url_strips_domain_prefix(settings):
    """An absolute CAS_REDIRECT_URL is reduced to its path component."""
    settings.CAS_IGNORE_REFERER = True
    settings.CAS_REDIRECT_URL = 'http://testserver/landing-page/'
    request = RequestFactory().get('/login/')
    assert get_redirect_url(request) == '/landing-page/'
def test_redirect_url_named_pattern(settings):
    """A named URL pattern in CAS_REDIRECT_URL resolves to its path."""
    settings.CAS_IGNORE_REFERER = False
    settings.CAS_REDIRECT_URL = 'home'
    request = RequestFactory().get('/login/')
    assert get_redirect_url(request) == '/'
def test_redirect_url_named_pattern_without_referrer(settings):
    """With referrers ignored, a named pattern resolves even when one is sent."""
    settings.CAS_IGNORE_REFERER = True
    settings.CAS_REDIRECT_URL = 'home'
    request = RequestFactory().get('/login/', HTTP_REFERER='/landing-page/')
    assert get_redirect_url(request) == '/'
def test_redirect_url_referrer_no_named_pattern(settings):
    """A referrer that is not a named pattern is used verbatim."""
    settings.CAS_IGNORE_REFERER = False
    settings.CAS_REDIRECT_URL = '/wrong-landing-page/'
    request = RequestFactory().get('/login/', HTTP_REFERER='home')
    assert get_redirect_url(request) == 'home'
def test_redirect_url_next_no_named_pattern(settings):
    """A ``next`` value that is not a named pattern is used verbatim."""
    settings.CAS_IGNORE_REFERER = False
    settings.CAS_REDIRECT_URL = '/wrong-landing-page/'
    request = RequestFactory().get('/login/', data={'next': 'home'})
    assert get_redirect_url(request) == 'home'
def test_session_factory(settings):
    """CAS_SESSION_FACTORY supplies the requests.Session used by the client."""
    fake_session = requests.Session()
    factory = Mock(return_value=fake_session)
    settings.CAS_SESSION_FACTORY = factory
    client = get_cas_client()
    assert factory.called
    assert client.session is fake_session
| |
"""Tests for the Hyperion integration."""
from datetime import timedelta
from unittest.mock import AsyncMock, call, patch
from hyperion.const import (
KEY_COMPONENT,
KEY_COMPONENTID_ALL,
KEY_COMPONENTID_TO_NAME,
KEY_COMPONENTSTATE,
KEY_STATE,
)
from homeassistant.components.hyperion import get_hyperion_device_id
from homeassistant.components.hyperion.const import (
DOMAIN,
HYPERION_MANUFACTURER_NAME,
HYPERION_MODEL_NAME,
TYPE_HYPERION_COMPONENT_SWITCH_BASE,
)
from homeassistant.components.switch import DOMAIN as SWITCH_DOMAIN
from homeassistant.config_entries import RELOAD_AFTER_UPDATE_DELAY
from homeassistant.const import ATTR_ENTITY_ID, SERVICE_TURN_OFF, SERVICE_TURN_ON
from homeassistant.core import HomeAssistant
from homeassistant.helpers import device_registry as dr, entity_registry as er
from homeassistant.util import dt, slugify
from . import (
TEST_CONFIG_ENTRY_ID,
TEST_INSTANCE,
TEST_INSTANCE_1,
TEST_SYSINFO_ID,
call_registered_callback,
create_mock_client,
register_test_entity,
setup_test_config_entry,
)
from tests.common import async_fire_time_changed
# Mirror of a Hyperion server's component list: each entry names a component
# and records whether it is enabled (this drives the initial switch states).
TEST_COMPONENTS = [
    {"enabled": True, "name": "ALL"},
    {"enabled": True, "name": "SMOOTHING"},
    {"enabled": True, "name": "BLACKBORDER"},
    {"enabled": False, "name": "FORWARDER"},
    {"enabled": False, "name": "BOBLIGHTSERVER"},
    {"enabled": False, "name": "GRABBER"},
    {"enabled": False, "name": "V4L"},
    {"enabled": True, "name": "LEDDEVICE"},
]
# Entity-id prefix shared by all component switches of test instance 1.
TEST_SWITCH_COMPONENT_BASE_ENTITY_ID = "switch.test_instance_1_component"
# Entity id of the switch controlling the special "ALL" component.
TEST_SWITCH_COMPONENT_ALL_ENTITY_ID = f"{TEST_SWITCH_COMPONENT_BASE_ENTITY_ID}_all"
async def test_switch_turn_on_off(hass: HomeAssistant) -> None:
    """Test turning the "ALL" component switch off and back on."""
    client = create_mock_client()
    client.async_send_set_component = AsyncMock(return_value=True)
    client.components = TEST_COMPONENTS
    # Setup component switch.
    register_test_entity(
        hass,
        SWITCH_DOMAIN,
        f"{TYPE_HYPERION_COMPONENT_SWITCH_BASE}_all",
        TEST_SWITCH_COMPONENT_ALL_ENTITY_ID,
    )
    await setup_test_config_entry(hass, hyperion_client=client)
    # Verify switch is on (as per TEST_COMPONENTS above).
    entity_state = hass.states.get(TEST_SWITCH_COMPONENT_ALL_ENTITY_ID)
    assert entity_state
    assert entity_state.state == "on"
    # Turn switch off.
    await hass.services.async_call(
        SWITCH_DOMAIN,
        SERVICE_TURN_OFF,
        {ATTR_ENTITY_ID: TEST_SWITCH_COMPONENT_ALL_ENTITY_ID},
        blocking=True,
    )
    # Verify correct parameters are passed to the library.
    assert client.async_send_set_component.call_args == call(
        **{KEY_COMPONENTSTATE: {KEY_COMPONENT: KEY_COMPONENTID_ALL, KEY_STATE: False}}
    )
    # The mock client does not track state itself: simulate the server
    # reporting the ALL component as now disabled.
    client.components[0] = {
        "enabled": False,
        "name": "ALL",
    }
    call_registered_callback(client, "components-update")
    # Verify the switch turns off.
    entity_state = hass.states.get(TEST_SWITCH_COMPONENT_ALL_ENTITY_ID)
    assert entity_state
    assert entity_state.state == "off"
    # Turn switch on.
    await hass.services.async_call(
        SWITCH_DOMAIN,
        SERVICE_TURN_ON,
        {ATTR_ENTITY_ID: TEST_SWITCH_COMPONENT_ALL_ENTITY_ID},
        blocking=True,
    )
    # Verify correct parameters are passed to the library.
    assert client.async_send_set_component.call_args == call(
        **{KEY_COMPONENTSTATE: {KEY_COMPONENT: KEY_COMPONENTID_ALL, KEY_STATE: True}}
    )
    # Simulate the server reporting the ALL component as re-enabled.
    client.components[0] = {
        "enabled": True,
        "name": "ALL",
    }
    call_registered_callback(client, "components-update")
    # Verify the switch turns on.
    entity_state = hass.states.get(TEST_SWITCH_COMPONENT_ALL_ENTITY_ID)
    assert entity_state
    assert entity_state.state == "on"
async def test_switch_has_correct_entities(hass: HomeAssistant) -> None:
    """Verify one switch entity is created per Hyperion component."""
    client = create_mock_client()
    client.components = TEST_COMPONENTS
    # Pre-compute the slugified component names used in the entity ids.
    slugs = [
        slugify(KEY_COMPONENTID_TO_NAME[str(component["name"])])
        for component in TEST_COMPONENTS
    ]
    # Setup component switch.
    for slug in slugs:
        register_test_entity(
            hass,
            SWITCH_DOMAIN,
            f"{TYPE_HYPERION_COMPONENT_SWITCH_BASE}_{slug}",
            f"{TEST_SWITCH_COMPONENT_BASE_ENTITY_ID}_{slug}",
        )
    await setup_test_config_entry(hass, hyperion_client=client)
    for slug in slugs:
        entity_id = f"{TEST_SWITCH_COMPONENT_BASE_ENTITY_ID}_{slug}"
        assert hass.states.get(entity_id), f"Couldn't find entity: {entity_id}"
async def test_device_info(hass: HomeAssistant) -> None:
    """Verify device information includes expected details."""
    client = create_mock_client()
    client.components = TEST_COMPONENTS
    for component in TEST_COMPONENTS:
        name = slugify(KEY_COMPONENTID_TO_NAME[str(component["name"])])
        register_test_entity(
            hass,
            SWITCH_DOMAIN,
            f"{TYPE_HYPERION_COMPONENT_SWITCH_BASE}_{name}",
            f"{TEST_SWITCH_COMPONENT_BASE_ENTITY_ID}_{name}",
        )
    await setup_test_config_entry(hass, hyperion_client=client)
    assert hass.states.get(TEST_SWITCH_COMPONENT_ALL_ENTITY_ID) is not None
    device_identifier = get_hyperion_device_id(TEST_SYSINFO_ID, TEST_INSTANCE)
    device_registry = dr.async_get(hass)
    device = device_registry.async_get_device({(DOMAIN, device_identifier)})
    assert device
    assert device.config_entries == {TEST_CONFIG_ENTRY_ID}
    assert device.identifiers == {(DOMAIN, device_identifier)}
    assert device.manufacturer == HYPERION_MANUFACTURER_NAME
    assert device.model == HYPERION_MODEL_NAME
    assert device.name == TEST_INSTANCE_1["friendly_name"]
    # Use the synchronous er.async_get accessor (consistent with the other
    # tests in this module) instead of the deprecated awaitable
    # er.async_get_registry.
    entity_registry = er.async_get(hass)
    entities_from_device = [
        entry.entity_id
        for entry in er.async_entries_for_device(entity_registry, device.id)
    ]
    for component in TEST_COMPONENTS:
        name = slugify(KEY_COMPONENTID_TO_NAME[str(component["name"])])
        entity_id = TEST_SWITCH_COMPONENT_BASE_ENTITY_ID + "_" + name
        assert entity_id in entities_from_device
async def test_switches_can_be_enabled(hass: HomeAssistant) -> None:
    """Verify switches can be enabled."""
    client = create_mock_client()
    client.components = TEST_COMPONENTS
    await setup_test_config_entry(hass, hyperion_client=client)
    registry = er.async_get(hass)
    for component in TEST_COMPONENTS:
        slug = slugify(KEY_COMPONENTID_TO_NAME[str(component["name"])])
        entity_id = f"{TEST_SWITCH_COMPONENT_BASE_ENTITY_ID}_{slug}"
        # Component switches start out disabled by the integration.
        registry_entry = registry.async_get(entity_id)
        assert registry_entry
        assert registry_entry.disabled
        assert registry_entry.disabled_by is er.RegistryEntryDisabler.INTEGRATION
        assert not hass.states.get(entity_id)
        with patch(
            "homeassistant.components.hyperion.client.HyperionClient",
            return_value=client,
        ):
            updated_entry = registry.async_update_entity(
                entity_id, disabled_by=None
            )
            assert not updated_entry.disabled
            await hass.async_block_till_done()
            # The config entry reloads after a delay: advance time past it.
            async_fire_time_changed(
                hass,
                dt.utcnow() + timedelta(seconds=RELOAD_AFTER_UPDATE_DELAY + 1),
            )
            await hass.async_block_till_done()
            assert hass.states.get(entity_id)
| |
import json
import yaml
import logging
import os
from copy import copy
from yaml.scanner import ScannerError
# Module-level logger shared by the loader's debug/warning output.
logger = logging.getLogger('loader')
class NeckbeardLoader(object):
    """
    The loader takes a directory of Neckbeard configuration files and spits out
    a Neckbeard instance with the gathered configuration.

    Along the way, it also does bare minimum validation, ensuring that:

    * We're not missing any required files
    * Everything is valid JSON or YAML
    * Everything is properly versioned with a `neckbeard_conf_version`
    * JSON/YAML properties that should agree with the directory structure
      actually do that (you can't put an `ec2` node_template in an `rds`
      directory).
    """
    # %-format templates for every validation error, keyed by error type.
    # The format context always contains `file_path`; call sites may supply
    # extra keys (e.g. `option_name`, `expected`, `actual`, `filename`).
    VALIDATION_MESSAGES = {
        'invalid_configuration_directory': (
            "The configuration directory "
            "does not exist or is not accessible."
        ),
        'invalid_json': (
            "Invalid JSON. Check for trailing commas. "
            "Error: %(error)s"
        ),
        'invalid_yaml': (
            "Invalid YAML. "
            "Error: %(error)s"
        ),
        'duplicate_config': (
            "JSON and YAML files with same name should not be present. "
            "File: %(filename)s"
        ),
        'missing_file': "File is required, but missing.",
        'missing_environment': (
            "You need at least one environment configuration, "
            "or what are we really doing here? "
            "I recommend starting with <%(file_path)s/staging.json>"
        ),
        'missing_option': (
            "The option '%(option_name)s' is required, but missing."
        ),
        'file_option_mismatch': (
            "The option '%(option_name)s' doesn't match its folder structure. "
            "Expected: '%(expected)s' But found: '%(actual)s'"
        ),
    }
    # Canonical shape of a fully-loaded configuration dictionary.
    CONFIG_STRUCTURE = {
        "constants": None,
        "neckbeard_meta": None,
        "secrets": None,
        "secrets.tpl": None,
        "environments": {},
        "node_templates": {
            "ec2": {},
            "rds": {},
            "elb": {},
        },
    }
    # Configuration files expected directly in the configuration root.
    ROOT_CONF_FILES = [
        'constants',
        'neckbeard_meta',
        'secrets',
        'secrets.tpl',
    ]
    # Option every configuration file must carry; checked by
    # _validate_neckbeard_conf_version.
    VERSION_OPTION = 'neckbeard_conf_version'
    def __init__(self, configuration_directory):
        # Root directory containing the Neckbeard configuration tree.
        self.configuration_directory = configuration_directory
        # A dictionary of errors keyed based on the file to which they are
        # related. The error itself is a 2-tuple of the ErrorType plus a
        # message.
        self.validation_errors = {}
        # NOTE(review): copy() is shallow, so the nested dicts inside
        # CONFIG_STRUCTURE stay shared with the class attribute until this
        # is reassigned by _validate_configuration — confirm intentional.
        self.raw_configuration = copy(self.CONFIG_STRUCTURE)
def _all_config_files(self, directory):
"""
Generator to iterate through all of the JSON and YAML files in a
directory.
"""
for path, dirs, files in os.walk(directory):
for f in files:
full_fp = os.path.join(path, f)
extensionless_fp = full_fp[:-5]
if f.endswith('.json') or f.endswith('.yaml'):
yield extensionless_fp
def _add_validation_error(self, file_path, error_type, extra_context=None):
if not file_path in self.validation_errors:
self.validation_errors[file_path] = {}
if not error_type in self.validation_errors[file_path]:
self.validation_errors[file_path][error_type] = []
context = {'file_path': file_path}
if extra_context:
context.update(extra_context)
error_message = self.VALIDATION_MESSAGES[error_type] % context
logger.debug("Validation Error: %s", error_message)
self.validation_errors[file_path][error_type].append(error_message)
def _add_path_relative_validation_error(
self, relative_path, error_type, extra_context=None,
):
file_path = os.path.join(self.configuration_directory, relative_path)
self._add_validation_error(file_path, error_type, extra_context)
def print_validation_errors(self):
for file_path, error_types in self.validation_errors.items():
logger.warning("%s errors:", file_path)
for error_type, errors in error_types.items():
for error in errors:
logger.warning(" %s", error)
def _get_config_from_file(self, file_path):
json_exists = os.path.isfile('%s.json' % file_path)
yaml_exists = os.path.isfile('%s.yaml' % file_path)
if json_exists and yaml_exists:
_, name = os.path.split(file_path)
self._add_validation_error(
file_path,
'duplicate_config',
extra_context={'filename': name},
)
return {}
if json_exists:
return self._get_data_from_file(file_path, json, 'json')
elif yaml_exists:
return self._get_data_from_file(file_path, yaml, 'yaml')
else:
self._add_validation_error(
file_path,
'missing_file',
)
return {}
def _get_data_from_file(self, extensionless_file_path, parser, file_type):
file_path = '%s.%s' % (extensionless_file_path, file_type)
try:
with open(file_path, 'r') as fp:
try:
return parser.load(fp)
except (ValueError, ScannerError) as e:
logger.debug(
"Error parsing %s file: %s",
file_type,
file_path,
)
logger.debug("%s", e)
self._add_validation_error(
file_path,
'invalid_%s' % file_type.lower(),
extra_context={'error': e},
)
return {}
except IOError as e:
logger.debug("Error opening %s file: %s", file_type, file_path)
logger.debug("%s", e)
self._add_validation_error(
file_path,
'missing_file',
)
return {}
def _get_name_from_conf_file_path(self, file_path):
"""
Given a file path to a json/yaml config file, get the file's
path-roomed and .json/.yaml-removed name. For environment files, this
is the environment name. For node_templates, the template name, etc.
"""
_, tail = os.path.split(file_path)
# if tail.endswith('.json'):
# name, _ = tail.rsplit('.json', 1)
# elif tail.endswith('.yaml'):
# name, _ = tail.rsplit('.yaml', 1)
# TODO: confirm that it will never be the case that tail ends with
# somethign else
return tail
def _load_root_configuration_files(self, configuration_directory):
root_configs = {}
for conf_file in self.ROOT_CONF_FILES:
extensionless_fp = os.path.join(configuration_directory, conf_file)
root_configs[conf_file] = self._get_config_from_file(
extensionless_fp,
)
return root_configs
def _load_environment_files(self, configuration_directory):
environment_dir = os.path.join(configuration_directory, 'environments')
configs = {}
for environment_config_fp in self._all_config_files(environment_dir):
name = self._get_name_from_conf_file_path(environment_config_fp)
configs[name] = self._get_config_from_file(environment_config_fp)
if len(configs) == 0:
# There were no environment files. That's a problem
self._add_validation_error(
environment_dir,
'missing_environment',
)
return configs
def _load_node_template_files(self, configuration_directory):
node_templates_dir = os.path.join(
configuration_directory,
'node_templates',
)
configs = copy(self.CONFIG_STRUCTURE['node_templates'])
# If there aren't any node_templates, no sweat
if not os.path.exists(node_templates_dir):
logger.debug("No node_templates configuration found")
return configs
# Gather up node_templates for the various AWS node types
aws_types = configs.keys()
for aws_type in aws_types:
node_type_dir = os.path.join(node_templates_dir, aws_type)
if not os.path.exists(node_type_dir):
logger.debug(
"No %s node_templates configurations found",
aws_type,
)
continue
for node_config_fp in self._all_config_files(node_type_dir):
name = self._get_name_from_conf_file_path(node_config_fp)
configs[aws_type][name] = self._get_config_from_file(
node_config_fp,
)
return configs
def _load_configuration_files(self, configuration_directory):
if not os.path.exists(configuration_directory):
self._add_validation_error(
configuration_directory,
'invalid_configuration_directory',
)
return {}
config = self._load_root_configuration_files(configuration_directory)
environments = self._load_environment_files(configuration_directory)
config['environments'] = environments
node_templates = self._load_node_template_files(
configuration_directory,
)
config['node_templates'] = node_templates
return config
def _validate_option_agrees(
self, relative_path, name, expected_value, config, required=True,
):
actual_value = config.get(name)
if actual_value is None:
if required:
self._add_path_relative_validation_error(
relative_path,
'missing_option',
extra_context={'option_name': name},
)
return
elif actual_value != expected_value:
self._add_path_relative_validation_error(
relative_path,
'file_option_mismatch',
extra_context={
'option_name': name,
'expected': expected_value,
'actual': actual_value,
},
)
def _validate_node_template_agreement(self, raw_configuration):
"""
Ensure that the `node_aws_type` and `node_template_name` for each
`node_template` configuration actually agrees with the folder structure
in which it was contained.
A `node_templates/ec2/foo.json` template file should not say it's an
`rds` node called `bar`.
"""
raw_node_template_config = raw_configuration['node_templates']
for aws_type, node_templates in raw_node_template_config.items():
for node_template_name, config in node_templates.items():
# Check for existence and folder structure mismatch
relative_path = 'node_templates/%s/%s.json' % (
aws_type,
node_template_name,
)
self._validate_option_agrees(
relative_path,
'node_aws_type',
aws_type,
config,
)
self._validate_option_agrees(
relative_path,
'node_template_name',
node_template_name,
config,
)
def _validate_environment_name_agreement(self, raw_configuration):
environments_config = raw_configuration['environments']
for environment_name, config in environments_config.items():
# Check for existence and folder structure mismatch
relative_path = 'environments/%s.json' % environment_name
self._validate_option_agrees(
relative_path,
'name',
environment_name,
config,
)
def _validate_neckbeard_conf_version(self, raw_configuration):
# Check all of the root configuration files
for root_conf in self.ROOT_CONF_FILES:
if not raw_configuration[root_conf].get(self.VERSION_OPTION):
relative_path = '%s.json' % root_conf
self._add_path_relative_validation_error(
relative_path,
'missing_option',
extra_context={
'option_name': self.VERSION_OPTION,
},
)
# Check all of the environment configs
for name, config in raw_configuration['environments'].items():
if not config.get(self.VERSION_OPTION):
relative_path = 'environments/%s.json' % name
self._add_path_relative_validation_error(
relative_path,
'missing_option',
extra_context={
'option_name': self.VERSION_OPTION,
},
)
# Check all of the node_templates
all_node_templates = raw_configuration.get('node_templates', {})
for aws_type, node_templates in all_node_templates.items():
for node_template_name, config in node_templates.items():
if not config.get(self.VERSION_OPTION):
relative_path = 'node_templates/%s/%s.json' % (
aws_type,
node_template_name,
)
self._add_path_relative_validation_error(
relative_path,
'missing_option',
extra_context={
'option_name': self.VERSION_OPTION,
},
)
def _validate_configuration(self):
self.validation_errors = {}
self.raw_configuration = self._load_configuration_files(
self.configuration_directory,
)
if len(self.validation_errors) > 0:
# If there are errors loading/parsing the files, don't attempt
# further validation
return
self._validate_neckbeard_conf_version(self.raw_configuration)
if len(self.validation_errors) > 0:
# If we can't determine the configuration version of the files, we
# can't rely on any other validation
return
self._validate_node_template_agreement(self.raw_configuration)
self._validate_environment_name_agreement(self.raw_configuration)
def configuration_is_valid(self):
self._validate_configuration()
if len(self.validation_errors) > 0:
return False
return True
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.