gt stringclasses 1 value | context stringlengths 2.49k 119k |
|---|---|
import os
import sys
import json
import tempfile
import shutil
import hashlib
import glob
import textwrap
from subprocess import call, check_output, CalledProcessError
from optparse import make_option
import debug # pyflakes:ignore
from django.core.management.base import BaseCommand
from django.core.files.storage import FileSystemStorage
from django.conf import settings
from django.contrib.staticfiles.finders import BaseStorageFinder, AppDirectoriesFinder
class BaseDirectoryFinder(BaseStorageFinder):
    """Static files finder rooted at settings.BASE_DIR, so that project-level
    package description files (bower.json, package.json, Gruntfile.js) can be
    located by ``finder.find(...)`` in the management command below."""
    # Lookups are resolved against the project base directory.
    storage = FileSystemStorage(location=settings.BASE_DIR)
class Command(BaseCommand):
    """
    This command goes through any static/ directories of installed apps,
    and the directories listed in settings.STATICFILES_DIRS. If package
    description files for bower, npm, or grunt are found in any of these
    locations, it will use the appropriate package manager to install the
    listed packages in a temporary folder, using these commands:
    - package.json: npm install
    - Gruntfile.js: grunt default
    - bower.json: bower install
    It will then extract the distribution files to the location indicated in
    settings.COMPONENT_ROOT.
    """
    help = textwrap.dedent(__doc__).lstrip()
    # Destination for the extracted component distribution files.
    component_root = getattr(settings, 'COMPONENT_ROOT', os.path.join(settings.STATIC_ROOT, "components"))
    option_list = BaseCommand.option_list + (
        make_option('--with-version', dest='with_version', default=False, action='store_true',
                    help='Create component directories with version numbers'),
        make_option('--keep-packages', dest='keep_packages', default=False, action='store_true',
                    help='Keep the downloaded bower packages, instead of removing them after moving '
                         'distribution files to settings.COMPONENT_ROOT'),
    )
    # Cache of parsed bower.json contents, keyed by file path.
    bower_info = {}
    # NOTE(review): appears unused in this command; kept for backward compatibility.
    overrides = {}

    def npm_install(self, pkg_json_path):
        """Run ``npm install`` in the directory containing the given package.json."""
        os.chdir(os.path.dirname(pkg_json_path))
        call(['npm', 'install'])

    def grunt_default(self, grunt_js_path):
        """Run the default grunt task in the directory containing the Gruntfile."""
        os.chdir(os.path.dirname(grunt_js_path))
        call(['grunt'])

    def bower_install(self, bower_json_path, dest_dir):
        """Runs bower command for the passed bower.json path.

        :param bower_json_path: bower.json file to install
        :param dest_dir: where the compiled result will arrive
        """
        # Verify that we are able to run bower, in order to give a good error message in the
        # case that it's not installed. Do this separately from the 'bower install' call, in
        # order not to warn about a missing bower in the case of installation-related errors.
        try:
            # decode() so the version prints as text rather than b'...' on Python 3
            bower_version = check_output(['bower', '--version']).decode().strip()
        except OSError as e:
            print("Trying to run bower failed -- is it installed? The error was: %s" % e)
            exit(1)
        except CalledProcessError as e:
            print("Checking the bower version failed: %s" % e)
            exit(2)
        print("\nBower %s" % bower_version)
        print("Installing from %s\n" % bower_json_path)
        # bower args
        args = ['bower', 'install', bower_json_path,
                '--verbose', '--config.cwd={}'.format(dest_dir), '-p']
        # run bower command
        call(args)

    def get_bower_info(self, bower_json_path):
        """Parse the given bower.json into the cache; no-op if already cached."""
        if bower_json_path not in self.bower_info:
            # Context manager so the file handle is closed promptly.
            with open(bower_json_path) as f:
                self.bower_info[bower_json_path] = json.load(f)

    def get_bower_main_list(self, bower_json_path, override):
        """
        Returns the bower.json main list or empty list.
        Applies overrides from the site-wide bower.json.
        """
        self.get_bower_info(bower_json_path)
        main_list = self.bower_info[bower_json_path].get('main')
        component = self.bower_info[bower_json_path].get('name')
        # An "overrides" entry in the site-wide bower.json wins over the
        # component's own "main" list.
        if (override in self.bower_info
                and "overrides" in self.bower_info[override]
                and component in self.bower_info[override].get("overrides")
                and "main" in self.bower_info[override].get("overrides").get(component)):
            main_list = self.bower_info[override].get("overrides").get(component).get("main")
        # "main" may be a single string or a list; normalize to a list.
        if isinstance(main_list, list):
            return main_list
        if main_list:
            return [main_list]
        return []

    def get_bower_version(self, bower_json_path):
        """Returns the "version" string from the given bower.json, or None."""
        self.get_bower_info(bower_json_path)
        return self.bower_info[bower_json_path].get("version")

    def clean_components_to_static_dir(self, bower_dir, override):
        """Copy each installed component's distribution files from *bower_dir*
        into settings.COMPONENT_ROOT, honoring "main" overrides from the
        bower.json at *override* and preferring minified variants."""
        print("\nMoving component files to %s\n" % (self.component_root,))
        for directory in os.listdir(bower_dir):
            print("Component: %s" % (directory, ))
            src_root = os.path.join(bower_dir, directory)
            for bower_json in ['bower.json', '.bower.json']:
                bower_json_path = os.path.join(src_root, bower_json)
                if os.path.exists(bower_json_path):
                    main_list = self.get_bower_main_list(bower_json_path, override)
                    version = self.get_bower_version(bower_json_path)
                    dst_root = os.path.join(self.component_root, directory)
                    if self.with_version:
                        assert not dst_root.endswith(os.sep)
                        dst_root += "-" + version
                    for pattern in filter(None, main_list):
                        src_pattern = os.path.join(src_root, pattern)
                        # main_list elements can be fileglob patterns
                        for src_path in glob.glob(src_pattern):
                            # See if we have a minified alternative
                            path, ext = os.path.splitext(src_path)
                            min_path = path + ".min" + ext
                            if os.path.exists(min_path):
                                src_path = min_path
                            if not os.path.exists(src_path):
                                print("Could not find source path: %s" % (src_path, ))
                                # Skip: copying a missing file would raise IOError.
                                continue
                            # Build the destination path, dropping a leading dist/
                            src_part = src_path[len(src_root + '/'):]
                            if src_part.startswith('dist/'):
                                src_part = src_part[len('dist/'):]
                            dst_path = os.path.join(dst_root, src_part)
                            # Normalize the paths, for good looks
                            src_path = os.path.abspath(src_path)
                            dst_path = os.path.abspath(dst_path)
                            # Check if we need to copy the file at all: compare
                            # SHA-1 of both files. Binary mode, since hashlib
                            # requires bytes (and avoids newline translation).
                            if os.path.exists(dst_path):
                                with open(src_path, 'rb') as src:
                                    src_hash = hashlib.sha1(src.read()).hexdigest()
                                with open(dst_path, 'rb') as dst:
                                    dst_hash = hashlib.sha1(dst.read()).hexdigest()
                                if src_hash == dst_hash:
                                    continue
                            # Make sure dest dir exists.
                            dst_dir = os.path.dirname(dst_path)
                            if not os.path.exists(dst_dir):
                                os.makedirs(dst_dir)
                            print(' {0} > {1}'.format(src_path, dst_path))
                            shutil.copy(src_path, dst_path)
                    # Only the first of bower.json / .bower.json is used.
                    break

    def handle(self, *args, **options):
        """Entry point: install npm/grunt/bower packages found by the static
        finders into a temp dir, then move bower distribution files into
        settings.COMPONENT_ROOT."""
        self.with_version = options.get("with_version")
        self.keep_packages = options.get("keep_packages")
        temp_dir = getattr(settings, 'BWR_APP_TMP_FOLDER', '.tmp')
        temp_dir = os.path.abspath(temp_dir)
        # finders
        basefinder = BaseDirectoryFinder()
        appfinder = AppDirectoriesFinder()
        # Assume bower.json files are to be found in each app directory,
        # rather than in the app's static/ subdirectory:
        appfinder.source_dir = '.'
        finders = (basefinder, appfinder, )
        if os.path.exists(temp_dir):
            if not self.keep_packages:
                sys.stderr.write(
                    "\nWARNING:\n\n"
                    " The temporary package installation directory exists, but the --keep-packages\n"
                    " option has not been given. In order to not delete anything which should be\n"
                    " kept, %s will not be removed.\n\n"
                    " Please remove it manually, or use the --keep-packages option to avoid this\n"
                    " message.\n\n" % (temp_dir,))
                self.keep_packages = True
        else:
            os.makedirs(temp_dir)
        for finder in finders:
            for path in finder.find('package.json', all=True):
                self.npm_install(path)
        for finder in finders:
            # Fixed: was 'Gruntfile.json'; grunt uses Gruntfile.js (as documented above).
            for path in finder.find('Gruntfile.js', all=True):
                self.grunt_default(path)
        for finder in finders:
            for path in finder.find('bower.json', all=True):
                self.get_bower_info(path)
                self.bower_install(path, temp_dir)
                bower_dir = os.path.join(temp_dir, 'bower_components')
                # nothing to clean
                if not os.path.exists(bower_dir):
                    print('No components seems to have been found by bower, exiting.')
                    sys.exit(0)
                # The site-wide bower.json just installed acts as the override source.
                self.clean_components_to_static_dir(bower_dir, path)
        if not self.keep_packages:
            shutil.rmtree(temp_dir)
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
# Optional per-call response hook: receives the pipeline response, the
# deserialized result, and the response headers dict.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]

# Module-wide msrest serializer used by the request builders below;
# client-side validation is disabled here.
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_by_batch_account_request(
    subscription_id: str,
    resource_group_name: str,
    account_name: str,
    *,
    maxresults: Optional[int] = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request that lists private link resources of a Batch account.

    :param subscription_id: Azure subscription id (URL path segment).
    :param resource_group_name: Resource group containing the account.
    :param account_name: Batch account name (3-24 alphanumeric characters).
    :keyword maxresults: Optional page-size limit added as a query parameter.
    :return: A runnable :class:`~azure.core.rest.HttpRequest`.
    """
    api_version = "2022-01-01"
    accept = "application/json"

    # Fill the URL template with the serialized (and validated) path arguments.
    url = kwargs.pop(
        "template_url",
        '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/privateLinkResources',
    )
    url = _format_url_section(
        url,
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, 'str'),
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        accountName=_SERIALIZER.url(
            "account_name", account_name, 'str',
            max_length=24, min_length=3, pattern=r'^[a-zA-Z0-9]+$',
        ),
    )

    # Query string: api-version is mandatory, maxresults only when given.
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    if maxresults is not None:
        query_parameters['maxresults'] = _SERIALIZER.query("maxresults", maxresults, 'int')

    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )
def build_get_request(
    subscription_id: str,
    resource_group_name: str,
    account_name: str,
    private_link_resource_name: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request for a single private link resource of a Batch account.

    :param subscription_id: Azure subscription id (URL path segment).
    :param resource_group_name: Resource group containing the account.
    :param account_name: Batch account name (3-24 alphanumeric characters).
    :param private_link_resource_name: Name of the private link resource.
    :return: A runnable :class:`~azure.core.rest.HttpRequest`.
    """
    api_version = "2022-01-01"
    accept = "application/json"

    # Fill the URL template with the serialized (and validated) path arguments.
    url = kwargs.pop(
        "template_url",
        '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/privateLinkResources/{privateLinkResourceName}',
    )
    url = _format_url_section(
        url,
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, 'str'),
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        accountName=_SERIALIZER.url(
            "account_name", account_name, 'str',
            max_length=24, min_length=3, pattern=r'^[a-zA-Z0-9]+$',
        ),
        privateLinkResourceName=_SERIALIZER.url(
            "private_link_resource_name", private_link_resource_name, 'str',
            max_length=101, min_length=1, pattern=r'^[a-zA-Z0-9_-]+\.?[a-fA-F0-9-]*$',
        ),
    )

    # Query string: only the mandatory api-version.
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )
class PrivateLinkResourceOperations(object):
    """PrivateLinkResourceOperations operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.batch.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    @distributed_trace
    def list_by_batch_account(
        self,
        resource_group_name: str,
        account_name: str,
        maxresults: Optional[int] = None,
        **kwargs: Any
    ) -> Iterable["_models.ListPrivateLinkResourcesResult"]:
        """Lists all of the private link resources in the specified account.

        :param resource_group_name: The name of the resource group that contains the Batch account.
        :type resource_group_name: str
        :param account_name: The name of the Batch account.
        :type account_name: str
        :param maxresults: The maximum number of items to return in the response.
        :type maxresults: int
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ListPrivateLinkResourcesResult or the result of
         cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.batch.models.ListPrivateLinkResourcesResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ListPrivateLinkResourcesResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        def prepare_request(next_link=None):
            # First page uses the operation's URL template; subsequent pages
            # reuse the service-provided next_link verbatim.
            if not next_link:
                request = build_list_by_batch_account_request(
                    subscription_id=self._config.subscription_id,
                    resource_group_name=resource_group_name,
                    account_name=account_name,
                    maxresults=maxresults,
                    template_url=self.list_by_batch_account.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                request = build_list_by_batch_account_request(
                    subscription_id=self._config.subscription_id,
                    resource_group_name=resource_group_name,
                    account_name=account_name,
                    maxresults=maxresults,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request

        def extract_data(pipeline_response):
            # Deserialize one page and hand back (next page link, item iterator).
            deserialized = self._deserialize("ListPrivateLinkResourcesResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)

        def get_next(next_link=None):
            # Fetch one page; raise mapped/ARM-formatted errors on non-200.
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response

        return ItemPaged(
            get_next, extract_data
        )
    list_by_batch_account.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/privateLinkResources'}  # type: ignore

    @distributed_trace
    def get(
        self,
        resource_group_name: str,
        account_name: str,
        private_link_resource_name: str,
        **kwargs: Any
    ) -> "_models.PrivateLinkResource":
        """Gets information about the specified private link resource.

        :param resource_group_name: The name of the resource group that contains the Batch account.
        :type resource_group_name: str
        :param account_name: The name of the Batch account.
        :type account_name: str
        :param private_link_resource_name: The private link resource name. This must be unique within
         the account.
        :type private_link_resource_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: PrivateLinkResource, or the result of cls(response)
        :rtype: ~azure.mgmt.batch.models.PrivateLinkResource
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PrivateLinkResource"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        request = build_get_request(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            account_name=account_name,
            private_link_resource_name=private_link_resource_name,
            template_url=self.get.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('PrivateLinkResource', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/privateLinkResources/{privateLinkResourceName}'}  # type: ignore
| |
# -*- coding: utf-8 -*-
import httplib as http
import contextlib
import mock
from nose.tools import * # flake8: noqa
import re
from tests.base import ApiTestCase, DbTestCase
from tests import factories
from tests.utils import make_drf_request
from api.base.settings.defaults import API_BASE
from api.base.serializers import JSONAPISerializer
from api.base import serializers as base_serializers
from api.nodes.serializers import NodeSerializer, RelationshipField
from api.registrations.serializers import RegistrationSerializer
class FakeModel(object):
    """Stand-in model exposing both null and valued fields/attributes
    for exercising serializer link and relationship handling."""

    # Plain class attributes referenced by FakeSerializer's '<null>' / '<foo>' kwargs.
    null = None
    foo = 'bar'
    pk = '1234'

    def null_field(self):
        """Field accessor that resolves to None."""
        return None

    def valued_field(self):
        """Field accessor that resolves to a truthy value."""
        return 'Some'
class FakeSerializer(base_serializers.JSONAPISerializer):
    """Minimal serializer for testing how LinksField and RelationshipField
    handle null versus valued lookups on FakeModel."""

    class Meta:
        type_ = 'foos'

    # One link resolves to None, the other to a URL (see resolvers below).
    links = base_serializers.LinksField({
        'null_field': 'null_field',
        'valued_field': 'valued_field',
    })
    # '<null>' dereferences FakeModel.null (None) -> relationship omitted.
    null_link_field = base_serializers.RelationshipField(
        related_view='nodes:node-detail',
        related_view_kwargs={'node_id': '<null>'},
    )
    # '<foo>' dereferences FakeModel.foo ('bar') -> relationship kept.
    valued_link_field = base_serializers.RelationshipField(
        related_view='nodes:node-detail',
        related_view_kwargs={'node_id': '<foo>'},
    )

    def null_field(*args, **kwargs):
        # Resolver for the 'null_field' link above.
        return None

    def valued_field(*args, **kwargs):
        # Resolver for the 'valued_field' link above.
        return 'http://foo.com'
class TestNodeSerializerAndRegistrationSerializerDifferences(ApiTestCase):
    """
    All fields on the Node Serializer other than the few we can serialize for withdrawals must be redeclared on the
    Registration Serializer and wrapped in HideIfWithdrawal

    HideIfRegistration fields should not be serialized on registrations.
    """

    def setUp(self):
        """Create a public project and a registration of it, plus API detail URLs."""
        super(TestNodeSerializerAndRegistrationSerializerDifferences, self).setUp()
        self.node = factories.ProjectFactory(is_public=True)
        self.registration = factories.RegistrationFactory(project = self.node, is_public=True)
        self.url = '/{}nodes/{}/'.format(API_BASE, self.node._id)
        self.reg_url = '/{}registrations/{}/'.format(API_BASE, self.registration._id)

    def test_registration_serializer(self):
        """Every NodeSerializer field must exist on RegistrationSerializer and,
        unless visible on withdrawals, be wrapped in HideIfWithdrawal."""
        # fields that are visible for withdrawals
        visible_on_withdrawals = ['contributors', 'date_created', 'description', 'id', 'links', 'registration', 'title', 'type']
        # fields that do not appear on registrations
        non_registration_fields = ['registrations']
        for field in NodeSerializer._declared_fields:
            assert_in(field, RegistrationSerializer._declared_fields)
            reg_field = RegistrationSerializer._declared_fields[field]
            if field not in visible_on_withdrawals and field not in non_registration_fields:
                assert_true(isinstance(reg_field, base_serializers.HideIfWithdrawal))

    def test_hide_if_registration_fields(self):
        """HideIfRegistration relationships appear on nodes but not on registrations."""
        node_res = self.app.get(self.url)
        node_relationships = node_res.json['data']['relationships']
        registration_res = self.app.get(self.reg_url)
        registration_relationships = registration_res.json['data']['relationships']
        hide_if_registration_fields = [field for field in NodeSerializer._declared_fields if isinstance(NodeSerializer._declared_fields[field], base_serializers.HideIfRegistration)]
        for field in hide_if_registration_fields:
            assert_in(field, node_relationships)
            assert_not_in(field, registration_relationships)
class TestNullLinks(ApiTestCase):
    """Links and relationships that resolve to None must be dropped from output."""

    def test_null_links_are_omitted(self):
        req = make_drf_request()
        # Serialize the FakeModel class itself; its resolvers supply the values.
        rep = FakeSerializer(FakeModel, context={'request': req}).data['data']
        assert_not_in('null_field', rep['links'])
        assert_in('valued_field', rep['links'])
        assert_not_in('null_link_field', rep['relationships'])
        assert_in('valued_link_field', rep['relationships'])
class TestApiBaseSerializers(ApiTestCase):
    """Behavior shared by all API serializers: get_absolute_url overrides and
    the related_counts / embed query parameters."""

    def setUp(self):
        """Create a public project with five public children."""
        super(TestApiBaseSerializers, self).setUp()
        self.node = factories.ProjectFactory(is_public=True)
        for i in range(5):
            factories.ProjectFactory(is_public=True, parent=self.node)
        self.url = '/{}nodes/{}/'.format(API_BASE, self.node._id)

    def test_serializers_have_get_absolute_url_method(self):
        """Every non-test JSONAPISerializer subclass must override get_absolute_url."""
        serializers = JSONAPISerializer.__subclasses__()
        base_get_absolute_url = JSONAPISerializer.get_absolute_url
        for serializer in serializers:
            # Skip serializers defined inside test modules.
            if not re.match('^(api_test|test).*', serializer.__module__):
                assert hasattr(serializer, 'get_absolute_url'), 'No get_absolute_url method'
                assert_not_equal(serializer.get_absolute_url, base_get_absolute_url)

    def test_counts_not_included_in_link_fields_by_default(self):
        res = self.app.get(self.url)
        relationships = res.json['data']['relationships']
        for relation in relationships.values():
            if relation == {}:
                continue
            link = relation['links'].values()[0]
            assert_not_in('count', link['meta'])

    def test_counts_included_in_link_fields_with_related_counts_query_param(self):
        res = self.app.get(self.url, params={'related_counts': True})
        relationships = res.json['data']['relationships']
        for key, relation in relationships.iteritems():
            if relation == {}:
                continue
            field = NodeSerializer._declared_fields[key]
            # Unwrap HideIf*-style wrappers to reach the underlying field.
            if getattr(field, 'field', None):
                field = field.field
            if (field.related_meta or {}).get('count'):
                link = relation['links'].values()[0]
                assert_in('count', link['meta'])

    def test_related_counts_excluded_query_param_false(self):
        res = self.app.get(self.url, params={'related_counts': False})
        relationships = res.json['data']['relationships']
        for relation in relationships.values():
            if relation == {}:
                continue
            link = relation['links'].values()[0]
            assert_not_in('count', link['meta'])

    def test_invalid_related_counts_value_raises_bad_request(self):
        res = self.app.get(self.url, params={'related_counts': 'fish'}, expect_errors=True)
        assert_equal(res.status_code, http.BAD_REQUEST)

    def test_invalid_embed_value_raise_bad_request(self):
        res = self.app.get(self.url, params={'embed': 'foo'}, expect_errors=True)
        assert_equal(res.status_code, http.BAD_REQUEST)
        assert_equal(res.json['errors'][0]['detail'], "The following fields are not embeddable: foo")

    def test_counts_included_in_children_field_with_children_related_counts_query_param(self):
        """related_counts=<field name> adds counts only for that field."""
        res = self.app.get(self.url, params={'related_counts': 'children'})
        relationships = res.json['data']['relationships']
        for key, relation in relationships.iteritems():
            if relation == {}:
                continue
            field = NodeSerializer._declared_fields[key]
            if getattr(field, 'field', None):
                field = field.field
            link = relation['links'].values()[0]
            if (field.related_meta or {}).get('count') and key == 'children':
                assert_in('count', link['meta'])
            else:
                assert_not_in('count', link['meta'])

    def test_counts_included_in_children_and_contributors_fields_with_field_csv_related_counts_query_param(self):
        """related_counts accepts a comma-separated list of field names."""
        res = self.app.get(self.url, params={'related_counts': 'children,contributors'})
        relationships = res.json['data']['relationships']
        for key, relation in relationships.iteritems():
            if relation == {}:
                continue
            field = NodeSerializer._declared_fields[key]
            if getattr(field, 'field', None):
                field = field.field
            link = relation['links'].values()[0]
            if (field.related_meta or {}).get('count') and key == 'children' or key == 'contributors':
                assert_in('count', link['meta'])
            else:
                assert_not_in('count', link['meta'])

    def test_error_when_requesting_related_counts_for_attribute_field(self):
        res = self.app.get(self.url, params={'related_counts': 'title'}, expect_errors=True)
        assert_equal(res.status_code, http.BAD_REQUEST)
        assert_equal(res.json['errors'][0]['detail'], "Acceptable values for the related_counts query param are 'true', 'false', or any of the relationship fields; got 'title'")
class TestRelationshipField(DbTestCase):

    # We need a Serializer to test the Relationship field (needs context)
    class BasicNodeSerializer(JSONAPISerializer):
        """Inline serializer declaring one RelationshipField per scenario under test."""

        parent = RelationshipField(
            related_view='nodes:node-detail',
            related_view_kwargs={'node_id': '<pk>'}
        )
        # related_meta resolvers are looked up on this serializer (see get_count/get_extra).
        parent_with_meta = RelationshipField(
            related_view='nodes:node-detail',
            related_view_kwargs={'node_id': '<pk>'},
            related_meta={'count': 'get_count', 'extra': 'get_extra'},
        )
        self_and_related_field = RelationshipField(
            related_view='nodes:node-detail',
            related_view_kwargs={'node_id': '<pk>'},
            self_view='nodes:node-contributors',
            self_view_kwargs={'node_id': '<pk>'},
        )
        two_url_kwargs = RelationshipField(
            # fake url, for testing purposes
            related_view='nodes:node-pointer-detail',
            related_view_kwargs={'node_id': '<pk>', 'node_link_id': '<pk>'},
        )
        not_attribute_on_target = RelationshipField(
            # fake url, for testing purposes
            related_view='nodes:node-children',
            related_view_kwargs={'node_id': '12345'}
        )

        class Meta:
            type_ = 'nodes'

        def get_count(self, obj):
            # Meta resolver for 'count' on parent_with_meta.
            return 1

        def get_extra(self, obj):
            # Meta resolver for 'extra' on parent_with_meta.
            return 'foo'

    # TODO: Expand tests

    # Regression test for https://openscience.atlassian.net/browse/OSF-4832
    def test_serializing_meta(self):
        """'count' meta only appears when requested; 'extra' always resolves."""
        req = make_drf_request()
        project = factories.ProjectFactory()
        node = factories.NodeFactory(parent=project)
        data = self.BasicNodeSerializer(node, context={'request': req}).data['data']
        meta = data['relationships']['parent_with_meta']['links']['related']['meta']
        assert_not_in('count', meta)
        assert_in('extra', meta)
        assert_equal(meta['extra'], 'foo')

    def test_self_and_related_fields(self):
        """A field with both self_view and related_view emits both hrefs."""
        req = make_drf_request()
        project = factories.ProjectFactory()
        node = factories.NodeFactory(parent=project)
        data = self.BasicNodeSerializer(node, context={'request': req}).data['data']
        relationship_field = data['relationships']['self_and_related_field']['links']
        assert_in('/v2/nodes/{}/contributors/'.format(node._id), relationship_field['self']['href'])
        assert_in('/v2/nodes/{}/'.format(node._id), relationship_field['related']['href'])

    def test_field_with_two_kwargs(self):
        """Multiple url kwargs are all substituted into the related href."""
        req = make_drf_request()
        project = factories.ProjectFactory()
        node = factories.NodeFactory(parent=project)
        data = self.BasicNodeSerializer(node, context={'request': req}).data['data']
        field = data['relationships']['two_url_kwargs']['links']
        assert_in('/v2/nodes/{}/node_links/{}/'.format(node._id, node._id), field['related']['href'])

    def test_field_with_non_attribute(self):
        """Literal (non-'<attr>') url kwargs are used verbatim."""
        req = make_drf_request()
        project = factories.ProjectFactory()
        node = factories.NodeFactory(parent=project)
        data = self.BasicNodeSerializer(node, context={'request': req}).data['data']
        field = data['relationships']['not_attribute_on_target']['links']
        assert_in('/v2/nodes/{}/children/'.format('12345'), field['related']['href'])
| |
"Supporting objects and functions to convert Matplotlib objects into Bokeh."
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
import itertools
import warnings
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from .models.glyphs import (Asterisk, Circle, Cross, Diamond, InvertedTriangle,
Line, MultiLine, Patches, Square, Text, Triangle, X)
from .mplexporter.exporter import Exporter
from .mplexporter.renderers import Renderer
from .mpl_helpers import (convert_dashes, delete_last_col, get_props_cycled,
is_ax_end, xkcd_line)
from .models import (ColumnDataSource, DataRange1d, DatetimeAxis, GlyphRenderer,
Grid, GridPlot, LinearAxis, PanTool, Plot, PreviewSaveTool,
ResetTool, WheelZoomTool)
from .plotting import (curdoc, output_file, output_notebook, output_server,
DEFAULT_TOOLS)
from .plotting_helpers import _process_tools_arg
# Names that we want in this namespace (fool pyflakes)
(PanTool, ResetTool, PreviewSaveTool, WheelZoomTool)
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
class BokehRenderer(Renderer):
    def __init__(self, pd_obj, xkcd):
        """Initial setup.

        :param pd_obj: when True, draw_line tries to convert x values through
            pandas Periods (for datetime axes).
        :param xkcd: when True, lines/titles get the xkcd sketch styling.
        """
        self.fig = None
        self.pd_obj = pd_obj
        self.xkcd = xkcd
        # One shared data source for all glyphs produced by this renderer.
        self.source = ColumnDataSource()
        self.xdr = DataRange1d()
        self.ydr = DataRange1d()
        self.non_text = []  # to save the text we don't want to convert by draw_text
    def open_figure(self, fig, props):
        "Get the main plot properties and create the plot."
        # Pixel dimensions derived from the mpl figure size (inches) and dpi.
        self.width = int(props['figwidth'] * props['dpi'])
        self.height = int(props['figheight'] * props['dpi'])
        self.plot = Plot(x_range=self.xdr,
                         y_range=self.ydr,
                         plot_width=self.width,
                         plot_height=self.height)
    def close_figure(self, fig):
        "Complete the plot: add tools."
        # Add tools
        tool_objs = _process_tools_arg(self.plot, DEFAULT_TOOLS)
        self.plot.add_tools(*tool_objs)

        # Simple or Grid plot setup
        if len(fig.axes) <= 1:
            self.fig = self.plot
        else:
            # This list comprehension splits the plot.renderers list at the "marker"
            # points returning small sublists corresponding with each subplot.
            subrends = [list(x[1]) for x in itertools.groupby(
                self.plot.renderers, lambda x: is_ax_end(x)) if not x[0]]
            plots = []
            for i, axes in enumerate(fig.axes):
                # create a new plot for each subplot
                _plot = Plot(x_range=self.xdr,
                             y_range=self.ydr,
                             plot_width=self.width,
                             plot_height=self.height)
                _plot.title = ""
                # and add new tools
                _tool_objs = _process_tools_arg(_plot, DEFAULT_TOOLS)
                _plot.add_tools(*_tool_objs)
                # clean the plot ref from axis and grids
                _plot_rends = subrends[i]
                for r in _plot_rends:
                    if not isinstance(r, GlyphRenderer):
                        r.plot = None
                # add all the renderers into the new subplot
                # NOTE(review): relies on the fixed order axes/grids were appended
                # in open_axes — confirm if that method changes.
                _plot.add_layout(_plot_rends[0], 'below')  # xaxis
                _plot.add_layout(_plot_rends[1], 'left')  # yaxis
                _plot.add_layout(_plot_rends[2])  # xgrid
                _plot.add_layout(_plot_rends[3])  # ygrid
                for r in _plot_rends[4:]:  # all the glyphs
                    _plot.renderers.append(r)
                plots.append(_plot)
            # Arrange the subplots on the grid declared by the first axes' geometry.
            (a, b, c) = fig.axes[0].get_geometry()
            p = np.array(plots)
            n = np.resize(p, (a, b))
            grid = GridPlot(children=n.tolist())
            self.fig = grid
    def open_axes(self, ax, props):
        "Get axes data and create the axes and grids"
        # Get axes, title and grid into class attributes.
        self.ax = ax
        self.plot.title = ax.get_title()
        # to avoid title conversion by draw_text later
        self.non_text.append(self.plot.title)
        self.grid = ax.get_xgridlines()[0]

        # Add axis
        bxaxis = self.make_axis(ax.xaxis, "below", props['xscale'])
        byaxis = self.make_axis(ax.yaxis, "left", props['yscale'])

        # Add grids
        self.make_grid(bxaxis, 0)
        self.make_grid(byaxis, 1)

        # Setup collections info: drop collections with empty/placeholder paths.
        nones = ("", " ", "None", "none", None)
        cols = [col for col in self.ax.collections if col.get_paths() not in nones]

        # Add collections renderers
        [self.make_line_collection(col) for col in cols if isinstance(col, mpl.collections.LineCollection)]
        [self.make_poly_collection(col) for col in cols if isinstance(col, mpl.collections.PolyCollection)]
    def close_axes(self, ax):
        "Complete the axes adding axes-dependent plot props"
        background_fill = ax.get_axis_bgcolor()
        if background_fill == 'w':
            background_fill = 'white'
        self.plot.background_fill = background_fill
        if self.xkcd:
            # xkcd style: hand-drawn-looking title font.
            self.plot.title_text_font = "Comic Sans MS, Textile, cursive"
            self.plot.title_text_font_style = "bold"
            self.plot.title_text_color = "black"

        # Add a "marker" Glyph to help the plot.renderers splitting in the GridPlot build
        dummy_source = ColumnDataSource(data=dict(name="ax_end"))
        self.plot.renderers.append(GlyphRenderer(data_source=dummy_source, glyph=X()))
def open_legend(self, legend, props):
    # Legends are not converted; the hook must exist for the mplexporter walk.
    pass
def close_legend(self, legend):
    # Counterpart of open_legend; nothing to finalize.
    pass
def draw_line(self, data, coordinates, style, label, mplobj=None):
    "Given a mpl line2d instance create a Bokeh Line glyph."
    raw_x = data[:, 0]
    if self.pd_obj is True:
        try:
            # Interpret ordinals as pandas Periods when the x axis carries a freq.
            x = [pd.Period(ordinal=int(v), freq=self.ax.xaxis.freq).to_timestamp()
                 for v in raw_x]
        except AttributeError:  # we probably can make this one more intelligent later
            x = raw_x
    else:
        x = raw_x
    y = data[:, 1]
    if self.xkcd:
        x, y = xkcd_line(x, y)
    glyph = Line()
    glyph.x = self.source.add(x)
    glyph.y = self.source.add(y)
    glyph.line_color = style['color']
    glyph.line_width = style['linewidth']
    glyph.line_alpha = style['alpha']
    # "1,2,3" -> [1, 2, 3]
    glyph.line_dash = [int(v) for v in style['dasharray'].split(",")]
    # style['zorder'] has no Bokeh counterpart; join/cap styles are not
    # exposed by mplexporter.
    if self.xkcd:
        glyph.line_width = 3
    self.plot.add_glyph(self.source, glyph)
def draw_markers(self, data, coordinates, style, label, mplobj=None):
    "Given a mpl line2d instance create a Bokeh Marker glyph."
    xs = data[:, 0]
    ys = data[:, 1]
    glyph_classes = {
        "o": Circle,
        "s": Square,
        "+": Cross,
        "^": Triangle,
        "v": InvertedTriangle,
        "x": X,
        "D": Diamond,
        "*": Asterisk,
    }
    # Not every matplotlib marker is handled yet; unknown ones fall back to
    # Circle. See http://matplotlib.org/api/markers_api.html for the full list.
    klass = glyph_classes.get(style['marker'])
    if klass is None:
        warnings.warn("Unable to handle marker: %s; defaulting to Circle" % style['marker'])
        klass = Circle
    marker = klass()
    marker.x = self.source.add(xs)
    marker.y = self.source.add(ys)
    marker.line_color = style['edgecolor']
    marker.fill_color = style['facecolor']
    marker.line_width = style['edgewidth']
    marker.size = style['markersize']
    marker.fill_alpha = marker.line_alpha = style['alpha']
    # style['zorder'] has no Bokeh counterpart.
    self.plot.add_glyph(self.source, marker)
def draw_path_collection(self, paths, path_coordinates, path_transforms,
                         offsets, offset_coordinates, offset_order,
                         styles, mplobj=None):
    """Path collections are intentionally a no-op.

    Paths are not implemented in Bokeh, but we have our own line and poly
    collection implementations, so passing here avoids mplexporter raising
    NotImplementedError for this hook.
    """
    pass
def draw_text(self, text, position, coordinates, style,
              text_type=None, mplobj=None):
    "Given a mpl text instance create a Bokeh Text glyph."
    # mpl hands over the title and the axes names as text objects placed
    # inside the plot area. Bokeh models those separately, so skip anything
    # recorded in self.non_text and convert only the remaining text.
    if text in self.non_text:
        return
    x, y = position
    glyph = Text(x=x, y=y, text=[text])
    # "baseline" is not implemented in Bokeh; map it to "bottom".
    baseline_map = {"center": "middle", "top": "top",
                    "bottom": "bottom", "baseline": "bottom"}
    glyph.text_alpha = style['alpha']
    glyph.text_font_size = "%dpx" % style['fontsize']
    glyph.text_color = style['color']
    glyph.text_align = style['halign']
    glyph.text_baseline = baseline_map[style['valign']]
    glyph.angle = style['rotation']
    # style['zorder'], font family/weight are not exposed by mplexporter.
    self.plot.add_glyph(self.source, glyph)
def draw_image(self, imdata, extent, coordinates, style, mplobj=None):
    # Image conversion is not implemented; images are silently dropped.
    pass
def make_axis(self, ax, location, scale):
    """Create, attach and return a Bokeh axis for a mpl axis instance.

    :param ax: the matplotlib Axis (xaxis or yaxis) being converted
    :param location: side of the plot to attach to ("below" or "left")
    :param scale: mplexporter scale string; "linear" and "date" are supported
    :raises NotImplementedError: for any other scale (e.g. "log")
    """
    # TODO:
    # * handle log scaling
    # * map `labelpad` to `major_label_standoff`
    # * deal with minor ticks once BokehJS supports them
    # * handle custom tick locations once that is added to bokehJS
    # we need to keep the current axes names to avoid writing them in draw_text
    self.non_text.append(ax.get_label_text())
    if scale == "linear":
        laxis = LinearAxis(axis_label=ax.get_label_text())
    elif scale == "date":
        laxis = DatetimeAxis(axis_label=ax.get_label_text())
    else:
        # Previously any other scale fell through and the add_layout call
        # below crashed with a confusing UnboundLocalError; fail explicitly.
        raise NotImplementedError("Unsupported axis scale: %r" % scale)
    self.plot.add_layout(laxis, location)
    if self.xkcd:
        laxis.axis_line_width = 3
        laxis.axis_label_text_font = "Comic Sans MS, Textile, cursive"
        laxis.axis_label_text_font_style = "bold"
        laxis.axis_label_text_color = "black"
        laxis.major_label_text_font = "Comic Sans MS, Textile, cursive"
        laxis.major_label_text_font_style = "bold"
        laxis.major_label_text_color = "black"
    return laxis
def make_grid(self, baxis, dimension):
    "Attach a Bokeh Grid that mirrors the stored mpl grid line's look."
    grid = Grid(
        dimension=dimension,
        ticker=baxis.ticker,
        grid_line_color=self.grid.get_color(),
        grid_line_width=self.grid.get_linewidth(),
    )
    self.plot.add_layout(grid)
def make_line_collection(self, col):
    "Given a mpl collection instance create a Bokeh MultiLine glyph."
    # Transpose each (N, 2) segment into separate x and y sequences.
    segments = [np.transpose(seg) for seg in col.get_segments()]
    xs = [seg[0] for seg in segments]
    ys = [seg[1] for seg in segments]
    if self.xkcd:
        # Two separate passes, matching the original call pattern of
        # xkcd_line (one call per coordinate list).
        jittered_xs = [xkcd_line(x, y)[0] for x, y in zip(xs, ys)]
        jittered_ys = [xkcd_line(x, y)[1] for x, y in zip(xs, ys)]
        xs, ys = jittered_xs, jittered_ys
    multiline = MultiLine()
    multiline.xs = self.source.add(xs)
    multiline.ys = self.source.add(ys)
    self.multiline_props(multiline, col)
    self.plot.add_glyph(self.source, multiline)
def make_poly_collection(self, col):
    "Given a mpl collection instance create a Bokeh Patches glyph."
    # Convert every path to polygons, drop the closing column, and split
    # each polygon into x and y coordinate lists.
    polygons = [path.to_polygons() for path in col.get_paths()]
    polygons = [np.transpose(delete_last_col(poly)) for poly in polygons]
    xs = [poly[0] for poly in polygons]
    ys = [poly[1] for poly in polygons]
    patches = Patches()
    patches.xs = self.source.add(xs)
    patches.ys = self.source.add(ys)
    self.patches_props(patches, col)
    self.plot.add_glyph(self.source, patches)
def multiline_props(self, multiline, col):
    "Takes a mpl collection object to extract and set up some Bokeh multiline properties."
    hex_colors = get_props_cycled(col, col.get_colors(), fx=lambda c: mpl.colors.rgb2hex(c))
    multiline.line_color = self.source.add(hex_colors)
    multiline.line_width = self.source.add(get_props_cycled(col, col.get_linewidth()))
    multiline.line_alpha = col.get_alpha()
    # mpl stores the dash pattern as (offset, on_off_seq); on_off_seq is
    # falsy for solid lines.
    offset, on_off = col.get_linestyle()[0]
    dashes = [] if not on_off else map(int, on_off)
    multiline.line_dash_offset = convert_dashes(offset)
    multiline.line_dash = list(convert_dashes(tuple(dashes)))
def patches_props(self, patches, col):
    "Takes a mpl collection object to extract and set up some Bokeh patches properties."
    fills = get_props_cycled(col, col.get_facecolors(), fx=lambda c: mpl.colors.rgb2hex(c))
    patches.fill_color = self.source.add(fills)
    edges = get_props_cycled(col, col.get_edgecolors(), fx=lambda c: mpl.colors.rgb2hex(c))
    patches.line_color = self.source.add(edges)
    patches.line_width = self.source.add(get_props_cycled(col, col.get_linewidth()))
    patches.line_alpha = col.get_alpha()
    # mpl stores the dash pattern as (offset, on_off_seq); on_off_seq is
    # falsy for solid lines.
    offset, on_off = col.get_linestyle()[0]
    dashes = [] if not on_off else map(int, on_off)
    patches.line_dash_offset = convert_dashes(offset)
    patches.line_dash = list(convert_dashes(tuple(dashes)))
def to_bokeh(fig=None, name=None, server=None, notebook=False, pd_obj=True, xkcd=False):
    """ Uses bokeh to display a Matplotlib Figure.

    You can store a bokeh plot in a standalone HTML file, as a document in
    a Bokeh plot server, or embedded directly into an IPython Notebook
    output cell.

    Parameters
    ----------

    fig: matplotlib.figure.Figure
        The figure to display. If None or not specified, then the current figure
        will be used.

    name: str (default=None)
        If this option is provided, then the Bokeh figure will be saved into
        this HTML file, and then a web browser will be used to display it.

    server: str (default=None)
        Fully specified URL of bokeh plot server. Default bokeh plot server
        URL is "http://localhost:5006" or simply "default"

    notebook: bool (default=False)
        Return an output value from this function which represents an HTML
        object that the IPython notebook can display. You can also use it with
        a bokeh plot server just specifying the URL.

    pd_obj: bool (default=True)
        The implementation assumes you are plotting using the pandas.
        You have the option to turn it off (False) to plot the datetime xaxis
        with other non-pandas interfaces.

    xkcd: bool (default=False)
        If this option is True, then the Bokeh figure will be saved with a
        xkcd style.
    """
    if fig is None:
        fig = plt.gcf()
    # Select the output target; flattened but equivalent to the original
    # name/server/notebook branch chain.
    if name:
        if server:
            output_server(name, url=server)
        else:
            output_file(name + ".html")
    elif server:
        if notebook:
            output_notebook(url=server)
        else:
            output_server("unnameuuuuuuuuuuuuuud", url=server)
    elif notebook:
        output_notebook()
    else:
        output_file("Unnamed.html")
    doc = curdoc()
    renderer = BokehRenderer(pd_obj, xkcd)
    Exporter(renderer).run(fig)
    doc._current_plot = renderer.fig  # TODO (bev) do not rely on private attrs
    doc.add(renderer.fig)
    return renderer.fig
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
# Generic return type used by the per-operation ``cls`` response hooks.
T = TypeVar('T')
# Optional callback invoked with (pipeline response, deserialized body, response headers).
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ExpressRouteCircuitsOperations:
"""ExpressRouteCircuitsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_11_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
    # Pipeline client used to build and send HTTP requests.
    self._client = client
    # msrest-style serializer/deserializer pair for request/response bodies.
    self._serialize = serializer
    self._deserialize = deserializer
    # Client configuration (subscription id, polling interval, ...).
    self._config = config
async def _delete_initial(
    self,
    resource_group_name: str,
    circuit_name: str,
    **kwargs: Any
) -> None:
    """Send the initial DELETE request for an express route circuit."""
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-11-01"

    # Resolve the operation's URL template with serialized path arguments.
    url = self._delete_initial.metadata['url']  # type: ignore
    url = self._client.format_url(url, **{
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    })

    query_parameters = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    header_parameters = {}  # type: Dict[str, Any]

    request = self._client.delete(url, query_parameters, header_parameters)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in (200, 202, 204):
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    # DELETE has no body; only invoke the custom response hook if provided.
    if cls:
        return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}'}  # type: ignore
async def begin_delete(
    self,
    resource_group_name: str,
    circuit_name: str,
    **kwargs: Any
) -> AsyncLROPoller[None]:
    """Deletes the specified express route circuit.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param circuit_name: The name of the express route circuit.
    :type circuit_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be AsyncARMPolling.
     Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
    :rtype: ~azure.core.polling.AsyncLROPoller[None]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    # Pop LRO-control kwargs before they reach the pipeline.
    polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # Fresh operation: send the initial DELETE. The lambda cls hook makes
        # _delete_initial return the raw pipeline response so the poller can
        # read the LRO headers from it.
        raw_result = await self._delete_initial(
            resource_group_name=resource_group_name,
            circuit_name=circuit_name,
            cls=lambda x,y,z: x,
            **kwargs
        )
    # NOTE(review): these kwargs appear to apply only to the initial request
    # and are removed before the polling requests — confirm against azure-core.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)
    def get_long_running_output(pipeline_response):
        # DELETE yields no body; only invoke the custom response hook if given.
        if cls:
            return cls(pipeline_response, None, {})
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = AsyncNoPolling()
    else: polling_method = polling
    if cont_token:
        # Resume a previously started LRO from its saved continuation token.
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}'}  # type: ignore
async def get(
    self,
    resource_group_name: str,
    circuit_name: str,
    **kwargs: Any
) -> "_models.ExpressRouteCircuit":
    """Gets information about the specified express route circuit.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param circuit_name: The name of express route circuit.
    :type circuit_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: ExpressRouteCircuit, or the result of cls(response)
    :rtype: ~azure.mgmt.network.v2018_11_01.models.ExpressRouteCircuit
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.ExpressRouteCircuit"]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-11-01"
    accept = "application/json"

    # Resolve the operation's URL template with serialized path arguments.
    url = self.get.metadata['url']  # type: ignore
    url = self._client.format_url(url, **{
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    })

    query_parameters = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    header_parameters = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]

    request = self._client.get(url, query_parameters, header_parameters)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code != 200:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = self._deserialize('ExpressRouteCircuit', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}'}  # type: ignore
async def _create_or_update_initial(
    self,
    resource_group_name: str,
    circuit_name: str,
    parameters: "_models.ExpressRouteCircuit",
    **kwargs: Any
) -> "_models.ExpressRouteCircuit":
    """Send the initial PUT request for create-or-update.

    :param resource_group_name: The name of the resource group.
    :param circuit_name: The name of the circuit.
    :param parameters: The ExpressRouteCircuit body to PUT.
    :return: The deserialized ExpressRouteCircuit (or cls(response)).
    :raises ~azure.core.exceptions.HttpResponseError: on non-200/201 status.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.ExpressRouteCircuit"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-11-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"
    # Construct URL
    url = self._create_or_update_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)
    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
    # Serialize the request body.
    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content = self._serialize.body(parameters, 'ExpressRouteCircuit')
    body_content_kwargs['content'] = body_content
    request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response
    if response.status_code not in [200, 201]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)
    # Both 200 and 201 carry an ExpressRouteCircuit body; a single
    # deserialization replaces the duplicated per-status branches (which also
    # left `deserialized` potentially unbound in the generated original).
    deserialized = self._deserialize('ExpressRouteCircuit', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}'}  # type: ignore
async def begin_create_or_update(
    self,
    resource_group_name: str,
    circuit_name: str,
    parameters: "_models.ExpressRouteCircuit",
    **kwargs: Any
) -> AsyncLROPoller["_models.ExpressRouteCircuit"]:
    """Creates or updates an express route circuit.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param circuit_name: The name of the circuit.
    :type circuit_name: str
    :param parameters: Parameters supplied to the create or update express route circuit operation.
    :type parameters: ~azure.mgmt.network.v2018_11_01.models.ExpressRouteCircuit
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be AsyncARMPolling.
     Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either ExpressRouteCircuit or the result of cls(response)
    :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_11_01.models.ExpressRouteCircuit]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    # Pop LRO-control kwargs before they reach the pipeline.
    polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.ExpressRouteCircuit"]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # Fresh operation: send the initial PUT. The lambda cls hook returns
        # the raw pipeline response so the poller can read the LRO headers.
        raw_result = await self._create_or_update_initial(
            resource_group_name=resource_group_name,
            circuit_name=circuit_name,
            parameters=parameters,
            cls=lambda x,y,z: x,
            **kwargs
        )
    # NOTE(review): these kwargs appear to apply only to the initial request
    # and are removed before the polling requests — confirm against azure-core.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)
    def get_long_running_output(pipeline_response):
        # Deserialize the final response body into the model type.
        deserialized = self._deserialize('ExpressRouteCircuit', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = AsyncNoPolling()
    else: polling_method = polling
    if cont_token:
        # Resume a previously started LRO from its saved continuation token.
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}'}  # type: ignore
async def _update_tags_initial(
    self,
    resource_group_name: str,
    circuit_name: str,
    parameters: "_models.TagsObject",
    **kwargs: Any
) -> "_models.ExpressRouteCircuit":
    """Send the initial PATCH request that updates the circuit's tags."""
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.ExpressRouteCircuit"]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-11-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Resolve the operation's URL template with serialized path arguments.
    url = self._update_tags_initial.metadata['url']  # type: ignore
    url = self._client.format_url(url, **{
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    })

    query_parameters = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    header_parameters = {
        'Content-Type': self._serialize.header("content_type", content_type, 'str'),
        'Accept': self._serialize.header("accept", accept, 'str'),
    }  # type: Dict[str, Any]

    # Serialize the TagsObject request body.
    body_content_kwargs = {'content': self._serialize.body(parameters, 'TagsObject')}  # type: Dict[str, Any]
    request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code != 200:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = self._deserialize('ExpressRouteCircuit', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
_update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}'}  # type: ignore
async def begin_update_tags(
    self,
    resource_group_name: str,
    circuit_name: str,
    parameters: "_models.TagsObject",
    **kwargs: Any
) -> AsyncLROPoller["_models.ExpressRouteCircuit"]:
    """Updates an express route circuit tags.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param circuit_name: The name of the circuit.
    :type circuit_name: str
    :param parameters: Parameters supplied to update express route circuit tags.
    :type parameters: ~azure.mgmt.network.v2018_11_01.models.TagsObject
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be AsyncARMPolling.
     Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either ExpressRouteCircuit or the result of cls(response)
    :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_11_01.models.ExpressRouteCircuit]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    # Pop LRO-control kwargs before they reach the pipeline.
    polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.ExpressRouteCircuit"]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # Fresh operation: send the initial PATCH. The lambda cls hook returns
        # the raw pipeline response so the poller can read the LRO headers.
        raw_result = await self._update_tags_initial(
            resource_group_name=resource_group_name,
            circuit_name=circuit_name,
            parameters=parameters,
            cls=lambda x,y,z: x,
            **kwargs
        )
    # NOTE(review): these kwargs appear to apply only to the initial request
    # and are removed before the polling requests — confirm against azure-core.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)
    def get_long_running_output(pipeline_response):
        # Deserialize the final response body into the model type.
        deserialized = self._deserialize('ExpressRouteCircuit', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = AsyncNoPolling()
    else: polling_method = polling
    if cont_token:
        # Resume a previously started LRO from its saved continuation token.
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}'}  # type: ignore
async def _list_arp_table_initial(
    self,
    resource_group_name: str,
    circuit_name: str,
    peering_name: str,
    device_path: str,
    **kwargs: Any
) -> Optional["_models.ExpressRouteCircuitsArpTableListResult"]:
    """Send the initial POST request that lists the advertised ARP table."""
    cls = kwargs.pop('cls', None)  # type: ClsType[Optional["_models.ExpressRouteCircuitsArpTableListResult"]]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-11-01"
    accept = "application/json"

    # Resolve the operation's URL template with serialized path arguments.
    url = self._list_arp_table_initial.metadata['url']  # type: ignore
    url = self._client.format_url(url, **{
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
        'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
        'devicePath': self._serialize.url("device_path", device_path, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    })

    query_parameters = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    header_parameters = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]

    request = self._client.post(url, query_parameters, header_parameters)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in (200, 202):
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    # Only a 200 carries a body; a 202 means the LRO is still in progress.
    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('ExpressRouteCircuitsArpTableListResult', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
_list_arp_table_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/arpTables/{devicePath}'}  # type: ignore
async def begin_list_arp_table(
    self,
    resource_group_name: str,
    circuit_name: str,
    peering_name: str,
    device_path: str,
    **kwargs: Any
) -> AsyncLROPoller["_models.ExpressRouteCircuitsArpTableListResult"]:
    """Gets the currently advertised ARP table associated with the express route circuit in a resource
    group.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param circuit_name: The name of the express route circuit.
    :type circuit_name: str
    :param peering_name: The name of the peering.
    :type peering_name: str
    :param device_path: The path of the device.
    :type device_path: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be AsyncARMPolling.
     Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either ExpressRouteCircuitsArpTableListResult or the result of cls(response)
    :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_11_01.models.ExpressRouteCircuitsArpTableListResult]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    # Polling selector: True -> AsyncARMPolling, False -> AsyncNoPolling,
    # anything else is taken to be a caller-supplied AsyncPollingMethod.
    polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.ExpressRouteCircuitsArpTableListResult"]
    # Delay between polls; falls back to the client-wide configured interval.
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # No saved state: start the operation with the initial POST. cls is
        # overridden so the raw PipelineResponse is handed to the poller.
        raw_result = await self._list_arp_table_initial(
            resource_group_name=resource_group_name,
            circuit_name=circuit_name,
            peering_name=peering_name,
            device_path=device_path,
            cls=lambda x,y,z: x,
            **kwargs
        )
    # These kwargs were already consumed by the initial call; drop them so
    # they are not forwarded again to the polling method below.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Deserialize the final LRO response, honoring a custom cls callback.
        deserialized = self._deserialize('ExpressRouteCircuitsArpTableListResult', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
        'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
        'devicePath': self._serialize.url("device_path", device_path, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = AsyncNoPolling()
    else: polling_method = polling
    if cont_token:
        # Resume a poller from previously saved state rather than starting anew.
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_list_arp_table.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/arpTables/{devicePath}'}  # type: ignore
async def _list_routes_table_initial(
    self,
    resource_group_name: str,
    circuit_name: str,
    peering_name: str,
    device_path: str,
    **kwargs: Any
) -> Optional["_models.ExpressRouteCircuitsRoutesTableListResult"]:
    """Initial POST of the list-routes-table long-running operation.

    Returns the deserialized result on 200; returns None on 202 (operation
    accepted but still running), in which case the poller created by
    ``begin_list_routes_table`` retrieves the final result later.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[Optional["_models.ExpressRouteCircuitsRoutesTableListResult"]]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-11-01"
    accept = "application/json"

    # Construct URL
    url = self._list_routes_table_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
        'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
        'devicePath': self._serialize.url("device_path", device_path, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    request = self._client.post(url, query_parameters, header_parameters)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # 200 -> finished with a body; 202 -> accepted, result arrives via polling.
    if response.status_code not in [200, 202]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('ExpressRouteCircuitsRoutesTableListResult', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
_list_routes_table_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/routeTables/{devicePath}'}  # type: ignore
async def begin_list_routes_table(
    self,
    resource_group_name: str,
    circuit_name: str,
    peering_name: str,
    device_path: str,
    **kwargs: Any
) -> AsyncLROPoller["_models.ExpressRouteCircuitsRoutesTableListResult"]:
    """Gets the currently advertised routes table associated with the express route circuit in a
    resource group.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param circuit_name: The name of the express route circuit.
    :type circuit_name: str
    :param peering_name: The name of the peering.
    :type peering_name: str
    :param device_path: The path of the device.
    :type device_path: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be AsyncARMPolling.
     Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either ExpressRouteCircuitsRoutesTableListResult or the result of cls(response)
    :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_11_01.models.ExpressRouteCircuitsRoutesTableListResult]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    # Polling selector: True -> AsyncARMPolling, False -> AsyncNoPolling,
    # anything else is taken to be a caller-supplied AsyncPollingMethod.
    polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.ExpressRouteCircuitsRoutesTableListResult"]
    # Delay between polls; falls back to the client-wide configured interval.
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # No saved state: start the operation with the initial POST. cls is
        # overridden so the raw PipelineResponse is handed to the poller.
        raw_result = await self._list_routes_table_initial(
            resource_group_name=resource_group_name,
            circuit_name=circuit_name,
            peering_name=peering_name,
            device_path=device_path,
            cls=lambda x,y,z: x,
            **kwargs
        )
    # These kwargs were already consumed by the initial call; drop them so
    # they are not forwarded again to the polling method below.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Deserialize the final LRO response, honoring a custom cls callback.
        deserialized = self._deserialize('ExpressRouteCircuitsRoutesTableListResult', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
        'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
        'devicePath': self._serialize.url("device_path", device_path, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = AsyncNoPolling()
    else: polling_method = polling
    if cont_token:
        # Resume a poller from previously saved state rather than starting anew.
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_list_routes_table.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/routeTables/{devicePath}'}  # type: ignore
async def _list_routes_table_summary_initial(
    self,
    resource_group_name: str,
    circuit_name: str,
    peering_name: str,
    device_path: str,
    **kwargs: Any
) -> Optional["_models.ExpressRouteCircuitsRoutesTableSummaryListResult"]:
    """Initial POST of the list-routes-table-summary long-running operation.

    Returns the deserialized result on 200; returns None on 202 (operation
    accepted but still running), in which case the poller created by
    ``begin_list_routes_table_summary`` retrieves the final result later.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[Optional["_models.ExpressRouteCircuitsRoutesTableSummaryListResult"]]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-11-01"
    accept = "application/json"

    # Construct URL
    url = self._list_routes_table_summary_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
        'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
        'devicePath': self._serialize.url("device_path", device_path, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    request = self._client.post(url, query_parameters, header_parameters)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # 200 -> finished with a body; 202 -> accepted, result arrives via polling.
    if response.status_code not in [200, 202]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('ExpressRouteCircuitsRoutesTableSummaryListResult', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
_list_routes_table_summary_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/routeTablesSummary/{devicePath}'}  # type: ignore
async def begin_list_routes_table_summary(
    self,
    resource_group_name: str,
    circuit_name: str,
    peering_name: str,
    device_path: str,
    **kwargs: Any
) -> AsyncLROPoller["_models.ExpressRouteCircuitsRoutesTableSummaryListResult"]:
    """Gets the currently advertised routes table summary associated with the express route circuit in
    a resource group.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param circuit_name: The name of the express route circuit.
    :type circuit_name: str
    :param peering_name: The name of the peering.
    :type peering_name: str
    :param device_path: The path of the device.
    :type device_path: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be AsyncARMPolling.
     Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either ExpressRouteCircuitsRoutesTableSummaryListResult or the result of cls(response)
    :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_11_01.models.ExpressRouteCircuitsRoutesTableSummaryListResult]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    # Polling selector: True -> AsyncARMPolling, False -> AsyncNoPolling,
    # anything else is taken to be a caller-supplied AsyncPollingMethod.
    polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.ExpressRouteCircuitsRoutesTableSummaryListResult"]
    # Delay between polls; falls back to the client-wide configured interval.
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # No saved state: start the operation with the initial POST. cls is
        # overridden so the raw PipelineResponse is handed to the poller.
        raw_result = await self._list_routes_table_summary_initial(
            resource_group_name=resource_group_name,
            circuit_name=circuit_name,
            peering_name=peering_name,
            device_path=device_path,
            cls=lambda x,y,z: x,
            **kwargs
        )
    # These kwargs were already consumed by the initial call; drop them so
    # they are not forwarded again to the polling method below.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Deserialize the final LRO response, honoring a custom cls callback.
        deserialized = self._deserialize('ExpressRouteCircuitsRoutesTableSummaryListResult', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
        'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
        'devicePath': self._serialize.url("device_path", device_path, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = AsyncNoPolling()
    else: polling_method = polling
    if cont_token:
        # Resume a poller from previously saved state rather than starting anew.
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_list_routes_table_summary.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/routeTablesSummary/{devicePath}'}  # type: ignore
async def get_stats(
    self,
    resource_group_name: str,
    circuit_name: str,
    **kwargs: Any
) -> "_models.ExpressRouteCircuitStats":
    """Gets all the stats from an express route circuit in a resource group.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param circuit_name: The name of the express route circuit.
    :type circuit_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: ExpressRouteCircuitStats, or the result of cls(response)
    :rtype: ~azure.mgmt.network.v2018_11_01.models.ExpressRouteCircuitStats
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.ExpressRouteCircuitStats"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-11-01"
    accept = "application/json"

    # Expand the URL template with the serialized path arguments.
    template = self.get_stats.metadata['url']  # type: ignore
    request_url = self._client.format_url(
        template,
        resourceGroupName=self._serialize.url("resource_group_name", resource_group_name, 'str'),
        circuitName=self._serialize.url("circuit_name", circuit_name, 'str'),
        subscriptionId=self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    )

    # Query string and headers for the GET request.
    query = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    headers = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]

    request = self._client.get(request_url, query, headers)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code != 200:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    stats = self._deserialize('ExpressRouteCircuitStats', pipeline_response)
    if cls:
        return cls(pipeline_response, stats, {})
    return stats
get_stats.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/stats'}  # type: ignore
async def get_peering_stats(
    self,
    resource_group_name: str,
    circuit_name: str,
    peering_name: str,
    **kwargs: Any
) -> "_models.ExpressRouteCircuitStats":
    """Gets all stats from an express route circuit in a resource group.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param circuit_name: The name of the express route circuit.
    :type circuit_name: str
    :param peering_name: The name of the peering.
    :type peering_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: ExpressRouteCircuitStats, or the result of cls(response)
    :rtype: ~azure.mgmt.network.v2018_11_01.models.ExpressRouteCircuitStats
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.ExpressRouteCircuitStats"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-11-01"
    accept = "application/json"

    # Expand the URL template with the serialized path arguments.
    template = self.get_peering_stats.metadata['url']  # type: ignore
    request_url = self._client.format_url(
        template,
        resourceGroupName=self._serialize.url("resource_group_name", resource_group_name, 'str'),
        circuitName=self._serialize.url("circuit_name", circuit_name, 'str'),
        peeringName=self._serialize.url("peering_name", peering_name, 'str'),
        subscriptionId=self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    )

    # Query string and headers for the GET request.
    query = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    headers = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]

    request = self._client.get(request_url, query, headers)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code != 200:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    stats = self._deserialize('ExpressRouteCircuitStats', pipeline_response)
    if cls:
        return cls(pipeline_response, stats, {})
    return stats
get_peering_stats.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/stats'}  # type: ignore
def list(
    self,
    resource_group_name: str,
    **kwargs: Any
) -> AsyncIterable["_models.ExpressRouteCircuitListResult"]:
    """Gets all the express route circuits in a resource group.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either ExpressRouteCircuitListResult or the result of cls(response)
    :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_11_01.models.ExpressRouteCircuitListResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.ExpressRouteCircuitListResult"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-11-01"
    accept = "application/json"

    def prepare_request(next_link=None):
        # Build the GET request for one page: the first page uses the templated
        # URL plus query parameters; later pages use the service-provided
        # next_link verbatim (it already embeds any query string).
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        if not next_link:
            # Construct URL
            url = self.list.metadata['url']  # type: ignore
            path_format_arguments = {
                'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            }
            url = self._client.format_url(url, **path_format_arguments)
            # Construct parameters
            query_parameters = {}  # type: Dict[str, Any]
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
            request = self._client.get(url, query_parameters, header_parameters)
        else:
            url = next_link
            query_parameters = {}  # type: Dict[str, Any]
            request = self._client.get(url, query_parameters, header_parameters)
        return request

    async def extract_data(pipeline_response):
        # Deserialize one page; return (link to next page or None, page items).
        deserialized = self._deserialize('ExpressRouteCircuitListResult', pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, AsyncList(list_of_elem)

    async def get_next(next_link=None):
        # Fetch one page, mapping known error status codes to typed exceptions.
        request = prepare_request(next_link)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        return pipeline_response

    return AsyncItemPaged(
        get_next, extract_data
    )
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits'}  # type: ignore
def list_all(
    self,
    **kwargs: Any
) -> AsyncIterable["_models.ExpressRouteCircuitListResult"]:
    """Gets all the express route circuits in a subscription.

    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either ExpressRouteCircuitListResult or the result of cls(response)
    :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_11_01.models.ExpressRouteCircuitListResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.ExpressRouteCircuitListResult"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-11-01"
    accept = "application/json"

    def prepare_request(next_link=None):
        # Build the GET request for one page: the first page uses the templated
        # URL plus query parameters; later pages use the service-provided
        # next_link verbatim (it already embeds any query string).
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        if not next_link:
            # Construct URL
            url = self.list_all.metadata['url']  # type: ignore
            path_format_arguments = {
                'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            }
            url = self._client.format_url(url, **path_format_arguments)
            # Construct parameters
            query_parameters = {}  # type: Dict[str, Any]
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
            request = self._client.get(url, query_parameters, header_parameters)
        else:
            url = next_link
            query_parameters = {}  # type: Dict[str, Any]
            request = self._client.get(url, query_parameters, header_parameters)
        return request

    async def extract_data(pipeline_response):
        # Deserialize one page; return (link to next page or None, page items).
        deserialized = self._deserialize('ExpressRouteCircuitListResult', pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, AsyncList(list_of_elem)

    async def get_next(next_link=None):
        # Fetch one page, mapping known error status codes to typed exceptions.
        request = prepare_request(next_link)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        return pipeline_response

    return AsyncItemPaged(
        get_next, extract_data
    )
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/expressRouteCircuits'}  # type: ignore
| |
from __future__ import print_function, division
from sympy.core.add import Add
from sympy.core.compatibility import is_sequence
from sympy.core.containers import Tuple
from sympy.core.expr import Expr
from sympy.core.mul import Mul
from sympy.core.relational import Equality, Relational
from sympy.core.singleton import S
from sympy.core.symbol import Symbol, Dummy
from sympy.core.sympify import sympify
from sympy.functions.elementary.piecewise import (piecewise_fold,
Piecewise)
from sympy.logic.boolalg import BooleanFunction
from sympy.tensor.indexed import Idx
from sympy.sets.sets import Interval
from sympy.sets.fancysets import Range
from sympy.utilities import flatten
from sympy.utilities.iterables import sift
def _common_new(cls, function, *symbols, **assumptions):
    """Return either a special return value or the tuple,
    (function, limits, orientation). This code is common to
    both ExprWithLimits and AddWithLimits.

    Special returns: an ``Equality`` input is mapped to an ``Equality``
    of ``cls`` applied to each side, and ``S.NaN`` passes straight through.
    """
    function = sympify(function)

    # An equation is handled by applying cls to each side separately.
    # (The previous hasattr(function, 'func') guard was redundant: every
    # Equality instance has .func.)
    if isinstance(function, Equality):
        return Equality(cls(function.lhs, *symbols, **assumptions),
                        cls(function.rhs, *symbols, **assumptions))

    if function is S.NaN:
        return S.NaN

    if symbols:
        limits, orientation = _process_limits(*symbols)
        for i, li in enumerate(limits):
            if len(li) == 4:
                # 4-tuple (x, lo, hi, expr): substitute expr for x in the
                # function and keep only the canonical 3-tuple limit.
                function = function.subs(li[0], li[-1])
                limits[i] = Tuple(*li[:-1])
    else:
        # symbol not provided -- we can still try to compute a general form
        free = function.free_symbols
        if len(free) != 1:
            raise ValueError(
                "specify dummy variables for %s" % function)
        limits, orientation = [Tuple(s) for s in free], 1

    # denest any nested calls (only exact same class, not subclasses)
    while cls == type(function):
        limits = list(function.limits) + limits
        function = function.function

    # Any embedded piecewise functions need to be brought out to the
    # top level. We only fold Piecewise that contain the integration
    # variable.
    reps = {}
    symbols_of_integration = {i[0] for i in limits}
    for p in function.atoms(Piecewise):
        if not p.has(*symbols_of_integration):
            reps[p] = Dummy()
    # mask off those that don't
    function = function.xreplace(reps)
    # do the fold
    function = piecewise_fold(function)
    # remove the masking
    function = function.xreplace({v: k for k, v in reps.items()})

    return function, limits, orientation
def _process_limits(*symbols):
    """Process the list of symbols and convert them to canonical limits,
    storing them as Tuple(symbol, lower, upper). The orientation of
    the function is also returned when the upper limit is missing
    so (x, 1, None) becomes (x, None, 1) and the orientation is changed.
    """
    limits = []
    orientation = 1
    for V in symbols:
        # A condition such as x > 0 is rewritten as (variable, solution set).
        if isinstance(V, (Relational, BooleanFunction)):
            variable = V.atoms(Symbol).pop()
            V = (variable, V.as_set())
        if isinstance(V, Symbol) or getattr(V, '_diff_wrt', False):
            # Bare symbol (or symbol-like object). An Idx may carry its
            # own bounds, which become the limit's lower/upper entries.
            if isinstance(V, Idx):
                if V.lower is None or V.upper is None:
                    limits.append(Tuple(V))
                else:
                    limits.append(Tuple(V, V.lower, V.upper))
            else:
                limits.append(Tuple(V))
            continue
        elif is_sequence(V, Tuple):
            if len(V) == 2 and isinstance(V[1], Range):
                # (x, Range): rewrite as the 4-tuple form whose last entry is
                # the affine map dx*x + lo that the caller substitutes into
                # the function (see _common_new).
                lo = V[1].inf
                hi = V[1].sup
                dx = abs(V[1].step)
                V = [V[0]] + [0, (hi - lo)//dx, dx*V[0] + lo]
            V = sympify(flatten(V))  # a list of sympified elements
            if isinstance(V[0], (Symbol, Idx)) or getattr(V[0], '_diff_wrt', False):
                newsymbol = V[0]
                if len(V) == 2 and isinstance(V[1], Interval):  # 2 -> 3
                    # Interval: expand to explicit start/end bounds.
                    V[1:] = [V[1].start, V[1].end]
                elif len(V) == 3:
                    # general case
                    if V[2] is None and not V[1] is None:
                        # Upper bound missing: flip orientation; dropping the
                        # None below effectively swaps the bounds.
                        orientation *= -1
                    V = [newsymbol] + [i for i in V[1:] if i is not None]
                if not isinstance(newsymbol, Idx) or len(V) == 3:
                    if len(V) == 4:
                        limits.append(Tuple(*V))
                        continue
                    if len(V) == 3:
                        if isinstance(newsymbol, Idx):
                            # Idx represents an integer which may have
                            # specified values it can take on; if it is
                            # given such a value, an error is raised here
                            # if the summation would try to give it a larger
                            # or smaller value than permitted. None and Symbolic
                            # values will not raise an error.
                            lo, hi = newsymbol.lower, newsymbol.upper
                            try:
                                if lo is not None and not bool(V[1] >= lo):
                                    raise ValueError("Summation will set Idx value too low.")
                            except TypeError:
                                # Comparison was symbolic/undecidable; allow it.
                                pass
                            try:
                                if hi is not None and not bool(V[2] <= hi):
                                    raise ValueError("Summation will set Idx value too high.")
                            except TypeError:
                                pass
                        limits.append(Tuple(*V))
                        continue
                    if len(V) == 1 or (len(V) == 2 and V[1] is None):
                        limits.append(Tuple(newsymbol))
                        continue
                    elif len(V) == 2:
                        limits.append(Tuple(newsymbol, V[1]))
                        continue
        # Fell through every recognized form for this entry.
        raise ValueError('Invalid limits given: %s' % str(symbols))
    return limits, orientation
class ExprWithLimits(Expr):
__slots__ = ['is_commutative']
def __new__(cls, function, *symbols, **assumptions):
    # Shared preprocessing; _common_new may short-circuit with S.NaN or an
    # Equality instead of the (function, limits, orientation) tuple.
    pre = _common_new(cls, function, *symbols, **assumptions)
    if type(pre) is tuple:
        function, limits, _ = pre
    else:
        return pre

    # limits must have upper and lower bounds; the indefinite form
    # is not supported. This restriction does not apply to AddWithLimits
    if any(len(l) != 3 or None in l for l in limits):
        raise ValueError('ExprWithLimits requires values for lower and upper bounds.')

    obj = Expr.__new__(cls, **assumptions)
    # args layout: (function, limit_1, limit_2, ...); set directly rather
    # than passed to Expr.__new__ so the limits stay as Tuple objects.
    arglist = [function]
    arglist.extend(limits)
    obj._args = tuple(arglist)
    obj.is_commutative = function.is_commutative  # limits already checked
    return obj
@property
def function(self):
    """Return the expression that is applied across the limits.

    Examples
    ========

    >>> from sympy import Integral
    >>> from sympy.abc import x
    >>> Integral(x**2, (x,)).function
    x**2

    See Also
    ========

    limits, variables, free_symbols
    """
    # args layout is (function, limit_1, limit_2, ...); the head is the function.
    return self._args[0]
@property
def limits(self):
    """Return the limit tuples of the expression.

    Examples
    ========

    >>> from sympy import Integral
    >>> from sympy.abc import x, i
    >>> Integral(x**i, (i, 1, 3)).limits
    ((i, 1, 3),)

    See Also
    ========

    function, variables, free_symbols
    """
    # Everything after the function slot is a limit tuple.
    return self._args[1:]
@property
def variables(self):
    """Return a list of the limit variables.

    >>> from sympy import Sum
    >>> from sympy.abc import x, i
    >>> Sum(x**i, (i, 1, 3)).variables
    [i]

    See Also
    ========

    function, limits, free_symbols
    as_dummy : Rename dummy variables
    sympy.integrals.integrals.Integral.transform : Perform mapping on the dummy variable
    """
    # The variable is always the first entry of each limit tuple.
    return [limit[0] for limit in self.limits]
@property
def bound_symbols(self):
    """Return only variables that are dummy variables.

    Examples
    ========

    >>> from sympy import Integral
    >>> from sympy.abc import x, i, j, k
    >>> Integral(x**i, (i, 1, 3), (j, 2), k).bound_symbols
    [i, j]

    See Also
    ========

    function, limits, free_symbols
    as_dummy : Rename dummy variables
    sympy.integrals.integrals.Integral.transform : Perform mapping on the dummy variable
    """
    # A variable is bound (dummy) only when its limit tuple carries at
    # least one bound; a bare (x,) limit leaves x free.
    return [limit[0] for limit in self.limits if len(limit) > 1]
@property
def free_symbols(self):
    """
    This method returns the symbols in the object, excluding those
    that take on a specific value (i.e. the dummy symbols).

    Examples
    ========

    >>> from sympy import Sum
    >>> from sympy.abc import x, y
    >>> Sum(x, (x, y, 1)).free_symbols
    {y}
    """
    # don't test for any special values -- nominal free symbols
    # should be returned, e.g. don't return set() if the
    # function is zero -- treat it like an unevaluated expression.
    result = self.function.free_symbols
    for limit in self.limits:
        if len(limit) == 1:
            # A bare (x,) limit leaves x free.
            result.add(limit[0])
            continue
        # The limit variable is bound; remove it, then pick up any
        # symbols appearing in the bounds themselves.
        result.discard(limit[0])
        for bound in limit[1:]:
            result.update(bound.free_symbols)
    return result
@property
def is_number(self):
    """Return True if the Sum has no free symbols, else False."""
    return len(self.free_symbols) == 0
def _eval_interval(self, x, a, b):
    """Replace the limit over ``x`` with the interval ``(x, a, b)``."""
    new_limits = [((x, a, b) if lim[0] == x else lim) for lim in self.limits]
    return self.func(self.function, *new_limits)
def _eval_subs(self, old, new):
    """
    Perform substitutions over non-dummy variables
    of an expression with limits.  Also, can be used
    to specify point-evaluation of an abstract antiderivative.

    Examples
    ========

    >>> from sympy import Sum, oo
    >>> from sympy.abc import s, n
    >>> Sum(1/n**s, (n, 1, oo)).subs(s, 2)
    Sum(n**(-2), (n, 1, oo))

    >>> from sympy import Integral
    >>> from sympy.abc import x, a
    >>> Integral(a*x**2, x).subs(x, 4)
    Integral(a*x**2, (x, 4))

    See Also
    ========

    variables : Lists the integration variables
    transform : Perform mapping on the dummy variable for integrals
    change_index : Perform mapping on the sum and product dummy variables
    """
    from sympy.core.function import AppliedUndef, UndefinedFunction
    func, limits = self.function, list(self.limits)

    # If one of the expressions we are replacing is used as a func index
    # one of two things happens.
    #   - the old variable first appears as a free variable
    #     so we perform all free substitutions before it becomes
    #     a func index.
    #   - the old variable first appears as a func index, in
    #     which case we ignore.  See change_index.

    # Reorder limits to match standard mathematical practice for scoping
    limits.reverse()

    if not isinstance(old, Symbol) or \
            old.free_symbols.intersection(self.free_symbols):
        # `old` is either a compound expression or a symbol that is
        # genuinely free in this expression -- substitute into the bounds
        # (innermost first) and, unless it gets captured, the function.
        sub_into_func = True
        for i, xab in enumerate(limits):
            if 1 == len(xab) and old == xab[0]:
                # A bare (old,) limit: replace the dummy itself when the
                # replacement can serve as a differentiation variable,
                # otherwise pin it with the degenerate (old, old) form.
                if new._diff_wrt:
                    xab = (new,)
                else:
                    xab = (old, old)
            limits[i] = Tuple(xab[0], *[l._subs(old, new) for l in xab[1:]])
            if len(xab[0].free_symbols.intersection(old.free_symbols)) != 0:
                # `old` involves this limit's dummy: it is captured here,
                # so stop and do not substitute into the function.
                sub_into_func = False
                break
        if isinstance(old, AppliedUndef) or isinstance(old, UndefinedFunction):
            # Replacing an undefined function must not introduce new
            # dependencies on the dummy variables.
            sy2 = set(self.variables).intersection(set(new.atoms(Symbol)))
            sy1 = set(self.variables).intersection(set(old.args))
            if not sy2.issubset(sy1):
                raise ValueError(
                    "substitution can not create dummy dependencies")
            sub_into_func = True
        if sub_into_func:
            func = func.subs(old, new)
    else:
        # old is a Symbol and a dummy variable of some limit
        for i, xab in enumerate(limits):
            if len(xab) == 3:
                limits[i] = Tuple(xab[0], *[l._subs(old, new) for l in xab[1:]])
                if old == xab[0]:
                    # Substitution stops at the limit that binds `old`.
                    break

    # simplify redundant limits (x, x) to (x, )
    for i, xab in enumerate(limits):
        if len(xab) == 2 and (xab[0] - xab[1]).is_zero:
            limits[i] = Tuple(xab[0], )

    # Reorder limits back to representation-form
    limits.reverse()

    return self.func(func, *limits)
@property
def has_finite_limits(self):
    """
    Returns True if the limits are known to be finite, either by the
    explicit bounds, assumptions on the bounds, or assumptions on the
    variables.  False if known to be infinite, based on the bounds.
    None if not enough information is available to determine.

    Examples
    ========

    >>> from sympy import Sum, Integral, Product, oo, Symbol
    >>> x = Symbol('x')
    >>> Sum(x, (x, 1, 8)).has_finite_limits
    True

    >>> Integral(x, (x, 1, oo)).has_finite_limits
    False

    >>> M = Symbol('M')
    >>> Sum(x, (x, 1, M)).has_finite_limits

    >>> N = Symbol('N', integer=True)
    >>> Product(x, (x, 1, N)).has_finite_limits
    True

    See Also
    ========

    has_reversed_limits
    """
    # Three-valued result: False wins immediately; "unknown" is only
    # latched (ret_None) so a later definite False can still override it.
    ret_None = False
    for lim in self.limits:
        if len(lim) == 3:
            if any(l.is_infinite for l in lim[1:]):
                # Any of the bounds are +/-oo
                return False
            elif any(l.is_infinite is None for l in lim[1:]):
                # Maybe there are assumptions on the variable?
                if lim[0].is_infinite is None:
                    ret_None = True
        else:
            # No explicit bounds: fall back to the variable's assumptions.
            if lim[0].is_infinite is None:
                ret_None = True

    if ret_None:
        return None
    return True
@property
def has_reversed_limits(self):
    """
    Returns True if the limits are known to be in reversed order, either
    by the explicit bounds, assumptions on the bounds, or assumptions on the
    variables.  False if known to be in normal order, based on the bounds.
    None if not enough information is available to determine.

    Examples
    ========

    >>> from sympy import Sum, Integral, Product, oo, Symbol
    >>> x = Symbol('x')
    >>> Sum(x, (x, 8, 1)).has_reversed_limits
    True

    >>> Sum(x, (x, 1, oo)).has_reversed_limits
    False

    >>> M = Symbol('M')
    >>> Integral(x, (x, 1, M)).has_reversed_limits

    >>> N = Symbol('N', integer=True, positive=True)
    >>> Sum(x, (x, 1, N)).has_reversed_limits
    False

    >>> Product(x, (x, 2, N)).has_reversed_limits

    >>> Product(x, (x, 2, N)).subs(N, N + 2).has_reversed_limits
    False

    See Also
    ========

    sympy.concrete.expr_with_intlimits.ExprWithIntLimits.has_empty_sequence
    """
    # A single definitely-reversed limit decides True; any limit without
    # both bounds decides None; otherwise "unknown" is latched.
    ret_None = False
    for lim in self.limits:
        if len(lim) == 3:
            var, a, b = lim
            dif = b - a
            if dif.is_extended_negative:
                return True
            elif dif.is_extended_nonnegative:
                continue
            else:
                # Sign of (b - a) cannot be decided from assumptions.
                ret_None = True
        else:
            # Without explicit bounds the order is undefined.
            return None

    if ret_None:
        return None
    return False
class AddWithLimits(ExprWithLimits):
    r"""Represents unevaluated oriented additions.

    Parent class for Integral and Sum.
    """

    def __new__(cls, function, *symbols, **assumptions):
        """Create the object, folding the orientation sign into the function."""
        pre = _common_new(cls, function, *symbols, **assumptions)
        if type(pre) is tuple:
            function, limits, orientation = pre
        else:
            return pre

        obj = Expr.__new__(cls, **assumptions)
        arglist = [orientation*function]  # orientation not used in ExprWithLimits
        arglist.extend(limits)
        obj._args = tuple(arglist)
        obj.is_commutative = function.is_commutative  # limits already checked

        return obj

    def _eval_adjoint(self):
        # The adjoint can only be moved inside when all limits are real.
        # (generator instead of a throwaway list inside all())
        if all(x.is_real for x in flatten(self.limits)):
            return self.func(self.function.adjoint(), *self.limits)
        return None

    def _eval_conjugate(self):
        # Conjugation commutes with the sum/integral for real limits.
        if all(x.is_real for x in flatten(self.limits)):
            return self.func(self.function.conjugate(), *self.limits)
        return None

    def _eval_transpose(self):
        # Transposition commutes with the sum/integral for real limits.
        if all(x.is_real for x in flatten(self.limits)):
            return self.func(self.function.transpose(), *self.limits)
        return None

    def _eval_factor(self, **hints):
        if 1 == len(self.limits):
            summand = self.function.factor(**hints)
            if summand.is_Mul:
                # Pull commutative factors that are free of the bound
                # variables out of the sum/integral.
                out = sift(summand.args, lambda w: w.is_commutative
                    and not set(self.variables) & w.free_symbols)
                return Mul(*out[True])*self.func(Mul(*out[False]),
                    *self.limits)
        else:
            # Factor the inner (all-but-last-limit) expression first.
            summand = self.func(self.function, *self.limits[0:-1]).factor()
            if not summand.has(self.variables[-1]):
                # NOTE(review): the last limit is passed as a list here,
                # mirroring the original code -- confirm this is intended
                # before changing it to an unpacked tuple.
                return self.func(1, [self.limits[-1]]).doit()*summand
            elif isinstance(summand, Mul):
                return self.func(summand, self.limits[-1]).factor()
        return self

    def _eval_expand_basic(self, **hints):
        from sympy.matrices.matrices import MatrixBase
        summand = self.function.expand(**hints)
        if summand.is_Add and summand.is_commutative:
            # Distribute the sum/integral over a commutative Add.
            return Add(*[self.func(i, *self.limits) for i in summand.args])
        elif isinstance(summand, MatrixBase):
            # Apply elementwise over matrices.
            return summand.applyfunc(lambda x: self.func(x, *self.limits))
        elif summand != self.function:
            return self.func(summand, *self.limits)
        return self
| |
# Copyright 2015 IBM Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import json
import logging
from flask import Flask, jsonify, request
from discovery import Discovery
from speech_to_text import Speech_to_text
from getConfidence import NLC
import requests
app = Flask(__name__)

# Watson service clients; populated by the credential bootstrap below and
# left as None if the corresponding service is not configured.
discovery = None
Speech = None
classifier = None

# Watson Discovery / NLC resource identifiers for this deployment.
discovery_collection_id = "0cead13f-1bf4-438b-8c6b-e3f412d2eb3e"
discovery_configuration_id = "59aca88c-a9c2-4299-a6a2-be7e5e3eea6b"
discovery_environment_id = "67c3f67b-a49f-4156-a795-1ff97ad09e6d"
classifier_id = "ebd15ex229-nlc-54210"
# Credential bootstrap: prefer Cloud Foundry's VCAP_SERVICES environment
# variable (remote deployment); fall back to a local vcap-local-back.json
# file for development.  Each branch wires up the Discovery, NLC and
# Speech-to-Text clients from the credentials it finds.
if 'VCAP_SERVICES' in os.environ:
    logging.basicConfig(filename='welcome.log', level=logging.DEBUG)
    logging.info('Using VCAP on remote')
    vcap = json.loads(os.getenv('VCAP_SERVICES'))
    if 'discovery' in vcap:
        discreds = vcap['discovery'][0]['credentials']
        disuser = discreds['username']
        dispassword = discreds['password']
        disurl = discreds['url']
        discovery = Discovery(disurl, disuser, dispassword,
                              discovery_collection_id,
                              discovery_configuration_id,
                              discovery_environment_id)
    if 'natural_language_classifier' in vcap:
        nlccreds = vcap['natural_language_classifier'][0]['credentials']
        nlcuser = nlccreds['username']
        nlcpassword = nlccreds['password']
        nlcurl = nlccreds['url']
        classifier = NLC(nlcurl, nlcuser, nlcpassword, classifier_id)
    if 'speech_to_text' in vcap:
        speechcreds = vcap['speech_to_text'][0]['credentials']
        speechuser = speechcreds['username']
        speechpassword = speechcreds['password']
        speechurl = speechcreds['url']
        Speech = Speech_to_text(speechurl, speechuser, speechpassword)
elif os.path.isfile('vcap-local-back.json'):
    logging.basicConfig(filename="welcome.log", level=logging.DEBUG)
    with open('vcap-local-back.json') as f:
        logging.info('Using Local VCAP credentials')
        vcap = json.load(f)
        # NOTE(review): unlike the remote branch, this one assumes all
        # three services are present in the file and will raise KeyError
        # if any is missing -- confirm that is acceptable locally.
        discreds = vcap['discovery'][0]['credentials']
        disuser = discreds['username']
        dispassword = discreds['password']
        disurl = discreds['url']
        discovery = Discovery(disurl, disuser, dispassword,
                              discovery_collection_id,
                              discovery_configuration_id,
                              discovery_environment_id)
        speechcreds = vcap['speech_to_text'][0]['credentials']
        speechuser = speechcreds['username']
        speechpassword = speechcreds['password']
        speechurl = speechcreds['url']
        Speech = Speech_to_text(speechurl, speechuser, speechpassword)
        nlccreds = vcap['natural_language_classifier'][0]['credentials']
        nlcuser = nlccreds['username']
        nlcpassword = nlccreds['password']
        nlcurl = nlccreds['url']
        classifier = NLC(nlcurl, nlcuser, nlcpassword, classifier_id)
@app.route('/')
def Welcome():
    """Serve the single-page app entry point."""
    return app.send_static_file('index.html')
@app.route('/audio')
def audiosend():
    """Serve the audio-recording page."""
    return app.send_static_file('audio.html')
@app.route('/api/query', methods=['POST'])
def query_watson():
    """Run a user query through classification + Discovery search.

    Expects a JSON body with 'queryText' and 'category' keys; returns
    {"result": <JSON string produced by handle_input>}.
    """
    query_obj = request.get_json()
    return jsonify(result=handle_input(query_obj))
@app.route('/api/feedback', methods=['POST'])
def submit_feedback():
    """Record user relevance feedback for a (query, document) pair.

    Expects a JSON body with 'query', 'document_id' and 'feedback' keys.
    """
    request_obj = request.get_json()
    try:
        discovery_feedback_add_edit(request_obj['query'], request_obj['document_id'], request_obj['feedback'])
        return jsonify(result={"response" : "Feedback submitted"})
    except Exception:
        # Bug fix: the error payload was keyed 'resylt' (typo), so clients
        # reading 'result' never saw the error message.  Also narrowed the
        # bare except so SystemExit/KeyboardInterrupt are not swallowed.
        return jsonify(result={"error": "Error submitting feedback"})
def discovery_feedback(query, document_id, relevance):
url = "https://gateway.watsonplatform.net/discovery/api/v1/environments/{0}/collections/{1}/training_data?version=2017-11-07".format(discovery_environment_id,discovery_collection_id)
data = {
"natural_language_query": query,
"examples": [
{
"document_id": document_id,
"relevance": relevance
}
]
}
r = requests.post(url, auth=(discovery.creds['username'], discovery.creds['password']), json=data)
print r
def discovery_feedback_add_edit(query, document_id, relevance):
ALREADY_EXISTS = "ALREADY_EXISTS"
url = "https://gateway.watsonplatform.net/discovery/api/v1/environments/{0}/collections/{1}/training_data?version=2017-11-07".format(discovery_environment_id,discovery_collection_id)
data = {
"natural_language_query": query,
"examples": [
{
"document_id": document_id,
"relevance": relevance
}
]
}
headers = {"content-type":"application/json"}
r = requests.post(url, auth=(discovery.creds['username'], discovery.creds['password']), json=data)
try:
error_string = json.loads(r.content)["error"]
if ALREADY_EXISTS in error_string:
query_id = error_string.split(' already exists in collection')[0]
query_id = query_id.split('id ')[-1]
data = {
"document_id": document_id,
"relevance": relevance
}
print "Query already exists:",query_id
url = "https://gateway.watsonplatform.net/discovery/api/v1/environments/{0}/collections/{1}/training_data/{2}/examples?version=2017-11-07".format(discovery_environment_id,discovery_collection_id,query_id)
r = requests.post(url, auth=(discovery.creds['username'], discovery.creds['password']), json=data)
try:
error_string = json.loads(r.content)["error"]
print error_string
if ALREADY_EXISTS in error_string:
example_id = error_string.split(' already has an example')[0]
example_id = example_id.split('Document id ')[-1]
print example_id
print "document already exists:",example_id
url = "https://gateway.watsonplatform.net/discovery/api/v1/environments/{0}/collections/{1}/training_data/{2}/examples/{3}?version=2017-11-07".format(discovery_environment_id,discovery_collection_id,query_id,example_id)
r = requests.put(url, auth=(discovery.creds['username'], discovery.creds['password']), json=data)
try:
error_string = json.loads(r.content)["error"]
print error_string
except:
print "Document score updated."
except:
print "Document added to query."
except:
print "New Query/document pair accepted."
def handle_input(input_object):
    """Classify the user query and fetch matching articles from Discovery.

    :param input_object: dict with 'queryText' and 'category' keys
    :return: a JSON string with 'error', 'articles' and 'categories' keys
             (note: the /api/query caller wraps this string with jsonify,
             double-encoding it -- the frontend presumably expects that)
    """
    return_object = {'error': '', 'articles': [], 'categories': []}
    user_input = input_object['queryText']
    user_category = input_object['category']
    logging.info('welcome.handle_input(): queryText: ' + user_input + ' category: ' + user_category)
    try:
        categories = []
        if not user_category:
            # No category supplied -- let the NL classifier pick candidates.
            categories = nlc(user_input)
        else:
            categories.append(user_category)
        return_object['categories'] = categories
        # Only query Discovery when the category is unambiguous.
        if len(categories) == 1:
            matches = discovery.query(user_input, categories[0])
            for match in matches:
                return_object['articles'].append({'html': match['html'], 'document_id': match['id']})
    except Exception:
        # Was a bare `except:` that silently swallowed everything including
        # KeyboardInterrupt; narrowed and the cause is now logged.
        logging.exception('welcome.handle_input(): search failed')
        return_object['error'] = 'Error searching for request.'
    return json.dumps(return_object)
@app.route('/audio/blob', methods=['GET', 'POST'])
def get_blob():
    """Accept an uploaded audio blob and return its transcription.

    The uploaded file is saved under static/test.wav and run through the
    Speech-to-Text service.
    """
    if request.method != 'POST':
        # Bug fix: GET previously fell through returning None, which made
        # Flask raise a 500; answer with an empty body instead.
        return ''
    a = request.files['data']
    fname = os.path.join(os.getcwd()+"/static", "test.wav")
    a.save(fname)
    text = Speech.speech_to_text(fname)
    return text
def nlc(s):
    """Classify text *s* with the Watson NL classifier; returns its categories."""
    return classifier.classify(s)
# Cloud Foundry supplies PORT; default to 5000 for local development.
port = os.getenv('PORT', '5000')
if __name__ == "__main__":
    app.run(host='0.0.0.0', port=int(port))
| |
# -*- coding: utf-8 -*-
"""
.. module:: pytfa
:platform: Unix, Windows
:synopsis: Thermodynamics-based Flux Analysis
.. moduleauthor:: pyTFA team
MILP-fu to reformulate problems
"""
import sympy
# import optlang
from collections import namedtuple
from .variables import LinearizationVariable
from .constraints import LinearizationConstraint
# Faster than optlang Constraint object
# Lightweight stand-in for an optlang Constraint: lb <= expression <= ub,
# with None meaning "unbounded" on that side.
ConstraintTuple = namedtuple('ConstraintTuple',['name','expression','ub','lb'])

# optlang's type string for binary variables.
OPTLANG_BINARY = 'binary'
def subs_bilinear(expr):
    """
    Substitutes bilinear forms from an expression with dedicated variables

    Each multiplicative term ``x*y`` among ``expr.args`` is replaced by a
    fresh symbol named ``x__MUL__y``.

    :param expr: a sympy expression (typically an Add of terms)
    :return: (new_expr, replacement_dict) where replacement_dict maps each
             replaced Mul term to the symbol now standing for it
    """
    # Bug fix: the original built a list of *booleans*
    # ([isinstance(x, Mul) for e, x in enumerate(...)]) and then indexed
    # expr.args with True/False, which always picked args[0] or args[1].
    # Collect the indices of the multiplicative terms instead.
    bilinear_ix = [e for e, x in enumerate(expr.args)
                   if isinstance(x, sympy.Mul)]

    new_expr = expr.copy()
    replacement_dict = dict()

    for bix in bilinear_ix:
        term = expr.args[bix]
        # Bug fix: str.join only accepts strings, not sympy objects.
        name = '__MUL__'.join(str(a) for a in term.args)
        z = sympy.Symbol(name=name)
        new_expr = new_expr.subs(term, z)
        replacement_dict[term] = z

    return new_expr, replacement_dict
def glovers_linearization(b, fy, z=None, L=0, U=1000):
    """
    Glover, Fred.
    "Improved linear integer programming formulations of nonlinear integer problems."
    Management Science 22.4 (1975): 455-460.

    Performs Glovers Linearization of a product

    z = b*f(y) <=> z - b*f(y) = 0
    <=>
    { L*b <= z <= U*b
    { f(y) - U*(1-b) <= z <= f(y) - L*(1-b)

    where :
        * b is a binary variable
        * f a linear combination of continuous or integer variables y

    :param b: Must be a binary optlang variable
    :param z: Must be an optlang variable. Will be mapped to the product so
        that z = b*f(y)
    :param fy: Must be an expression or variable
    :param L: minimal value for fy
    :param U: maximal value for fy
    :return: (z, constraints) where constraints is a list of ConstraintTuple
    """
    assert(b.type == OPTLANG_BINARY)

    if z is None:
        name = '__MUL__'.join([b.name, fy.name])
        z = sympy.Symbol(name = name)
    else:
        # Bug fix: when z was supplied, `name` was never assigned and the
        # constraint construction below raised NameError.
        name = z.name

    # Bug fix: constraints are now emitted as ConstraintTuple (consistent
    # with petersen_linearization); the original referenced
    # optlang.Constraint although the optlang import is commented out,
    # which raised NameError at call time.

    # 1st Glovers constraint: L*b <= z  <=>  0 <= z - L*b
    cons1 = ConstraintTuple(name = name + '_1',
                            expression = z - L*b,
                            lb = 0,
                            ub = None)
    # 2nd Glovers constraint: z <= U*b  <=>  0 <= U*b - z
    cons2 = ConstraintTuple(name = name + '_2',
                            expression = U*b - z,
                            lb = 0,
                            ub = None)
    # 3rd Glovers constraint: fy - U*(1-b) <= z  <=>  0 <= z - fy + U*(1-b)
    cons3 = ConstraintTuple(name = name + '_3',
                            expression = z - fy + U*(1-b),
                            lb = 0,
                            ub = None)
    # 4th Glovers constraint: z <= fy - L*(1-b)  <=>  0 <= fy - L*(1-b) - z
    cons4 = ConstraintTuple(name = name + '_4',
                            expression = fy - L*(1-b) - z,
                            lb = 0,
                            ub = None)

    return z, [cons1, cons2, cons3, cons4]
def petersen_linearization(b, x, z=None, M=1000):
    """
    PETERSEN, C,,
    "A Note on Transforming the Product of Variables to Linear Form in Linear CLIFFORD Programs,"
    Working Paper, Purdue University, 1971.

    Performs Petersen Linearization of a product

    z = b*x <=> z - b*x = 0
    <=>
    { x + M*b - M <= z <= M*b
    { z <= x

    where :
        * b is a binary variable
        * f a linear combination of continuous or integer variables y

    :param x: Must be an expression or variable
    :param b: Must be a binary optlang variable
    :param z: Must be an optlang variable. Will be mapped to the product so
        that z = b*f(y)
    :param M: big-M constraint
    :return: (z, constraints) -- z and the three ConstraintTuples encoding it
    """
    assert(b.type == OPTLANG_BINARY)

    if z is None:
        name = '__MUL__'.join([b.name, x.name])
        z = sympy.Symbol(name=name)
    else:
        name = z.name

    constraints = []

    # 1st Petersen constraint: x + M*b - M <= z  <=>  x + M*b - z <= M
    constraints.append(ConstraintTuple(name=name + '_1',
                                       expression=x + M*b - z,
                                       lb=0,
                                       ub=M))

    # 2nd Petersen constraint: z <= M*b  <=>  0 <= M*b - z
    constraints.append(ConstraintTuple(name=name + '_2',
                                       expression=M*b - z,
                                       lb=0,
                                       ub=None))

    # 3rd Petersen constraint: z <= x  <=>  0 <= x - z
    constraints.append(ConstraintTuple(name=name + '_3',
                                       expression=x - z,
                                       lb=0,
                                       ub=None))

    return z, constraints
def linearize_product(model, b, x, queue=False):
    """
    Add a variable z = b*x to the model, linearized with Petersen's method.

    :param model: a pytfa model exposing add_variable/add_constraint
    :param b: the binary variable
    :param x: the continuous variable
    :param queue: whether to queue the variables and constraints made
    :return: the LinearizationVariable standing for the product b*x
    """
    # Linearization step for ga_i * [E]
    z_name = '__MUL__'.join([b.name, x.name])
    # Add the variables
    model_z_u = model.add_variable(kind=LinearizationVariable,
                                   hook=model,
                                   id_=z_name,
                                   lb=0,
                                   ub=x.ub,
                                   queue=False)

    # x's upper bound doubles as the big-M value for the linearization.
    big_m = x.ub

    z_u, new_constraints = petersen_linearization(b=b, x=x, M=big_m,
                                                  z=model_z_u)

    # Add the constraints:
    for cons in new_constraints:
        model.add_constraint(kind=LinearizationConstraint,
                             hook=model,
                             id_=cons.name,
                             expr=cons.expression,
                             # expr=new_expression,
                             ub=cons.ub,
                             lb=cons.lb,
                             queue=queue)

    model._push_queue()

    return model_z_u
| |
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import re
import jsonschema
from oslo_config import cfg
import pecan
from pecan import abort
from six.moves import http_client
from st2api.controllers.base import BaseRestControllerMixin
from st2api.controllers.resource import ResourceController
from st2api.controllers.v1.executionviews import ExecutionViewsController
from st2api.controllers.v1.executionviews import SUPPORTED_FILTERS
from st2common import log as logging
from st2common.constants.action import LIVEACTION_STATUS_CANCELED
from st2common.constants.action import CANCELABLE_STATES
from st2common.exceptions.trace import TraceNotFoundException
from st2common.models.api.action import LiveActionAPI
from st2common.models.api.base import jsexpose
from st2common.models.api.execution import ActionExecutionAPI
from st2common.models.db.liveaction import LiveActionDB
from st2common.persistence.liveaction import LiveAction
from st2common.persistence.execution import ActionExecution
from st2common.services import action as action_service
from st2common.services import executions as execution_service
from st2common.rbac.utils import request_user_is_admin
from st2common.util import jsonify
from st2common.util import isotime
from st2common.util import action_db as action_utils
from st2common.rbac.types import PermissionType
from st2common.rbac.decorators import request_user_has_permission
from st2common.rbac.utils import assert_request_user_has_resource_permission
__all__ = [
'ActionExecutionsController'
]
LOG = logging.getLogger(__name__)
# Note: We initialize filters here and not in the constructor
# Bug fix: copy SUPPORTED_FILTERS before extending it.  The original code
# aliased the imported dict and then called .update() on it, mutating
# executionviews.SUPPORTED_FILTERS for every other importer of that module.
SUPPORTED_EXECUTIONS_FILTERS = copy.deepcopy(SUPPORTED_FILTERS)
SUPPORTED_EXECUTIONS_FILTERS.update({
    'timestamp_gt': 'start_timestamp.gt',
    'timestamp_lt': 'start_timestamp.lt'
})
# Name of the query parameter for toggling on the display of secrets to the admin users in the API
# responses
SHOW_SECRETS_QUERY_PARAM = 'show_secrets'
MONITOR_THREAD_EMPTY_Q_SLEEP_TIME = 5
MONITOR_THREAD_NO_WORKERS_SLEEP_TIME = 1
class ActionExecutionsControllerMixin(BaseRestControllerMixin):
    """
    Mixin class with shared methods.
    """
    # API model / persistence access used by all execution controllers.
    model = ActionExecutionAPI
    access = ActionExecution

    # A list of attributes which can be specified using ?exclude_attributes filter
    valid_exclude_attributes = [
        'result',
        'trigger_instance'
    ]

    def _get_requester(self):
        # Retrieve username of the authed user (note - if auth is disabled, user will not be
        # set so we fall back to the system user name)
        request_token = pecan.request.context.get('token', None)
        return request_token.user if request_token else cfg.CONF.system_user.user

    def _get_from_model_kwargs_for_request(self, request):
        """
        Set mask_secrets=False if the user is an admin and provided ?show_secrets=True query param.
        """
        from_model_kwargs = {'mask_secrets': cfg.CONF.api.mask_secrets}
        show_secrets = self._get_query_param_value(request=request,
                                                   param_name=SHOW_SECRETS_QUERY_PARAM,
                                                   param_type='bool',
                                                   default_value=False)
        # Only admins may see unmasked secrets, and only on request.
        if show_secrets and request_user_is_admin(request=request):
            from_model_kwargs['mask_secrets'] = False
        return from_model_kwargs

    def _handle_schedule_execution(self, liveaction):
        """
        Schedule an execution for the provided liveaction, translating the
        common failure modes into HTTP error responses via abort().
        """
        # Assert the permissions
        action_ref = liveaction.action
        action_db = action_utils.get_action_by_ref(action_ref)
        assert_request_user_has_resource_permission(request=pecan.request, resource_db=action_db,
                                                    permission_type=PermissionType.ACTION_EXECUTE)

        try:
            return self._schedule_execution(liveaction=liveaction)
        except ValueError as e:
            LOG.exception('Unable to execute action.')
            abort(http_client.BAD_REQUEST, str(e))
        except jsonschema.ValidationError as e:
            LOG.exception('Unable to execute action. Parameter validation failed.')
            # Strip Python-2 u'' prefixes from the validation message for readability.
            abort(http_client.BAD_REQUEST, re.sub("u'([^']*)'", r"'\1'", e.message))
        except TraceNotFoundException as e:
            abort(http_client.BAD_REQUEST, str(e))
        except Exception as e:
            LOG.exception('Unable to execute action. Unexpected error encountered.')
            abort(http_client.INTERNAL_SERVER_ERROR, str(e))

    def _schedule_execution(self, liveaction):
        """Attach request context to the liveaction and submit it for execution."""
        # Initialize execution context if it does not exist.
        if not hasattr(liveaction, 'context'):
            liveaction.context = dict()

        liveaction.context['user'] = self._get_requester()
        LOG.debug('User is: %s' % liveaction.context['user'])

        # Retrieve other st2 context from request header.
        if 'st2-context' in pecan.request.headers and pecan.request.headers['st2-context']:
            context = jsonify.try_loads(pecan.request.headers['st2-context'])
            if not isinstance(context, dict):
                raise ValueError('Unable to convert st2-context from the headers into JSON.')
            liveaction.context.update(context)

        # Schedule the action execution.
        liveactiondb = LiveActionAPI.to_model(liveaction)
        _, actionexecutiondb = action_service.request(liveactiondb)
        from_model_kwargs = self._get_from_model_kwargs_for_request(request=pecan.request)
        return ActionExecutionAPI.from_model(actionexecutiondb, from_model_kwargs)

    def _get_result_object(self, id):
        """
        Retrieve result object for the provided action execution.

        :param id: Action execution ID.
        :type id: ``str``

        :rtype: ``dict``
        """
        # .only() limits the DB projection to the result field.
        fields = ['result']
        action_exec_db = self.access.impl.model.objects.filter(id=id).only(*fields).get()
        return action_exec_db.result

    def _get_children(self, id_, depth=-1, result_fmt=None):
        """Return API models of the descendants of the given execution."""
        # make sure depth is int. Url encoding will make it a string and needs to
        # be converted back in that case.
        depth = int(depth)
        from_model_kwargs = self._get_from_model_kwargs_for_request(request=pecan.request)
        LOG.debug('retrieving children for id: %s with depth: %s', id_, depth)
        descendants = execution_service.get_descendants(actionexecution_id=id_,
                                                        descendant_depth=depth,
                                                        result_fmt=result_fmt)
        return [self.model.from_model(descendant, from_model_kwargs) for
                descendant in descendants]

    def _validate_exclude_fields(self, exclude_fields):
        """
        Validate that provided exclude fields are valid.

        :raises ValueError: if a field is not in valid_exclude_attributes.
        """
        if not exclude_fields:
            return exclude_fields

        for field in exclude_fields:
            if field not in self.valid_exclude_attributes:
                msg = 'Invalid or unsupported attribute specified: %s' % (field)
                raise ValueError(msg)

        return exclude_fields
class ActionExecutionChildrenController(ActionExecutionsControllerMixin):

    @request_user_has_permission(permission_type=PermissionType.EXECUTION_VIEW)
    @jsexpose(arg_types=[str])
    def get(self, id, **kwargs):
        """
        Retrieve children for the provided action execution.

        Extra keyword arguments (depth, result_fmt) are forwarded to
        _get_children on the mixin.

        :rtype: ``list``
        """
        return self._get_children(id_=id, **kwargs)
class ActionExecutionAttributeController(ActionExecutionsControllerMixin):

    @request_user_has_permission(permission_type=PermissionType.EXECUTION_VIEW)
    @jsexpose()
    def get(self, id, attribute, **kwargs):
        """
        Retrieve a particular attribute for the provided action execution.

        Handles requests:

            GET /actionexecutions/<id>/<attribute>

        :rtype: ``dict``
        """
        # Reuse the exclude-attributes whitelist to restrict which
        # attributes may be fetched individually (raises ValueError otherwise).
        fields = [attribute]
        fields = self._validate_exclude_fields(fields)
        action_exec_db = self.access.impl.model.objects.filter(id=id).only(*fields).get()
        result = getattr(action_exec_db, attribute, None)
        return result
class ActionExecutionReRunController(ActionExecutionsControllerMixin, ResourceController):
    supported_filters = {}
    # Large attributes we never need when re-running: only the stored
    # parameters of the original execution are used.
    exclude_fields = [
        'result',
        'trigger_instance'
    ]

    class ExecutionParameters(object):
        # Simple request-body wrapper for the re_run POST payload.
        def __init__(self, parameters=None):
            self.parameters = parameters or {}

        def validate(self):
            # NOTE(review): `assert` is stripped under `python -O`; raising
            # ValueError would be more robust -- confirm before changing.
            if self.parameters:
                assert isinstance(self.parameters, dict)
            return True

    @jsexpose(body_cls=ExecutionParameters, status_code=http_client.CREATED)
    def post(self, execution_parameters, execution_id):
        """
        Re-run the provided action execution optionally specifying override parameters.

        Handles requests:

            POST /executions/<id>/re_run
        """
        parameters = execution_parameters.parameters

        # Note: We only really need parameters here
        existing_execution = self._get_one(id=execution_id, exclude_fields=self.exclude_fields)

        # Merge in any parameters provided by the user
        new_parameters = copy.deepcopy(existing_execution.parameters)
        new_parameters.update(parameters)

        # Create object for the new execution
        action_ref = existing_execution.action['ref']
        new_liveaction = LiveActionDB(action=action_ref, parameters=new_parameters)

        result = self._handle_schedule_execution(liveaction=new_liveaction)
        return result
class ActionExecutionsController(ActionExecutionsControllerMixin, ResourceController):
    """
    Implements the RESTful web endpoint that handles
    the lifecycle of ActionExecutions in the system.
    """

    # Nested controllers
    views = ExecutionViewsController()
    children = ActionExecutionChildrenController()
    attribute = ActionExecutionAttributeController()
    re_run = ActionExecutionReRunController()

    # ResourceController attributes
    query_options = {
        'sort': ['-start_timestamp', 'action.ref']
    }
    supported_filters = SUPPORTED_EXECUTIONS_FILTERS
    filter_transform_functions = {
        'timestamp_gt': lambda value: isotime.parse(value=value),
        'timestamp_lt': lambda value: isotime.parse(value=value)
    }

    @request_user_has_permission(permission_type=PermissionType.EXECUTION_VIEW)
    @jsexpose()
    def get_all(self, exclude_attributes=None, **kw):
        """
        List all actionexecutions.

        Handles requests:
            GET /actionexecutions[?exclude_attributes=result,trigger_instance]

        :param exclude_attributes: Comma delimited string of attributes to exclude from the object.
        :type exclude_attributes: ``str``
        """
        if exclude_attributes:
            exclude_fields = exclude_attributes.split(',')
        else:
            exclude_fields = None

        exclude_fields = self._validate_exclude_fields(exclude_fields=exclude_fields)

        # Use a custom sort order when filtering on a timestamp so we return a correct result as
        # expected by the user
        if 'timestamp_lt' in kw:
            query_options = {'sort': ['-start_timestamp', 'action.ref']}
            kw['query_options'] = query_options
        elif 'timestamp_gt' in kw:
            query_options = {'sort': ['+start_timestamp', 'action.ref']}
            kw['query_options'] = query_options

        return self._get_action_executions(exclude_fields=exclude_fields, **kw)

    @jsexpose(arg_types=[str])
    def get_one(self, id, exclude_attributes=None, **kwargs):
        """
        Retrieve a single execution.

        Handles requests:
            GET /actionexecutions/<id>[?exclude_attributes=result,trigger_instance]

        :param exclude_attributes: Comma delimited string of attributes to exclude from the object.
        :type exclude_attributes: ``str``
        """
        if exclude_attributes:
            exclude_fields = exclude_attributes.split(',')
        else:
            exclude_fields = None

        exclude_fields = self._validate_exclude_fields(exclude_fields=exclude_fields)

        return self._get_one(id=id, exclude_fields=exclude_fields)

    @jsexpose(body_cls=LiveActionAPI, status_code=http_client.CREATED)
    def post(self, liveaction):
        # Delegates to the mixin, which asserts permissions and schedules.
        return self._handle_schedule_execution(liveaction=liveaction)

    @request_user_has_permission(permission_type=PermissionType.EXECUTION_STOP)
    @jsexpose(arg_types=[str])
    def delete(self, exec_id):
        """
        Stops a single execution.

        Handles requests:
            DELETE /actionexecutions/<id>
        """
        execution_api = self._get_one(id=exec_id)

        if not execution_api:
            abort(http_client.NOT_FOUND, 'Execution with id %s not found.' % exec_id)

        liveaction_id = execution_api.liveaction['id']
        if not liveaction_id:
            abort(http_client.INTERNAL_SERVER_ERROR,
                  'Execution object missing link to liveaction %s.' % liveaction_id)

        try:
            liveaction_db = LiveAction.get_by_id(liveaction_id)
        except:
            # NOTE(review): bare except -- presumably guards against a
            # missing liveaction document; narrowing to the persistence
            # layer's not-found exception would be safer. Confirm first.
            abort(http_client.INTERNAL_SERVER_ERROR,
                  'Execution object missing link to liveaction %s.' % liveaction_id)

        if liveaction_db.status == LIVEACTION_STATUS_CANCELED:
            abort(http_client.OK, 'Action is already in "canceled" state.')

        if liveaction_db.status not in CANCELABLE_STATES:
            abort(http_client.OK, 'Action cannot be canceled. State = %s.' % liveaction_db.status)

        try:
            (liveaction_db, execution_db) = action_service.request_cancellation(
                liveaction_db, self._get_requester())
        except:
            LOG.exception('Failed requesting cancellation for liveaction %s.', liveaction_db.id)
            abort(http_client.INTERNAL_SERVER_ERROR, 'Failed canceling execution.')

        from_model_kwargs = self._get_from_model_kwargs_for_request(request=pecan.request)
        return ActionExecutionAPI.from_model(execution_db, from_model_kwargs)

    @jsexpose()
    def options(self, *args, **kw):
        # CORS preflight handler -- intentionally returns no body.
        return

    def _get_action_executions(self, exclude_fields=None, **kw):
        """
        :param exclude_fields: A list of object fields to exclude.
        :type exclude_fields: ``list``
        """
        # Cap/normalize the limit (URL-encoded values arrive as strings).
        kw['limit'] = int(kw.get('limit', 100))

        LOG.debug('Retrieving all action executions with filters=%s', kw)
        return super(ActionExecutionsController, self)._get_all(exclude_fields=exclude_fields,
                                                                **kw)
| |
#
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://aws.amazon.com/apache2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
#
"""
Handshake tests using Openssl 0.9.8 s_client against s2nd
"""
import argparse
import os
import sys
import subprocess
import itertools
import multiprocessing
import threading
import uuid
import re
import string
from os import environ
from multiprocessing.pool import ThreadPool
from s2n_test_constants import *
from time import sleep
# Prefix that precedes the negotiated cipher name in s_client output.
S_CLIENT_NEGOTIATED_CIPHER_PREFIX="Cipher : "

# Maps s2n protocol-version constants to the matching s_client CLI flag.
PROTO_VERS_TO_S_CLIENT_ARG = {
    S2N_TLS10 : "-tls1",
    S2N_TLS11 : "-tls1_1",
    S2N_TLS12 : "-tls1_2",
}

# Module-level toggle for s2nd corked IO (-C); set from the CLI in main().
use_corked_io=False
def cleanup_processes(*processes):
    """Forcefully kill each given subprocess, then reap it."""
    for proc in processes:
        proc.kill()
        proc.wait()
def validate_version(expected_version, output):
    """Return 0 if s2nd reported the expected protocol version, else -1."""
    # s2n negotiates TLS1.0 by default when no explicit version was requested.
    needle = ACTUAL_VERSION_STR.format(expected_version or S2N_TLS10)
    found = any(needle in line for line in output.splitlines())
    return 0 if found else -1
def validate_data_transfer(expected_data, s_client_out, s2nd_out):
    """
    Verify that the application data written between s_client and s2nd was
    encrypted and decrypted successfully in both directions.

    :param str expected_data: payload expected to appear in both outputs
    :param str s_client_out: captured stdout of openssl s_client
    :param str s2nd_out: captured stdout of s2nd
    :return: 0 when the payload was found in both outputs, -1 otherwise
    """
    # The two directions are symmetric; check s2nd first (as the original
    # did), then s_client, with a single shared scan.
    for peer_name, output in (("s2nd", s2nd_out), ("s_client", s_client_out)):
        if not any(expected_data in line for line in output.splitlines()):
            print ("Did not find " + expected_data + " in output from " + peer_name)
            return -1

    return 0
def find_expected_cipher(expected_cipher, s_client_out):
    """
    Make sure s_client and s2nd negotiated the cipher suite we expect.

    :param str expected_cipher: openssl name of the expected cipher
    :param str s_client_out: captured stdout of openssl s_client
    :return: 0 when the negotiated-cipher line was found, -1 otherwise
    """
    # Removed an unused length computation and an unreachable ``break``
    # that followed the ``return`` in the original loop body.
    full_expected_string = S_CLIENT_NEGOTIATED_CIPHER_PREFIX + expected_cipher
    for line in s_client_out.splitlines():
        if full_expected_string in line:
            return 0
    print("Failed to find " + expected_cipher + " in s_client output")
    return -1
def read_process_output_until(process, marker):
    """
    Accumulate lines from ``process.stdout`` until a line containing
    ``marker`` is seen, then return everything read so far.

    :param process: subprocess.Popen-like object with a binary ``stdout``
    :param str marker: substring that terminates reading
    :return: all output read, including the marker line; if the stream hits
        EOF before the marker appears, whatever was read is returned
    """
    output = ""

    while True:
        line = process.stdout.readline().decode("utf-8")
        # BUG FIX: readline() returns "" at EOF. The original kept looping
        # forever here when the process exited without emitting the marker.
        if not line:
            return output
        output += line
        if marker in line:
            return output
def try_handshake(endpoint, port, cipher, ssl_version, server_name=None, strict_hostname=False, server_cert=None, server_key=None,
    server_cert_key_list=None, expected_server_cert=None, server_cipher_pref=None, ocsp=None, sig_algs=None, curves=None, resume=False, no_ticket=False,
    prefer_low_latency=False, enter_fips_mode=False, client_auth=None, client_cert=DEFAULT_CLIENT_CERT_PATH,
    client_key=DEFAULT_CLIENT_KEY_PATH, expected_cipher=None, expected_extensions=None):
    """
    Attempt to handshake against s2nd listening on `endpoint` and `port` using Openssl s_client
    :param int endpoint: endpoint for s2nd to listen on
    :param int port: port for s2nd to listen on
    :param str cipher: ciphers for Openssl s_client to offer. See https://www.openssl.org/docs/man1.0.2/apps/ciphers.html
    :param int ssl_version: SSL version for s_client to use
    :param str server_name: server_name value for s_client to send
    :param bool strict_hostname: whether s_client should strictly check to see if server certificate matches the server_name
    :param str server_cert: path to certificate for s2nd to use
    :param str server_key: path to private key for s2nd to use
    :param list server_cert_key_list: a list of (cert_path, key_path) tuples for multicert tests.
    :param str expected_server_cert: Path to the expected server certificate should be sent to s_client.
    :param str server_cipher_pref: named cipher preference for s2nd to use instead of the default
    :param str ocsp: path to OCSP response file for stapling
    :param str sig_algs: Signature algorithms for s_client to offer
    :param str curves: Elliptic curves for s_client to offer
    :param bool resume: True if s_client should try to reconnect to s2nd and reuse the same TLS session. False for normal negotiation.
    :param bool no_ticket: True if s2n server should not use session ticket to resume the same TLS session.
    :param bool prefer_low_latency: True if s2nd should use 1500 for max outgoing record size. False for default max.
    :param bool enter_fips_mode: True if s2nd should enter libcrypto's FIPS mode. Libcrypto must be built with a FIPS module to enter FIPS mode.
    :param bool client_auth: True if the test should try and use client authentication
    :param str client_cert: Path to the client's cert file
    :param str client_key: Path to the client's private key file
    :param str expected_cipher: the cipher we expect to negotiate
    :param list expected_extensions: list of expected extensions that s_client should receive.
    :return: 0 on successfully negotiation(s), -1 on failure
    """
    # NOTE(review): server_name, sig_algs and curves are accepted for interface
    # parity with the other openssl test drivers but are never added to
    # s_client_cmd below in this 0.9.8 variant — confirm that is intentional.

    # Override certificate for ECDSA if unspecified. We can remove this when we
    # support multiple certificates
    if server_cert is None and server_cert_key_list is None and "ECDSA" in cipher:
        server_cert = TEST_ECDSA_CERT
        server_key = TEST_ECDSA_KEY
    # Fire up s2nd
    s2nd_cmd = ["../../bin/s2nd"]
    if server_cert is not None:
        s2nd_cmd.extend(["--cert", server_cert])
    if server_key is not None:
        s2nd_cmd.extend(["--key", server_key])
    if server_cert_key_list is not None:
        for cert_key_path in server_cert_key_list:
            cert_path = cert_key_path[0]
            key_path = cert_key_path[1]
            s2nd_cmd.extend(["--cert", cert_path])
            s2nd_cmd.extend(["--key", key_path])
    if ocsp is not None:
        s2nd_cmd.extend(["--ocsp", ocsp])
    if prefer_low_latency == True:
        s2nd_cmd.append("--prefer-low-latency")
    if client_auth is not None:
        # -m requires mutual auth; -t points s2nd at the client cert to trust.
        s2nd_cmd.append("-m")
        s2nd_cmd.extend(["-t", client_cert])
    if use_corked_io:
        s2nd_cmd.append("-C")
    s2nd_cmd.extend([str(endpoint), str(port)])
    # Choose the server cipher preference: explicit override > FIPS > default.
    s2nd_ciphers = "test_all_tls12"
    if server_cipher_pref is not None:
        s2nd_ciphers = server_cipher_pref
    if enter_fips_mode == True:
        s2nd_ciphers = "test_all_fips"
        s2nd_cmd.append("--enter-fips-mode")
    s2nd_cmd.append("-c")
    s2nd_cmd.append(s2nd_ciphers)
    if no_ticket:
        s2nd_cmd.append("-T")
    s2nd = subprocess.Popen(s2nd_cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    # Make sure s2nd has started
    s2nd.stdout.readline()
    s_client_cmd = ["openssl", "s_client", "-connect", str(endpoint) + ":" + str(port)]
    if ssl_version is not None:
        s_client_cmd.append(PROTO_VERS_TO_S_CLIENT_ARG[ssl_version])
    if cipher is not None:
        s_client_cmd.extend(["-cipher", cipher])
    # For verifying extensions that s2nd sends expected extensions
    s_client_cmd.append("-tlsextdebug")
    # Fire up s_client
    s_client = subprocess.Popen(s_client_cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    s_client_out = ""
    s2nd_out = ""
    # Markers used to detect handshake completion, session resumption, and
    # end of the echoed application data in each process's output.
    openssl_connect_marker = "CONNECTED"
    openssl_reconnect_marker = "drop connection and then reconnect"
    end_of_msg_marker = "__end_of_msg__"
    # Wait until openssl and s2n have finished the handshake and are connected to each other
    s_client_out += read_process_output_until(s_client, openssl_connect_marker)
    s2nd_out += read_process_output_until(s2nd, openssl_connect_marker)
    if resume == True:
        for i in range(0,5):
            # Wait for openssl to resume connection 5 times in a row, and verify resumption works.
            s_client_out += read_process_output_until(s_client, openssl_reconnect_marker)
            s2nd_out += read_process_output_until(s2nd, openssl_connect_marker)
    # Use a per-run UUID so stale output from a previous run can't match.
    data_to_validate = cipher + " " + str(uuid.uuid4())
    # Write the data to openssl towards s2n server
    msg = (data_to_validate + "\n" + end_of_msg_marker + "\n\n").encode("utf-8")
    s_client.stdin.write(msg)
    s_client.stdin.flush()
    # Write the data to s2n towards openssl client
    s2nd.stdin.write(msg)
    s2nd.stdin.flush()
    # Wait for the Data transfer to complete between OpenSSL and s2n
    s_client_out += read_process_output_until(s_client, end_of_msg_marker)
    s2nd_out += read_process_output_until(s2nd, end_of_msg_marker)
    cleanup_processes(s2nd, s_client)
    # Post-mortem validation of the captured output; first failure wins.
    if validate_data_transfer(data_to_validate, s_client_out, s2nd_out) != 0:
        return -1
    if validate_version(ssl_version, s2nd_out) != 0:
        return -1
    # NOTE(review): validate_resume/validate_ocsp/validate_hostname/
    # validate_selected_certificate are defined elsewhere — confirm they are
    # importable in this 0.9.8 test variant before enabling those paths.
    if resume is True:
        if validate_resume(s2nd_out) != 0:
            return -1
    if ocsp is not None:
        if validate_ocsp(s_client_out) != 0:
            return -1
    if expected_cipher is not None:
        if find_expected_cipher(expected_cipher, s_client_out) != 0:
            return -1
    if strict_hostname is True:
        if validate_hostname(s_client_out) != 0:
            return -1
    if expected_server_cert is not None:
        if validate_selected_certificate(s_client_out, expected_server_cert) != 0:
            return -1
    if expected_extensions is not None:
        for extension in expected_extensions:
            if extension.s_client_validate(s_client_out) != 0:
                return -1
    return 0
def cert_path_to_str(cert_path):
    """
    Convert a cert path into a short label for test output.

    Example: "./test_certs/rsa_2048_sha256_client_cert.pem" => "RSA-2048-SHA256"
    """
    filename = cert_path.split('/')[-1]
    key_type, size, digest = filename.split('_')[:3]
    return '-'.join([key_type, size, digest]).upper()
def print_result(result_prefix, return_code):
    """Print one test-result line, coloring PASSED/FAILED when on a TTY."""
    passed = return_code == 0
    suffix = "PASSED" if passed else "FAILED"
    if sys.stdout.isatty():
        # Bold green for pass, bold red for fail (ANSI escape codes).
        color = "32" if passed else "31"
        suffix = "\033[" + color + ";1m" + suffix + "\033[0m"
    print(result_prefix + suffix)
def create_thread_pool():
    """Build a ThreadPool sized at 4x the CPU count for test parallelism."""
    # Oversubscribe relative to core count: these tests are I/O bound.
    pool_size = multiprocessing.cpu_count() * 4
    print("\tCreating ThreadPool of size: " + str(pool_size))
    return ThreadPool(processes=pool_size)
def run_handshake_test(host, port, ssl_version, cipher, fips_mode, no_ticket, use_client_auth, client_cert_path, client_key_path):
    """
    Run a single handshake against s2nd and print one PASS/FAIL line.

    :return: 0 on success (or when the cipher is skipped), -1 on failure
    """
    cipher_name = cipher.openssl_name

    # Skip ciphers the local openssl build can't exercise (3DES/RC4 are
    # disabled by default in 1.1.1).
    if not cipher.openssl_1_1_1_compatible:
        return 0
    # Skip ciphers that need a newer protocol than the client will offer.
    if ssl_version and ssl_version < cipher.min_tls_vers:
        return 0

    cert_label = str(use_client_auth)
    if (use_client_auth is not None) and (client_cert_path is not None):
        cert_label = cert_path_to_str(client_cert_path)

    ret = try_handshake(host, port, cipher_name, ssl_version, no_ticket=no_ticket, enter_fips_mode=fips_mode,
                        client_auth=use_client_auth, client_cert=client_cert_path, client_key=client_key_path)
    prefix = "Cipher: %-30s ClientCert: %-16s Vers: %-8s ... " % (cipher_name, cert_label, S2N_PROTO_VERS_TO_STR[ssl_version])
    print_result(prefix, ret)
    return ret
def handshake_test(host, port, test_ciphers, fips_mode, no_ticket=False, use_client_auth=None, use_client_cert=None, use_client_key=None):
    """
    Basic handshake tests using all valid combinations of supported cipher
    suites and TLS versions.
    """
    print("\n\tRunning handshake tests:")

    failed = 0
    for ssl_version in [S2N_TLS10, None]:
        print("\n\tTesting ciphers using client version: " + S2N_PROTO_VERS_TO_STR[ssl_version])

        def supported(c):
            # Only test non ECC ciphers, openssl 0.9.8 has trouble with ECDHE.
            # Only test 1.0/SSLv3 ciphers since 0.9.8 only supports those.
            return "ECDHE" not in c.openssl_name and c.min_tls_vers < S2N_TLS11

        results = []
        port_offset = 0
        for cipher in filter(supported, test_ciphers):
            result = run_handshake_test(host, port + port_offset, ssl_version, cipher, fips_mode,
                                        no_ticket, use_client_auth, use_client_cert, use_client_key)
            port_offset += 1
            results.append(result)

        if any(result != 0 for result in results):
            failed = 1

    return failed
def main():
    """
    Parse CLI arguments and run the handshake test suite.

    :return: 0 when all tests passed, non-zero otherwise
    """
    global use_corked_io

    parser = argparse.ArgumentParser(description='Runs TLS server integration tests against s2nd using Openssl s_client')
    parser.add_argument('host', help='The host for s2nd to bind to')
    parser.add_argument('port', type=int, help='The port for s2nd to bind to')
    parser.add_argument('--use_corked_io', action='store_true', help='Turn corked IO on/off')
    parser.add_argument('--libcrypto', default='openssl-1.1.1', choices=S2N_LIBCRYPTO_CHOICES,
                        help="""The Libcrypto that s2n was built with. s2n supports different cipher suites depending on
                        libcrypto version. Defaults to openssl-1.1.1.""")
    args = parser.parse_args()

    # BUG FIX: without the ``global`` declaration above, this assignment
    # created a function-local that shadowed the module-level flag read by
    # try_handshake(), so --use_corked_io silently had no effect.
    use_corked_io = args.use_corked_io

    # Retrieve the test ciphers to use based on the libcrypto version s2n was built with
    test_ciphers = S2N_LIBCRYPTO_TO_TEST_CIPHERS[args.libcrypto]
    host = args.host
    port = args.port

    fips_mode = False
    if environ.get("S2N_TEST_IN_FIPS_MODE") is not None:
        fips_mode = True
        print("\nRunning s2nd in FIPS mode.")

    print("\nRunning tests with: " + os.popen('openssl version').read())
    if use_corked_io == True:
        print("Corked IO is on")

    failed = 0
    failed += handshake_test(host, port, test_ciphers, fips_mode)
    return failed
# Exit with the number of failed tests so CI marks the run red on any failure.
if __name__ == "__main__":
    sys.exit(main())
| |
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""The RPC-invocation-side bridge between RPC Framework and GRPC-on-the-wire."""
import abc
import enum
import logging
import threading
import time
from grpc._adapter import _intermediary_low
from grpc.framework.foundation import activated
from grpc.framework.foundation import logging_pool
from grpc.framework.foundation import relay
from grpc.framework.interfaces.links import links
@enum.unique
class _Read(enum.Enum):
  """States of the response-consuming (read) side of an RPC."""
  AWAITING_METADATA = 'awaiting metadata'
  READING = 'reading'
  AWAITING_ALLOWANCE = 'awaiting allowance'
  CLOSED = 'closed'
@enum.unique
class _HighWrite(enum.Enum):
  """States of the application-facing (high-level) write side of an RPC."""
  OPEN = 'open'
  CLOSED = 'closed'
@enum.unique
class _LowWrite(enum.Enum):
  """States of the wire-facing (low-level) write side of an RPC."""
  OPEN = 'OPEN'
  ACTIVE = 'ACTIVE'
  CLOSED = 'CLOSED'
class _RPCState(object):
  """Mutable per-RPC bookkeeping shared by the _Kernel event handlers."""

  def __init__(
      self, call, request_serializer, response_deserializer, sequence_number,
      read, allowance, high_write, low_write):
    """
    Args:
      call: The _intermediary_low.Call driving this RPC.
      request_serializer: Callable turning request objects into bytes.
      response_deserializer: Callable turning response bytes into objects.
      sequence_number: Sequence number for the next outgoing ticket.
      read: A _Read value for the read side.
      allowance: Count of reads the local ticket mate has granted.
      high_write: A _HighWrite value for the application-facing write side.
      low_write: A _LowWrite value for the wire-facing write side.
    """
    self.call = call
    self.request_serializer = request_serializer
    self.response_deserializer = response_deserializer
    self.sequence_number = sequence_number
    self.read = read
    self.allowance = allowance
    self.high_write = high_write
    self.low_write = low_write
class _Kernel(object):
  """Translates between links.Ticket exchange and low-level gRPC call events.

  All per-RPC state lives in ``self._rpc_states`` guarded by ``self._lock``;
  a single pooled thread runs ``_spin`` to pump the completion queue between
  ``start()`` and ``stop()``.
  """

  def __init__(
      self, channel, host, request_serializers, response_deserializers,
      ticket_relay):
    """
    Args:
      channel: The channel on which calls are created.
      host: The host to specify when invoking RPCs.
      request_serializers: Dict from (group, method) to request serializer.
      response_deserializers: Dict from (group, method) to response
        deserializer.
      ticket_relay: Relay through which outgoing links.Tickets are emitted.
    """
    self._lock = threading.Lock()
    self._channel = channel
    self._host = host
    self._request_serializers = request_serializers
    self._response_deserializers = response_deserializers
    self._relay = ticket_relay
    # Populated by start() and cleared by stop(); a None completion queue
    # marks this kernel as inactive.
    self._completion_queue = None
    self._rpc_states = None
    self._pool = None

  def _on_write_event(self, operation_id, unused_event, rpc_state):
    """Handle acceptance of a previously requested write."""
    if rpc_state.high_write is _HighWrite.CLOSED:
      # Application already closed its side: finish the low-level call too.
      rpc_state.call.complete(operation_id)
      rpc_state.low_write = _LowWrite.CLOSED
    else:
      # Emit a ticket granting one more payload's worth of allowance.
      ticket = links.Ticket(
          operation_id, rpc_state.sequence_number, None, None, None, None, 1,
          None, None, None, None, None, None)
      rpc_state.sequence_number += 1
      self._relay.add_value(ticket)
      rpc_state.low_write = _LowWrite.OPEN

  def _on_read_event(self, operation_id, event, rpc_state):
    """Handle arrival of response bytes (or end-of-stream)."""
    if event.bytes is None:
      # A None payload signals the read side is finished.
      rpc_state.read = _Read.CLOSED
    else:
      if 0 < rpc_state.allowance:
        # Still allowed to read ahead: start the next read immediately.
        rpc_state.allowance -= 1
        rpc_state.call.read(operation_id)
      else:
        rpc_state.read = _Read.AWAITING_ALLOWANCE
      ticket = links.Ticket(
          operation_id, rpc_state.sequence_number, None, None, None, None, None,
          None, rpc_state.response_deserializer(event.bytes), None, None, None,
          None)
      rpc_state.sequence_number += 1
      self._relay.add_value(ticket)

  def _on_metadata_event(self, operation_id, event, rpc_state):
    """Handle receipt of initial metadata; kicks off the first read."""
    rpc_state.allowance -= 1
    rpc_state.call.read(operation_id)
    rpc_state.read = _Read.READING
    ticket = links.Ticket(
        operation_id, rpc_state.sequence_number, None, None,
        links.Ticket.Subscription.FULL, None, None, event.metadata, None, None,
        None, None, None)
    rpc_state.sequence_number += 1
    self._relay.add_value(ticket)

  def _on_finish_event(self, operation_id, event, rpc_state):
    """Handle RPC completion: drop state and relay a termination ticket."""
    self._rpc_states.pop(operation_id, None)
    # Map the low-level status code onto a links.Ticket.Termination value.
    if event.status.code is _intermediary_low.Code.OK:
      termination = links.Ticket.Termination.COMPLETION
    elif event.status.code is _intermediary_low.Code.CANCELLED:
      termination = links.Ticket.Termination.CANCELLATION
    elif event.status.code is _intermediary_low.Code.DEADLINE_EXCEEDED:
      termination = links.Ticket.Termination.EXPIRATION
    else:
      termination = links.Ticket.Termination.TRANSMISSION_FAILURE
    ticket = links.Ticket(
        operation_id, rpc_state.sequence_number, None, None, None, None, None,
        None, None, event.metadata, event.status.code, event.status.details,
        termination)
    rpc_state.sequence_number += 1
    self._relay.add_value(ticket)

  def _spin(self, completion_queue):
    """Event loop: dispatch completion-queue events until a STOP event."""
    while True:
      event = completion_queue.get(None)
      if event.kind is _intermediary_low.Event.Kind.STOP:
        return
      operation_id = event.tag
      with self._lock:
        if self._completion_queue is None:
          # stop() ran between get() and acquiring the lock; drop the event.
          continue
        rpc_state = self._rpc_states.get(operation_id)
        if rpc_state is not None:
          if event.kind is _intermediary_low.Event.Kind.WRITE_ACCEPTED:
            self._on_write_event(operation_id, event, rpc_state)
          elif event.kind is _intermediary_low.Event.Kind.METADATA_ACCEPTED:
            self._on_metadata_event(operation_id, event, rpc_state)
          elif event.kind is _intermediary_low.Event.Kind.READ_ACCEPTED:
            self._on_read_event(operation_id, event, rpc_state)
          elif event.kind is _intermediary_low.Event.Kind.FINISH:
            self._on_finish_event(operation_id, event, rpc_state)
          elif event.kind is _intermediary_low.Event.Kind.COMPLETE_ACCEPTED:
            pass
          else:
            logging.error('Illegal RPC event! %s', (event,))

  def _invoke(
      self, operation_id, group, method, initial_metadata, payload, termination,
      timeout, allowance):
    """Invoke an RPC.
    Args:
      operation_id: Any object to be used as an operation ID for the RPC.
      group: The group to which the RPC method belongs.
      method: The RPC method name.
      initial_metadata: The initial metadata object for the RPC.
      payload: A payload object for the RPC or None if no payload was given at
        invocation-time.
      termination: A links.Ticket.Termination value or None indicated whether or
        not more writes will follow from this side of the RPC.
      timeout: A duration of time in seconds to allow for the RPC.
      allowance: The number of payloads (beyond the free first one) that the
        local ticket exchange mate has granted permission to be read.
    """
    if termination is links.Ticket.Termination.COMPLETION:
      high_write = _HighWrite.CLOSED
    elif termination is None:
      high_write = _HighWrite.OPEN
    else:
      # Any other termination kind means the RPC is abandoned before start.
      return

    request_serializer = self._request_serializers.get((group, method))
    response_deserializer = self._response_deserializers.get((group, method))
    if request_serializer is None or response_deserializer is None:
      # Unknown method: immediately report cancellation back to the caller.
      cancellation_ticket = links.Ticket(
          operation_id, 0, None, None, None, None, None, None, None, None, None,
          None, links.Ticket.Termination.CANCELLATION)
      self._relay.add_value(cancellation_ticket)
      return

    call = _intermediary_low.Call(
        self._channel, self._completion_queue, '/%s/%s' % (group, method),
        self._host, time.time() + timeout)
    if initial_metadata is not None:
      for metadata_key, metadata_value in initial_metadata:
        call.add_metadata(metadata_key, metadata_value)
    call.invoke(self._completion_queue, operation_id, operation_id)
    if payload is None:
      if high_write is _HighWrite.CLOSED:
        call.complete(operation_id)
        low_write = _LowWrite.CLOSED
      else:
        low_write = _LowWrite.OPEN
    else:
      call.write(request_serializer(payload), operation_id)
      low_write = _LowWrite.ACTIVE
    # The first read is free, hence 1 + allowance.
    self._rpc_states[operation_id] = _RPCState(
        call, request_serializer, response_deserializer, 0,
        _Read.AWAITING_METADATA, 1 if allowance is None else (1 + allowance),
        high_write, low_write)

  def _advance(self, operation_id, rpc_state, payload, termination, allowance):
    """Apply a mid-RPC ticket: write, grant allowance, and/or terminate."""
    if payload is not None:
      rpc_state.call.write(rpc_state.request_serializer(payload), operation_id)
      rpc_state.low_write = _LowWrite.ACTIVE

    if allowance is not None:
      if rpc_state.read is _Read.AWAITING_ALLOWANCE:
        # One unit of the new allowance is consumed by restarting the read.
        rpc_state.allowance += allowance - 1
        rpc_state.call.read(operation_id)
        rpc_state.read = _Read.READING
      else:
        rpc_state.allowance += allowance

    if termination is links.Ticket.Termination.COMPLETION:
      rpc_state.high_write = _HighWrite.CLOSED
      if rpc_state.low_write is _LowWrite.OPEN:
        rpc_state.call.complete(operation_id)
        rpc_state.low_write = _LowWrite.CLOSED
    elif termination is not None:
      # Any non-completion termination aborts the call.
      rpc_state.call.cancel()

  def add_ticket(self, ticket):
    """Accept a links.Ticket; sequence number zero starts a new RPC."""
    with self._lock:
      if self._completion_queue is None:
        # Kernel is stopped; silently drop the ticket.
        return
      if ticket.sequence_number == 0:
        self._invoke(
            ticket.operation_id, ticket.group, ticket.method,
            ticket.initial_metadata, ticket.payload, ticket.termination,
            ticket.timeout, ticket.allowance)
      else:
        rpc_state = self._rpc_states.get(ticket.operation_id)
        if rpc_state is not None:
          self._advance(
              ticket.operation_id, rpc_state, ticket.payload,
              ticket.termination, ticket.allowance)

  def start(self):
    """Starts this object.
    This method must be called before attempting to exchange tickets with this
    object.
    """
    with self._lock:
      self._completion_queue = _intermediary_low.CompletionQueue()
      self._rpc_states = {}
      # One pooled thread is enough: it only pumps the completion queue.
      self._pool = logging_pool.pool(1)
      self._pool.submit(self._spin, self._completion_queue)

  def stop(self):
    """Stops this object.
    This method must be called for proper termination of this object, and no
    attempts to exchange tickets with this object may be made after this method
    has been called.
    """
    with self._lock:
      self._completion_queue.stop()
      self._completion_queue = None
      pool = self._pool
      self._pool = None
      self._rpc_states = None
    # Shut the pool down outside the lock so _spin can drain and exit.
    pool.shutdown(wait=True)
class InvocationLink(links.Link, activated.Activated):
  """A links.Link for use on the invocation-side of a gRPC connection.
  Implementations of this interface are only valid for use when activated.
  """
  # Python 2-style ABC declaration; concrete subclasses must implement the
  # links.Link and activated.Activated interfaces.
  __metaclass__ = abc.ABCMeta
class _InvocationLink(InvocationLink):
  """Concrete InvocationLink pairing a _Kernel with an outgoing ticket relay."""

  def __init__(
      self, channel, host, request_serializers, response_deserializers):
    """
    Args:
      channel: A channel for use by the link.
      host: The host to specify when invoking RPCs.
      request_serializers: Dict from (group, method) to request serializer.
      response_deserializers: Dict from (group, method) to response
        deserializer.
    """
    # The relay's behavior is set later via join_link().
    self._relay = relay.relay(None)
    self._kernel = _Kernel(
        channel, host, request_serializers, response_deserializers, self._relay)

  def _start(self):
    """Start the relay and kernel; returns self for chaining."""
    self._relay.start()
    self._kernel.start()
    return self

  def _stop(self):
    """Stop the kernel first so no more tickets flow into the relay."""
    self._kernel.stop()
    self._relay.stop()

  def accept_ticket(self, ticket):
    """See links.Link.accept_ticket for specification."""
    self._kernel.add_ticket(ticket)

  def join_link(self, link):
    """See links.Link.join_link for specification."""
    self._relay.set_behavior(link.accept_ticket)

  def __enter__(self):
    """See activated.Activated.__enter__ for specification."""
    return self._start()

  def __exit__(self, exc_type, exc_val, exc_tb):
    """See activated.Activated.__exit__ for specification."""
    self._stop()
    return False

  def start(self):
    """See activated.Activated.start for specification."""
    return self._start()

  def stop(self):
    """See activated.Activated.stop for specification."""
    self._stop()
def invocation_link(channel, host, request_serializers, response_deserializers):
  """Creates an InvocationLink.
  Args:
    channel: A channel for use by the link.
    host: The host to specify when invoking RPCs.
    request_serializers: A dict from group-method pair to request object
      serialization behavior.
    response_deserializers: A dict from group-method pair to response object
      deserialization behavior.
  Returns:
    An InvocationLink.
  """
  link = _InvocationLink(
      channel, host, request_serializers, response_deserializers)
  return link
| |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import numpy as np
import tensorflow as tf
import argparse
import os
from tensorpack import *
from tensorpack.tfutils.symbolic_functions import *
from tensorpack.tfutils.summary import *
from tensorpack.tfutils.varreplace import remap_variables
from dorefa import get_dorefa
"""
CIFAR10 DenseNet example. See: http://arxiv.org/abs/1608.06993
Code is developed based on Yuxin Wu's ResNet implementation: https://github.com/ppwwyyxx/tensorpack/tree/master/examples/ResNet
Results using DenseNet (L=40, K=12) on Cifar10 with data augmentation: ~5.77% test error.
Running time:
On one TITAN X GPU (CUDA 7.5 and cudnn 5.1), the code should run ~5iters/s on a batch size 64.
"""
# DoReFa-Net quantization bit widths: weights, activations, gradients.
BITW = 1
BITA = 2
BITG = 6
# Mini-batch size for both training and evaluation.
BATCH_SIZE = 64
class Model(ModelDesc):
    """DenseNet (3 dense blocks) with DoReFa-quantized weights/activations."""

    def __init__(self, depth):
        super(Model, self).__init__()
        # Each of the 3 dense blocks gets (depth - 4) / 3 layers; the other
        # 4 layers are conv0, the two transitions, and the final classifier.
        self.N = int((depth - 4) / 3)
        self.growthRate =12

    def _get_inputs(self):
        # 32x32 RGB images and integer class labels.
        return [InputDesc(tf.float32, [None, 32, 32, 3], 'input'),
                InputDesc(tf.int32, [None], 'label')
        ]

    def _build_graph(self, input_vars):
        image, label = input_vars
        #TODO something different here
        # Scale pixel values from [0, 255] to roughly [-1, 1).
        image = image / 128.0 - 1

        #============================= from dorefa ======================================
        # fw/fa/fg quantize weights, activations, and gradients respectively.
        fw, fa, fg = get_dorefa(BITW, BITA, BITG)
        old_get_variable = tf.get_variable

        # monkey-patch tf.get_variable to apply fw
        def new_get_variable(v):
            name = v.op.name
            # don't binarize first and last layer
            if not name.endswith('W') or 'conv0' in name or 'fct' in name:
                return v
            else:
                logger.info("Binarizing weight {}".format(v.op.name))
                return fw(v)

        def nonlin(x):
            if BITA == 32:
                return tf.nn.relu(x)    # still use relu for 32bit cases
            # Clip to [0, 1] so fa sees a bounded activation.
            return tf.clip_by_value(x, 0.0, 1.0)

        def activate(x):
            return fa(nonlin(x))
        #============================= from dorefa ======================================

        def conv(name, l, channel, stride):
            # 3x3 conv, MSRA-style init scaled by fan-in, no bias.
            return Conv2D(name, l, channel, 3, stride=stride,
                          nl=tf.identity, use_bias=False,
                          W_init=tf.random_normal_initializer(stddev=np.sqrt(2.0/9/channel)))

        def add_layer(name, l):
            # One dense layer: BN-ReLU-Conv, output concatenated to the input.
            shape = l.get_shape().as_list()
            in_channel = shape[3]
            with tf.variable_scope(name) as scope:
                c = BatchNorm('bn1', l)
                c = tf.nn.relu(c)
                c = conv('conv1', c, self.growthRate, 1)
                l = tf.concat([c, l], 3)
            return l

        def add_transition(name, l):
            # Transition: 1x1 conv (channel-preserving) + 2x2 average pooling.
            shape = l.get_shape().as_list()
            in_channel = shape[3]
            with tf.variable_scope(name) as scope:
                l = BatchNorm('bn1', l)
                l = tf.nn.relu(l)
                l = Conv2D('conv1', l, in_channel, 1, stride=1, use_bias=False, nl=tf.nn.relu)
                l = AvgPooling('pool', l, 2)
            return l

        def dense_net(name):
            # conv0 -> [block + transition] x2 -> block -> BN/ReLU -> GAP -> FC.
            l = conv('conv0', image, 16, 1)
            with tf.variable_scope('block1') as scope:

                for i in range(self.N):
                    l = add_layer('dense_layer.{}'.format(i), l)
                l = add_transition('transition1', l)

            with tf.variable_scope('block2') as scope:

                for i in range(self.N):
                    l = add_layer('dense_layer.{}'.format(i), l)
                l = add_transition('transition2', l)

            with tf.variable_scope('block3') as scope:

                for i in range(self.N):
                    l = add_layer('dense_layer.{}'.format(i), l)
            l = BatchNorm('bnlast', l)
            l = tf.nn.relu(l)
            l = GlobalAvgPooling('gap', l)
            logits = FullyConnected('linear', l, out_dim=10, nl=tf.identity)

            return logits

        # Build the net with variables remapped through the DoReFa quantizer.
        with remap_variables(new_get_variable):
            logits = dense_net("dense_net")

        prob = tf.nn.softmax(logits, name='output')

        cost = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=label)
        cost = tf.reduce_mean(cost, name='cross_entropy_loss')

        wrong = prediction_incorrect(logits, label)
        # monitor training error
        add_moving_summary(tf.reduce_mean(wrong, name='train_error'))

        # weight decay on all W
        wd_cost = tf.multiply(1e-4, regularize_cost('.*/W', tf.nn.l2_loss), name='wd_cost')
        add_moving_summary(cost, wd_cost)

        add_param_summary(('.*/W', ['histogram']))   # monitor W
        self.cost = tf.add_n([cost, wd_cost], name='cost')

    def _get_optimizer(self):
        # Learning rate is a non-trainable variable so callbacks can adjust it.
        lr = tf.get_variable('learning_rate', initializer=0.1, trainable=False)
        tf.summary.scalar('learning_rate', lr)
        return tf.train.MomentumOptimizer(lr, 0.9, use_nesterov=True)
def get_data(train_or_test):
    """
    Build the CIFAR10 dataflow with per-pixel mean subtraction and, for
    training, pad-crop-flip augmentation.
    """
    is_train = train_or_test == 'train'
    ds = dataset.Cifar10(train_or_test)
    pp_mean = ds.get_per_pixel_mean()

    subtract_mean = imgaug.MapImage(lambda x: x - pp_mean)
    if is_train:
        augmentors = [
            imgaug.CenterPaste((40, 40)),
            imgaug.RandomCrop((32, 32)),
            imgaug.Flip(horiz=True),
            subtract_mean,
        ]
    else:
        augmentors = [subtract_mean]

    ds = AugmentImageComponent(ds, augmentors)
    # Keep the (possibly smaller) last batch only when evaluating.
    ds = BatchData(ds, BATCH_SIZE, remainder=not is_train)
    if is_train:
        ds = PrefetchData(ds, 3, 2)
    return ds
def get_config():
    """
    Assemble the TrainConfig: dataflow, callbacks, model, and LR schedule.

    NOTE(review): reads the module-level ``args`` parsed in __main__; calling
    this before argument parsing will raise NameError.
    """
    # NOTE(review): 'fisrt' is a typo in the log directory name; left as-is
    # since existing train_log directories already use this spelling.
    log_dir = 'train_log/cifar10-single-fisrt%s-second%s-max%s' % (str(args.drop_1), str(args.drop_2), str(args.max_epoch))
    logger.set_logger_dir(log_dir, action='n')

    # prepare dataset
    dataset_train = get_data('train')
    steps_per_epoch = dataset_train.size()
    dataset_test = get_data('test')

    return TrainConfig(
        dataflow=dataset_train,
        callbacks=[
            ModelSaver(),
            InferenceRunner(dataset_test,
                            [ScalarStats('cost'), ClassificationError()]),
            # Drop the learning rate 0.1 -> 0.01 -> 0.001 at the given epochs.
            ScheduledHyperParamSetter('learning_rate',
                                      [(1, 0.1), (args.drop_1, 0.01), (args.drop_2, 0.001)])
        ],
        model=Model(depth=args.depth),
        steps_per_epoch=steps_per_epoch,
        max_epoch=args.max_epoch,
    )
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')  # nargs='*' in multi mode
    parser.add_argument('--load', help='load model')
    # BUG FIX: these arguments previously had int defaults but no ``type=``,
    # so values supplied on the command line arrived as strings and broke
    # Model(depth=...) arithmetic and the LR-schedule epoch comparisons.
    parser.add_argument('--drop_1', type=int, default=150, help='Epoch to drop learning rate to 0.01.')
    parser.add_argument('--drop_2', type=int, default=225, help='Epoch to drop learning rate to 0.001')
    parser.add_argument('--depth', type=int, default=40, help='The depth of densenet')
    parser.add_argument('--max_epoch', type=int, default=300, help='max epoch')
    args = parser.parse_args()

    if args.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu

    config = get_config()
    if args.load:
        config.session_init = SaverRestore(args.load)
    if args.gpu:
        # One training tower per visible GPU.
        config.nr_tower = len(args.gpu.split(','))
    SyncMultiGPUTrainer(config).train()
| |
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
The system for scheduling tasks and executing them in order.
Deals with dependencies, priorities, resources, etc.
The :py:class:`~luigi.worker.Worker` pulls tasks from the scheduler (usually over the REST interface) and executes them.
See :doc:`/central_scheduler` for more info.
"""
import collections
try:
import cPickle as pickle
except ImportError:
import pickle
import datetime
import functools
import itertools
import logging
import os
import re
import time
from luigi import six
from luigi import configuration
from luigi import notifications
from luigi import parameter
from luigi import task_history as history
from luigi.task_status import DISABLED, DONE, FAILED, PENDING, RUNNING, SUSPENDED, UNKNOWN
from luigi.task import Config
# Use the "luigi.server" logger so scheduler messages group with the server's.
logger = logging.getLogger("luigi.server")
class Scheduler(object):
    """
    Abstract base class.
    Note that the methods all take string arguments, not Task objects...
    """
    # Fixed: the docstring previously ended with ``\"\"\"\"\"`` (five quotes),
    # which closed it and left a stray empty-string expression statement.
    add_task = NotImplemented
    get_work = NotImplemented
    ping = NotImplemented
# Upstream-status categories reported for PENDING tasks, ranked by severity below.
UPSTREAM_RUNNING = 'UPSTREAM_RUNNING'
UPSTREAM_MISSING_INPUT = 'UPSTREAM_MISSING_INPUT'
UPSTREAM_FAILED = 'UPSTREAM_FAILED'
UPSTREAM_DISABLED = 'UPSTREAM_DISABLED'
# Order matters: '' (no upstream problem) is least severe, DISABLED most severe.
UPSTREAM_SEVERITY_ORDER = (
    '',
    UPSTREAM_RUNNING,
    UPSTREAM_MISSING_INPUT,
    UPSTREAM_FAILED,
    UPSTREAM_DISABLED,
)
# Key function mapping an upstream status to its rank in the severity order (for max()).
UPSTREAM_SEVERITY_KEY = UPSTREAM_SEVERITY_ORDER.index
# How a dependency's own status translates into its dependents' upstream status.
STATUS_TO_UPSTREAM_MAP = {
    FAILED: UPSTREAM_FAILED,
    RUNNING: UPSTREAM_RUNNING,
    PENDING: UPSTREAM_MISSING_INPUT,
    DISABLED: UPSTREAM_DISABLED,
}
# Extracts the task family from an id like 'Family(params)' or 'Family_params'.
TASK_FAMILY_RE = re.compile(r'([^(_]+)[(_]')
class scheduler(Config):
    """Config section for the central scheduler (``[scheduler]`` in the luigi config)."""
    # TODO(erikbern): the config_path is needed for backwards compatibility. We should drop the compatibility
    # at some point (in particular this would force users to replace all dashes with underscores in the config)
    retry_delay = parameter.FloatParameter(default=900.0)  # seconds before a FAILED task goes back to PENDING
    remove_delay = parameter.FloatParameter(default=600.0)  # seconds before a stakeholder-less task is removed
    worker_disconnect_delay = parameter.FloatParameter(default=60.0)  # seconds of silence before a worker is pruned
    state_path = parameter.Parameter(default='/var/lib/luigi-server/state.pickle')
    # Jobs are disabled if we see more than disable_failures failures in disable_window seconds.
    # These disables last for disable_persist seconds.
    disable_window = parameter.IntParameter(default=3600,
                                            config_path=dict(section='scheduler', name='disable-window-seconds'))
    disable_failures = parameter.IntParameter(default=None,
                                              config_path=dict(section='scheduler', name='disable-num-failures'))
    disable_hard_timeout = parameter.IntParameter(default=None,
                                                  config_path=dict(section='scheduler', name='disable-hard-timeout'))
    disable_persist = parameter.IntParameter(default=86400,
                                             config_path=dict(section='scheduler', name='disable-persist-seconds'))
    max_shown_tasks = parameter.IntParameter(default=100000)  # cap for task_list responses
    max_graph_nodes = parameter.IntParameter(default=100000)  # cap for graph traversals
    prune_done_tasks = parameter.BoolParameter(default=False)
    record_task_history = parameter.BoolParameter(default=False)  # requires sqlalchemy when enabled
    prune_on_get_work = parameter.BoolParameter(default=False)
def fix_time(x):
    # Backwards compatibility for a fix in Dec 2014. Prior to the fix, pickled state
    # might store datetime objects; normalize those to epoch seconds.
    # Let's remove this function soon
    if isinstance(x, datetime.datetime):
        return time.mktime(x.timetuple())
    return x
class Failures(object):
    """
    This class tracks the number of failures in a given time window.

    Failures added are marked with the current timestamp, and this class counts
    the number of failures in a sliding time window ending at the present.
    """

    def __init__(self, window):
        """
        Initialize with the given window.

        :param window: how long to track failures for, as a float (number of seconds).
        """
        self.window = window
        self.failures = collections.deque()
        self.first_failure_time = None

    def add_failure(self):
        """Record a failure stamped with the current wall-clock time."""
        now = time.time()
        if not self.first_failure_time:
            self.first_failure_time = now
        self.failures.append(now)

    def num_failures(self):
        """Evict entries older than the window, then return the remaining count."""
        cutoff = time.time() - self.window
        while self.failures and fix_time(self.failures[0]) < cutoff:
            self.failures.popleft()
        return len(self.failures)

    def clear(self):
        """Drop all recorded failures (first_failure_time is deliberately kept)."""
        self.failures.clear()
def _get_default(x, default):
if x is not None:
return x
else:
return default
class Task(object):
    """
    Scheduler-side record for one task instance: status, timing, workers
    attached to it, and the failure bookkeeping used for auto-disabling.
    """

    def __init__(self, task_id, status, deps, resources=None, priority=0, family='', module=None,
                 params=None, disable_failures=None, disable_window=None, disable_hard_timeout=None,
                 tracking_url=None):
        self.id = task_id
        # Worker ids somehow related to this task (don't prune while any remain active).
        self.stakeholders = set()
        # Worker ids able to run this task; it is effectively broken when none are active.
        self.workers = set()
        self.deps = set() if deps is None else set(deps)
        self.status = status  # PENDING, RUNNING, FAILED or DONE
        self.time = time.time()  # timestamp when task was first added
        self.updated = self.time
        self.retry = None
        self.remove = None
        self.worker_running = None  # id of the worker currently running the task, or None
        self.time_running = None  # timestamp when picked up by worker
        self.expl = None
        self.priority = priority
        self.resources = _get_default(resources, {})
        self.family = family
        self.module = module
        self.params = _get_default(params, {})
        self.disable_failures = disable_failures
        self.disable_hard_timeout = disable_hard_timeout
        self.failures = Failures(disable_window)
        self.tracking_url = tracking_url
        self.scheduler_disable_time = None
        self.runnable = False

    def __repr__(self):
        return "Task(%r)" % vars(self)

    def add_failure(self):
        """Record one failure event for this task."""
        self.failures.add_failure()

    def has_excessive_failures(self):
        """True once the hard timeout or the failure-count threshold is crossed."""
        started_failing_at = self.failures.first_failure_time
        if (started_failing_at is not None and self.disable_hard_timeout and
                time.time() >= started_failing_at + self.disable_hard_timeout):
            return True
        return self.failures.num_failures() >= self.disable_failures

    def can_disable(self):
        """Auto-disable applies only when at least one threshold is configured."""
        return not (self.disable_failures is None and self.disable_hard_timeout is None)

    @property
    def pretty_id(self):
        rendered_params = ('{}={}'.format(key, value) for key, value in self.params.items())
        return '{}({})'.format(self.family, ', '.join(rendered_params))
class Worker(object):
    """
    Structure for tracking worker activity and keeping their references.
    """

    def __init__(self, worker_id, last_active=None):
        self.id = worker_id
        self.reference = None  # out-of-band info about the worker (currently a dict holding the host)
        self.last_active = last_active or time.time()  # seconds since epoch
        self.last_get_work = None
        self.started = time.time()  # seconds since epoch
        self.tasks = set()  # task objects
        self.info = {}
        self.disabled = False

    def add_info(self, info):
        """Merge worker-reported metadata into this record."""
        self.info.update(info)

    def update(self, worker_reference, get_work=False):
        """Note that the worker just contacted the scheduler."""
        if worker_reference:
            self.reference = worker_reference
        self.last_active = time.time()
        if get_work:
            self.last_get_work = time.time()

    def prune(self, config):
        """True when the worker has been silent long enough to be dropped (probably killed)."""
        silent_deadline = self.last_active + config.worker_disconnect_delay
        if silent_deadline < time.time():
            return True

    def get_pending_tasks(self, state):
        """
        Get PENDING (and RUNNING) tasks for this worker.

        You have to pass in the state for optimization reasons.
        """
        if len(self.tasks) >= state.num_pending_tasks():
            return state.get_pending_tasks()
        return six.moves.filter(lambda task: task.status in [PENDING, RUNNING],
                                self.tasks)

    def is_trivial_worker(self, state):
        """
        If it's not an assistant having only tasks that are without
        requirements.

        We have to pass the state parameter for optimization reasons.
        """
        return (not self.assistant and
                all(not task.resources for task in self.get_pending_tasks(state)))

    @property
    def assistant(self):
        return self.info.get('assistant', False)

    def __str__(self):
        return self.id
class SimpleTaskState(object):
    """
    Keep track of the current state and handle persistence.
    The point of this class is to enable other ways to keep state, eg. by using a database
    These will be implemented by creating an abstract base class that this and other classes
    inherit from.
    """
    def __init__(self, state_path):
        # Path of the pickle file used by dump()/load().
        self._state_path = state_path
        self._tasks = {}  # map from id to a Task object
        self._status_tasks = collections.defaultdict(dict)  # status -> {task_id: Task}; secondary index kept in sync with _tasks
        self._active_workers = {}  # map from id to a Worker object
    def get_state(self):
        # Everything that must survive a restart; _status_tasks is rebuilt on load().
        return self._tasks, self._active_workers
    def set_state(self, state):
        # Inverse of get_state(); caller is responsible for rebuilding _status_tasks.
        self._tasks, self._active_workers = state
    def dump(self):
        """Pickle the current state to disk. I/O failures are logged, not raised."""
        try:
            with open(self._state_path, 'wb') as fobj:
                pickle.dump(self.get_state(), fobj)
        except IOError:
            logger.warning("Failed saving scheduler state", exc_info=1)
        else:
            logger.info("Saved state in %s", self._state_path)
    # prone to lead to crashes when old state is unpickled with updated code. TODO some kind of version control?
    def load(self):
        """Load pickled state from disk (if any), upgrading records written by older versions."""
        if os.path.exists(self._state_path):
            logger.info("Attempting to load state from %s", self._state_path)
            try:
                with open(self._state_path, 'rb') as fobj:
                    state = pickle.load(fobj)
            except BaseException:
                # Deliberately broad: any unpickling problem means "start fresh".
                logger.exception("Error when loading state. Starting from clean slate.")
                return
            self.set_state(state)
            # Rebuild the per-status index from scratch.
            self._status_tasks = collections.defaultdict(dict)
            for task in six.itervalues(self._tasks):
                self._status_tasks[task.status][task.id] = task
            # Convert from old format
            # TODO: this is really ugly, we need something more future-proof
            # Every time we add an attribute to the Worker or Task class, this
            # code needs to be updated
            # Compatibility since 2014-06-02
            for k, v in six.iteritems(self._active_workers):
                if isinstance(v, float):
                    self._active_workers[k] = Worker(worker_id=k, last_active=v)
            # Compatibility since 2015-05-28
            if any(not hasattr(w, 'tasks') for k, w in six.iteritems(self._active_workers)):
                # If you load from an old format where Workers don't contain tasks.
                for k, worker in six.iteritems(self._active_workers):
                    worker.tasks = set()
                for task in six.itervalues(self._tasks):
                    for worker_id in task.workers:
                        self._active_workers[worker_id].tasks.add(task)
            # Compatibility since 2015-04-28
            if any(not hasattr(t, 'disable_hard_timeout') for t in six.itervalues(self._tasks)):
                for t in six.itervalues(self._tasks):
                    t.disable_hard_timeout = None
        else:
            logger.info("No prior state file exists at %s. Starting with clean slate", self._state_path)
    def get_active_tasks(self, status=None):
        """Yield all tasks, or only those with the given status."""
        if status:
            for task in six.itervalues(self._status_tasks[status]):
                yield task
        else:
            for task in six.itervalues(self._tasks):
                yield task
    def get_running_tasks(self):
        return six.itervalues(self._status_tasks[RUNNING])
    def get_pending_tasks(self):
        # From a worker's viewpoint both PENDING and RUNNING count as pending.
        return itertools.chain.from_iterable(six.itervalues(self._status_tasks[status])
                                             for status in [PENDING, RUNNING])
    def num_pending_tasks(self):
        """
        Return how many tasks are PENDING + RUNNING. O(1).
        """
        return len(self._status_tasks[PENDING]) + len(self._status_tasks[RUNNING])
    def get_task(self, task_id, default=None, setdefault=None):
        # With setdefault, behaves like dict.setdefault and also maintains the status index.
        if setdefault:
            task = self._tasks.setdefault(task_id, setdefault)
            self._status_tasks[task.status][task.id] = task
            return task
        else:
            return self._tasks.get(task_id, default)
    def has_task(self, task_id):
        return task_id in self._tasks
    def re_enable(self, task, config=None):
        """Lift a scheduler-imposed disable; with a config, also reset the task to FAILED."""
        task.scheduler_disable_time = None
        task.failures.clear()
        if config:
            self.set_status(task, FAILED, config)
            # set_status may have recorded a failure; clear again so the count restarts at zero.
            task.failures.clear()
    def set_status(self, task, new_status, config=None):
        """Transition *task* to *new_status*, applying auto-disable rules (config required for FAILED)."""
        if new_status == FAILED:
            assert config is not None
        if new_status == DISABLED and task.status == RUNNING:
            return
        if task.status == DISABLED:
            if new_status == DONE:
                self.re_enable(task)
            # don't allow workers to override a scheduler disable
            elif task.scheduler_disable_time is not None and new_status != DISABLED:
                return
        if new_status == FAILED and task.can_disable() and task.status != DISABLED:
            task.add_failure()
            if task.has_excessive_failures():
                task.scheduler_disable_time = time.time()
                new_status = DISABLED
                notifications.send_error_email(
                    'Luigi Scheduler: DISABLED {task} due to excessive failures'.format(task=task.id),
                    '{task} failed {failures} times in the last {window} seconds, so it is being '
                    'disabled for {persist} seconds'.format(
                        failures=config.disable_failures,
                        task=task.id,
                        window=config.disable_window,
                        persist=config.disable_persist,
                    ))
        elif new_status == DISABLED:
            task.scheduler_disable_time = None
        if new_status != task.status:
            # Keep the per-status index in sync with the task's status field.
            self._status_tasks[task.status].pop(task.id)
            self._status_tasks[new_status][task.id] = task
            task.status = new_status
            task.updated = time.time()
    def fail_dead_worker_task(self, task, config, assistants):
        # If a running worker disconnects, tag all its jobs as FAILED and subject it to the same retry logic
        if task.status == RUNNING and task.worker_running and task.worker_running not in task.stakeholders | assistants:
            logger.info("Task %r is marked as running by disconnected worker %r -> marking as "
                        "FAILED with retry delay of %rs", task.id, task.worker_running,
                        config.retry_delay)
            task.worker_running = None
            self.set_status(task, FAILED, config)
            task.retry = time.time() + config.retry_delay
    def prune(self, task, config):
        """Return True when *task* should be removed; also handles re-enabling and retry timing."""
        remove = False
        # Mark tasks with no remaining active stakeholders for deletion
        if not task.stakeholders:
            if task.remove is None:
                logger.info("Task %r has stakeholders %r but none remain connected -> will remove "
                            "task in %s seconds", task.id, task.stakeholders, config.remove_delay)
                task.remove = time.time() + config.remove_delay
        # Re-enable task after the disable time expires
        if task.status == DISABLED and task.scheduler_disable_time is not None:
            if time.time() - fix_time(task.scheduler_disable_time) > config.disable_persist:
                self.re_enable(task, config)
        # Remove tasks that have no stakeholders
        if task.remove and time.time() > task.remove:
            logger.info("Removing task %r (no connected stakeholders)", task.id)
            remove = True
        # Reset FAILED tasks to PENDING if max timeout is reached, and retry delay is >= 0
        if task.status == FAILED and config.retry_delay >= 0 and task.retry < time.time():
            self.set_status(task, PENDING, config)
        return remove
    def inactivate_tasks(self, delete_tasks):
        # The terminology is a bit confusing: we used to "delete" tasks when they became inactive,
        # but with a pluggable state storage, you might very well want to keep some history of
        # older tasks as well. That's why we call it "inactivate" (as in the verb)
        for task in delete_tasks:
            task_obj = self._tasks.pop(task)
            self._status_tasks[task_obj.status].pop(task)
    def get_active_workers(self, last_active_lt=None, last_get_work_gt=None):
        """Yield workers, optionally filtered by last-activity / last-get-work timestamps."""
        for worker in six.itervalues(self._active_workers):
            if last_active_lt is not None and worker.last_active >= last_active_lt:
                continue
            last_get_work = getattr(worker, 'last_get_work', None)
            if last_get_work_gt is not None and (
                    last_get_work is None or last_get_work <= last_get_work_gt):
                continue
            yield worker
    def get_assistants(self, last_active_lt=None):
        return filter(lambda w: w.assistant, self.get_active_workers(last_active_lt))
    def get_worker_ids(self):
        return self._active_workers.keys()  # only used for unit tests
    def get_worker(self, worker_id):
        # Creates the Worker record on first sight of worker_id.
        return self._active_workers.setdefault(worker_id, Worker(worker_id))
    def inactivate_workers(self, delete_workers):
        # Mark workers as inactive
        for worker in delete_workers:
            self._active_workers.pop(worker)
        self._remove_workers_from_tasks(delete_workers)
    def _remove_workers_from_tasks(self, workers, remove_stakeholders=True):
        # Detach the given worker ids from every task's workers (and optionally stakeholders) sets.
        for task in self.get_active_tasks():
            if remove_stakeholders:
                task.stakeholders.difference_update(workers)
            task.workers.difference_update(workers)
    def disable_workers(self, workers):
        """Stop handing work to *workers* while keeping them as stakeholders."""
        self._remove_workers_from_tasks(workers, remove_stakeholders=False)
        for worker in workers:
            self.get_worker(worker).disabled = True
    def get_necessary_tasks(self):
        """Ids of unfinished/auto-disabled tasks and their deps, which must not be pruned."""
        necessary_tasks = set()
        for task in self.get_active_tasks():
            if task.status not in (DONE, DISABLED) or \
                    getattr(task, 'scheduler_disable_time', None) is not None:
                necessary_tasks.update(task.deps)
                necessary_tasks.add(task.id)
        return necessary_tasks
class CentralPlannerScheduler(Scheduler):
"""
Async scheduler that can handle multiple workers, etc.
Can be run locally or on a server (using RemoteScheduler + server.Server).
"""
    def __init__(self, config=None, resources=None, task_history_impl=None, **kwargs):
        """
        Keyword Arguments:
        :param config: an object of class "scheduler" or None (in which the global instance will be used)
        :param resources: a dict of str->int constraints
        :param task_history_impl: ignore config and use this object as the task history
        """
        self._config = config or scheduler(**kwargs)
        self._state = SimpleTaskState(self._config.state_path)
        if task_history_impl:
            self._task_history = task_history_impl
        elif self._config.record_task_history:
            from luigi import db_task_history  # Needs sqlalchemy, thus imported here
            self._task_history = db_task_history.DbTaskHistory()
        else:
            self._task_history = history.NopHistory()
        self._resources = resources or configuration.get_config().getintdict('resources')  # TODO: Can we make this a Parameter?
        # Factory for Task objects pre-bound to the configured disable thresholds.
        self._make_task = functools.partial(
            Task, disable_failures=self._config.disable_failures,
            disable_hard_timeout=self._config.disable_hard_timeout,
            disable_window=self._config.disable_window)
        self._worker_requests = {}
    def load(self):
        """Restore scheduler state from the configured state file."""
        self._state.load()
    def dump(self):
        """Persist scheduler state to the configured state file."""
        self._state.dump()
def prune(self):
logger.info("Starting pruning of task graph")
remove_workers = []
for worker in self._state.get_active_workers():
if worker.prune(self._config):
logger.info("Worker %s timed out (no contact for >=%ss)", worker, self._config.worker_disconnect_delay)
remove_workers.append(worker.id)
self._state.inactivate_workers(remove_workers)
assistant_ids = set(w.id for w in self._state.get_assistants())
remove_tasks = []
if assistant_ids:
necessary_tasks = self._state.get_necessary_tasks()
else:
necessary_tasks = ()
for task in self._state.get_active_tasks():
self._state.fail_dead_worker_task(task, self._config, assistant_ids)
removed = self._state.prune(task, self._config)
if removed and task.id not in necessary_tasks:
remove_tasks.append(task.id)
self._state.inactivate_tasks(remove_tasks)
logger.info("Done pruning task graph")
def update(self, worker_id, worker_reference=None, get_work=False):
"""
Keep track of whenever the worker was last active.
"""
worker = self._state.get_worker(worker_id)
worker.update(worker_reference, get_work=get_work)
return not getattr(worker, 'disabled', False)
def _update_priority(self, task, prio, worker):
"""
Update priority of the given task.
Priority can only be increased.
If the task doesn't exist, a placeholder task is created to preserve priority when the task is later scheduled.
"""
task.priority = prio = max(prio, task.priority)
for dep in task.deps or []:
t = self._state.get_task(dep)
if t is not None and prio > t.priority:
self._update_priority(t, prio, worker)
    def add_task(self, task_id=None, status=PENDING, runnable=True,
                 deps=None, new_deps=None, expl=None, resources=None,
                 priority=0, family='', module=None, params=None,
                 assistant=False, tracking_url=None, **kwargs):
        """
        * add task identified by task_id if it doesn't exist
        * if deps is not None, update dependency list
        * update status of task
        * add additional workers/stakeholders
        * update priority when needed

        ``kwargs['worker']`` must carry the calling worker's id.
        """
        worker_id = kwargs['worker']
        worker_enabled = self.update(worker_id)
        if worker_enabled:
            _default_task = self._make_task(
                task_id=task_id, status=PENDING, deps=deps, resources=resources,
                priority=priority, family=family, module=module, params=params,
            )
        else:
            _default_task = None
        task = self._state.get_task(task_id, setdefault=_default_task)
        if task is None or (task.status != RUNNING and not worker_enabled):
            return
        # for setting priority, we'll sometimes create tasks with unset family and params
        if not task.family:
            task.family = family
        if not getattr(task, 'module', None):
            task.module = module
        if not task.params:
            task.params = _get_default(params, {})
        if tracking_url is not None or task.status != RUNNING:
            task.tracking_url = tracking_url
        if task.remove is not None:
            task.remove = None  # unmark task for removal so it isn't removed after being added
        if expl is not None:
            task.expl = expl
        if not (task.status == RUNNING and status == PENDING) or new_deps:
            # don't allow re-scheduling of task while it is running, it must either fail or succeed first
            if status == PENDING or status != task.status:
                # Update the DB only if there was an actual change, to prevent noise.
                # We also check for status == PENDING b/c that's the default value
                # (so checking for status != task.status would lie)
                self._update_task_history(task, status)
            self._state.set_status(task, PENDING if status == SUSPENDED else status, self._config)
            if status == FAILED:
                task.retry = self._retry_time(task, self._config)
        if deps is not None:
            task.deps = set(deps)
        if new_deps is not None:
            task.deps.update(new_deps)
        if resources is not None:
            task.resources = resources
        if worker_enabled and not assistant:
            task.stakeholders.add(worker_id)
            # Task dependencies might not exist yet. Let's create dummy tasks for them for now.
            # Otherwise the task dependencies might end up being pruned if scheduling takes a long time
            for dep in task.deps or []:
                t = self._state.get_task(dep, setdefault=self._make_task(task_id=dep, status=UNKNOWN, deps=None, priority=priority))
                t.stakeholders.add(worker_id)
        self._update_priority(task, priority, worker_id)
        if runnable and status != FAILED and worker_enabled:
            task.workers.add(worker_id)
            self._state.get_worker(worker_id).tasks.add(task)
            task.runnable = runnable
    def add_worker(self, worker, info, **kwargs):
        """Record metadata (host, pid, assistant flag, ...) reported by a worker."""
        self._state.get_worker(worker).add_info(info)
    def disable_worker(self, worker):
        """Stop scheduling new work to *worker* (it stays a stakeholder of its tasks)."""
        self._state.disable_workers({worker})
def update_resources(self, **resources):
if self._resources is None:
self._resources = {}
self._resources.update(resources)
def _has_resources(self, needed_resources, used_resources):
if needed_resources is None:
return True
available_resources = self._resources or {}
for resource, amount in six.iteritems(needed_resources):
if amount + used_resources[resource] > available_resources.get(resource, 1):
return False
return True
def _used_resources(self):
used_resources = collections.defaultdict(int)
if self._resources is not None:
for task in self._state.get_active_tasks(status=RUNNING):
if task.resources:
for resource, amount in six.iteritems(task.resources):
used_resources[resource] += amount
return used_resources
    def _rank(self, task):
        """
        Return worker's rank function for task scheduling.

        Higher priority wins; among equal priorities the negated timestamp
        makes earlier-added tasks sort higher.
        :return:
        """
        return task.priority, -task.time
def _schedulable(self, task):
if task.status != PENDING:
return False
for dep in task.deps:
dep_task = self._state.get_task(dep, default=None)
if dep_task is None or dep_task.status != DONE:
return False
return True
    def _retry_time(self, task, config):
        # Earliest wall-clock time at which a FAILED task may be retried.
        return time.time() + config.retry_delay
    def get_work(self, host=None, assistant=False, current_tasks=None, **kwargs):
        """
        Pick the best runnable task for the calling worker and mark it RUNNING.

        :return: dict with pending-task counts, currently running tasks, and the
            chosen ``task_id`` (None when nothing is schedulable right now).
        """
        # TODO: remove any expired nodes
        # Algo: iterate over all nodes, find the highest priority node no dependencies and available
        # resources.
        # Resource checking looks both at currently available resources and at which resources would
        # be available if all running tasks died and we rescheduled all workers greedily. We do both
        # checks in order to prevent a worker with many low-priority tasks from starving other
        # workers with higher priority tasks that share the same resources.
        # TODO: remove tasks that can't be done, figure out if the worker has absolutely
        # nothing it can wait for
        if self._config.prune_on_get_work:
            self.prune()
        worker_id = kwargs['worker']
        # Return remaining tasks that have no FAILED descendants
        self.update(worker_id, {'host': host}, get_work=True)
        if assistant:
            self.add_worker(worker_id, [('assistant', assistant)])
        best_task = None
        if current_tasks is not None:
            ct_set = set(current_tasks)
            # Prefer handing back a task this worker already owns but isn't reporting.
            for task in sorted(self._state.get_running_tasks(), key=self._rank):
                if task.worker_running == worker_id and task.id not in ct_set:
                    best_task = task
        locally_pending_tasks = 0
        running_tasks = []
        upstream_table = {}
        greedy_resources = collections.defaultdict(int)
        n_unique_pending = 0
        worker = self._state.get_worker(worker_id)
        if worker.is_trivial_worker(self._state):
            relevant_tasks = worker.get_pending_tasks(self._state)
            used_resources = collections.defaultdict(int)
            greedy_workers = dict()  # If there's no resources, then they can grab any task
        else:
            relevant_tasks = self._state.get_pending_tasks()
            used_resources = self._used_resources()
            # Only recently active workers take part in the greedy simulation.
            activity_limit = time.time() - self._config.worker_disconnect_delay
            active_workers = self._state.get_active_workers(last_get_work_gt=activity_limit)
            greedy_workers = dict((worker.id, worker.info.get('workers', 1))
                                  for worker in active_workers)
        tasks = list(relevant_tasks)
        tasks.sort(key=self._rank, reverse=True)
        for task in tasks:
            in_workers = (assistant and getattr(task, 'runnable', bool(task.workers))) or worker_id in task.workers
            if task.status == RUNNING and in_workers:
                # Return a list of currently running tasks to the client,
                # makes it easier to troubleshoot
                other_worker = self._state.get_worker(task.worker_running)
                more_info = {'task_id': task.id, 'worker': str(other_worker)}
                if other_worker is not None:
                    more_info.update(other_worker.info)
                    running_tasks.append(more_info)
            if task.status == PENDING and in_workers:
                upstream_status = self._upstream_status(task.id, upstream_table)
                if upstream_status != UPSTREAM_DISABLED:
                    locally_pending_tasks += 1
                    if len(task.workers) == 1 and not assistant:
                        n_unique_pending += 1
            if best_task:
                continue
            if task.status == RUNNING and (task.worker_running in greedy_workers):
                # Simulate the running task's owner being busy with it.
                greedy_workers[task.worker_running] -= 1
                for resource, amount in six.iteritems((task.resources or {})):
                    greedy_resources[resource] += amount
            if self._schedulable(task) and self._has_resources(task.resources, greedy_resources):
                if in_workers and self._has_resources(task.resources, used_resources):
                    best_task = task
                else:
                    workers = itertools.chain(task.workers, [worker_id]) if assistant else task.workers
                    for task_worker in workers:
                        if greedy_workers.get(task_worker, 0) > 0:
                            # use up a worker
                            greedy_workers[task_worker] -= 1
                            # keep track of the resources used in greedy scheduling
                            for resource, amount in six.iteritems((task.resources or {})):
                                greedy_resources[resource] += amount
                            break
        reply = {'n_pending_tasks': locally_pending_tasks,
                 'running_tasks': running_tasks,
                 'task_id': None,
                 'n_unique_pending': n_unique_pending}
        if best_task:
            self._state.set_status(best_task, RUNNING, self._config)
            best_task.worker_running = worker_id
            best_task.time_running = time.time()
            self._update_task_history(best_task, RUNNING, host=host)
            reply['task_id'] = best_task.id
            reply['task_family'] = best_task.family
            reply['task_module'] = getattr(best_task, 'module', None)
            reply['task_params'] = best_task.params
        return reply
    def ping(self, **kwargs):
        """Heartbeat endpoint: refresh last-active time for ``kwargs['worker']``."""
        worker_id = kwargs['worker']
        self.update(worker_id)
    def _upstream_status(self, task_id, upstream_status_table):
        """
        Compute the most severe upstream status for *task_id* using an
        iterative DFS with a postorder update, memoized in
        *upstream_status_table* (mutated in place).
        """
        if task_id in upstream_status_table:
            return upstream_status_table[task_id]
        elif self._state.has_task(task_id):
            task_stack = [task_id]
            while task_stack:
                dep_id = task_stack.pop()
                dep = self._state.get_task(dep_id)
                if dep:
                    if dep.status == DONE:
                        continue
                    if dep_id not in upstream_status_table:
                        if dep.status == PENDING and dep.deps:
                            # Revisit dep_id after its children (postorder).
                            task_stack += [dep_id] + list(dep.deps)
                            upstream_status_table[dep_id] = ''  # will be updated postorder
                        else:
                            dep_status = STATUS_TO_UPSTREAM_MAP.get(dep.status, '')
                            upstream_status_table[dep_id] = dep_status
                    elif upstream_status_table[dep_id] == '' and dep.deps:
                        # This is the postorder update step when we set the
                        # status based on the previously calculated child elements
                        status = max((upstream_status_table.get(a_task_id, '')
                                      for a_task_id in dep.deps),
                                     key=UPSTREAM_SEVERITY_KEY)
                        upstream_status_table[dep_id] = status
            return upstream_status_table[dep_id]
    def _serialize_task(self, task_id, include_deps=True, deps=None):
        """
        Build the JSON-friendly dict describing *task_id* for API replies.

        :param include_deps: include the 'deps' list in the result.
        :param deps: optional override for the dependency list when include_deps is set.
        """
        task = self._state.get_task(task_id)
        ret = {
            'display_name': task.pretty_id,
            'status': task.status,
            'workers': list(task.workers),
            'worker_running': task.worker_running,
            'time_running': getattr(task, "time_running", None),
            'start_time': task.time,
            'last_updated': getattr(task, "updated", task.time),
            'params': task.params,
            'name': task.family,
            'priority': task.priority,
            'resources': task.resources,
            'tracking_url': getattr(task, "tracking_url", None),
        }
        if task.status == DISABLED:
            # Only scheduler-imposed disables (not worker-requested ones) can be lifted.
            ret['re_enable_able'] = task.scheduler_disable_time is not None
        if include_deps:
            ret['deps'] = list(task.deps if deps is None else deps)
        return ret
def graph(self, **kwargs):
self.prune()
serialized = {}
seen = set()
for task in self._state.get_active_tasks():
serialized.update(self._traverse_graph(task.id, seen))
return serialized
def _filter_done(self, task_ids):
for task_id in task_ids:
task = self._state.get_task(task_id)
if task is None or task.status != DONE:
yield task_id
    def _traverse_graph(self, root_task_id, seen=None, dep_func=None, include_done=True):
        """ Returns the dependency graph rooted at task_id

        This does a breadth-first traversal to find the nodes closest to the
        root before hitting the scheduler.max_graph_nodes limit.

        :param root_task_id: the id of the graph's root
        :param seen: ids already visited (shared across calls; mutated in place)
        :param dep_func: returns the neighbor ids of a task (defaults to its deps)
        :param include_done: when False, DONE dependencies are filtered out
        :return: A map of task id to serialized node
        """
        if seen is None:
            seen = set()
        elif root_task_id in seen:
            return {}
        if dep_func is None:
            def dep_func(t):
                return t.deps
        seen.add(root_task_id)
        serialized = {}
        queue = collections.deque([root_task_id])
        while queue:
            task_id = queue.popleft()
            task = self._state.get_task(task_id)
            if task is None or not task.family:
                logger.warn('Missing task for id [%s]', task_id)
                # NOTE : If a dependency is missing from self._state there is no way to deduce the
                # task family and parameters.
                family_match = TASK_FAMILY_RE.match(task_id)
                family = family_match.group(1) if family_match else UNKNOWN
                params = {'task_id': task_id}
                serialized[task_id] = {
                    'deps': [],
                    'status': UNKNOWN,
                    'workers': [],
                    'start_time': UNKNOWN,
                    'params': params,
                    'name': family,
                    'display_name': task_id,
                    'priority': 0,
                }
            else:
                deps = dep_func(task)
                if not include_done:
                    deps = list(self._filter_done(deps))
                serialized[task_id] = self._serialize_task(task_id, deps=deps)
                for dep in sorted(deps):
                    if dep not in seen:
                        seen.add(dep)
                        queue.append(dep)
            if task_id != root_task_id:
                # Only the root keeps its display_name; trims the payload for big graphs.
                del serialized[task_id]['display_name']
            if len(serialized) >= self._config.max_graph_nodes:
                break
        return serialized
def dep_graph(self, task_id, include_done=True, **kwargs):
self.prune()
if not self._state.has_task(task_id):
return {}
return self._traverse_graph(task_id, include_done=include_done)
def inverse_dep_graph(self, task_id, include_done=True, **kwargs):
self.prune()
if not self._state.has_task(task_id):
return {}
inverse_graph = collections.defaultdict(set)
for task in self._state.get_active_tasks():
for dep in task.deps:
inverse_graph[dep].add(task.id)
return self._traverse_graph(
task_id, dep_func=lambda t: inverse_graph[t.id], include_done=include_done)
    def task_list(self, status, upstream_status, limit=True, search=None, **kwargs):
        """
        Query for a subset of tasks by status.

        :param upstream_status: additionally require this upstream status for PENDING tasks.
        :param limit: when True, cap the result at scheduler.max_shown_tasks.
        :param search: whitespace-separated terms that must all appear in a task's pretty id.
        """
        self.prune()
        result = {}
        upstream_status_table = {}  # used to memoize upstream status
        if search is None:
            def filter_func(_):
                return True
        else:
            terms = search.split()
            def filter_func(t):
                return all(term in t.pretty_id for term in terms)
        for task in filter(filter_func, self._state.get_active_tasks(status)):
            if (task.status != PENDING or not upstream_status or
                    upstream_status == self._upstream_status(task.id, upstream_status_table)):
                serialized = self._serialize_task(task.id, False)
                result[task.id] = serialized
        if limit and len(result) > self._config.max_shown_tasks:
            # Too many to render in the UI; report the count only.
            return {'num_tasks': len(result)}
        return result
def _first_task_display_name(self, worker):
task_id = worker.info.get('first_task', '')
if self._state.has_task(task_id):
return self._state.get_task(task_id).pretty_id
else:
return task_id
    def worker_list(self, include_running=True, **kwargs):
        """
        Serialize all active workers, most recently started first, optionally
        annotated with their running tasks and pending-task counts.
        """
        self.prune()
        workers = [
            dict(
                name=worker.id,
                last_active=worker.last_active,
                started=getattr(worker, 'started', None),
                first_task_display_name=self._first_task_display_name(worker),
                **worker.info
            ) for worker in self._state.get_active_workers()]
        workers.sort(key=lambda worker: worker['started'], reverse=True)
        if include_running:
            running = collections.defaultdict(dict)
            num_pending = collections.defaultdict(int)
            num_uniques = collections.defaultdict(int)
            for task in self._state.get_pending_tasks():
                if task.status == RUNNING and task.worker_running:
                    running[task.worker_running][task.id] = self._serialize_task(task.id, False)
                elif task.status == PENDING:
                    for worker in task.workers:
                        num_pending[worker] += 1
                    if len(task.workers) == 1:
                        # Task only this one worker can run.
                        num_uniques[list(task.workers)[0]] += 1
            for worker in workers:
                tasks = running[worker['name']]
                worker['num_running'] = len(tasks)
                worker['num_pending'] = num_pending[worker['name']]
                worker['num_uniques'] = num_uniques[worker['name']]
                worker['running'] = tasks
        return workers
def resource_list(self):
    """
    Resources usage info and their consumers (tasks).
    """
    self.prune()
    resources = []
    for name, r_dict in six.iteritems(self.resources()):
        resources.append(dict(
            name=name,
            num_total=r_dict['total'],
            num_used=r_dict['used']
        ))
    if self._resources is not None:
        # Map each resource to the running tasks currently consuming it.
        consumers = collections.defaultdict(dict)
        for task in self._state.get_running_tasks():
            if task.status == RUNNING and task.resources:
                for name, _amount in six.iteritems(task.resources):
                    consumers[name][task.id] = self._serialize_task(task.id, False)
        for entry in resources:
            consumer_tasks = consumers[entry['name']]
            entry['num_consumer'] = len(consumer_tasks)
            entry['running'] = consumer_tasks
    return resources
def resources(self):
    """Return ``{resource: {'total': ..., 'used': ...}}`` for all known resources.

    Resources without current consumers report ``'used': 0``.
    """
    used_resources = self._used_resources()
    ret = collections.defaultdict(dict)
    # ``dict.iteritems`` only exists on Python 2; use ``items()`` so this works
    # on Python 3 too (the rest of this class already uses six.iteritems).
    # Also guard against ``self._resources`` being None — resource_list() only
    # checks for None *after* calling this method.
    for resource, total in (self._resources or {}).items():
        ret[resource]['total'] = total
        ret[resource]['used'] = used_resources.get(resource, 0)
    return ret
def task_search(self, task_str, **kwargs):
    """
    Query for a subset of tasks by task_id.

    :param task_str: substring to look for in task ids
    :return: dict mapping status -> {task_id: serialized task}
    """
    self.prune()
    result = collections.defaultdict(dict)
    for task in self._state.get_active_tasks():
        if task_str in task.id:
            result[task.status][task.id] = self._serialize_task(task.id, False)
    return result
def re_enable_task(self, task_id):
    """Re-enable a scheduler-disabled task; return its serialization ({} if not applicable)."""
    task = self._state.get_task(task_id)
    if not (task and task.status == DISABLED and task.scheduler_disable_time):
        return {}
    self._state.re_enable(task, self._config)
    return self._serialize_task(task_id)
def fetch_error(self, task_id, **kwargs):
    """Return the stored error explanation for ``task_id`` ('' when unknown)."""
    if not self._state.has_task(task_id):
        return {"taskId": task_id, "error": ""}
    task = self._state.get_task(task_id)
    return {"taskId": task_id, "error": task.expl, 'displayName': task.pretty_id}
def _update_task_history(self, task, status, host=None):
    """Record a status transition in the task-history backend.

    History errors are logged and swallowed so they never break scheduling.
    """
    try:
        if status in (DONE, FAILED):
            self._task_history.task_finished(task, status == DONE)
        elif status == PENDING:
            self._task_history.task_scheduled(task)
        elif status == RUNNING:
            self._task_history.task_started(task, host)
    except BaseException:
        logger.warning("Error saving Task history", exc_info=True)
@property
def task_history(self):
    """Read-only accessor for the task-history backend."""
    # Used by server.py to expose the calls
    return self._task_history
| |
# Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test making many calls and immediately cancelling most of them."""
import threading
import unittest
from grpc._cython import cygrpc
from grpc.framework.foundation import logging_pool
from tests.unit.framework.common import test_constants
from tests.unit._cython import test_utilities
# No flags and no metadata are attached to any operation in this test.
_EMPTY_FLAGS = 0
_EMPTY_METADATA = ()
# Completion-queue tags used to correlate events with the batches that produced them.
_SERVER_SHUTDOWN_TAG = 'server_shutdown'
_REQUEST_CALL_TAG = 'request_call'
_RECEIVE_CLOSE_ON_SERVER_TAG = 'receive_close_on_server'
_RECEIVE_MESSAGE_TAG = 'receive_message'
_SERVER_COMPLETE_CALL_TAG = 'server_complete_call'
# One eighth of the concurrent RPCs are allowed to complete; the rest get cancelled.
_SUCCESS_CALL_FRACTION = 1.0 / 8.0
_SUCCESSFUL_CALLS = int(test_constants.RPC_CONCURRENCY * _SUCCESS_CALL_FRACTION)
_UNSUCCESSFUL_CALLS = test_constants.RPC_CONCURRENCY - _SUCCESSFUL_CALLS
class _State(object):
    """Synchronization state shared between the serving thread and handler threads."""
    def __init__(self):
        self.condition = threading.Condition()
        # True once the test has released all parked handler threads at once.
        self.handlers_released = False
        # Number of handler threads currently parked on ``condition``.
        self.parked_handlers = 0
        # Number of RPCs the server thread has dispatched to handlers.
        self.handled_rpcs = 0
def _is_cancellation_event(event):
    """Return True if ``event`` reports that the client cancelled the call."""
    if event.tag is not _RECEIVE_CLOSE_ON_SERVER_TAG:
        return False
    return event.batch_operations[0].cancelled()
class _Handler(object):
    """Server-side handler for one RPC.

    Parks its thread until the test releases all handlers at once (to maximize
    cancellation races), then either observes the cancellation or completes
    the call normally.
    """

    def __init__(self, state, completion_queue, rpc_event):
        self._state = state
        self._lock = threading.Lock()
        self._completion_queue = completion_queue
        self._call = rpc_event.call

    def __call__(self):
        # Park until every handler thread exists and the test flips
        # ``handlers_released``; the last thread to park wakes the test.
        with self._state.condition:
            self._state.parked_handlers += 1
            if self._state.parked_handlers == test_constants.THREAD_CONCURRENCY:
                self._state.condition.notify_all()
            while not self._state.handlers_released:
                self._state.condition.wait()

        # Start two receive batches; their completions arrive on our private queue.
        with self._lock:
            self._call.start_server_batch(
                (cygrpc.ReceiveCloseOnServerOperation(_EMPTY_FLAGS),),
                _RECEIVE_CLOSE_ON_SERVER_TAG)
            self._call.start_server_batch(
                (cygrpc.ReceiveMessageOperation(_EMPTY_FLAGS),),
                _RECEIVE_MESSAGE_TAG)
        first_event = self._completion_queue.poll()
        if _is_cancellation_event(first_event):
            # Call was cancelled: drain the one remaining event from the other batch.
            self._completion_queue.poll()
        else:
            # Call survived: send a full response and drain the two remaining events.
            with self._lock:
                operations = (
                    cygrpc.SendInitialMetadataOperation(_EMPTY_METADATA,
                                                        _EMPTY_FLAGS),
                    cygrpc.SendMessageOperation(b'\x79\x57', _EMPTY_FLAGS),
                    cygrpc.SendStatusFromServerOperation(
                        _EMPTY_METADATA, cygrpc.StatusCode.ok, b'test details!',
                        _EMPTY_FLAGS),
                )
                self._call.start_server_batch(operations,
                                              _SERVER_COMPLETE_CALL_TAG)
            self._completion_queue.poll()
            self._completion_queue.poll()
def _serve(state, server, server_completion_queue, thread_pool):
    """Server thread: accept RPC_CONCURRENCY calls, handing each to a pooled _Handler."""
    for _ in range(test_constants.RPC_CONCURRENCY):
        # Each call gets its own completion queue so its handler can poll independently.
        call_completion_queue = cygrpc.CompletionQueue()
        server.request_call(call_completion_queue, server_completion_queue,
                            _REQUEST_CALL_TAG)
        rpc_event = server_completion_queue.poll()
        thread_pool.submit(_Handler(state, call_completion_queue, rpc_event))
        with state.condition:
            state.handled_rpcs += 1
            if test_constants.RPC_CONCURRENCY <= state.handled_rpcs:
                state.condition.notify_all()
    # Block until the shutdown tag enqueued by the test's server.shutdown(...) arrives.
    server_completion_queue.poll()
class _QueueDriver(object):
    """Drains a completion queue on a background thread until every expected tag is seen."""

    def __init__(self, condition, completion_queue, due):
        self._condition = condition
        self._completion_queue = completion_queue
        self._due = due
        self._events = []
        self._returned = False

    def start(self):
        def drain():
            while True:
                event = self._completion_queue.poll()
                with self._condition:
                    self._events.append(event)
                    self._due.remove(event.tag)
                    self._condition.notify_all()
                    if self._due:
                        continue
                    self._returned = True
                    return

        threading.Thread(target=drain).start()

    def events(self, at_least):
        """Block until at least ``at_least`` events arrived; return all of them."""
        with self._condition:
            while len(self._events) < at_least:
                self._condition.wait()
            return tuple(self._events)
class CancelManyCallsTest(unittest.TestCase):

    def testCancelManyCalls(self):
        """Start many concurrent RPCs, cancel most of them, and shut down cleanly."""
        server_thread_pool = logging_pool.pool(
            test_constants.THREAD_CONCURRENCY)

        server_completion_queue = cygrpc.CompletionQueue()
        # so_reuseport disabled so the ephemeral port is unique to this server.
        server = cygrpc.Server([(
            b'grpc.so_reuseport',
            0,
        )], False)
        server.register_completion_queue(server_completion_queue)
        port = server.add_http2_port(b'[::]:0')
        server.start()
        channel = cygrpc.Channel('localhost:{}'.format(port).encode(), None,
                                 None)

        state = _State()

        server_thread_args = (
            state,
            server,
            server_completion_queue,
            server_thread_pool,
        )
        server_thread = threading.Thread(target=_serve, args=server_thread_args)
        server_thread.start()

        client_condition = threading.Condition()
        client_due = set()

        # Issue all client calls up front, each with a unique completion tag.
        with client_condition:
            client_calls = []
            for index in range(test_constants.RPC_CONCURRENCY):
                tag = 'client_complete_call_{0:04d}_tag'.format(index)
                client_call = channel.integrated_call(
                    _EMPTY_FLAGS, b'/twinkies', None, None, _EMPTY_METADATA,
                    None, ((
                        (
                            cygrpc.SendInitialMetadataOperation(
                                _EMPTY_METADATA, _EMPTY_FLAGS),
                            cygrpc.SendMessageOperation(b'\x45\x56',
                                                        _EMPTY_FLAGS),
                            cygrpc.SendCloseFromClientOperation(_EMPTY_FLAGS),
                            cygrpc.ReceiveInitialMetadataOperation(
                                _EMPTY_FLAGS),
                            cygrpc.ReceiveMessageOperation(_EMPTY_FLAGS),
                            cygrpc.ReceiveStatusOnClientOperation(_EMPTY_FLAGS),
                        ),
                        tag,
                    ),))
                client_due.add(tag)
                client_calls.append(client_call)

        # Collect the events for the fraction of calls expected to succeed.
        client_events_future = test_utilities.SimpleFuture(lambda: tuple(
            channel.next_call_event() for _ in range(_SUCCESSFUL_CALLS)))

        # Wait until every handler thread is parked and every RPC dispatched,
        # then release all handlers simultaneously.
        with state.condition:
            while True:
                if state.parked_handlers < test_constants.THREAD_CONCURRENCY:
                    state.condition.wait()
                elif state.handled_rpcs < test_constants.RPC_CONCURRENCY:
                    state.condition.wait()
                else:
                    state.handlers_released = True
                    state.condition.notify_all()
                    break

        client_events_future.result()
        # Cancel every call (already-completed calls ignore the cancellation).
        with client_condition:
            for client_call in client_calls:
                client_call.cancel(cygrpc.StatusCode.cancelled, 'Cancelled!')
        for _ in range(_UNSUCCESSFUL_CALLS):
            channel.next_call_event()

        channel.close(cygrpc.StatusCode.unknown, 'Cancelled on channel close!')
        with state.condition:
            server.shutdown(server_completion_queue, _SERVER_SHUTDOWN_TAG)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main(verbosity=2)
| |
"""Implements the command line interface.
"""
from __future__ import absolute_import
from __future__ import unicode_literals
import sys
from os import path
import argparse
# Fail fast on argparse versions too old to support what we need below.
if hasattr(argparse, '__version__') and argparse.__version__ < '1.1':
    raise RuntimeError('Needs at least argparse 1.1 to function, you are '+
                       'using: %s' % argparse.__version__)
# Resist the temptation to use "*". It won't work on Python 2.5.
from .commands import InitCommand, ExportCommand, ImportCommand, CommandError
from .env import IncompleteEnvironment, EnvironmentError, Environment, Language
from .config import Config
from .utils import Writer
__all__ = ('main', 'run',)
# Maps each sub-command name on the command line to its implementation class.
COMMANDS = {
    'init': InitCommand,
    'export': ExportCommand,
    'import': ImportCommand,
}
def parse_args(argv):
    """Builds an argument parser based on all commands and configuration
    values that we support.
    """
    from . import get_version

    parser = argparse.ArgumentParser(
        add_help=True,
        description='Convert Android string resources to gettext .po '
                    'files, an import them back.',
        epilog='Written by: Michael Elsdoerfer <michael@elsdoerfer.com>')
    parser.add_argument('--version', action='version', version=get_version())

    # Arguments shared by every command.
    base_parser = argparse.ArgumentParser(add_help=False)
    verbosity = base_parser.add_mutually_exclusive_group()
    verbosity.add_argument('--verbose', '-v', action='store_true',
                           help='be extra verbose')
    verbosity.add_argument('--quiet', '-q', action='store_true',
                           help='be extra quiet')
    base_parser.add_argument('--config', '-c', metavar='FILE',
                             help='config file to use')
    # Arguments that set/override configuration values.
    config_group = base_parser.add_argument_group(
        'configuration',
        'Those can also be specified in a configuration file. If given '
        'here, values from the configuration file will be overwritten.')
    Config.setup_arguments(config_group)

    # One sub-parser per command, each inheriting the base arguments.
    subparsers = parser.add_subparsers(dest="command", title='commands',
                                       description='valid commands',
                                       help='additional help')
    for name, cmdclass in list(COMMANDS.items()):
        cmd_parser = subparsers.add_parser(name, parents=[base_parser], add_help=True)
        command_group = cmd_parser.add_argument_group('command arguments')
        cmdclass.setup_arg_parser(command_group)

    return parser.parse_args(argv[1:])
def read_config(file):
    """Read the config file in ``file``.

    ``file`` may either be a file object, or a filename.

    The config file currently is simply a file with command line options,
    each option on a separate line.

    Just for reference purposes, the following ticket should be noted,
    which intends to extend argparse with support for configuration files:
    http://code.google.com/p/argparse/issues/detail?id=35
    Note however that the current patch doesn't seem to provide an easy
    way to make paths in the config relative to the config file location,
    as we currently need.

    :raises CommandError: if the file contains unsupported options.
    """
    if hasattr(file, 'read'):
        lines = file.readlines()
        filename = getattr(file, 'name', None)
    else:
        # Open the config file and read the arguments. Text mode is required:
        # the previous 'rb' mode produced ``bytes`` on Python 3 and broke the
        # string join below with a TypeError.
        filename = file
        with open(file, 'r') as f:
            lines = f.readlines()

    # Drop comment lines, join the rest, split on spaces and discard the
    # empty strings left over by consecutive separators/newlines.
    joined = " ".join(line for line in lines if not line.strip().startswith('#'))
    args = [piece for piece in (item.strip() for item in joined.split(" ")) if piece]

    # Use a parser that specifically only supports those options that
    # we want to support within a config file (as opposed to all the
    # options available through the command line interface).
    parser = argparse.ArgumentParser(add_help=False)
    Config.setup_arguments(parser)
    config, unprocessed = parser.parse_known_args(args)
    if unprocessed:
        raise CommandError("unsupported config values: %s" % ' '.join(unprocessed))

    # Post process the config: Paths in the config file should be relative
    # to the config location, not the current working directory.
    if filename:
        Config.rebase_paths(config, path.dirname(filename))

    return config
def make_env_and_writer(argv):
    """Given the command line arguments in ``argv``, construct an
    environment.

    This entails everything from parsing the command line, parsing
    a config file, if there is one, merging the two etc.

    Returns a 2-tuple (``Environment`` instance, ``Writer`` instance).

    Raises ``CommandError`` when the environment cannot be established.
    """
    # Parse the command line arguments first. This is helpful in
    # that any potential syntax errors there will cause us to
    # fail before doing anything else.
    options = parse_args(argv)

    # Setup the writer verbosity threshold based on the options.
    writer = Writer()
    if options.verbose:
        writer.verbosity = 3
    elif options.quiet:
        writer.verbosity = 1
    else:
        writer.verbosity = 2

    env = Environment(writer)

    # Try to load a config file, either if given at the command line,
    # or the one that was automatically found. Note that even if a
    # config file is used, using the default paths is still supported.
    # That is, you can provide some extra configuration values
    # through a file, potentially shared across multiple projects, and
    # still rely on simply calling the script inside a default
    # project's directory hierarchy.
    config_file = None
    if options.config:
        config_file = options.config
        env.config_file = config_file
    elif env.config_file:
        config_file = env.config_file
        writer.action('info', "Using auto-detected config file: %s" % config_file)
    if config_file:
        env.pop_from_config(read_config(config_file))

    # Now that we have applied the config file, also apply the command
    # line options. Those will thus override the config values.
    env.pop_from_options(options)

    # Some paths, if we still don't have values for them, can be deducted
    # from the project directory.
    env.auto_paths()
    if env.auto_gettext_dir or env.auto_resource_dir:
        # Let the user know we are deducting information from the
        # project that we found.
        writer.action('info',
                      "Assuming default directory structure in %s" % env.project_dir)

    # Initialize the environment. This mainly loads the list of
    # languages, but also does some basic validation.
    try:
        env.init()
    except IncompleteEnvironment:
        if not env.project_dir:
            if not env.config_file:
                raise CommandError('You need to run this from inside an '
                    'Android project directory, or specify the source and '
                    'target directories manually, either as command line '
                    'options, or through a configuration file')
            else:
                raise CommandError('Your configuration file does not specify '
                    'the source and target directory, and you are not running '
                    'the script from inside an Android project directory.')
    except EnvironmentError as e:
        raise CommandError(e)

    # We're done. Just print some info out for the user.
    writer.action('info',
                  "Using as Android resource dir: %s" % env.resource_dir)
    writer.action('info', "Using as gettext dir: %s" % env.gettext_dir)

    return env, writer
def main(argv):
    """The program.

    Returns an error code or None.
    """
    try:
        # Build an environment from the list of arguments.
        env, writer = make_env_and_writer(argv)
        try:
            command = COMMANDS[env.options.command](env, writer)
            command.execute()
        finally:
            # Always flush/close the writer, even if the command failed.
            writer.finish()
        return 1 if writer.erroneous else 0
    except CommandError as e:
        print('Error:', e)
        return 2
def run():
    """Simplified interface to main()."""
    status = main(sys.argv)
    sys.exit(status or 0)
| |
from kivy.app import App
from kivy.factory import Factory
from kivy.properties import ObjectProperty
from kivy.lang import Builder
from electrum_cesc.util import base_units
from electrum_cesc.i18n import languages
from electrum_cesc_gui.kivy.i18n import _
from electrum_cesc.plugins import run_hook
from electrum_cesc.bitcoin import RECOMMENDED_FEE
from electrum_cesc import coinchooser
from choice_dialog import ChoiceDialog
Builder.load_string('''
#:import partial functools.partial
#:import _ electrum_cesc_gui.kivy.i18n._
<SettingsItem@ButtonBehavior+BoxLayout>
orientation: 'vertical'
title: ''
description: ''
size_hint: 1, None
height: '60dp'
canvas.before:
Color:
rgba: (0.192, .498, 0.745, 1) if self.state == 'down' else (0.3, 0.3, 0.3, 0)
Rectangle:
size: self.size
pos: self.pos
on_release:
Clock.schedule_once(self.action)
Widget
TopLabel:
id: title
text: self.parent.title
bold: True
halign: 'left'
TopLabel:
text: self.parent.description
color: 0.8, 0.8, 0.8, 1
halign: 'left'
Widget
<SettingsDialog@Popup>
id: settings
title: _('Electrum Settings')
disable_pin: False
use_encryption: False
BoxLayout:
orientation: 'vertical'
ScrollView:
GridLayout:
id: scrollviewlayout
cols:1
size_hint: 1, None
height: self.minimum_height
padding: '10dp'
SettingsItem:
lang: settings.get_language_name()
title: 'Language' + ': ' + str(self.lang)
description: _('Language')
action: partial(root.language_dialog, self)
CardSeparator
SettingsItem:
status: '' if root.disable_pin else ('ON' if root.use_encryption else 'OFF')
disabled: root.disable_pin
title: _('PIN code') + ': ' + self.status
description: _("Change your PIN code.")
action: partial(root.change_password, self)
CardSeparator
SettingsItem:
bu: app.base_unit
title: _('Denomination') + ': ' + self.bu
description: _("Base unit for Cryptoescudo amounts.")
action: partial(root.unit_dialog, self)
CardSeparator
SettingsItem:
status: root.fee_status()
title: _('Fees') + ': ' + self.status
description: _("Fees paid to the Cryptoescudo miners.")
action: partial(root.fee_dialog, self)
CardSeparator
SettingsItem:
status: root.fx_status()
title: _('Fiat Currency') + ': ' + self.status
description: _("Display amounts in fiat currency.")
action: partial(root.fx_dialog, self)
CardSeparator
SettingsItem:
status: root.network_status()
title: _('Network') + ': ' + self.status
description: _("Network status and server selection.")
action: partial(root.network_dialog, self)
CardSeparator
SettingsItem:
status: 'ON' if bool(app.plugins.get('labels')) else 'OFF'
title: _('Labels Sync') + ': ' + self.status
description: _("Save and synchronize your labels.")
action: partial(root.plugin_dialog, 'labels', self)
CardSeparator
SettingsItem:
status: root.rbf_status()
title: _('Replace-by-fee') + ': ' + self.status
description: _("Create replaceable transactions.")
action: partial(root.rbf_dialog, self)
CardSeparator
SettingsItem:
status: root.coinselect_status()
title: _('Coin selection') + ': ' + self.status
description: "Coin selection method"
action: partial(root.coinselect_dialog, self)
''')
class SettingsDialog(Factory.Popup):
    """Main settings popup; each SettingsItem row opens a (lazily created,
    cached) sub-dialog."""

    def __init__(self, app):
        self.app = app
        self.plugins = self.app.plugins
        self.config = self.app.electrum_config
        Factory.Popup.__init__(self)
        layout = self.ids.scrollviewlayout
        layout.bind(minimum_height=layout.setter('height'))
        # cached dialogs, created on first use
        self._fx_dialog = None
        self._fee_dialog = None
        self._rbf_dialog = None
        self._network_dialog = None
        self._language_dialog = None
        self._unit_dialog = None
        self._coinselect_dialog = None

    def update(self):
        """Refresh wallet-dependent state (PIN availability / encryption flag)."""
        self.wallet = self.app.wallet
        self.disable_pin = self.wallet.is_watching_only() if self.wallet else True
        self.use_encryption = self.wallet.use_encryption if self.wallet else False

    def get_language_name(self):
        """Human-readable name of the currently configured language."""
        return languages.get(self.config.get('language', 'en_UK'), '')

    def change_password(self, item, dt):
        self.app.change_password(self.update)

    def language_dialog(self, item, dt):
        if self._language_dialog is None:
            l = self.config.get('language', 'en_UK')
            def cb(key):
                self.config.set_key("language", key, True)
                item.lang = self.get_language_name()
                self.app.language = key
            self._language_dialog = ChoiceDialog(_('Language'), languages, l, cb)
        self._language_dialog.open()

    def unit_dialog(self, item, dt):
        if self._unit_dialog is None:
            def cb(text):
                self.app._set_bu(text)
                item.bu = self.app.base_unit
            self._unit_dialog = ChoiceDialog(_('Denomination'), base_units.keys(),
                                             self.app.base_unit, cb)
        self._unit_dialog.open()

    def coinselect_status(self):
        return coinchooser.get_name(self.app.electrum_config)

    def coinselect_dialog(self, item, dt):
        if self._coinselect_dialog is None:
            choosers = sorted(coinchooser.COIN_CHOOSERS.keys())
            chooser_name = coinchooser.get_name(self.config)
            def cb(text):
                self.config.set_key('coin_chooser', text)
                item.status = text
            self._coinselect_dialog = ChoiceDialog(_('Coin selection'), choosers,
                                                   chooser_name, cb)
        self._coinselect_dialog.open()

    def network_dialog(self, item, dt):
        if self._network_dialog is None:
            server, port, protocol, proxy, auto_connect = self.app.network.get_parameters()
            def cb(popup):
                server = popup.ids.host.text
                auto_connect = popup.ids.auto_connect.active
                self.app.network.set_parameters(server, port, protocol, proxy, auto_connect)
                item.status = self.network_status()
            popup = Builder.load_file('gui/kivy/uix/ui_screens/network.kv')
            popup.ids.host.text = server
            popup.ids.auto_connect.active = auto_connect
            # Apply the edited parameters when the popup is dismissed.
            popup.on_dismiss = lambda: cb(popup)
            self._network_dialog = popup
        self._network_dialog.open()

    def network_status(self):
        server, port, protocol, proxy, auto_connect = self.app.network.get_parameters()
        return 'auto-connect' if auto_connect else server

    def plugin_dialog(self, name, label, dt):
        from checkbox_dialog import CheckBoxDialog
        def callback(status):
            if status:
                self.plugins.enable(name)
            else:
                self.plugins.disable(name)
            label.status = 'ON' if status else 'OFF'
        status = bool(self.plugins.get(name))
        dd = self.plugins.descriptions.get(name)
        descr = dd.get('description')
        fullname = dd.get('fullname')
        d = CheckBoxDialog(fullname, descr, status, callback)
        d.open()

    def fee_status(self):
        if self.config.get('dynamic_fees', True):
            # Fixed: import from the electrum_cesc fork. The previous
            # 'from electrum.util import ...' referenced the upstream package,
            # which is not a dependency of this module and raised ImportError.
            from electrum_cesc.util import fee_levels
            return fee_levels[self.config.get('fee_level', 2)]
        else:
            F = self.config.get('fee_per_kb', RECOMMENDED_FEE)
            return self.app.format_amount_and_units(F) + '/kB'

    def fee_dialog(self, label, dt):
        if self._fee_dialog is None:
            from fee_dialog import FeeDialog
            def cb():
                label.status = self.fee_status()
            self._fee_dialog = FeeDialog(self.app, self.config, cb)
        self._fee_dialog.open()

    def rbf_status(self):
        return 'ON' if self.config.get('use_rbf') else 'OFF'

    def rbf_dialog(self, label, dt):
        if self._rbf_dialog is None:
            from checkbox_dialog import CheckBoxDialog
            def cb(x):
                self.config.set_key('use_rbf', x, True)
                label.status = self.rbf_status()
            msg = [_('If you check this box, your transactions will be marked as non-final,'),
                   _('and you will have the possiblity, while they are unconfirmed, to replace them with transactions that pays higher fees.'),
                   _('Note that some merchants do not accept non-final transactions until they are confirmed.')]
            fullname = _('Replace by fee')
            self._rbf_dialog = CheckBoxDialog(fullname, ' '.join(msg),
                                              self.config.get('use_rbf', False), cb)
        self._rbf_dialog.open()

    def fx_status(self):
        p = self.plugins.get('exchange_rate')
        if p:
            source = p.exchange.name()
            ccy = p.get_currency()
            return '%s [%s]' % (ccy, source)
        else:
            return 'Disabled'

    def fx_dialog(self, label, dt):
        if self._fx_dialog is None:
            from fx_dialog import FxDialog
            def cb():
                label.status = self.fx_status()
            self._fx_dialog = FxDialog(self.app, self.plugins, self.config, cb)
        self._fx_dialog.open()
| |
from flask import Flask, Response, render_template, request
import json
from subprocess import Popen, PIPE
import os
from tempfile import mkdtemp
from werkzeug import secure_filename
app = Flask(__name__)
@app.route("/")
def index():
    """Landing page: plain-text summary of the available API endpoints."""
    return """
    Available API endpoints:
    GET /containers                     List all containers
    GET /containers?state=running       List running containers (only)
    GET /containers/<id>                Inspect a specific container
    GET /containers/<id>/logs           Dump specific container logs
    GET /images                         List all images
    POST /images                        Create a new image
    POST /containers                    Create a new container
    PATCH /containers/<id>              Change a container's state
    PATCH /images/<id>                  Change a specific image's attributes
    DELETE /containers/<id>             Delete a specific container
    DELETE /containers                  Delete all containers (including running)
    DELETE /images/<id>                 Delete a specific image
    DELETE /images                      Delete all images
    """
@app.route('/containers', methods=['GET'])
def containers_index():
    """
    List all containers

    curl -s -X GET -H 'Accept: application/json' http://localhost:8080/containers | python -mjson.tool
    curl -s -X GET -H 'Accept: application/json' http://localhost:8080/containers?state=running | python -mjson.tool
    """
    # '?state=running' narrows the listing to running containers only.
    ps_args = ('ps',) if request.args.get('state') == 'running' else ('ps', '-a')
    output = docker(*ps_args)
    return Response(response=json.dumps(docker_ps_to_array(output)),
                    mimetype="application/json")
@app.route('/images', methods=['GET'])
def images_index():
    """
    List all images
    """
    listing = docker_images_to_array(docker('images'))
    return Response(response=json.dumps(listing), mimetype="application/json")
@app.route('/containers/<id>', methods=['GET'])
def containers_show(id):
    """
    Inspect specific container
    """
    inspect_output = docker('inspect', id)
    return Response(response=json.dumps(inspect_output),
                    mimetype="application/json")
@app.route('/containers/<id>/logs', methods=['GET'])
def containers_log(id):
    """
    Dump specific container logs
    """
    log_output = docker('logs', id)
    payload = docker_logs_to_object(id, log_output)
    return Response(response=json.dumps(payload), mimetype="application/json")
@app.route('/images/<id>', methods=['DELETE'])
def images_remove(id):
    """
    curl -s -X DELETE -H 'Accept: application/json' http://localhost:8080/images/<id> | python -mjson.tool
    """
    docker('rmi', id)
    return Response(response='{"id": "%s"}' % id, mimetype="application/json")
@app.route('/containers/<id>', methods=['DELETE'])
def containers_remove(id):
    """
    Delete a specific container, stopping it first when '?state=running'.

    curl -s -X DELETE 'Accept: application/json' http://localhost:8080/containers/<id> | python -mjson.tool
    """
    if request.args.get('state') == 'running':
        # Fixed: 'stop' was previously an undefined bare name (docker(stop, id)),
        # which raised a NameError instead of stopping the container.
        docker('stop', id)
    docker('rm', id)
    resp = '{"id":"%s"}' % id
    return Response(response=resp, mimetype="application/json")
@app.route('/containers', methods=['DELETE'])
def containers_remove_all():
    """
    Force remove all containers - dangerous!
    """
    containers = docker_ps_to_array(docker('ps', '-a'))
    for container in containers:
        docker('rm', container['id'])
    # Fixed format: the old '%id' rendered e.g. '{"count":3d}', which is not
    # valid JSON ('%i' consumed the int and the 'd' was emitted literally).
    resp = '{"count": %d}' % len(containers)
    return Response(response=resp, mimetype="application/json")
@app.route('/images', methods=['DELETE'])
def images_remove_all():
    """
    Force remove all images - dangerous!
    """
    # Fixed typo: 'docker_imags_to_array' was an undefined name (NameError).
    images = docker_images_to_array(docker('images'))
    for image in images:
        docker('rmi', image['id'])
    # Fixed format: old value was '{"count: %id"}' — quotes spanned the whole
    # object and '%id' emitted a trailing 'd'; both made the JSON invalid.
    resp = '{"count": %d}' % len(images)
    return Response(response=resp, mimetype="application/json")
@app.route('/containers', methods=['POST'])
def containers_create():
    """
    Create container (from existing image using id or name)

    curl -X POST -H 'Content-Type: application/json' http://localhost:8080/containers -d '{"image": "my-app"}'
    curl -X POST -H 'Content-Type: application/json' http://localhost:8080/containers -d '{"image": "b14752a6590e"}'
    curl -X POST -H 'Content-Type: application/json' http://localhost:8080/containers -d '{"image": "b14752a6590e","publish":"8081:22"}'
    """
    body = request.get_json(force=True)
    image = body['image']
    args = ('run', '-d')
    # 'publish' is optional; the previous bare 'except: pass' hid every other
    # error as well, so look the key up explicitly instead.
    publish = body.get('publish')
    if publish is not None:
        args = args + ('-p', publish)
    # docker prints the full container id; keep the conventional 12-char prefix.
    id = docker(*(args + (image,)))[0:12]
    return Response(response='{"id": "%s"}' % id, mimetype="application/json")
@app.route('/images', methods=['POST'])
def images_create():
    """
    Create image (from uploaded Dockerfile)
    curl -H 'Accept: application/json' -F file=@Dockerfile http://localhost:8080/images
    """
    # NOTE(review): unimplemented stub — the uploaded Dockerfile is fetched
    # but never built, and an empty body is returned.
    dockerfile = request.files['file']
    resp = ''
    return Response(response=resp, mimetype="application/json")
@app.route('/containers/<id>', methods=['PATCH'])
def containers_update(id):
    """
    Update container attributes (support: state=running|stopped)
    curl -X PATCH -H 'Content-Type: application/json' http://localhost:8080/containers/b6cd8ea512c8 -d '{"state": "running"}'
    curl -X PATCH -H 'Content-Type: application/json' http://localhost:8080/containers/b6cd8ea512c8 -d '{"state": "stopped"}'
    """
    body = request.get_json(force=True)
    # Only a missing 'state' key is expected and ignorable; the previous bare
    # 'except: pass' also swallowed genuine errors from the docker calls.
    state = body.get('state')
    if state == 'running':
        docker('restart', id)
    elif state == 'stopped':
        docker('stop', id)
    resp = '{"id": "%s"}' % id
    return Response(response=resp, mimetype="application/json")
@app.route('/images/<id>', methods=['PATCH'])
def images_update(id):
    """
    Update image attributes (support: name[:tag]) tag name should be lowercase only
    curl -s -X PATCH -H 'Content-Type: application/json' http://localhost:8080/images/7f2619ed1768 -d '{"tag": "test:1.0"}'
    """
    # NOTE(review): unimplemented stub — the request body is ignored and an
    # empty body is returned.
    resp = ''
    return Response(response=resp, mimetype="application/json")
def docker(*args):
    """Run the docker CLI with ``args`` and return its combined stderr + stdout."""
    cmd = ['docker'] + list(args)
    process = Popen(cmd, stdout=PIPE, stderr=PIPE)
    stdout, stderr = process.communicate()
    if stderr.startswith('Error'):
        # Parenthesized single-argument form works as a statement on Python 2
        # and as a function call on Python 3 (old code used the py2-only
        # 'print x' statement form).
        print('Error: {0} -> {1}'.format(' '.join(cmd), stderr))
    return stderr + stdout
#
# Docker output parsing helpers
#
#
# Parses the output of a Docker PS command to a python List
#
def docker_ps_to_array(output):
    """Parse `docker ps` output into a list of dicts with id/image/name/ports."""
    containers = []
    # Skip the header row, split each remaining row on whitespace.
    for columns in (line.split() for line in output.splitlines()[1:]):
        containers.append({
            'id': columns[0],
            'image': columns[1],
            'name': columns[-1],
            'ports': columns[-2],
        })
    return containers
#
# Parses the output of a Docker logs command to a python Dictionary
# (Key Value Pair object)
def docker_logs_to_object(id, output):
    """Wrap raw container log output as {'id': ..., 'logs': [line, ...]}."""
    return {'id': id, 'logs': output.splitlines()}
#
# Parses the output of a Docker image command to a python List
#
def docker_images_to_array(output):
    """Parse `docker images` output into a list of dicts with id/tag/name."""
    return [
        {'id': cols[2], 'tag': cols[1], 'name': cols[0]}
        for cols in (line.split() for line in output.splitlines()[1:])
    ]
# Bind to all interfaces so the API is reachable from outside the host/container.
if __name__ == "__main__":
    app.run(host="0.0.0.0",port=8080, debug=True)
| |
from ..utils import type_from_ast, is_valid_literal_value
from ..error import GraphQLError
from ..type.definition import is_composite_type, is_input_type, is_leaf_type, GraphQLNonNull
from ..language import ast
from ..language.visitor import Visitor, visit
from ..language.printer import print_ast
class ValidationRule(Visitor):
    """Base class for validation rules; stores the shared validation context."""
    __slots__ = ['context']

    def __init__(self, context):
        self.context = context
class UniqueOperationNames(ValidationRule):
    """Validates that no two operations in a document share a name."""
    __slots__ = ['known_operation_names']

    def __init__(self, context):
        super(UniqueOperationNames, self).__init__(context)
        # Maps operation name -> the name node where it was first seen.
        self.known_operation_names = {}

    def enter_OperationDefinition(self, node, *args):
        operation_name = node.name
        if not operation_name:
            return
        previous = self.known_operation_names.get(operation_name.value)
        if previous is not None:
            return GraphQLError(
                self.duplicate_operation_name_message(operation_name.value),
                [previous, operation_name]
            )
        self.known_operation_names[operation_name.value] = operation_name

    @staticmethod
    def duplicate_operation_name_message(operation_name):
        return 'There can only be one operation named "{}".'.format(operation_name)
class LoneAnonymousOperation(ValidationRule):
    """Validates that an anonymous operation is the only operation in the document."""
    __slots__ = ['operation_count']

    def __init__(self, context):
        super(LoneAnonymousOperation, self).__init__(context)
        self.operation_count = 0

    def enter_Document(self, node, *args):
        # Count the operation definitions in the document up front.
        count = 0
        for definition in node.definitions:
            if isinstance(definition, ast.OperationDefinition):
                count += 1
        self.operation_count = count

    def enter_OperationDefinition(self, node, *args):
        if self.operation_count > 1 and not node.name:
            return GraphQLError(self.anonymous_operation_not_alone_message(), [node])

    @staticmethod
    def anonymous_operation_not_alone_message():
        return 'This anonymous operation must be the only defined operation.'
class KnownTypeNames(ValidationRule):
    """Validates that every named type referenced exists in the schema."""
    __slots__ = []

    def enter_NamedType(self, node, *args):
        name = node.name.value
        if self.context.get_schema().get_type(name):
            return None
        return GraphQLError(self.unknown_type_message(name), [node])

    @staticmethod
    def unknown_type_message(type):
        return 'Unknown type "{}".'.format(type)
class FragmentsOnCompositeTypes(ValidationRule):
    """Validate that fragments and inline fragments only condition on
    composite (object, interface, or union) types."""
    __slots__ = []

    def enter_InlineFragment(self, node, *args):
        current_type = self.context.get_type()
        if not current_type or is_composite_type(current_type):
            return
        return GraphQLError(
            self.inline_fragment_on_non_composite_error_message(
                print_ast(node.type_condition)),
            [node.type_condition]
        )

    def enter_FragmentDefinition(self, node, *args):
        current_type = self.context.get_type()
        if not current_type or is_composite_type(current_type):
            return
        return GraphQLError(
            self.fragment_on_non_composite_error_message(
                node.name.value, print_ast(node.type_condition)),
            [node.type_condition]
        )

    @staticmethod
    def inline_fragment_on_non_composite_error_message(type):
        return 'Fragment cannot condition on non composite type "{}".'.format(type)

    @staticmethod
    def fragment_on_non_composite_error_message(frag_name, type):
        return 'Fragment "{}" cannot condition on non composite type "{}".'.format(frag_name, type)
class VariablesAreInputTypes(ValidationRule):
    """Validate that variables are declared with input types only."""
    __slots__ = []

    def enter_VariableDefinition(self, node, *args):
        declared_type = type_from_ast(self.context.get_schema(), node.type)
        if not declared_type or is_input_type(declared_type):
            return
        return GraphQLError(
            self.non_input_type_on_variable_message(
                node.variable.name.value, print_ast(node.type)),
            [node.type]
        )

    @staticmethod
    def non_input_type_on_variable_message(variable_name, type_name):
        return 'Variable "${}" cannot be non-input type "{}".'.format(variable_name, type_name)
class ScalarLeafs(ValidationRule):
    """Validate that leaf (scalar/enum) fields carry no sub-selection and
    that non-leaf fields always carry one."""
    __slots__ = []

    def enter_Field(self, node, *args):
        field_type = self.context.get_type()
        if not field_type:
            return
        has_selection = bool(node.selection_set)
        if is_leaf_type(field_type):
            if has_selection:
                return GraphQLError(
                    self.no_subselection_allowed_message(node.name.value, field_type),
                    [node.selection_set]
                )
        elif not has_selection:
            return GraphQLError(
                self.required_subselection_message(node.name.value, field_type),
                [node]
            )

    @staticmethod
    def no_subselection_allowed_message(field, type):
        return 'Field "{}" of type "{}" must not have a sub selection.'.format(field, type)

    @staticmethod
    def required_subselection_message(field, type):
        return 'Field "{}" of type "{}" must have a sub selection.'.format(field, type)
class FieldsOnCorrectType(ValidationRule):
    """Validate that every queried field is defined on its parent type."""
    __slots__ = []

    def enter_Field(self, node, *args):
        parent_type = self.context.get_parent_type()
        if not parent_type:
            return
        if self.context.get_field_def():
            # Field is known on the parent type; nothing to report.
            return
        return GraphQLError(
            self.undefined_field_message(node.name.value, parent_type.name),
            [node]
        )

    @staticmethod
    def undefined_field_message(field_name, type):
        return 'Cannot query field "{}" on "{}".'.format(field_name, type)
class UniqueFragmentNames(ValidationRule):
    """Validate that each fragment name is defined at most once."""
    __slots__ = ['known_fragment_names']

    def __init__(self, context):
        super(UniqueFragmentNames, self).__init__(context)
        # Maps a fragment name to the Name node where it was first defined.
        self.known_fragment_names = {}

    def enter_FragmentDefinition(self, node, *args):
        fragment_name = node.name.value
        first_seen = self.known_fragment_names.get(fragment_name)
        if first_seen is not None:
            return GraphQLError(
                self.duplicate_fragment_name_message(fragment_name),
                [first_seen, node.name]
            )
        self.known_fragment_names[fragment_name] = node.name

    @staticmethod
    def duplicate_fragment_name_message(field):
        return 'There can only be one fragment named "{}".'.format(field)
class KnownFragmentNames(ValidationRule):
    """Validate that every fragment spread names a defined fragment."""
    __slots__ = []

    def enter_FragmentSpread(self, node, *args):
        fragment_name = node.name.value
        if self.context.get_fragment(fragment_name):
            return
        return GraphQLError(
            self.unknown_fragment_message(fragment_name),
            [node.name]
        )

    @staticmethod
    def unknown_fragment_message(fragment_name):
        return 'Unknown fragment "{}".'.format(fragment_name)
class NoUnusedFragments(ValidationRule):
    """Validate that every defined fragment is reachable, directly or
    transitively, from at least one operation."""
    __slots__ = ['fragment_definitions', 'spreads_within_operation',
                 'fragment_adjacencies', 'spread_names']

    def __init__(self, context):
        super(NoUnusedFragments, self).__init__(context)
        self.fragment_definitions = []      # every FragmentDefinition node seen
        self.spreads_within_operation = []  # one spread-name set per operation
        self.fragment_adjacencies = {}      # fragment name -> names it spreads
        self.spread_names = set()           # the set currently being filled

    def enter_OperationDefinition(self, *args):
        self.spread_names = set()
        self.spreads_within_operation.append(self.spread_names)

    def enter_FragmentDefinition(self, node, *args):
        self.fragment_definitions.append(node)
        self.spread_names = set()
        self.fragment_adjacencies[node.name.value] = self.spread_names

    def enter_FragmentSpread(self, node, *args):
        self.spread_names.add(node.name.value)

    def leave_Document(self, *args):
        # Walk the spread graph from every operation with an explicit
        # stack, collecting every transitively reachable fragment name.
        fragment_names_used = set()
        pending = []
        for spreads in self.spreads_within_operation:
            pending.extend(spreads)
        while pending:
            fragment_name = pending.pop()
            if fragment_name in fragment_names_used:
                continue
            fragment_names_used.add(fragment_name)
            pending.extend(self.fragment_adjacencies.get(fragment_name, ()))
        errors = [
            GraphQLError(
                self.unused_fragment_message(definition.name.value),
                [definition]
            )
            for definition in self.fragment_definitions
            if definition.name.value not in fragment_names_used
        ]
        if errors:
            return errors

    @staticmethod
    def unused_fragment_message(fragment_name):
        return 'Fragment "{}" is never used.'.format(fragment_name)
class PossibleFragmentSpreads(ValidationRule):
    """Not yet implemented; performs no validation."""
    pass
class NoFragmentCycles(ValidationRule):
    """Validate that no fragment spreads itself, directly or transitively.

    On construction, a map from each defined fragment's name to the list of
    FragmentSpread nodes it contains is built. Each fragment definition is
    then checked for spread paths that lead back to itself.
    """
    __slots__ = ['spreads_in_fragment', 'known_to_lead_to_cycle']

    def __init__(self, context):
        super(NoFragmentCycles, self).__init__(context)
        # Fragment name -> FragmentSpread nodes appearing inside it, for
        # every fragment defined in this document.
        self.spreads_in_fragment = {
            node.name.value: self.gather_spreads(node)
            for node in context.get_ast().definitions
            if isinstance(node, ast.FragmentDefinition)
        }
        # Hashable identities of spread nodes already reported as part of a
        # cycle, so the same cycle is not reported repeatedly.
        self.known_to_lead_to_cycle = set()

    def enter_FragmentDefinition(self, node, *args):
        errors = []
        initial_name = node.name.value
        spread_path = []
        # This will convert the ast.FragmentSpread to something that we can add
        # to a set. Otherwise we get a `unhashable type: dict` error.
        # This makes it so that we can define a way to uniquely identify a
        # FragmentDefinition within a set.
        fragment_node_to_hashable = lambda fs: (fs.loc.start, fs.loc.end, fs.name.value)

        def detect_cycle_recursive(fragment_name):
            # Bug fix: a spread may name a fragment that is not defined in
            # this document; treat it as having no spreads instead of
            # raising KeyError (KnownFragmentNames reports that problem).
            spread_nodes = self.spreads_in_fragment.get(fragment_name, [])
            for spread_node in spread_nodes:
                if fragment_node_to_hashable(spread_node) in self.known_to_lead_to_cycle:
                    continue
                if spread_node.name.value == initial_name:
                    # Found a path back to the fragment we started from.
                    cycle_path = spread_path + [spread_node]
                    self.known_to_lead_to_cycle |= set(map(fragment_node_to_hashable, cycle_path))
                    errors.append(GraphQLError(
                        self.cycle_error_message(initial_name, [s.name.value for s in spread_path]),
                        cycle_path
                    ))
                    continue
                if any(spread is spread_node for spread in spread_path):
                    # Already on the current path; avoid infinite recursion.
                    continue
                spread_path.append(spread_node)
                detect_cycle_recursive(spread_node.name.value)
                spread_path.pop()

        detect_cycle_recursive(initial_name)
        if errors:
            return errors

    @staticmethod
    def cycle_error_message(fragment_name, spread_names):
        via = ' via {}'.format(', '.join(spread_names)) if spread_names else ''
        return 'Cannot spread fragment "{}" within itself{}.'.format(fragment_name, via)

    @classmethod
    def gather_spreads(cls, node):
        """Return all FragmentSpread nodes contained in *node*."""
        visitor = cls.CollectFragmentSpreadNodesVisitor()
        visit(node, visitor)
        return visitor.collect_fragment_spread_nodes()

    class CollectFragmentSpreadNodesVisitor(Visitor):
        """Helper visitor that simply accumulates FragmentSpread nodes."""
        __slots__ = ['spread_nodes']

        def __init__(self):
            self.spread_nodes = []

        def enter_FragmentSpread(self, node, *args):
            self.spread_nodes.append(node)

        def collect_fragment_spread_nodes(self):
            return self.spread_nodes
class NoUndefinedVariables(ValidationRule):
    """Validate that every variable used is defined by the operation.

    With visit_spread_fragments enabled, variables used inside fragments
    are checked against the operation that spreads them.
    """
    __slots__ = ['operation', 'visited_fragment_names', 'defined_variable_names']
    # Ask the visitor to descend into spread fragments so variables used
    # there are checked too.
    visit_spread_fragments = True

    def __init__(self, context):
        # operation currently being visited
        self.operation = None
        # fragments already visited for the current operation
        self.visited_fragment_names = set()
        # variable names declared by the current operation
        self.defined_variable_names = set()
        super(NoUndefinedVariables, self).__init__(context)

    @staticmethod
    def undefined_var_message(var_name):
        return 'Variable "${}" is not defined.'.format(var_name)

    @staticmethod
    def undefined_var_by_op_message(var_name, op_name):
        return 'Variable "${}" is not defined by operation "{}".'.format(
            var_name, op_name
        )

    def enter_OperationDefinition(self, node, *args):
        # Reset the per-operation state.
        self.operation = node
        self.visited_fragment_names = set()
        self.defined_variable_names = set()

    def enter_VariableDefinition(self, node, *args):
        self.defined_variable_names.add(node.variable.name.value)

    def enter_Variable(self, variable, key, parent, path, ancestors):
        var_name = variable.name.value
        if var_name not in self.defined_variable_names:
            # When the usage sits inside a fragment spread by a named
            # operation, report it against that operation.
            within_fragment = any(isinstance(node, ast.FragmentDefinition) for node in ancestors)
            if within_fragment and self.operation and self.operation.name:
                return GraphQLError(
                    self.undefined_var_by_op_message(var_name, self.operation.name.value),
                    [variable, self.operation]
                )
            return GraphQLError(
                self.undefined_var_message(var_name),
                [variable]
            )

    def enter_FragmentSpread(self, spread_ast, *args):
        # Visit each fragment at most once per operation; returning False
        # stops the visitor from descending into it again.
        if spread_ast.name.value in self.visited_fragment_names:
            return False
        self.visited_fragment_names.add(spread_ast.name.value)
class NoUnusedVariables(ValidationRule):
    """Validate that every variable defined by an operation is used."""
    __slots__ = ['visited_fragment_names', 'variable_definitions',
                 'variable_name_used']
    # Descend into spread fragments so usages inside them count.
    visit_spread_fragments = True

    def __init__(self, context):
        super(NoUnusedVariables, self).__init__(context)
        self.visited_fragment_names = None
        self.variable_definitions = None
        self.variable_name_used = None

    def enter_OperationDefinition(self, *args):
        # Fresh state for each operation.
        self.visited_fragment_names = set()
        self.variable_definitions = []
        self.variable_name_used = set()

    def leave_OperationDefinition(self, *args):
        unused = [
            definition for definition in self.variable_definitions
            if definition.variable.name.value not in self.variable_name_used
        ]
        errors = [
            GraphQLError(
                self.unused_variable_message(definition.variable.name.value),
                [definition]
            )
            for definition in unused
        ]
        if errors:
            return errors

    def enter_VariableDefinition(self, node, *args):
        if self.variable_definitions is not None:
            self.variable_definitions.append(node)
        # The definition's children need no further visiting.
        return False

    def enter_Variable(self, node, *args):
        if self.variable_name_used is not None:
            self.variable_name_used.add(node.name.value)

    def enter_FragmentSpread(self, node, *args):
        if self.visited_fragment_names is not None:
            spread_name = node.name.value
            if spread_name in self.visited_fragment_names:
                return False
            self.visited_fragment_names.add(spread_name)

    @staticmethod
    def unused_variable_message(variable_name):
        return 'Variable "${}" is never used.'.format(variable_name)
class KnownDirectives(ValidationRule):
    """Validate that directives exist in the schema and appear only in
    locations the directive definition allows."""
    __slots__ = []

    def enter_Directive(self, node, key, parent, path, ancestors):
        directive_name = node.name.value
        directive_def = None
        for definition in self.context.get_schema().get_directives():
            if definition.name == directive_name:
                directive_def = definition
                break
        if directive_def is None:
            return GraphQLError(
                self.unknown_directive_message(directive_name),
                [node]
            )
        # The node the directive is attached to is the direct ancestor.
        # The ancestor types below are mutually exclusive, so elif is safe.
        applied_to = ancestors[-1]
        if isinstance(applied_to, ast.OperationDefinition):
            if not directive_def.on_operation:
                return GraphQLError(
                    self.misplaced_directive_message(directive_name, 'operation'),
                    [node]
                )
        elif isinstance(applied_to, ast.Field):
            if not directive_def.on_field:
                return GraphQLError(
                    self.misplaced_directive_message(directive_name, 'field'),
                    [node]
                )
        elif isinstance(applied_to, (ast.FragmentSpread, ast.InlineFragment,
                                     ast.FragmentDefinition)):
            if not directive_def.on_fragment:
                return GraphQLError(
                    self.misplaced_directive_message(directive_name, 'fragment'),
                    [node]
                )

    @staticmethod
    def unknown_directive_message(directive_name):
        return 'Unknown directive "{}".'.format(directive_name)

    @staticmethod
    def misplaced_directive_message(directive_name, placement):
        return 'Directive "{}" may not be used on "{}".'.format(directive_name, placement)
class KnownArgumentNames(ValidationRule):
    """Validate that each argument name is defined: on the field definition
    for field arguments, on the directive definition for directive
    arguments."""
    __slots__ = []

    def enter_Argument(self, node, key, parent, path, ancestors):
        # The node the argument belongs to is the direct ancestor.
        argument_of = ancestors[-1]
        if isinstance(argument_of, ast.Field):
            field_def = self.context.get_field_def()
            if not field_def:
                # Unknown field; FieldsOnCorrectType reports that case.
                return
            field_arg_def = next((arg for arg in field_def.args if arg.name == node.name.value), None)
            if not field_arg_def:
                parent_type = self.context.get_parent_type()
                assert parent_type
                return GraphQLError(
                    self.unknown_arg_message(node.name.value, field_def.name, parent_type.name),
                    [node]
                )
        elif isinstance(argument_of, ast.Directive):
            directive = self.context.get_directive()
            if not directive:
                # Unknown directive; KnownDirectives reports that case.
                return
            directive_arg_def = next((arg for arg in directive.args if arg.name == node.name.value), None)
            if not directive_arg_def:
                return GraphQLError(
                    self.unknown_directive_arg_message(node.name.value, directive.name),
                    [node]
                )

    @staticmethod
    def unknown_arg_message(arg_name, field_name, type):
        return 'Unknown argument "{}" on field "{}" of type "{}".'.format(arg_name, field_name, type)

    @staticmethod
    def unknown_directive_arg_message(arg_name, directive_name):
        return 'Unknown argument "{}" on directive "@{}".'.format(arg_name, directive_name)
class UniqueArgumentNames(ValidationRule):
    """Validate that no field or directive repeats an argument name."""
    __slots__ = ['known_arg_names']

    def __init__(self, context):
        super(UniqueArgumentNames, self).__init__(context)
        self.known_arg_names = {}

    def enter_Field(self, *args):
        # Argument names only need to be unique per field...
        self.known_arg_names = {}

    def enter_Directive(self, *args):
        # ...or per directive, so start fresh for each.
        self.known_arg_names = {}

    def enter_Argument(self, node, *args):
        arg_name = node.name.value
        first_seen = self.known_arg_names.get(arg_name)
        if first_seen is not None:
            return GraphQLError(
                self.duplicate_arg_message(arg_name),
                [first_seen, node.name]
            )
        self.known_arg_names[arg_name] = node.name

    @staticmethod
    def duplicate_arg_message(field):
        return 'There can only be one argument named "{}".'.format(field)
class ArgumentsOfCorrectType(ValidationRule):
    """Validate that argument literal values match their declared types."""
    __slots__ = []

    def enter_Argument(self, node, *args):
        arg_def = self.context.get_argument()
        if not arg_def:
            return
        if is_valid_literal_value(arg_def.type, node.value):
            return
        return GraphQLError(
            self.bad_value_message(node.name.value, arg_def.type,
                                   print_ast(node.value)),
            [node.value]
        )

    @staticmethod
    def bad_value_message(arg_name, type, value):
        return 'Argument "{}" expected type "{}" but got: {}.'.format(arg_name, type, value)
class ProvidedNonNullArguments(ValidationRule):
    """Validate that every required (non-null) argument is provided on
    fields and directives."""
    __slots__ = []

    @staticmethod
    def _missing_required_args(node, arg_defs, message_func):
        """Return a GraphQLError for each non-null arg absent from *node*.

        :param node: the Field or Directive AST node being checked
        :param arg_defs: the argument definitions declared on it
        :param message_func: formatter producing the error message
        """
        arg_asts = node.arguments or []
        arg_ast_map = {arg.name.value: arg for arg in arg_asts}
        errors = []
        for arg_def in arg_defs:
            arg_ast = arg_ast_map.get(arg_def.name, None)
            if not arg_ast and isinstance(arg_def.type, GraphQLNonNull):
                errors.append(GraphQLError(
                    message_func(node.name.value, arg_def.name, arg_def.type),
                    [node]
                ))
        return errors

    def leave_Field(self, node, *args):
        field_def = self.context.get_field_def()
        if not field_def:
            return False
        # Consistency: the field and directive checks previously duplicated
        # the same scan; both now share _missing_required_args.
        errors = self._missing_required_args(
            node, field_def.args, self.missing_field_arg_message)
        if errors:
            return errors

    def leave_Directive(self, node, *args):
        directive_def = self.context.get_directive()
        if not directive_def:
            return False
        errors = self._missing_required_args(
            node, directive_def.args, self.missing_directive_arg_message)
        if errors:
            return errors

    @staticmethod
    def missing_field_arg_message(name, arg_name, type):
        return 'Field "{}" argument "{}" of type "{}" is required but not provided.'.format(name, arg_name, type)

    @staticmethod
    def missing_directive_arg_message(name, arg_name, type):
        return 'Directive "{}" argument "{}" of type "{}" is required but not provided.'.format(name, arg_name, type)
class DefaultValuesOfCorrectType(ValidationRule):
    """Validate variable default values: forbidden on non-null types, and
    they must match the declared type otherwise."""
    __slots__ = []

    def enter_VariableDefinition(self, node, *args):
        name = node.variable.name.value
        default_value = node.default_value
        input_type = self.context.get_input_type()
        if not default_value:
            return
        if isinstance(input_type, GraphQLNonNull):
            # A required variable can never fall back to a default value.
            return GraphQLError(
                self.default_for_non_null_arg_message(
                    name, input_type, input_type.of_type),
                [default_value]
            )
        if input_type and not is_valid_literal_value(input_type, default_value):
            return GraphQLError(
                self.bad_value_for_default_arg_message(
                    name, input_type, print_ast(default_value)),
                [default_value]
            )

    @staticmethod
    def default_for_non_null_arg_message(var_name, type, guess_type):
        return 'Variable "${}" of type "{}" is required and will not use the default value. ' \
               'Perhaps you meant to use type "{}".'.format(var_name, type, guess_type)

    @staticmethod
    def bad_value_for_default_arg_message(var_name, type, value):
        return 'Variable "${}" of type "{}" has invalid default value: {}.'.format(var_name, type, value)
class VariablesInAllowedPosition(ValidationRule):
    """Not yet implemented; performs no validation."""
    pass
class OverlappingFieldsCanBeMerged(ValidationRule):
    """Not yet implemented; performs no validation."""
    pass
| |
"""Custom tkinter frames that hold multiple widgets plus capabilities to
store data and send it to a callback.
Provides the following classes:
* _ToolBar: A base class for creating toolbars, intended to be subclassed
and extended.
* FirstOrderBar: holds numerical inputs required for first-order simulation
* SecondOrderBar: holds numerical inputs, plus a button with a pop-up 2D
array for entering chemical shifts and J coupling constants, for second-order
simulations of up to 8 coupled spins.
* SecondOrderSpinBar: Subclass of SecondOrderBar that uses spinbox widgets.
"""
import copy
from tkinter import *
import numpy as np
from nmrmint.GUI.widgets import (ArrayBox, ArraySpinBox, VarBox, IntBox)
from nmrmint.initialize import nspin_defaults
class _ToolBar(Frame):
    """Extend tkinter.Frame with a callback reference, a model
    name, a _vars property, a reset button, and methods for the reset callback.

    Intended to be subclassed, and not instantiated itself.

    Methods:
    * reset: resets the toolbar with provided vars. Must be overridden by
    subclass.
    * restore_defaults: resets the toolbar with default vars.

    Attributes:
    * callback: the function to be called when a change to the toolbar's
    widgets is detected
    * model (str): the type of calculation requested (interpreted by the
    Controller via the callback). To be overwritten by subclass.
    * vars: (dict) The kwargs that the callback is called with.
    """

    def __init__(self, parent=None, callback=None, **options):
        """Initialize the _ToolBar object with a reference to a callback.

        Keyword arguments:
        :param parent: the parent tkinter object
        :param callback: the function to be called when a change to the
        toolbar's widgets is detected
        :param options: standard optional kwargs for a tkinter Frame
        """
        Frame.__init__(self, parent, **options)
        self.callback = callback
        self.model = 'model'  # must be overwritten by subclasses
        self._defaults = {}  # overwrite for subclasses
        self._vars = {}  # overwrite for subclasses
        self._reset_button = Button(self,
                                    name='reset_button',
                                    text='Reset',
                                    # bound method passed directly; the
                                    # original wrapped it in a lambda
                                    command=self._restore_defaults_and_refresh)
        self._reset_button.pack(side=RIGHT)

    # bug fix: the original also declared a redundant @vars.getter that
    # merely duplicated this body; @property already installs the getter.
    @property
    def vars(self):
        """(dict) The kwargs that the callback is called with."""
        return self._vars

    def _restore_defaults_and_refresh(self):
        """Restore default values, then notify via the callback."""
        self.restore_defaults()
        self.callback()

    def restore_defaults(self):
        """Reset the toolbar widgets and _vars to the default values."""
        # deepcopy to prevent corruption of _defaults by reset
        self.reset(copy.deepcopy(self._defaults))

    def reset(self, _vars):
        """Reset the toolbar with the supplied vars. Must be overridden by
        the subclass.

        :param _vars: (dict) variable names mapped to the values to restore
        """
        pass
class FirstOrderBar(_ToolBar):
    """A subclass of _ToolBar designed for use with first-order (single-signal)
    simulations.

    Extends _ToolBar with a series of input widgets and a _fields dict to
    reference them.

    Overrides the following methods:
    * reset

    Overrides the following attributes:
    * self.model
    * self._defaults
    * self._vars
    """

    def __init__(self, parent=None, **options):
        """Instantiate the _ToolBar with appropriate widgets for first-order
        calculations.

        :param parent: the parent tkinter object
        :param options: standard optional kwargs for a tkinter Frame
        """
        _ToolBar.__init__(self, parent, **options)
        self.model = 'first_order'
        # Default entries used when the toolbar is reset. '#'-prefixed keys
        # hold integer nucleus counts; the others hold numeric parameters.
        self._defaults = {'JAX': 7.00,
                          '#A': 2,
                          'JBX': 3.00,
                          '#B': 1,
                          'JCX': 2.00,
                          '#C': 0,
                          'JDX': 7,
                          '#D': 0,
                          'Vcentr': 0.5,
                          '# of nuclei': 1,
                          'width': 0.5}
        self._vars = self._defaults.copy()
        # Maps an entry name to the widget that displays/edits it.
        self._fields = {}
        kwargs = {'dict_': self.vars,
                  'callback': self.callback}
        # IntBox for the '#'-prefixed count entries, VarBox for the rest.
        for key in ['# of nuclei', 'JAX', '#A', 'JBX', '#B', 'JCX', '#C',
                    'JDX', '#D', 'Vcentr', 'width']:
            if '#' not in key:
                widget = VarBox(self, name=key, **kwargs)
            else:
                widget = IntBox(self, name=key, **kwargs)
            self._fields[key] = widget
            widget.pack(side=LEFT)

    def reset(self, vars_):
        """Reset the toolbar with provided variables.

        :param vars_: (dict) entry names mapped to the values to restore
        """
        for key, val in vars_.items():
            self.vars[key] = val
            widget = self._fields[key]
            widget.set_value(val)
class SecondOrderBar(_ToolBar):
    """
    Extends Frame to hold n frequency entry boxes, an entry box for peak
    width (default 0.5 Hz), a 2-D numpy array for frequencies (see below),
    a 2-D numpy array for coupling constants, and a button to pop up a window
    for entering J values as well as frequencies.

    Overrides the following methods:
    * reset

    Overrides the following attributes:
    * self.model
    * self._defaults
    * self._vars
    * self.vars (@property)
    """

    def __init__(self, parent=None, n=4, **options):
        """Initialize the frame with necessary widgets and attributes.

        Keyword arguments:
        :param parent: the parent tkinter object
        :param callback: the Controller object of the MVC application
        :param n: the number of nuclei in the spin system
        :param options: standard optional kwargs for a tkinter Frame
        """
        # The ArrayBox and ArraySpinBox widgets currently handle 2-D arrays
        # only, so the frequencies only occupy the first row of a 1-row
        # 2-dimensional array (self._v_ppm), and the peak widths the first
        # column of the first row of a 1-cell 2-D array (self._w_array). i.e.
        # self._v_ppm [0,:] provides a 1-D numpy array of the frequencies,
        # and self._w_array[0, 0] provides the peak width.
        # TODO: refactor this so v and w have an intuitive data type
        _ToolBar.__init__(self, parent, **options)
        self.model = 'nspin'
        self._v_ppm, self._j = nspin_defaults(n)
        self._w_array = np.array([[0.5]])
        self._vars = self._create_var_dict()
        self._defaults = copy.deepcopy(self._vars)  # for resetting toolbar
        self._fields = {}
        self._add_frequency_widgets(n)
        self._add_peakwidth_widget()
        self._add_J_button(n)

    @property
    def vars(self):
        return self._vars

    # NOTE: this getter replaces the plain @property body above; it rebuilds
    # the dict so callers always see the current array references.
    @vars.getter
    def vars(self):
        self._vars = self._create_var_dict()
        return self._vars
    # @vars.setter not needed

    def _create_var_dict(self):
        """Return the current arrays packaged as the callback kwargs."""
        return {'v': self._v_ppm,
                'j': self._j,
                'w': self._w_array}

    def _add_frequency_widgets(self, n):
        """Add frequency-entry widgets to the toolbar.

        :param n: (int) The number of nuclei being simulated.
        """
        for freq in range(n):
            name = 'V' + str(freq + 1)
            vbox = ArrayBox(self, array=self._v_ppm, coord=(0, freq),
                            name=name,
                            callback=self.callback)
            self._fields[name] = vbox
            vbox.pack(side=LEFT)

    def _add_peakwidth_widget(self):
        """Add peak width-entry widget to the toolbar."""
        wbox = ArrayBox(self, array=self._w_array, coord=(0, 0), name="W",
                        callback=self.callback)
        self._fields['W'] = wbox
        wbox.pack(side=LEFT)

    def _add_J_button(self, n):
        """Add a button to the toolbar that will pop up the J-entry window.

        :param n: (int) The number of nuclei being simulated.
        """
        vj_button = Button(self, text="Enter Js",
                           command=lambda: self._vj_popup(n))
        vj_button.pack(side=LEFT, expand=N, fill=NONE)

    def _vj_popup(self, n):
        """
        Creates a new Toplevel window that provides entries for both
        frequencies and J couplings, and updates self.v and self._j when
        entries change.

        :param n: number of spins
        """
        tl = Toplevel()
        Label(tl, text='Second-Order Simulation').pack(side=TOP)
        datagrid = Frame(tl)
        # For gridlines, background set to the line color (e.g. 'black')
        datagrid.config(background='black')
        # Top-left corner cell, then the column headers V1..Vn.
        Label(datagrid, bg='gray90').grid(row=0, column=0, sticky=NSEW,
                                          padx=1, pady=1)
        for col in range(1, n + 1):
            Label(datagrid, text='V%d' % col, width=8, height=3,
                  bg='gray90').grid(
                row=0, column=col, sticky=NSEW, padx=1, pady=1)
        for row in range(1, n + 1):
            vtext = "V" + str(row)
            v = ArrayBox(datagrid, array=self._v_ppm,
                         coord=(0, row - 1),  # V1 stored in v[0, 0], etc.
                         name=vtext, color='gray90',
                         callback=self.callback)
            v.grid(row=row, column=0, sticky=NSEW, padx=1, pady=1)
            for col in range(1, n + 1):
                # Only the lower triangle holds entry widgets; the rest of
                # the grid is filled with blank labels.
                if col < row:
                    j = ArrayBox(datagrid, array=self._j,
                                 # J12 stored in _j[0, 1] (and _j[1, 0]) etc
                                 coord=(col - 1, row - 1),
                                 name="J%d%d" % (col, row),
                                 callback=self.callback)
                    j.grid(row=row, column=col, sticky=NSEW, padx=1, pady=1)
                else:
                    Label(datagrid, bg='grey').grid(
                        row=row, column=col, sticky=NSEW, padx=1, pady=1)
        datagrid.pack()

    def reset(self, _vars):
        """Reset the toolbar with supplied vars.

        :param _vars: {'v': 2D np.array of [[ppm chemical shifts...]],
                       'j': 2D np.array of Js in Hz,
                       'w': 2D np.array of [[peak width]]}

        TODO: factor out clunky use of 2D arrays for v and w, to 1D array and
        float.
        """
        # bug fix: ._v_ppm and ._w_array cannot be completely replaced with
        # corresponding _vars array, because it will break links to widget
        # arrays.
        self._v_ppm[0] = _vars['v'][0]
        # .j is only used in popup window, where ArrayBox widgets are created
        # fresh each time. So, OK for now to replace entire _j array
        # reference.
        self._j = _vars['j']
        self._w_array[0][0] = _vars['w'][0][0]
        # Push the restored values into the visible toolbar widgets.
        for i, freq in enumerate(self._v_ppm[0]):
            name = 'V' + str(i + 1)
            widget = self._fields[name]
            widget.set_value(freq)
        width_widget = self._fields['W']
        width = self._w_array[0][0]
        width_widget.set_value(width)
# TODO: most recent changes have used SecondOrderBar. If SecondOrderSpinBar
# is still a possible option, make sure that it is updated to be a complete
# swap for SecondOrderBar
class SecondOrderSpinBar(SecondOrderBar):
    """A subclass of SecondOrderBar that uses ArraySpinBox widgets for the
    toolbar.

    Overrides _add_frequency_widgets and _add_peakwidth_widget.

    Extends SecondOrderBar with _spinbox_kwargs for spinbox instantiation.
    """

    def __init__(self, parent=None,
                 from_=-1.0, to=15.0, increment=0.01, realtime=False,
                 **options):
        """Initialize subclass of SecondOrderBar with extra arguments for the
        SpinBox minimum and maximum values, standard increment, and realtime
        behavior.

        :param parent: the parent tkinter object
        :param from_: (float) the minimum value for the spinboxes
        :param to: (float) the maximum value for the spinboxes
        :param increment: (float) the amount to increment/decrement the SpinBox
        contents per arrow click.
        :param realtime: (bool) True if callback should be repeatedly called
        as a SpinBox arrow is being held down.
        :param options: standard optional kwargs for a tkinter Frame
        """
        # Stored before calling the parent __init__ because the parent
        # __init__ invokes the overridden _add_*_widgets methods below.
        self._spinbox_kwargs = {'from_': from_,
                                'to': to,
                                'increment': increment,
                                'realtime': realtime}
        SecondOrderBar.__init__(self, parent, **options)

    def _add_frequency_widgets(self, n):
        """Add frequency-entry widgets to the toolbar.

        :param n: (int) The number of nuclei being simulated.
        """
        for freq in range(n):
            vbox = ArraySpinBox(self, array=self._v_ppm, coord=(0, freq),
                                name='V' + str(freq + 1),
                                callback=self.callback,
                                **self._spinbox_kwargs)
            vbox.pack(side=LEFT)

    def _add_peakwidth_widget(self):
        """Add the peak-width spinbox to the toolbar.

        Currently hard-wired to vary from 0.01 to 100 Hz, with an increment
        of 0.1 Hz.
        """
        wbox = ArraySpinBox(self, array=self._w_array, coord=(0, 0),
                            name="W",
                            callback=self.callback,
                            from_=0.01, to=100, increment=0.1,
                            realtime=self._spinbox_kwargs['realtime'])
        wbox.pack(side=LEFT)
if __name__ == '__main__':
    def dummy_callback(*args, **kwargs):
        """Print whatever the toolbars send, for manual inspection."""
        print(args)
        print(kwargs)

    root = Tk()
    root.title('test toolbars')
    # Instantiate one of each toolbar class for a quick visual check.
    for bar_class in (FirstOrderBar, SecondOrderBar, SecondOrderSpinBar):
        bar = bar_class(root, callback=dummy_callback)
        bar.pack(side=TOP)
    # workaround fix for Tk problems and mac mouse/trackpad:
    while True:
        try:
            root.mainloop()
            break
        except UnicodeDecodeError:
            pass
| |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from distutils import util
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
from google.api_core import client_options as client_options_lib # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
from google.ads.googleads.v8.resources.types import feed_placeholder_view
from google.ads.googleads.v8.services.types import feed_placeholder_view_service
from .transports.base import (
FeedPlaceholderViewServiceTransport,
DEFAULT_CLIENT_INFO,
)
from .transports.grpc import FeedPlaceholderViewServiceGrpcTransport
class FeedPlaceholderViewServiceClientMeta(type):
    """Metaclass for the FeedPlaceholderViewService client.

    This provides class-level methods for building and retrieving
    support objects (e.g. transport) without polluting the client instance
    objects.
    """

    _transport_registry = (
        OrderedDict()
    )  # type: Dict[str, Type[FeedPlaceholderViewServiceTransport]]
    _transport_registry["grpc"] = FeedPlaceholderViewServiceGrpcTransport

    def get_transport_class(
        cls, label: str = None,
    ) -> Type[FeedPlaceholderViewServiceTransport]:
        """Return an appropriate transport class.

        Args:
            label: The name of the desired transport. If none is
                provided, then the first transport in the registry is used.

        Returns:
            The transport class to use.
        """
        if not label:
            # No transport requested; use the default (that is, the first
            # one registered).
            return next(iter(cls._transport_registry.values()))
        # A specific transport was requested; look it up directly.
        return cls._transport_registry[label]
class FeedPlaceholderViewServiceClient(
metaclass=FeedPlaceholderViewServiceClientMeta
):
"""Service to fetch feed placeholder views."""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Convert api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "googleads.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
FeedPlaceholderViewServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(
info
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
FeedPlaceholderViewServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
    @property
    def transport(self) -> FeedPlaceholderViewServiceTransport:
        """Return the transport used by the client instance.

        Returns:
            FeedPlaceholderViewServiceTransport: The transport used by the client instance.
        """
        # NOTE(review): _transport is assigned outside this view
        # (presumably in __init__); read-only here.
        return self._transport
@staticmethod
def feed_placeholder_view_path(
customer_id: str, placeholder_type: str,
) -> str:
"""Return a fully-qualified feed_placeholder_view string."""
return "customers/{customer_id}/feedPlaceholderViews/{placeholder_type}".format(
customer_id=customer_id, placeholder_type=placeholder_type,
)
@staticmethod
def parse_feed_placeholder_view_path(path: str) -> Dict[str, str]:
"""Parse a feed_placeholder_view path into its component segments."""
m = re.match(
r"^customers/(?P<customer_id>.+?)/feedPlaceholderViews/(?P<placeholder_type>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Return a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Return a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Return a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Return a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Return a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path
)
return m.groupdict() if m else {}
def __init__(
    self,
    *,
    credentials: Optional[ga_credentials.Credentials] = None,
    transport: Union[str, FeedPlaceholderViewServiceTransport, None] = None,
    client_options: Optional[client_options_lib.ClientOptions] = None,
    client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
    """Instantiate the feed placeholder view service client.

    Args:
        credentials (Optional[google.auth.credentials.Credentials]): The
            authorization credentials to attach to requests. These
            credentials identify the application to the service; if none
            are specified, the client will attempt to ascertain the
            credentials from the environment.
        transport (Union[str, ~.FeedPlaceholderViewServiceTransport]): The
            transport to use. If set to None, a transport is chosen
            automatically.
        client_options (google.api_core.client_options.ClientOptions): Custom options for the
            client. It won't take effect if a ``transport`` instance is provided.
            (1) The ``api_endpoint`` property can be used to override the
            default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
            environment variable can also be used to override the endpoint:
            "always" (always use the default mTLS endpoint), "never" (always
            use the default regular endpoint) and "auto" (auto switch to the
            default mTLS endpoint if client certificate is present, this is
            the default value). However, the ``api_endpoint`` property takes
            precedence if provided.
            (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
            is "true", then the ``client_cert_source`` property can be used
            to provide client certificate for mutual TLS transport. If
            not provided, the default SSL client certificate will be used if
            present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
            set, no client certificate will be used.
        client_info (google.api_core.gapic_v1.client_info.ClientInfo):
            The client info used to send a user-agent string along with
            API requests. If ``None``, then default info will be used.
            Generally, you only need to set this if you're developing
            your own client library.

    Raises:
        google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
            creation failed for any reason.
    """
    # Accept a plain dict in place of a ClientOptions instance.
    if isinstance(client_options, dict):
        client_options = client_options_lib.from_dict(client_options)
    if client_options is None:
        client_options = client_options_lib.ClientOptions()
    # Create SSL credentials for mutual TLS if needed.  Client certs are
    # opt-in via GOOGLE_API_USE_CLIENT_CERTIFICATE ("true"/"false").
    use_client_cert = bool(
        util.strtobool(
            os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
        )
    )
    ssl_credentials = None
    is_mtls = False
    if use_client_cert:
        if client_options.client_cert_source:
            # Caller supplied a cert callback: build channel credentials
            # from the returned (cert, key) pair.
            import grpc  # type: ignore

            cert, key = client_options.client_cert_source()
            ssl_credentials = grpc.ssl_channel_credentials(
                certificate_chain=cert, private_key=key
            )
            is_mtls = True
        else:
            # Fall back to the default SSL client certificate, if any.
            creds = SslCredentials()
            is_mtls = creds.is_mtls
            ssl_credentials = creds.ssl_credentials if is_mtls else None
    # Figure out which api endpoint to use.  An explicit api_endpoint in
    # client_options always wins over the environment variable.
    if client_options.api_endpoint is not None:
        api_endpoint = client_options.api_endpoint
    else:
        use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
        if use_mtls_env == "never":
            api_endpoint = self.DEFAULT_ENDPOINT
        elif use_mtls_env == "always":
            api_endpoint = self.DEFAULT_MTLS_ENDPOINT
        elif use_mtls_env == "auto":
            # Only use mTLS endpoint when a client cert was established.
            api_endpoint = (
                self.DEFAULT_MTLS_ENDPOINT
                if is_mtls
                else self.DEFAULT_ENDPOINT
            )
        else:
            raise MutualTLSChannelError(
                "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always"
            )
    # Save or instantiate the transport.
    # Ordinarily, we provide the transport, but allowing a custom transport
    # instance provides an extensibility point for unusual situations.
    if isinstance(transport, FeedPlaceholderViewServiceTransport):
        # transport is a FeedPlaceholderViewServiceTransport instance.
        if credentials:
            raise ValueError(
                "When providing a transport instance, "
                "provide its credentials directly."
            )
        self._transport = transport
    elif isinstance(transport, str):
        # NOTE(review): a transport selected by name ignores the computed
        # api_endpoint / ssl_credentials above and always connects to
        # DEFAULT_ENDPOINT -- confirm this matches the generator's intent.
        Transport = type(self).get_transport_class(transport)
        self._transport = Transport(
            credentials=credentials, host=self.DEFAULT_ENDPOINT
        )
    else:
        self._transport = FeedPlaceholderViewServiceGrpcTransport(
            credentials=credentials,
            host=api_endpoint,
            ssl_channel_credentials=ssl_credentials,
            client_info=client_info,
        )
def get_feed_placeholder_view(
    self,
    request: feed_placeholder_view_service.GetFeedPlaceholderViewRequest = None,
    *,
    resource_name: str = None,
    retry: retries.Retry = gapic_v1.method.DEFAULT,
    timeout: float = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> feed_placeholder_view.FeedPlaceholderView:
    r"""Returns the requested feed placeholder view in full detail.

    List of thrown errors: `AuthenticationError <>`__
    `AuthorizationError <>`__ `HeaderError <>`__
    `InternalError <>`__ `QuotaError <>`__ `RequestError <>`__

    Args:
        request (:class:`google.ads.googleads.v8.services.types.GetFeedPlaceholderViewRequest`):
            The request object. Request message for
            [FeedPlaceholderViewService.GetFeedPlaceholderView][google.ads.googleads.v8.services.FeedPlaceholderViewService.GetFeedPlaceholderView].
        resource_name (:class:`str`):
            Required. The resource name of the
            feed placeholder view to fetch.
            This corresponds to the ``resource_name`` field
            on the ``request`` instance; if ``request`` is provided, this
            should not be set.
        retry (google.api_core.retry.Retry): Designation of what errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        google.ads.googleads.v8.resources.types.FeedPlaceholderView:
            A feed placeholder view.
    """
    # Create or coerce a protobuf request object.
    # Sanity check: If we got a request object, we should *not* have
    # gotten any keyword arguments that map to the request.
    if request is not None and any([resource_name]):
        raise ValueError(
            "If the `request` argument is set, then none of "
            "the individual field arguments should be set."
        )
    # Minor optimization to avoid making a copy if the user passes
    # in a feed_placeholder_view_service.GetFeedPlaceholderViewRequest.
    # There's no risk of modifying the input as we've already verified
    # there are no flattened fields.
    if not isinstance(
        request, feed_placeholder_view_service.GetFeedPlaceholderViewRequest
    ):
        request = feed_placeholder_view_service.GetFeedPlaceholderViewRequest(
            request
        )
    # If we have keyword arguments corresponding to fields on the
    # request, apply these.
    if resource_name is not None:
        request.resource_name = resource_name
    # Wrap the RPC method; this adds retry and timeout information,
    # and friendly error handling.
    rpc = self._transport._wrapped_methods[
        self._transport.get_feed_placeholder_view
    ]
    # Certain fields should be provided within the metadata header;
    # add these here.  The routing header tells the backend which
    # resource the call targets.
    metadata = tuple(metadata) + (
        gapic_v1.routing_header.to_grpc_metadata(
            (("resource_name", request.resource_name),)
        ),
    )
    # Send the request.
    response = rpc(
        request, retry=retry, timeout=timeout, metadata=metadata,
    )
    # Done; return the response.
    return response
# Public API of this module.
__all__ = ("FeedPlaceholderViewServiceClient",)
| |
import numpy as np
from numpy.testing import run_module_suite, assert_array_equal, assert_raises
from skimage import img_as_ubyte, img_as_uint, img_as_float
from skimage import data, util
from skimage.morphology import cmorph, disk
from skimage.filter import rank
def test_random_sizes():
    """Filters must accept arbitrary image sizes and keep the output shape."""
    elem = np.array([[1, 1, 1], [1, 1, 1], [1, 1, 1]], dtype=np.uint8)
    # np.random.random_integers was deprecated and removed (NumPy 1.24);
    # randint's upper bound is exclusive, so 101 keeps the 1..100 range.
    # The unused `niter` local was dropped.
    for m, n in np.random.randint(1, 101, size=(10, 2)):
        mask = np.ones((m, n), dtype=np.uint8)
        image8 = np.ones((m, n), dtype=np.uint8)
        out8 = np.empty_like(image8)
        rank.mean(image=image8, selem=elem, mask=mask, out=out8,
                  shift_x=0, shift_y=0)
        assert_array_equal(image8.shape, out8.shape)
        rank.mean(image=image8, selem=elem, mask=mask, out=out8,
                  shift_x=+1, shift_y=+1)
        assert_array_equal(image8.shape, out8.shape)
        image16 = np.ones((m, n), dtype=np.uint16)
        out16 = np.empty_like(image8, dtype=np.uint16)
        rank.mean(image=image16, selem=elem, mask=mask, out=out16,
                  shift_x=0, shift_y=0)
        assert_array_equal(image16.shape, out16.shape)
        rank.mean(image=image16, selem=elem, mask=mask, out=out16,
                  shift_x=+1, shift_y=+1)
        assert_array_equal(image16.shape, out16.shape)
        rank.mean_percentile(image=image16, mask=mask, out=out16,
                             selem=elem, shift_x=0, shift_y=0, p0=.1, p1=.9)
        assert_array_equal(image16.shape, out16.shape)
        rank.mean_percentile(image=image16, mask=mask, out=out16,
                             selem=elem, shift_x=+1, shift_y=+1, p0=.1, p1=.9)
        assert_array_equal(image16.shape, out16.shape)
def test_compare_with_cmorph_dilate():
    # rank.maximum with a square footprint must match cmorph dilation.
    img = (np.random.random((100, 100)) * 256).astype(np.uint8)
    result = np.empty_like(img)
    full_mask = np.ones(img.shape, dtype=np.uint8)
    for size in range(1, 20):
        footprint = np.ones((size, size), dtype=np.uint8)
        rank.maximum(image=img, selem=footprint, out=result, mask=full_mask)
        expected = cmorph._dilate(image=img, selem=footprint)
        assert_array_equal(result, expected)
def test_compare_with_cmorph_erode():
    # rank.minimum with a square footprint must match cmorph erosion.
    img = (np.random.random((100, 100)) * 256).astype(np.uint8)
    result = np.empty_like(img)
    full_mask = np.ones(img.shape, dtype=np.uint8)
    for size in range(1, 20):
        footprint = np.ones((size, size), dtype=np.uint8)
        rank.minimum(image=img, selem=footprint, out=result, mask=full_mask)
        expected = cmorph._erode(image=img, selem=footprint)
        assert_array_equal(result, expected)
def test_bitdepth():
    # Smoke test: mean_percentile must cope with 16-bit data at several
    # dynamic ranges; only the absence of exceptions is checked.
    elem = np.ones((3, 3), dtype=np.uint8)
    out = np.empty((100, 100), dtype=np.uint16)
    mask = np.ones((100, 100), dtype=np.uint8)
    for i in range(5):
        image = np.ones((100, 100), dtype=np.uint16) * 255 * 2 ** i
        # The return value is intentionally discarded (the unused `r = ...`
        # binding was removed).
        rank.mean_percentile(image=image, selem=elem, mask=mask,
                             out=out, shift_x=0, shift_y=0, p0=.1, p1=.9)
def test_population():
    # rank.pop counts the number of valid neighbours of each pixel.
    img = np.zeros((5, 5), dtype=np.uint8)
    footprint = np.ones((3, 3), dtype=np.uint8)
    result = np.empty_like(img)
    msk = np.ones(img.shape, dtype=np.uint8)
    rank.pop(image=img, selem=footprint, out=result, mask=msk)
    expected = np.array([[4, 6, 6, 6, 4],
                         [6, 9, 9, 9, 6],
                         [6, 9, 9, 9, 6],
                         [6, 9, 9, 9, 6],
                         [4, 6, 6, 6, 4]])
    assert_array_equal(expected, result)
def test_structuring_element8():
    # A custom asymmetric structuring element applied with a shift must
    # yield the same picture for 8- and 16-bit input.
    expected = np.array([[0, 0, 0, 0, 0, 0],
                         [0, 0, 0, 0, 0, 0],
                         [0, 0, 255, 0, 0, 0],
                         [0, 0, 255, 255, 255, 0],
                         [0, 0, 0, 255, 255, 0],
                         [0, 0, 0, 0, 0, 0]])
    footprint = np.asarray([[1, 1, 0], [1, 1, 1], [0, 0, 1]], dtype=np.uint8)
    msk = np.ones((6, 6), dtype=np.uint8)
    for dtype in (np.uint8, np.uint16):
        img = np.zeros((6, 6), dtype=dtype)
        img[2, 2] = 255
        result = np.empty_like(img)
        rank.maximum(image=img, selem=footprint, out=result, mask=msk,
                     shift_x=1, shift_y=1)
        assert_array_equal(expected, result)
def test_pass_on_bitdepth():
    # should pass because data bitdepth is not too high for the function
    # NOTE(review): this test builds its inputs but never calls a rank
    # filter, so it can never fail -- presumably a call (e.g. to
    # rank.mean_percentile) went missing; confirm against upstream.
    image = np.ones((100, 100), dtype=np.uint16) * 2 ** 11
    elem = np.ones((3, 3), dtype=np.uint8)
    out = np.empty_like(image)
    mask = np.ones(image.shape, dtype=np.uint8)
def test_inplace_output():
    # Filtering in place (out is the input image) is unsupported and
    # must raise NotImplementedError.
    footprint = disk(20)
    img = (np.random.random((500, 500)) * 256).astype(np.uint8)
    assert_raises(NotImplementedError, rank.mean, img, footprint, out=img)
def test_compare_autolevels():
    # autolevel must equal percentile autolevel when p0=0 and p1=1.
    img = util.img_as_ubyte(data.camera())
    footprint = disk(20)
    plain = rank.autolevel(img, selem=footprint)
    percentile_version = rank.autolevel_percentile(img, selem=footprint,
                                                   p0=.0, p1=1.)
    assert_array_equal(plain, percentile_version)
def test_compare_autolevels_16bit():
    # 16-bit autolevel must equal 16-bit percentile autolevel when
    # p0=0 and p1=1.
    img = data.camera().astype(np.uint16) * 4
    footprint = disk(20)
    plain = rank.autolevel(img, selem=footprint)
    percentile_version = rank.autolevel_percentile(img, selem=footprint,
                                                   p0=.0, p1=1.)
    assert_array_equal(plain, percentile_version)
def test_compare_ubyte_vs_float():
    # Each filter must give the same result on a uint8 image and on its
    # float equivalent.
    image_uint = img_as_ubyte(data.camera()[:50, :50])
    image_float = img_as_float(image_uint)
    for name in ('autolevel', 'bottomhat', 'equalize', 'gradient',
                 'threshold', 'subtract_mean', 'enhance_contrast', 'pop',
                 'tophat'):
        func = getattr(rank, name)
        assert_array_equal(func(image_uint, disk(3)),
                           func(image_float, disk(3)))
def test_compare_8bit_unsigned_vs_signed():
    # Filters applied to an int8 image and to its uint8 conversion must
    # give identical results (only 8 bits of real dynamic are present).
    image = img_as_ubyte(data.camera())
    image[image > 127] = 0
    image_s = image.astype(np.int8)
    image_u = img_as_ubyte(image_s)
    assert_array_equal(image_u, img_as_ubyte(image_s))
    for name in ('autolevel', 'bottomhat', 'equalize', 'gradient',
                 'maximum', 'mean', 'subtract_mean', 'median', 'minimum',
                 'modal', 'enhance_contrast', 'pop', 'threshold', 'tophat'):
        func = getattr(rank, name)
        assert_array_equal(func(image_u, disk(3)), func(image_s, disk(3)))
def test_compare_8bit_vs_16bit():
    # An 8-bit image and its 16-bit widening (same 8 bits of dynamic)
    # must filter identically.
    image8 = util.img_as_ubyte(data.camera())
    image16 = image8.astype(np.uint16)
    assert_array_equal(image8, image16)
    for name in ('autolevel', 'bottomhat', 'equalize', 'gradient',
                 'maximum', 'mean', 'subtract_mean', 'median', 'minimum',
                 'modal', 'enhance_contrast', 'pop', 'threshold', 'tophat'):
        func = getattr(rank, name)
        assert_array_equal(func(image8, disk(3)), func(image16, disk(3)))
def test_trivial_selem8():
    # A 3x3 element with only the centre set must act as the identity
    # for mean, minimum and maximum (8-bit).
    img = np.zeros((5, 5), dtype=np.uint8)
    img[2, 2] = 255
    img[2, 3] = 128
    img[1, 2] = 16
    result = np.zeros_like(img)
    msk = np.ones_like(img, dtype=np.uint8)
    center_only = np.array([[0, 0, 0], [0, 1, 0], [0, 0, 0]], dtype=np.uint8)
    for filt in (rank.mean, rank.minimum, rank.maximum):
        filt(image=img, selem=center_only, out=result, mask=msk,
             shift_x=0, shift_y=0)
        assert_array_equal(img, result)
def test_trivial_selem16():
    # A 3x3 element with only the centre set must act as the identity
    # for mean, minimum and maximum (16-bit).
    img = np.zeros((5, 5), dtype=np.uint16)
    img[2, 2] = 255
    img[2, 3] = 128
    img[1, 2] = 16
    result = np.zeros_like(img)
    msk = np.ones_like(img, dtype=np.uint8)
    center_only = np.array([[0, 0, 0], [0, 1, 0], [0, 0, 0]], dtype=np.uint8)
    for filt in (rank.mean, rank.minimum, rank.maximum):
        filt(image=img, selem=center_only, out=result, mask=msk,
             shift_x=0, shift_y=0)
        assert_array_equal(img, result)
def test_smallest_selem8():
    # A 1x1 structuring element must act as the identity for mean,
    # minimum and maximum (8-bit).
    img = np.zeros((5, 5), dtype=np.uint8)
    img[2, 2] = 255
    img[2, 3] = 128
    img[1, 2] = 16
    result = np.zeros_like(img)
    msk = np.ones_like(img, dtype=np.uint8)
    single = np.array([[1]], dtype=np.uint8)
    for filt in (rank.mean, rank.minimum, rank.maximum):
        filt(image=img, selem=single, out=result, mask=msk,
             shift_x=0, shift_y=0)
        assert_array_equal(img, result)
def test_smallest_selem16():
    # A 1x1 structuring element must act as the identity for mean,
    # minimum and maximum (16-bit).
    img = np.zeros((5, 5), dtype=np.uint16)
    img[2, 2] = 255
    img[2, 3] = 128
    img[1, 2] = 16
    result = np.zeros_like(img)
    msk = np.ones_like(img, dtype=np.uint8)
    single = np.array([[1]], dtype=np.uint8)
    for filt in (rank.mean, rank.minimum, rank.maximum):
        filt(image=img, selem=single, out=result, mask=msk,
             shift_x=0, shift_y=0)
        assert_array_equal(img, result)
def test_empty_selem():
    # An all-zero structuring element selects no pixels, so mean,
    # minimum and maximum must all return zeros.
    img = np.zeros((5, 5), dtype=np.uint16)
    img[2, 2] = 255
    img[2, 3] = 128
    img[1, 2] = 16
    result = np.zeros_like(img)
    expected = np.zeros_like(img)
    msk = np.ones_like(img, dtype=np.uint8)
    empty = np.array([[0, 0, 0], [0, 0, 0]], dtype=np.uint8)
    for filt in (rank.mean, rank.minimum, rank.maximum):
        filt(image=img, selem=empty, out=result, mask=msk,
             shift_x=0, shift_y=0)
        assert_array_equal(expected, result)
def test_otsu():
    # Local Otsu segmentation of a synthetic left-to-right ramp * sine.
    row = [128, 145, 103, 127, 165, 83, 127, 185, 63, 127, 205, 43,
           127, 225, 23, 127]
    test = np.tile(row, (16, 1)).astype(np.uint8)
    expected = np.tile([1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1],
                       (16, 1))
    footprint = np.ones((6, 6), dtype=np.uint8)
    binary = 1 * (test >= rank.otsu(test, footprint))
    assert_array_equal(binary, expected)
def test_entropy():
    # Entropy output must track the bit depth of the input.  The local
    # array was renamed from `data` to `img` so it no longer shadows the
    # imported skimage.data module.
    selem = np.ones((16, 16), dtype=np.uint8)
    # 1 bit per pixel
    img = np.tile(np.asarray([0, 1]), (100, 100)).astype(np.uint8)
    assert np.max(rank.entropy(img, selem)) == 1
    # 2 bit per pixel
    img = np.tile(np.asarray([[0, 1], [2, 3]]), (10, 10)).astype(np.uint8)
    assert np.max(rank.entropy(img, selem)) == 2
    # 3 bit per pixel
    img = np.tile(
        np.asarray([[0, 1, 2, 3], [4, 5, 6, 7]]), (10, 10)).astype(np.uint8)
    assert np.max(rank.entropy(img, selem)) == 3
    # 4 bit per pixel
    img = np.tile(
        np.reshape(np.arange(16), (4, 4)), (10, 10)).astype(np.uint8)
    assert np.max(rank.entropy(img, selem)) == 4
    # 6 bit per pixel
    img = np.tile(
        np.reshape(np.arange(64), (8, 8)), (10, 10)).astype(np.uint8)
    assert np.max(rank.entropy(img, selem)) == 6
    # 8 bit per pixel
    img = np.tile(
        np.reshape(np.arange(256), (16, 16)), (10, 10)).astype(np.uint8)
    assert np.max(rank.entropy(img, selem)) == 8
    # 12 bit per pixel
    selem = np.ones((64, 64), dtype=np.uint8)
    img = np.tile(
        np.reshape(np.arange(4096), (64, 64)), (2, 2)).astype(np.uint16)
    assert np.max(rank.entropy(img, selem)) == 12
    # output must always be double precision
    out = rank.entropy(img, np.ones((16, 16), dtype=np.uint8))
    assert out.dtype == np.double
def test_selem_dtypes():
    # The structuring element may be given in any numeric dtype.
    img = np.zeros((5, 5), dtype=np.uint8)
    img[2, 2] = 255
    img[2, 3] = 128
    img[1, 2] = 16
    result = np.zeros_like(img)
    msk = np.ones_like(img, dtype=np.uint8)
    for dtype in (np.uint8, np.uint16, np.int32, np.int64,
                  np.float32, np.float64):
        center_only = np.array([[0, 0, 0], [0, 1, 0], [0, 0, 0]],
                               dtype=dtype)
        rank.mean(image=img, selem=center_only, out=result, mask=msk,
                  shift_x=0, shift_y=0)
        assert_array_equal(img, result)
        rank.mean_percentile(image=img, selem=center_only, out=result,
                             mask=msk, shift_x=0, shift_y=0)
        assert_array_equal(img, result)
def test_16bit():
    # min/max/mean must be exact for every bit depth up to 16.
    img = np.zeros((21, 21), dtype=np.uint16)
    footprint = np.ones((3, 3), dtype=np.uint8)
    for bitdepth in range(17):
        peak = 2 ** bitdepth - 1
        img[10, 10] = peak
        assert rank.minimum(img, footprint)[10, 10] == 0
        assert rank.maximum(img, footprint)[10, 10] == peak
        assert rank.mean(img, footprint)[10, 10] == int(peak / footprint.size)
def test_bilateral():
    # Bilateral mean/pop only aggregate neighbours whose grey level is
    # within [g - s0, g + s1] of the centre pixel.
    img = np.zeros((21, 21), dtype=np.uint16)
    footprint = np.ones((3, 3), dtype=np.uint8)
    img[10, 10] = 1000
    img[10, 11] = 1010
    img[10, 9] = 900
    assert rank.mean_bilateral(img, footprint, s0=1, s1=1)[10, 10] == 1000
    assert rank.pop_bilateral(img, footprint, s0=1, s1=1)[10, 10] == 1
    assert rank.mean_bilateral(img, footprint, s0=11, s1=11)[10, 10] == 1005
    assert rank.pop_bilateral(img, footprint, s0=11, s1=11)[10, 10] == 2
def test_percentile_min():
    # percentile with p0=0 must equal the local minimum, for both
    # 8-bit and 16-bit input.
    img8 = data.camera()
    footprint = disk(15)
    for img in (img8, img8.astype(np.uint16)):
        assert_array_equal(rank.percentile(img, selem=footprint, p0=0),
                           rank.minimum(img, selem=footprint))
def test_percentile_max():
    # percentile with p0=1 must equal the local maximum, for both
    # 8-bit and 16-bit input.
    img8 = data.camera()
    footprint = disk(15)
    for img in (img8, img8.astype(np.uint16)):
        assert_array_equal(rank.percentile(img, selem=footprint, p0=1.),
                           rank.maximum(img, selem=footprint))
def test_percentile_median():
    # percentile with p0=0.5 must equal the local median, for both
    # 8-bit and 16-bit input.
    img8 = data.camera()
    footprint = disk(15)
    for img in (img8, img8.astype(np.uint16)):
        assert_array_equal(rank.percentile(img, selem=footprint, p0=.5),
                           rank.median(img, selem=footprint))
if __name__ == "__main__":
    # Allow running this module directly as a test script.
    run_module_suite()
| |
import itertools
import heapq
import collections
import operator
from functools import partial
from random import Random
from .compatibility import map, filterfalse, zip, zip_longest, iteritems, filter
from .utils import no_default
# Names exported by this module.
__all__ = ('remove', 'accumulate', 'groupby', 'merge_sorted', 'interleave',
           'unique', 'isiterable', 'isdistinct', 'take', 'drop', 'take_nth',
           'first', 'second', 'nth', 'last', 'get', 'concat', 'concatv',
           'mapcat', 'cons', 'interpose', 'frequencies', 'reduceby', 'iterate',
           'sliding_window', 'partition', 'partition_all', 'count', 'pluck',
           'join', 'tail', 'diff', 'topk', 'peek', 'random_sample')
def remove(predicate, seq):
    """Return the items of *seq* for which ``predicate(item)`` is False.

    >>> def iseven(x):
    ...     return x % 2 == 0
    >>> list(remove(iseven, [1, 2, 3, 4]))
    [1, 3]
    """
    return filterfalse(predicate, seq)
def accumulate(binop, seq, initial=no_default):
    """ Repeatedly apply binary function to a sequence, accumulating results

    >>> from operator import add, mul
    >>> list(accumulate(add, [1, 2, 3, 4, 5]))
    [1, 3, 6, 10, 15]
    >>> list(accumulate(mul, [1, 2, 3, 4, 5]))
    [1, 2, 6, 24, 120]

    Accumulate is similar to ``reduce`` and is good for making functions like
    cumulative sum:

    >>> from functools import partial, reduce
    >>> sum = partial(reduce, add)
    >>> cumsum = partial(accumulate, add)

    Accumulate also takes an optional argument that will be used as the first
    value. This is similar to reduce.

    >>> list(accumulate(add, [1, 2, 3], -1))
    [-1, 0, 2, 5]
    >>> list(accumulate(add, [], 1))
    [1]

    See Also:
        itertools.accumulate : In standard itertools for Python 3.2+
    """
    seq = iter(seq)
    if initial == no_default:
        # PEP 479: a StopIteration escaping from next() inside a generator
        # becomes a RuntimeError on Python 3.7+, so stop explicitly when
        # the input is empty instead of letting it leak.
        try:
            result = next(seq)
        except StopIteration:
            return
    else:
        result = initial
    yield result
    for elem in seq:
        result = binop(result, elem)
        yield result
def groupby(key, seq):
    """ Group a collection by a key function

    >>> names = ['Alice', 'Bob', 'Charlie', 'Dan', 'Edith', 'Frank']
    >>> groupby(len, names)  # doctest: +SKIP
    {3: ['Bob', 'Dan'], 5: ['Alice', 'Edith', 'Frank'], 7: ['Charlie']}

    >>> iseven = lambda x: x % 2 == 0
    >>> groupby(iseven, [1, 2, 3, 4, 5, 6, 7, 8])  # doctest: +SKIP
    {False: [1, 3, 5, 7], True: [2, 4, 6, 8]}

    Non-callable keys imply grouping on a member.

    >>> groupby('gender', [{'name': 'Alice', 'gender': 'F'},
    ...                    {'name': 'Bob', 'gender': 'M'},
    ...                    {'name': 'Charlie', 'gender': 'M'}])  # doctest:+SKIP
    {'F': [{'gender': 'F', 'name': 'Alice'}],
     'M': [{'gender': 'M', 'name': 'Bob'},
           {'gender': 'M', 'name': 'Charlie'}]}

    See Also:
        countby
    """
    if not callable(key):
        # Non-callable keys imply grouping on a member; `getter` is the
        # itemgetter-style helper defined elsewhere in this module.
        key = getter(key)
    # Map each key to the *bound append method* of a fresh list; calling
    # d[k](item) then appends directly, saving a second dict lookup per item.
    d = collections.defaultdict(lambda: [].append)
    for item in seq:
        d[key(item)](item)
    # Recover the underlying lists from the bound methods via __self__.
    rv = {}
    for k, v in iteritems(d):
        rv[k] = v.__self__
    return rv
def merge_sorted(*seqs, **kwargs):
    """Merge a collection of sorted collections into one sorted iterator.

    This works lazily and only keeps one value from each iterable in memory.

    >>> list(merge_sorted([1, 3, 5], [2, 4, 6]))
    [1, 2, 3, 4, 5, 6]

    >>> ''.join(merge_sorted('abc', 'abc', 'abc'))
    'aaabbbccc'

    The "key" function used to sort the input may be passed as a keyword.

    >>> list(merge_sorted([2, 3], [1, 3], key=lambda x: x // 3))
    [2, 1, 3, 3]
    """
    n = len(seqs)
    if n == 0:
        return iter([])
    if n == 1:
        return iter(seqs[0])
    key = kwargs.get('key', None)
    if key is None:
        return _merge_sorted_binary(seqs)
    return _merge_sorted_binary_key(seqs, key)
def _merge_sorted_binary(seqs):
    # Recursively split the sequences in half and merge the two halves
    # pairwise; each level keeps only one lookahead value per side.
    mid = len(seqs) // 2
    L1 = seqs[:mid]
    if len(L1) == 1:
        seq1 = iter(L1[0])
    else:
        seq1 = _merge_sorted_binary(L1)
    L2 = seqs[mid:]
    if len(L2) == 1:
        seq2 = iter(L2[0])
    else:
        seq2 = _merge_sorted_binary(L2)
    try:
        val2 = next(seq2)
    except StopIteration:
        # Right side is empty: just drain the left side.
        for val1 in seq1:
            yield val1
        return
    for val1 in seq1:
        if val2 < val1:
            yield val2
            # Emit right-side values while they sort before val1.
            for val2 in seq2:
                if val2 < val1:
                    yield val2
                else:
                    yield val1
                    break
            else:
                # Inner for-else: seq2 exhausted; stop the outer loop so
                # the remaining seq1 values are drained below.
                break
        else:
            yield val1
    else:
        # Outer for-else: seq1 exhausted; flush val2 and the rest of seq2.
        yield val2
        for val2 in seq2:
            yield val2
        return
    # Reached via the inner break: seq2 is exhausted, drain seq1.
    yield val1
    for val1 in seq1:
        yield val1
def _merge_sorted_binary_key(seqs, key):
    # Same divide-and-merge scheme as _merge_sorted_binary, but each
    # comparison goes through key(); keys are cached in key1/key2 so
    # key() is called once per element.
    mid = len(seqs) // 2
    L1 = seqs[:mid]
    if len(L1) == 1:
        seq1 = iter(L1[0])
    else:
        seq1 = _merge_sorted_binary_key(L1, key)
    L2 = seqs[mid:]
    if len(L2) == 1:
        seq2 = iter(L2[0])
    else:
        seq2 = _merge_sorted_binary_key(L2, key)
    try:
        val2 = next(seq2)
    except StopIteration:
        # Right side is empty: just drain the left side.
        for val1 in seq1:
            yield val1
        return
    key2 = key(val2)
    for val1 in seq1:
        key1 = key(val1)
        if key2 < key1:
            yield val2
            # Emit right-side values while their keys sort before key1.
            for val2 in seq2:
                key2 = key(val2)
                if key2 < key1:
                    yield val2
                else:
                    yield val1
                    break
            else:
                # Inner for-else: seq2 exhausted; leave the outer loop so
                # the remaining seq1 values are drained below.
                break
        else:
            yield val1
    else:
        # Outer for-else: seq1 exhausted; flush val2 and the rest of seq2.
        yield val2
        for val2 in seq2:
            yield val2
        return
    # Reached via the inner break: seq2 is exhausted, drain seq1.
    yield val1
    for val1 in seq1:
        yield val1
def interleave(seqs):
    """ Interleave a sequence of sequences

    >>> list(interleave([[1, 2], [3, 4]]))
    [1, 3, 2, 4]

    >>> ''.join(interleave(('ABC', 'XY')))
    'AXBYC'

    Both the individual sequences and the sequence of sequences may be infinite

    Returns a lazy iterator
    """
    iters = itertools.cycle(map(iter, seqs))
    while True:
        try:
            # Round-robin over the live iterators; cycle() only terminates
            # once `iters` has been rebuilt empty below.
            for itr in iters:
                yield next(itr)
            return
        except StopIteration:
            # `itr` (deliberately leaked out of the for loop) is the
            # iterator that just ran dry.  Rebuild the cycle without it:
            # takewhile keeps everything in the cycle up to (but excluding)
            # its next appearance, i.e. each remaining iterator once.
            predicate = partial(operator.is_not, itr)
            iters = itertools.cycle(itertools.takewhile(predicate, iters))
def unique(seq, key=None):
    """Yield the elements of *seq*, skipping any that were already seen.

    >>> tuple(unique((1, 2, 3)))
    (1, 2, 3)
    >>> tuple(unique((1, 2, 1, 3)))
    (1, 2, 3)

    Uniqueness may be defined by the *key* keyword:

    >>> tuple(unique(['cat', 'mouse', 'dog', 'hen'], key=len))
    ('cat', 'mouse')
    """
    seen = set()
    for item in seq:
        tag = item if key is None else key(item)
        if tag not in seen:
            seen.add(tag)
            yield item
def isiterable(x):
    """Return True if *x* supports iteration.

    >>> isiterable([1, 2, 3])
    True
    >>> isiterable('abc')
    True
    >>> isiterable(5)
    False
    """
    try:
        iter(x)
    except TypeError:
        return False
    return True
def isdistinct(seq):
    """Return True when every value in *seq* occurs exactly once.

    >>> isdistinct([1, 2, 3])
    True
    >>> isdistinct([1, 2, 1])
    False

    >>> isdistinct("Hello")
    False
    >>> isdistinct("World")
    True
    """
    # A concrete collection can be answered with one set construction;
    # a one-shot iterator must be scanned incrementally.
    if iter(seq) is not seq:
        return len(seq) == len(set(seq))
    seen = set()
    for item in seq:
        if item in seen:
            return False
        seen.add(item)
    return True
def take(n, seq):
    """Return an iterator over the first *n* elements of *seq*.

    >>> list(take(2, [10, 20, 30, 40, 50]))
    [10, 20]

    See Also:
        drop
        tail
    """
    return itertools.islice(seq, 0, n)
def tail(n, seq):
    """Return the last *n* elements of *seq*.

    >>> tail(2, [10, 20, 30, 40, 50])
    [40, 50]

    See Also:
        drop
        take
    """
    try:
        # Sliceable inputs keep their own type.
        return seq[-n:]
    except (TypeError, KeyError):
        # Plain iterables: a bounded deque keeps just the last n items.
        last_n = collections.deque(seq, maxlen=n)
        return tuple(last_n)
def drop(n, seq):
    """Return an iterator over *seq* with the first *n* elements skipped.

    >>> list(drop(2, [10, 20, 30, 40, 50]))
    [30, 40, 50]

    See Also:
        take
        tail
    """
    remainder = itertools.islice(seq, n, None)
    return remainder
def take_nth(n, seq):
    """Yield every nth item of *seq*, starting with the first.

    >>> list(take_nth(2, [10, 20, 30, 40, 50]))
    [10, 30, 50]
    """
    # islice accepts None for start (0) and stop (unbounded).
    return itertools.islice(seq, None, None, n)
def first(seq):
    """Return the first element of *seq*.

    >>> first('ABC')
    'A'
    """
    it = iter(seq)
    return next(it)
def second(seq):
    """Return the second element of *seq*.

    >>> second('ABC')
    'B'
    """
    it = iter(seq)
    next(it)  # discard the first element
    return next(it)
def nth(n, seq):
    """Return the nth element of *seq* (zero-indexed).

    >>> nth(1, 'ABC')
    'B'
    """
    # collections.Sequence was an alias removed in Python 3.10; the ABC
    # lives in collections.abc.
    if isinstance(seq, (tuple, list, collections.abc.Sequence)):
        return seq[n]
    else:
        # Generic iterables: skip ahead lazily.
        return next(itertools.islice(seq, n, None))
def last(seq):
    """Return the last element of *seq*.

    >>> last('ABC')
    'C'
    """
    final = tail(1, seq)
    return final[0]
# rest(seq) -> iterator over everything after the first element.
rest = partial(drop, 1)
def _get(ind, seq, default):
try:
return seq[ind]
except (KeyError, IndexError):
return default
def get(ind, seq, default=no_default):
    """ Get element in a sequence or dict
    Provides standard indexing
    >>> get(1, 'ABC') # Same as 'ABC'[1]
    'B'
    Pass a list to get multiple values
    >>> get([1, 2], 'ABC') # ('ABC'[1], 'ABC'[2])
    ('B', 'C')
    Works on any value that supports indexing/getitem
    For example here we see that it works with dictionaries
    >>> phonebook = {'Alice': '555-1234',
    ...              'Bob': '555-5678',
    ...              'Charlie':'555-9999'}
    >>> get('Alice', phonebook)
    '555-1234'
    >>> get(['Alice', 'Bob'], phonebook)
    ('555-1234', '555-5678')
    Provide a default for missing values
    >>> get(['Alice', 'Dennis'], phonebook, None)
    ('555-1234', None)
    See Also:
        pluck
    """
    try:
        # Fast path: plain single-index lookup.
        return seq[ind]
    except TypeError:  # `ind` may be a list
        if isinstance(ind, list):
            if default == no_default:
                # No default supplied: missing keys should raise, so
                # delegate straight to itemgetter.
                if len(ind) > 1:
                    return operator.itemgetter(*ind)(seq)
                elif ind:
                    # itemgetter with one key would not return a tuple.
                    return (seq[ind[0]],)
                else:
                    return ()
            else:
                # Per-item fallback for each requested key/index.
                return tuple(_get(i, seq, default) for i in ind)
        elif default != no_default:
            # `seq` itself rejected the lookup (e.g. unsliceable index);
            # treat as missing since a default was given.
            return default
        else:
            raise
    except (KeyError, IndexError):  # we know `ind` is not a list
        if default == no_default:
            raise
        else:
            return default
def concat(seqs):
    """ Concatenate zero or more iterables, any of which may be infinite.
    An infinite sequence will prevent the rest of the arguments from
    being included.
    ``seqs`` itself may be a generator; it is consumed lazily.
    >>> list(concat([[], [1], [2, 3]]))
    [1, 2, 3]
    See also:
        itertools.chain.from_iterable equivalent
    """
    for seq in seqs:
        for item in seq:
            yield item
def concatv(*seqs):
    """ Variadic version of concat
    >>> list(concatv([], ["a"], ["b", "c"]))
    ['a', 'b', 'c']
    See also:
        itertools.chain
    """
    # Chain the given iterables together, in order, lazily.
    return itertools.chain.from_iterable(seqs)
def mapcat(func, seqs):
    """ Apply func to each sequence in seqs, concatenating results.
    >>> list(mapcat(lambda s: [c.upper() for c in s],
    ...             [["a", "b"], ["c", "d", "e"]]))
    ['A', 'B', 'C', 'D', 'E']
    """
    # Map lazily, then flatten one level.
    return itertools.chain.from_iterable(map(func, seqs))
def cons(el, seq):
    """ Add el to beginning of (possibly infinite) sequence seq.
    >>> list(cons(1, [2, 3]))
    [1, 2, 3]
    """
    # Yield the new head first, then stream the rest unchanged.
    yield el
    for item in seq:
        yield item
def interpose(el, seq):
    """ Introduce element between each pair of elements in seq
    >>> list(interpose("a", [1, 2, 3]))
    [1, 'a', 2, 'a', 3]
    """
    # Pair el with every item, flatten, then drop the leading el.
    interleaved = itertools.chain.from_iterable(
        zip(itertools.repeat(el), seq))
    return itertools.islice(interleaved, 1, None)
def frequencies(seq):
    """ Find number of occurrences of each value in seq
    >>> frequencies(['cat', 'cat', 'ox', 'pig', 'pig', 'cat']) #doctest: +SKIP
    {'cat': 3, 'ox': 1, 'pig': 2}
    See Also:
        countby
        groupby
    """
    # Counter does the tallying; convert to a plain dict as before.
    return dict(collections.Counter(seq))
def reduceby(key, binop, seq, init=no_default):
    """ Perform a simultaneous groupby and reduction
    The computation:
    >>> result = reduceby(key, binop, seq, init)      # doctest: +SKIP
    is equivalent to the following:
    >>> def reduction(group):                           # doctest: +SKIP
    ...     return reduce(binop, group, init)           # doctest: +SKIP
    >>> groups = groupby(key, seq)                    # doctest: +SKIP
    >>> result = valmap(reduction, groups)              # doctest: +SKIP
    But the former does not build the intermediate groups, allowing it to
    operate in much less space.  This makes it suitable for larger datasets
    that do not fit comfortably in memory
    The ``init`` keyword argument is the default initialization of the
    reduction.  This can be either a constant value like ``0`` or a callable
    like ``lambda : 0`` as might be used in ``defaultdict``.
    Simple Examples
    ---------------
    >>> from operator import add, mul
    >>> iseven = lambda x: x % 2 == 0
    >>> data = [1, 2, 3, 4, 5]
    >>> reduceby(iseven, add, data)  # doctest: +SKIP
    {False: 9, True: 6}
    >>> reduceby(iseven, mul, data)  # doctest: +SKIP
    {False: 15, True: 8}
    Complex Example
    ---------------
    >>> projects = [{'name': 'build roads', 'state': 'CA', 'cost': 1000000},
    ...             {'name': 'fight crime', 'state': 'IL', 'cost': 100000},
    ...             {'name': 'help farmers', 'state': 'IL', 'cost': 2000000},
    ...             {'name': 'help farmers', 'state': 'CA', 'cost': 200000}]
    >>> reduceby('state',                        # doctest: +SKIP
    ...          lambda acc, x: acc + x['cost'],
    ...          projects, 0)
    {'CA': 1200000, 'IL': 2100000}
    Example Using ``init``
    ----------------------
    >>> def set_add(s, i):
    ...     s.add(i)
    ...     return s
    >>> reduceby(iseven, set_add, [1, 2, 3, 4, 1, 2, 3], set)  # doctest: +SKIP
    {True:  set([2, 4]),
     False: set([1, 3])}
    """
    is_no_default = init == no_default
    if not is_no_default and not callable(init):
        # Constant init values are wrapped so every group starts from its
        # own (fresh) accumulator via init().
        _init = init
        init = lambda: _init
    if not callable(key):
        # Non-callable keys (index / attribute name) become lookups.
        key = getter(key)
    d = {}
    for item in seq:
        k = key(item)
        if k not in d:
            if is_no_default:
                # No init given: the first item of a group seeds the
                # accumulator and is not passed through binop.
                d[k] = item
                continue
            else:
                d[k] = init()
        d[k] = binop(d[k], item)
    return d
def iterate(func, x):
    """ Repeatedly apply a function func onto an original input
    Yields x, then func(x), then func(func(x)), then func(func(func(x))), etc..
    >>> def inc(x): return x + 1
    >>> counter = iterate(inc, 0)
    >>> next(counter)
    0
    >>> next(counter)
    1
    >>> next(counter)
    2
    >>> double = lambda x: x * 2
    >>> powers_of_two = iterate(double, 1)
    >>> next(powers_of_two)
    1
    >>> next(powers_of_two)
    2
    >>> next(powers_of_two)
    4
    >>> next(powers_of_two)
    8
    """
    current = x
    # Infinite generator: the caller decides when to stop consuming.
    while True:
        yield current
        current = func(current)
def sliding_window(n, seq):
    """ A sequence of overlapping subsequences
    >>> list(sliding_window(2, [1, 2, 3, 4]))
    [(1, 2), (2, 3), (3, 4)]
    This function creates a sliding window suitable for transformations like
    sliding means / smoothing
    >>> mean = lambda seq: float(sum(seq)) / len(seq)
    >>> list(map(mean, sliding_window(2, [1, 2, 3, 4])))
    [1.5, 2.5, 3.5]
    """
    # itertools.tee(seq, n) makes n independent iterators over seq.  The
    # i-th iterator is advanced i steps up front: a maxlen-0 deque consumes
    # the islice without storing anything, and being empty (falsy) the
    # ``or it`` hands the now-advanced iterator to zip.  Zipping the
    # staggered iterators then yields consecutive overlapping n-tuples.
    return zip(*(collections.deque(itertools.islice(it, i), 0) or it
                 for i, it in enumerate(itertools.tee(seq, n))))
# Sentinel marking "no pad value supplied" for partition().
no_pad = '__no__pad__'
def partition(n, seq, pad=no_pad):
    """ Partition sequence into tuples of length n
    >>> list(partition(2, [1, 2, 3, 4]))
    [(1, 2), (3, 4)]
    If the length of ``seq`` is not evenly divisible by ``n``, the final tuple
    is dropped if ``pad`` is not specified, or filled to length ``n`` by pad:
    >>> list(partition(2, [1, 2, 3, 4, 5]))
    [(1, 2), (3, 4)]
    >>> list(partition(2, [1, 2, 3, 4, 5], pad=None))
    [(1, 2), (3, 4), (5, None)]
    See Also:
        partition_all
    """
    # Passing the same iterator n times to zip makes each output tuple
    # consume n consecutive items.
    pieces = [iter(seq)] * n
    if pad is no_pad:
        # No padding requested: zip silently drops the ragged tail.
        return zip(*pieces)
    return zip_longest(*pieces, fillvalue=pad)
def partition_all(n, seq):
    """ Partition all elements of sequence into tuples of length at most n
    The final tuple may be shorter to accommodate extra elements.
    >>> list(partition_all(2, [1, 2, 3, 4]))
    [(1, 2), (3, 4)]
    >>> list(partition_all(2, [1, 2, 3, 4, 5]))
    [(1, 2), (3, 4), (5,)]
    See Also:
        partition
    """
    pieces = [iter(seq)] * n
    chunks = zip_longest(*pieces, fillvalue=no_pad)
    # One-chunk lookahead so the final (possibly padded) tuple can be
    # trimmed before it is emitted.
    current = next(chunks, None)
    if current is None:
        return
    for following in chunks:
        yield current
        current = following
    if current[-1] is no_pad:
        # Final chunk was padded: cut it at the first fill value.
        yield current[:current.index(no_pad)]
    else:
        yield current
def count(seq):
    """ Count the number of items in seq
    Like the builtin ``len`` but works on lazy sequencies.
    Not to be confused with ``itertools.count``
    See also:
        len
    """
    # Use len() when the object supports it; otherwise exhaust the iterable.
    if hasattr(seq, '__len__'):
        return len(seq)
    total = 0
    for _ in seq:
        total += 1
    return total
def pluck(ind, seqs, default=no_default):
    """ plucks an element or several elements from each item in a sequence.
    ``pluck`` maps ``itertoolz.get`` over a sequence and returns one or more
    elements of each item in the sequence.
    This is equivalent to running `map(curried.get(ind), seqs)`
    ``ind`` can be either a single string/index or a list of strings/indices.
    ``seqs`` should be sequence containing sequences or dicts.
    e.g.
    >>> data = [{'id': 1, 'name': 'Cheese'}, {'id': 2, 'name': 'Pies'}]
    >>> list(pluck('name', data))
    ['Cheese', 'Pies']
    >>> list(pluck([0, 1], [[1, 2, 3], [4, 5, 7]]))
    [(1, 2), (4, 5)]
    See Also:
        get
        map
    """
    if default == no_default:
        # Fast path: a single itemgetter-style callable mapped over seqs;
        # missing keys/indices raise.
        get = getter(ind)
        return map(get, seqs)
    elif isinstance(ind, list):
        # Multiple keys with a fallback: build a tuple per item.
        return (tuple(_get(item, seq, default) for item in ind)
                for seq in seqs)
    # Single key with a fallback.
    return (_get(ind, seq, default) for seq in seqs)
def getter(index):
    # Build a callable returning ``x[index]`` -- or, when ``index`` is a
    # list, a tuple of lookups -- mirroring the semantics of ``get``.
    if not isinstance(index, list):
        return operator.itemgetter(index)
    if len(index) == 1:
        # itemgetter with a single key would not return a tuple, so wrap.
        sole = index[0]
        return lambda x: (x[sole],)
    if index:
        return operator.itemgetter(*index)
    # Empty list of keys: always the empty tuple.
    return lambda x: ()
def join(leftkey, leftseq, rightkey, rightseq,
         left_default=no_default, right_default=no_default):
    """ Join two sequences on common attributes
    This is a semi-streaming operation.  The LEFT sequence is fully evaluated
    and placed into memory.  The RIGHT sequence is evaluated lazily and so can
    be arbitrarily large.
    >>> friends = [('Alice', 'Edith'),
    ...            ('Alice', 'Zhao'),
    ...            ('Edith', 'Alice'),
    ...            ('Zhao', 'Alice'),
    ...            ('Zhao', 'Edith')]
    >>> cities = [('Alice', 'NYC'),
    ...           ('Alice', 'Chicago'),
    ...           ('Dan', 'Syndey'),
    ...           ('Edith', 'Paris'),
    ...           ('Edith', 'Berlin'),
    ...           ('Zhao', 'Shanghai')]
    >>> # Vacation opportunities
    >>> # In what cities do people have friends?
    >>> result = join(second, friends,
    ...               first, cities)
    >>> for ((a, b), (c, d)) in sorted(unique(result)):
    ...     print((a, d))
    ('Alice', 'Berlin')
    ('Alice', 'Paris')
    ('Alice', 'Shanghai')
    ('Edith', 'Chicago')
    ('Edith', 'NYC')
    ('Zhao', 'Chicago')
    ('Zhao', 'NYC')
    ('Zhao', 'Berlin')
    ('Zhao', 'Paris')
    Specify outer joins with keyword arguments ``left_default`` and/or
    ``right_default``.  Here is a full outer join in which unmatched elements
    are paired with None.
    >>> identity = lambda x: x
    >>> list(join(identity, [1, 2, 3],
    ...           identity, [2, 3, 4],
    ...           left_default=None, right_default=None))
    [(2, 2), (3, 3), (None, 4), (1, None)]
    Usually the key arguments are callables to be applied to the sequences.  If
    the keys are not obviously callable then it is assumed that indexing was
    intended, e.g. the following is a legal change
    >>> # result = join(second, friends, first, cities)
    >>> result = join(1, friends, 0, cities)  # doctest: +SKIP
    """
    # Non-callable keys (indices / field names) become lookup callables.
    if not callable(leftkey):
        leftkey = getter(leftkey)
    if not callable(rightkey):
        rightkey = getter(rightkey)
    # The LEFT side is materialized into key -> [items]; RIGHT streams.
    d = groupby(leftkey, leftseq)
    seen_keys = set()
    left_default_is_no_default = (left_default == no_default)
    for item in rightseq:
        key = rightkey(item)
        seen_keys.add(key)
        try:
            left_matches = d[key]
            for match in left_matches:
                yield (match, item)
        except KeyError:
            # Right item with no left match: emit only for outer joins.
            if not left_default_is_no_default:
                yield (left_default, item)
    if right_default != no_default:
        # Outer join on the right side: emit left rows that never matched.
        for key, matches in d.items():
            if key not in seen_keys:
                for match in matches:
                    yield (match, right_default)
def diff(*seqs, **kwargs):
    """ Return those items that differ between sequences
    >>> list(diff([1, 2, 3], [1, 2, 10, 100]))
    [(3, 10)]
    Shorter sequences may be padded with a ``default`` value:
    >>> list(diff([1, 2, 3], [1, 2, 10, 100], default=None))
    [(3, 10), (None, 100)]
    A ``key`` function may also be applied to each item to use during
    comparisons:
    >>> list(diff(['apples', 'bananas'], ['Apples', 'Oranges'], key=str.lower))
    [('bananas', 'Oranges')]
    """
    N = len(seqs)
    if N == 1 and isinstance(seqs[0], list):
        # A sole list argument means "diff the sequences inside it".
        seqs = seqs[0]
        N = len(seqs)
    if N < 2:
        raise TypeError('Too few sequences given (min 2 required)')
    default = kwargs.get('default', no_default)
    if default == no_default:
        iters = zip(*seqs)
    else:
        # Pad shorter sequences so their trailing items still compare.
        iters = zip_longest(*seqs, fillvalue=default)
    key = kwargs.get('key', None)
    if key is None:
        for items in iters:
            # Yield positions where not every item equals the first.
            if items.count(items[0]) != N:
                yield items
    else:
        for items in iters:
            # Compare by key(item) but yield the original items.
            vals = tuple(map(key, items))
            if vals.count(vals[0]) != N:
                yield items
def topk(k, seq, key=None):
    """ Find the k largest elements of a sequence
    Operates lazily in ``n*log(k)`` time
    >>> topk(2, [1, 100, 10, 1000])
    (1000, 100)
    Use a key function to change sorted order
    >>> topk(2, ['Alice', 'Bob', 'Charlie', 'Dan'], key=len)
    ('Charlie', 'Alice')
    See also:
        heapq.nlargest
    """
    # Non-callable keys (an index or list of indices) become lookups.
    if key is not None and not callable(key):
        key = getter(key)
    largest = heapq.nlargest(k, seq, key=key)
    return tuple(largest)
def peek(seq):
    """ Retrieve the next element of a sequence
    Returns the first element and an iterable equivalent to the original
    sequence, still having the element retrieved.
    >>> seq = [0, 1, 2, 3, 4]
    >>> first, seq = peek(seq)
    >>> first
    0
    >>> list(seq)
    [0, 1, 2, 3, 4]
    """
    iterator = iter(seq)
    head = next(iterator)
    # Re-attach the consumed element in front of the remaining iterator.
    rebuilt = itertools.chain([head], iterator)
    return head, rebuilt
def random_sample(prob, seq, random_state=None):
    """ Return elements from a sequence with probability of prob
    Returns a lazy iterator of random items from seq.  Each item is
    considered independently and without replacement, so the sample size
    varies from call to call.
    >>> seq = list(range(100))
    >>> list(random_sample(0.1, seq)) # doctest: +SKIP
    [6, 9, 19, 35, 45, 50, 58, 62, 68, 72, 78, 86, 95]
    Providing an integer seed for ``random_state`` will result in
    deterministic sampling.  Given the same seed it will return the same
    sample every time.
    >>> list(random_sample(0.1, seq, random_state=2016))
    [7, 9, 19, 25, 30, 32, 34, 48, 59, 60, 81, 98]
    ``random_state`` can also be any object with a method ``random`` that
    returns floats between 0.0 and 1.0 (exclusive).
    >>> from random import Random
    >>> randobj = Random(2016)
    >>> list(random_sample(0.1, seq, random_state=randobj))
    [7, 9, 19, 25, 30, 32, 34, 48, 59, 60, 81, 98]
    """
    # Anything without a ``random`` method (None, an int seed, ...) seeds a
    # fresh Random instance; a generator-like object is used as-is.
    if not hasattr(random_state, 'random'):
        random_state = Random(random_state)
    roll = random_state.random
    # One draw per item; keep the item when the draw falls below prob.
    return (item for item in seq if roll() < prob)
| |
# -*- coding: utf-8 -*-
from __future__ import print_function, absolute_import
import os
import win32api
import win32con
import win32gui_struct
try:
import winxpgui as win32gui
except ImportError:
import win32gui
import itertools
import glob
from ava.shell import resource_path
from ava.shell.base import ShellBase, STR_EXIT, STR_OPEN_WEBFRONT, STR_STATUS
class MainFrame(object):
    """Hidden top-level Win32 window that owns the tray icon and receives
    its messages via the supplied message map.
    """
    def __init__(self, message_map):
        # message_map maps Win32 message ids to handler callables; it is
        # installed as the window procedure of the registered class.
        self.window_class_name = "EAvatarWnd"
        self.hinst = None
        self.class_atom = self.register_wnd_class(message_map)
        self.hwnd = self.create_window()
    def register_wnd_class(self, message_map):
        """Register the window class and return its class atom."""
        # Register the Window class.
        window_class = win32gui.WNDCLASS()
        self.hinst = window_class.hInstance = win32gui.GetModuleHandle(None)
        window_class.lpszClassName = self.window_class_name
        window_class.style = win32con.CS_VREDRAW | win32con.CS_HREDRAW
        window_class.hCursor = win32gui.LoadCursor(0, win32con.IDC_ARROW)
        window_class.hbrBackground = win32con.COLOR_WINDOW
        window_class.lpfnWndProc = message_map  # could also specify a wndproc.
        return win32gui.RegisterClass(window_class)
    def create_window(self):
        """Create (but do not show) the 310x250 window; return its hwnd."""
        # Create the Window.
        style = win32con.WS_OVERLAPPED | win32con.WS_SYSMENU
        hwnd = win32gui.CreateWindow(self.class_atom,
                                     self.window_class_name,
                                     style,
                                     0,
                                     0,
                                     310,
                                     250,
                                     0,
                                     0,
                                     self.hinst,
                                     None)
        win32gui.UpdateWindow(hwnd)
        return hwnd
    def show(self):
        """Make the (normally hidden) window visible."""
        win32gui.ShowWindow(self.hwnd, win32con.SW_NORMAL)
    def close(self):
        """Post WM_QUIT, ending the message loop."""
        win32gui.PostQuitMessage(0)
# Tray-menu command identifiers (used as wID values in menu items).
# NOTE(review): _QUIT and _FIRST_ID are not referenced in the visible code.
_QUIT = 'QUIT'
_FIRST_ID = 1023
_ID_OPEN_FRAME = 1024
_ID_QUIT = 1026
class StatusIcon(object):
    """System-tray icon for the shell: shows status, balloon notifications
    and the right-click menu.
    """
    def __init__(self, s):
        self.shell = s
        # Cycle through every bundled .ico so switch_icon() can animate.
        self.icons = itertools.cycle(glob.glob(resource_path('res/*.ico')))
        self.hover_text = STR_STATUS
        # (text, icon, action, id) tuples; 1025 is the separator's dummy id.
        self.menu_options = ((STR_OPEN_WEBFRONT, None, None, _ID_OPEN_FRAME),
                             ("-", None, None, 1025),
                             (STR_EXIT, None, None, _ID_QUIT),)
        # BUG FIX: ``self.icons.next()`` is Python 2 only; the builtin
        # next() works on both Python 2 and 3.
        self.icon = next(self.icons)
        self.default_menu_index = 0
        self.notify_id = None
        self.hicon = None
        self.refresh_icon()
    def notify(self, title, message):
        """Show a balloon notification anchored to the tray icon."""
        balloon_id = (self.shell.main_frame.hwnd,
                      0,
                      win32gui.NIF_INFO,
                      win32con.WM_USER+20,
                      self.hicon,
                      self.hover_text,
                      title,
                      200,
                      message)
        win32gui.Shell_NotifyIcon(win32gui.NIM_MODIFY, balloon_id)
    def refresh_icon(self):
        """(Re)load self.icon and add or update the tray notification."""
        # Try and find a custom icon
        hinst = win32gui.GetModuleHandle(None)
        if os.path.isfile(self.icon):
            icon_flags = win32con.LR_LOADFROMFILE | win32con.LR_DEFAULTSIZE
            self.hicon = win32gui.LoadImage(hinst,
                                            self.icon,
                                            win32con.IMAGE_ICON,
                                            0,
                                            0,
                                            icon_flags)
        else:
            print("Can't find icon file - using default.")
            self.hicon = win32gui.LoadIcon(0, win32con.IDI_APPLICATION)
        # First call adds the icon; subsequent calls only modify it.
        if self.notify_id:
            message = win32gui.NIM_MODIFY
        else:
            message = win32gui.NIM_ADD
        self.notify_id = (self.shell.main_frame.hwnd,
                          0,
                          win32gui.NIF_ICON | win32gui.NIF_MESSAGE | win32gui.NIF_TIP,
                          win32con.WM_USER+20,
                          self.hicon,
                          self.hover_text)
        win32gui.Shell_NotifyIcon(message, self.notify_id)
    def show_menu(self):
        """Pop up the context menu at the current cursor position."""
        menu = win32gui.CreatePopupMenu()
        self.create_menu(menu, self.menu_options)
        pos = win32gui.GetCursorPos()
        # See http://msdn.microsoft.com/library/default.asp?url=/library/en-us/winui/menus_0hdi.asp
        win32gui.SetForegroundWindow(self.shell.main_frame.hwnd)
        win32gui.TrackPopupMenu(menu,
                                win32con.TPM_LEFTALIGN,
                                pos[0],
                                pos[1],
                                0,
                                self.shell.main_frame.hwnd,
                                None)
    def create_menu(self, menu, menu_options):
        """Insert menu_options into menu (reversed, since we insert at 0)."""
        for option_text, option_icon, option_action, option_id in menu_options[::-1]:
            if option_icon:
                option_icon = self.prep_menu_icon(option_icon)
            if option_text == "-":
                win32gui.InsertMenu(menu, 0, win32con.MF_BYPOSITION, win32con.MF_SEPARATOR, None)
            else:
                item, extras = win32gui_struct.PackMENUITEMINFO(text=option_text,
                                                                hbmpItem=option_icon,
                                                                wID=option_id)
                win32gui.InsertMenuItem(menu, 0, 1, item)
    def prep_menu_icon(self, icon):
        """Convert an .ico file into a bitmap usable as a menu-item image."""
        # First load the icon.
        ico_x = win32api.GetSystemMetrics(win32con.SM_CXSMICON)
        ico_y = win32api.GetSystemMetrics(win32con.SM_CYSMICON)
        hicon = win32gui.LoadImage(0, icon, win32con.IMAGE_ICON, ico_x, ico_y, win32con.LR_LOADFROMFILE)
        hdcBitmap = win32gui.CreateCompatibleDC(0)
        hdcScreen = win32gui.GetDC(0)
        hbm = win32gui.CreateCompatibleBitmap(hdcScreen, ico_x, ico_y)
        # BUG FIX: the screen DC was never released, leaking a GDI handle
        # on every menu build; it is only needed for the bitmap above.
        win32gui.ReleaseDC(0, hdcScreen)
        hbmOld = win32gui.SelectObject(hdcBitmap, hbm)
        # Fill the background.
        brush = win32gui.GetSysColorBrush(win32con.COLOR_MENU)
        win32gui.FillRect(hdcBitmap, (0, 0, 16, 16), brush)
        # unclear if brush needs to be freed. Best clue I can find is:
        # "GetSysColorBrush returns a cached brush instead of allocating a new
        # one." - implies no DeleteObject
        # draw the icon
        win32gui.DrawIconEx(hdcBitmap, 0, 0, hicon, ico_x, ico_y, 0, 0, win32con.DI_NORMAL)
        win32gui.SelectObject(hdcBitmap, hbmOld)
        win32gui.DeleteDC(hdcBitmap)
        return hbm
    def switch_icon(self):
        """Advance to the next bundled icon and redisplay it."""
        # BUG FIX: Python 2 style ``.next()`` -> builtin next().
        self.icon = next(self.icons)
        self.refresh_icon()
class Shell(ShellBase):
    """Windows system-tray shell: wires the hidden window's message map to
    the status icon and its menu actions.
    """
    def __init__(self):
        super(Shell, self).__init__()
        # Re-create the tray icon when explorer.exe restarts and the
        # taskbar is rebuilt.
        msg_taskbar_restart = win32gui.RegisterWindowMessage("TaskbarCreated")
        self.message_map = {msg_taskbar_restart: self.OnRestart,
                            win32con.WM_DESTROY: self.OnDestroy,
                            win32con.WM_COMMAND: self.OnCommand,
                            win32con.WM_USER+20: self.OnTaskbarNotify,}
        self.main_frame = MainFrame(self.message_map)
        self.status_icon = StatusIcon(self)
    def run(self):
        """Enter the Win32 message loop (blocks until WM_QUIT)."""
        win32gui.PumpMessages()
    def OnCommand(self, hwnd, msg, wparam, lparam):
        """Dispatch a menu selection (WM_COMMAND) to its action."""
        id = win32gui.LOWORD(wparam)
        self.execute_menu_option(id)
    def OnRestart(self, hwnd, msg, wparam, lparam):
        """Taskbar was re-created: re-add our notification icon."""
        self.status_icon.refresh_icon()
    def OnDestroy(self, hwnd, msg, wparam, lparam):
        """Clean up: stop the server, remove the tray icon, quit the loop."""
        self.stop_server()
        nid = (self.main_frame.hwnd, 0)
        win32gui.Shell_NotifyIcon(win32gui.NIM_DELETE, nid)
        win32gui.PostQuitMessage(0)  # Terminate the app.
    def OnTaskbarNotify(self, hwnd, msg, wparam, lparam):
        """Handle mouse events on the tray icon."""
        if lparam == win32con.WM_LBUTTONDBLCLK:
            # BUG FIX: this previously called
            # ``self.status_icon.execute_menu_option(self.FIRST_ID)`` --
            # StatusIcon has no such method and Shell has no FIRST_ID
            # attribute, so double-clicking raised AttributeError.  Run the
            # default menu action (open the web frontend) instead.
            self.execute_menu_option(_ID_OPEN_FRAME)
        elif lparam == win32con.WM_RBUTTONUP:
            self.status_icon.show_menu()
        elif lparam == win32con.WM_LBUTTONUP:
            pass
        return True
    def execute_menu_option(self, id):
        """Run the action bound to a tray-menu command id."""
        if id == _ID_QUIT:
            # Destroying the window triggers OnDestroy -> full shutdown.
            win32gui.DestroyWindow(self.main_frame.hwnd)
        elif id == _ID_OPEN_FRAME:
            self.open_main_ui()
if __name__ == '__main__':
    # Manual smoke test: run the tray shell standalone.
    shell = Shell()
    shell.run()
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Main entry point into the EC2 Credentials service.
This service allows the creation of access/secret credentials used for
the ec2 interop layer of OpenStack.
A user can create as many access/secret pairs, each of which map to a
specific project. This is required because OpenStack supports a user
belonging to multiple projects, whereas the signatures created on ec2-style
requests don't allow specification of which project the user wishes to act
upon.
To complete the cycle, we provide a method that OpenStack services can
use to validate a signature and get a corresponding OpenStack token. This
token allows method calls to other services within the context the
access/secret was created. As an example, Nova requests Keystone to validate
the signature of a request, receives a token, and then makes a request to
Glance to list images needed to perform the requested task.
"""
import uuid
from keystoneclient.contrib.ec2 import utils as ec2_utils
from keystone.common import controller
from keystone.common import dependency
from keystone.common import utils
from keystone import exception
from keystone import token
@dependency.requires('catalog_api', 'credential_api', 'token_provider_api')
class Ec2Controller(controller.V2Controller):
    """V2 controller implementing EC2-style access/secret credentials."""

    def check_signature(self, creds_ref, credentials):
        """Validate the EC2 signature on a set of request credentials.

        :param creds_ref: stored credential dict (provides 'secret')
        :param credentials: dict of signed request parameters; must contain
                            'signature' and 'host'
        :raises exception.Unauthorized: if the signature is missing or wrong
        """
        signer = ec2_utils.Ec2Signer(creds_ref['secret'])
        signature = signer.generate(credentials)
        if utils.auth_str_equal(credentials['signature'], signature):
            return
        # NOTE(vish): Some libraries don't use the port when signing
        #             requests, so try again without port.
        # BUG FIX: the retry condition previously tested
        # ``':' in credentials['signature']``; a base64 signature never
        # contains ':', so the retry effectively never ran and requests
        # signed without the port were rejected with a misleading message.
        # The intent is to retry when the *host* carries a port.
        elif ':' in credentials['host']:
            hostname, _port = credentials['host'].split(':')
            credentials['host'] = hostname
            signature = signer.generate(credentials)
            # BUG FIX: was ``credentials.signature`` -- attribute access on
            # a dict, raising AttributeError whenever this path ran.
            if not utils.auth_str_equal(credentials['signature'], signature):
                raise exception.Unauthorized(message='Invalid EC2 signature.')
        else:
            raise exception.Unauthorized(message='EC2 signature not supplied.')

    def authenticate(self, context, credentials=None, ec2Credentials=None):
        """Validate a signed EC2 request and provide a token.
        Other services (such as Nova) use this **admin** call to determine
        if a request they signed received is from a valid user.
        If it is a valid signature, an OpenStack token that maps
        to the user/tenant is returned to the caller, along with
        all the other details returned from a normal token validation
        call.
        The returned token is useful for making calls to other
        OpenStack services within the context of the request.
        :param context: standard context
        :param credentials: dict of ec2 signature
        :param ec2Credentials: DEPRECATED dict of ec2 signature
        :returns: token: OpenStack token equivalent to access key along
                         with the corresponding service catalog and roles
        """
        # FIXME(ja): validate that a service token was used!
        # NOTE(termie): backwards compat hack
        if not credentials and ec2Credentials:
            credentials = ec2Credentials
        if 'access' not in credentials:
            raise exception.Unauthorized(message='EC2 signature not supplied.')
        creds_ref = self._get_credentials(credentials['access'])
        self.check_signature(creds_ref, credentials)
        # TODO(termie): don't create new tokens every time
        # TODO(termie): this is copied from TokenController.authenticate
        tenant_ref = self.identity_api.get_project(creds_ref['tenant_id'])
        user_ref = self.identity_api.get_user(creds_ref['user_id'])
        metadata_ref = {}
        metadata_ref['roles'] = (
            self.identity_api.get_roles_for_user_and_project(
                user_ref['id'], tenant_ref['id']))
        # Validate that the auth info is valid and nothing is disabled
        token.validate_auth_info(self, user_ref, tenant_ref)
        roles = metadata_ref.get('roles', [])
        if not roles:
            raise exception.Unauthorized(message='User not valid for tenant.')
        roles_ref = [self.identity_api.get_role(role_id)
                     for role_id in roles]
        catalog_ref = self.catalog_api.get_catalog(
            user_ref['id'], tenant_ref['id'], metadata_ref)
        # NOTE(morganfainberg): Make sure the data is in correct form since it
        # might be consumed external to Keystone and this is a v2.0 controller.
        # The token provider doesn't actually expect either v2 or v3 user data.
        user_ref = self.identity_api.v3_to_v2_user(user_ref)
        auth_token_data = dict(user=user_ref,
                               tenant=tenant_ref,
                               metadata=metadata_ref,
                               id='placeholder')
        # (The previous ``token_id = uuid.uuid4().hex`` placeholder was
        # unused -- issue_v2_token generates the real id.)
        (token_id, token_data) = self.token_provider_api.issue_v2_token(
            auth_token_data, roles_ref, catalog_ref)
        return token_data

    def create_credential(self, context, user_id, tenant_id):
        """Create a secret/access pair for use with ec2 style auth.
        Generates a new set of credentials that map the user/tenant
        pair.
        :param context: standard context
        :param user_id: id of user
        :param tenant_id: id of tenant
        :returns: credential: dict of ec2 credential
        """
        if not self._is_admin(context):
            self._assert_identity(context, user_id)
        self._assert_valid_user_id(user_id)
        self._assert_valid_project_id(tenant_id)
        blob = {'access': uuid.uuid4().hex,
                'secret': uuid.uuid4().hex}
        # Credentials are stored keyed by the hash of their access key.
        credential_id = utils.hash_access_key(blob['access'])
        cred_ref = {'user_id': user_id,
                    'project_id': tenant_id,
                    'blob': blob,
                    'id': credential_id,
                    'type': 'ec2'}
        self.credential_api.create_credential(credential_id, cred_ref)
        return {'credential': self._convert_v3_to_ec2_credential(cred_ref)}

    def get_credentials(self, context, user_id):
        """List all credentials for a user.
        :param context: standard context
        :param user_id: id of user
        :returns: credentials: list of ec2 credential dicts
        """
        if not self._is_admin(context):
            self._assert_identity(context, user_id)
        self._assert_valid_user_id(user_id)
        credential_refs = self.credential_api.list_credentials(
            user_id=user_id)
        return {'credentials':
                [self._convert_v3_to_ec2_credential(credential)
                 for credential in credential_refs]}

    def get_credential(self, context, user_id, credential_id):
        """Retrieve a user's access/secret pair by the access key.
        Grab the full access/secret pair for a given access key.
        :param context: standard context
        :param user_id: id of user
        :param credential_id: access key for credentials
        :returns: credential: dict of ec2 credential
        """
        if not self._is_admin(context):
            self._assert_identity(context, user_id)
        self._assert_valid_user_id(user_id)
        return {'credential': self._get_credentials(credential_id)}

    def delete_credential(self, context, user_id, credential_id):
        """Delete a user's access/secret pair.
        Used to revoke a user's access/secret pair
        :param context: standard context
        :param user_id: id of user
        :param credential_id: access key for credentials
        :returns: bool: success
        """
        if not self._is_admin(context):
            self._assert_identity(context, user_id)
            self._assert_owner(user_id, credential_id)
        self._assert_valid_user_id(user_id)
        # Raises Unauthorized if the access key does not exist.
        self._get_credentials(credential_id)
        ec2_credential_id = utils.hash_access_key(credential_id)
        return self.credential_api.delete_credential(ec2_credential_id)

    def _convert_v3_to_ec2_credential(self, credential):
        """Flatten a v3 credential (with its blob) into the EC2 shape."""
        blob = credential['blob']
        return {'user_id': credential.get('user_id'),
                'tenant_id': credential.get('project_id'),
                'access': blob.get('access'),
                'secret': blob.get('secret')}

    def _get_credentials(self, credential_id):
        """Return credentials from an ID.
        :param credential_id: id of credential
        :raises exception.Unauthorized: when credential id is invalid
        :returns: credential: dict of ec2 credential.
        """
        ec2_credential_id = utils.hash_access_key(credential_id)
        creds = self.credential_api.get_credential(ec2_credential_id)
        if not creds:
            raise exception.Unauthorized(message='EC2 access key not found.')
        return self._convert_v3_to_ec2_credential(creds)

    def _assert_identity(self, context, user_id):
        """Check that the provided token belongs to the user.
        :param context: standard context
        :param user_id: id of user
        :raises exception.Forbidden: when token is invalid
        """
        try:
            token_ref = self.token_api.get_token(context['token_id'])
        except exception.TokenNotFound as e:
            raise exception.Unauthorized(e)
        if token_ref['user'].get('id') != user_id:
            raise exception.Forbidden(_('Token belongs to another user'))

    def _is_admin(self, context):
        """Wrap admin assertion error return statement.
        :param context: standard context
        :returns: bool: success
        """
        try:
            self.assert_admin(context)
            return True
        except exception.Forbidden:
            return False

    def _assert_owner(self, user_id, credential_id):
        """Ensure the provided user owns the credential.
        :param user_id: expected credential owner
        :param credential_id: id of credential object
        :raises exception.Forbidden: on failure
        """
        cred_ref = self.credential_api.get_credential(credential_id)
        if user_id != cred_ref['user_id']:
            raise exception.Forbidden(_('Credential belongs to another user'))

    def _assert_valid_user_id(self, user_id):
        """Ensure a valid user id.
        :param user_id: expected credential owner
        :raises exception.UserNotFound: on failure
        """
        user_ref = self.identity_api.get_user(user_id)
        if not user_ref:
            raise exception.UserNotFound(user_id=user_id)

    def _assert_valid_project_id(self, project_id):
        """Ensure a valid project id.
        :param project_id: expected project
        :raises exception.ProjectNotFound: on failure
        """
        project_ref = self.identity_api.get_project(project_id)
        if not project_ref:
            raise exception.ProjectNotFound(project_id=project_id)
| |
from __future__ import unicode_literals
import logging
from django.db import models
from django.conf import settings
from django.utils.translation import ugettext_lazy as _, ugettext
from django.contrib.contenttypes.models import ContentType
from django.core.urlresolvers import reverse
from django.contrib.redirects.models import Redirect
from django.core.validators import validate_slug
from django.core.exceptions import ValidationError
from django.utils.encoding import python_2_unicode_compatible
from django.db.utils import IntegrityError
from app_data import AppDataField
from ella.core.box import Box
from ella.core.cache import (
CachedGenericForeignKey,
CachedForeignKey,
ContentTypeForeignKey,
CategoryForeignKey,
get_cached_object,
)
from ella.core.conf import core_settings
from ella.core.managers import ListingManager, RelatedManager, \
PublishableManager
from ella.core.models.main import Author, Source
from ella.core.signals import content_published, content_unpublished
from ella.utils.timezone import now, localize
log = logging.getLogger('ella.core.models.publishable')
def PublishableBox(publishable, box_type, nodelist, model=None):
    """Box factory for publishables: dispatch to the model's ``box_class``,
    resolving the target model from the content type when not given.
    """
    if not model:
        model = publishable.content_type.model_class()
    resolved = model.box_class
    # A model that kept this factory as its box_class would recurse forever;
    # fall back to the plain Box in that case.
    if resolved == PublishableBox:
        resolved = Box
    return resolved(publishable, box_type, nodelist, model=model)
@python_2_unicode_compatible
class Publishable(models.Model):
    """
    Base class for all objects that can be published in Ella.

    Carries the data a publishable object needs to build its URL (category,
    slug, publication date), the publication window
    (``publish_from``/``publish_to``) and the bookkeeping used to emit
    ``content_published``/``content_unpublished`` signals and to keep
    redirects working when a URL changes.
    """
    # Box factory used when rendering this object in templates; delegates
    # to the concrete model's own box class (see PublishableBox).
    box_class = staticmethod(PublishableBox)

    # Concrete content type of the object; filled in automatically on save.
    content_type = ContentTypeForeignKey(editable=False)
    # The actual subclass instance (cached generic FK via content_type + id).
    target = CachedGenericForeignKey('content_type', 'id')
    category = CategoryForeignKey(verbose_name=_('Category'))

    # Titles
    title = models.CharField(_('Title'), max_length=255)
    slug = models.SlugField(_('Slug'), max_length=255, validators=[validate_slug])

    # Authors and Sources
    authors = models.ManyToManyField(Author, verbose_name=_('Authors'))
    source = CachedForeignKey(Source, blank=True, null=True,
        verbose_name=_('Source'), on_delete=models.SET_NULL)

    # Main Photo
    photo = CachedForeignKey('photos.Photo', blank=True, null=True, on_delete=models.SET_NULL,
        verbose_name=_('Photo'))

    # Description
    description = models.TextField(_('Description'), blank=True)

    # Publish data
    published = models.BooleanField(_('Published'), default=False)
    publish_from = models.DateTimeField(_('Publish from'),
        default=core_settings.PUBLISH_FROM_WHEN_EMPTY, db_index=True)
    publish_to = models.DateTimeField(_("End of visibility"), null=True, blank=True)
    # Static objects live under an id-based URL without a date part.
    static = models.BooleanField(_('static'), default=False)

    # Last updated
    last_updated = models.DateTimeField(_('Last updated'), blank=True)

    # generic JSON field to store app specific data
    app_data = AppDataField(default='{}', editable=False)

    # has the content_published signal been sent for this instance?
    announced = models.BooleanField(help_text='Publish signal sent', default=False, editable=False)

    objects = PublishableManager()

    class Meta:
        app_label = 'core'
        verbose_name = _('Publishable object')
        verbose_name_plural = _('Publishable objects')

    def __str__(self):
        return self.title

    # NOTE(review): overriding __eq__ without also defining __hash__ makes
    # instances unhashable on Python 3 -- confirm intended if this code ever
    # runs there.
    def __eq__(self, other):
        return isinstance(other, Publishable) and self.pk == other.pk

    def get_absolute_url(self, domain=False):
        """
        Get object's URL.

        Static objects resolve to id-based URL patterns, ordinary objects to
        date-based ones; objects in a root category (no tree parent) use the
        ``home_*`` pattern variants. When the category belongs to another
        site, or ``domain`` is True, a fully qualified URL is returned.
        """
        category = self.category
        kwargs = {
            'slug': self.slug,
        }
        if self.static:
            kwargs['id'] = self.pk
            if category.tree_parent_id:
                kwargs['category'] = category.tree_path
                url = reverse('static_detail', kwargs=kwargs)
            else:
                url = reverse('home_static_detail', kwargs=kwargs)
        else:
            # The date components in the URL come from the localized
            # publication timestamp.
            publish_from = localize(self.publish_from)
            kwargs.update({
                'year': publish_from.year,
                'month': publish_from.month,
                'day': publish_from.day,
            })
            if category.tree_parent_id:
                kwargs['category'] = category.tree_path
                url = reverse('object_detail', kwargs=kwargs)
            else:
                url = reverse('home_object_detail', kwargs=kwargs)
        if category.site_id != settings.SITE_ID or domain:
            return 'http://' + category.site.domain + url
        return url

    def get_domain_url(self):
        return self.get_absolute_url(domain=True)

    def validate_get_absolute_url_for_redirection(self):
        """
        Validate get absolute url for usage in Redirect table to prevent
        errors in save method
        """
        # Unsaved static objects have no pk yet, so no URL can be built.
        if self.static and not self.pk:
            return
        # Fields required to build the URL are missing; nothing to validate.
        if not self.category_id or not self.publish_from or not self.slug:
            return
        url_changed = True
        if self.pk:
            try:
                old_self = get_cached_object(self.__class__, pk=self.pk)
            except self.__class__.DoesNotExist:
                pass
            else:
                old_path = old_self.get_absolute_url()
                new_path = self.get_absolute_url()
                url_changed = not bool(old_path == new_path)
        # A changed URL will be written into the Redirect table on save, so
        # it must fit into Redirect.new_path.
        if url_changed and len(self.get_absolute_url()) > Redirect._meta.get_field("new_path").max_length:
            raise ValidationError(_('Object url is too long to use in redirect table, please cut slug'))

    def clean(self):
        self.validate_get_absolute_url_for_redirection()
        if self.static or not self.published:
            return
        # fields are missing, validating uniqueness is pointless
        if not self.category_id or not self.publish_from or not self.slug:
            return
        # used Publishable instead of self.__class__ because we want uniqueness
        # in Publishable qs not in descendant qs only (for example: Article qs)
        qset = Publishable.objects.filter(
            category=self.category,
            published=True,
            publish_from__day=self.publish_from.day,
            publish_from__month=self.publish_from.month,
            publish_from__year=self.publish_from.year,
            slug=self.slug
        )
        if self.pk:
            qset = qset.exclude(pk=self.pk)
        if qset:
            raise ValidationError(_('Another %s already published at this URL.') % Publishable._meta.verbose_name)

    def save(self, **kwargs):
        """
        Save the object while maintaining redirects and publish signals.

        On a URL change a ``Redirect`` row is created/updated (and existing
        redirects re-pointed); on a publication-state change the matching
        ``content_published``/``content_unpublished`` signal is sent after
        the row has been written.
        """
        # update the content_type if it isn't already set
        if not self.content_type_id:
            self.content_type = ContentType.objects.get_for_model(self)

        send_signal = None
        old_self = None
        if self.pk:
            try:
                old_self = self.__class__.objects.get(pk=self.pk)
            except Publishable.DoesNotExist:
                pass

        if old_self:
            old_path = old_self.get_absolute_url()
            new_path = self.get_absolute_url()

            # detect change in URL and not a static one
            if old_path != new_path and new_path and not old_self.static:
                # and create a redirect
                try:
                    redirect = Redirect.objects.get_or_create(old_path=old_path,
                        site=self.category.site)[0]
                    redirect.new_path = new_path
                    redirect.save(force_update=True)
                    # also update all potentially already existing redirects
                    Redirect.objects.filter(new_path=old_path).exclude(
                        pk=redirect.pk).update(new_path=new_path)
                except IntegrityError:
                    log.error(
                        "Can not create redirect from %s to %s" % (old_path, new_path),
                        exc_info=True
                    )

            # detect change in publication status
            if old_self.is_published() != self.is_published():
                if self.is_published():
                    send_signal = content_published
                    self.announced = True
                else:
                    send_signal = content_unpublished
                    self.announced = False

            # @note: We also need to check for `published` flag even if both
            # old and new self `is_published()` method returns false.
            # This method can report false since we might be in time *before*
            # publication should take place but we still need to fire signal
            # that content has been unpublished.
            if old_self.published != self.published and self.published is False:
                send_signal = content_unpublished
                self.announced = False

            # changed publish_from and last_updated was default, change it too
            if old_self.last_updated == old_self.publish_from and self.last_updated == old_self.last_updated:
                self.last_updated = self.publish_from

            #TODO: shift Listing in case publish_(to|from) changes
        # published, send the proper signal
        elif self.is_published():
            send_signal = content_published
            self.announced = True

        if not self.last_updated:
            self.last_updated = self.publish_from

        super(Publishable, self).save(**kwargs)

        # Signals go out only after a successful save.
        if send_signal:
            send_signal.send(sender=self.__class__, publishable=self)

    def delete(self):
        # Remove redirects pointing at the URL being deleted and announce the
        # unpublication if the publish signal had already been sent.
        url = self.get_absolute_url()
        Redirect.objects.filter(new_path=url).delete()
        if self.announced:
            content_unpublished.send(sender=self.__class__, publishable=self)
        return super(Publishable, self).delete()

    def is_published(self):
        "Return True if the Publishable is currently active."
        cur_time = now()
        return self.published and cur_time > self.publish_from and \
            (self.publish_to is None or cur_time < self.publish_to)
def ListingBox(listing, *args, **kwargs):
    """Build a box for the listed publishable via its own ``box_class``."""
    publishable = listing.publishable
    return publishable.box_class(publishable, *args, **kwargs)
@python_2_unicode_compatible
class Listing(models.Model):
    """
    Listing of an ``Publishable`` in a ``Category``. Each and every object that have it's
    own detail page must have a ``Listing`` object that is valid (not expired) and
    places it in the object's main category. Any object can be listed in any
    number of categories (but only once per category). Even if the object is
    listed in other categories besides its main category, its detail page's url
    still belongs to the main one.
    """
    # Box factory delegating rendering to the listed publishable's box.
    box_class = staticmethod(ListingBox)

    publishable = CachedForeignKey(Publishable, verbose_name=_('Publishable'))
    category = CategoryForeignKey(verbose_name=_('Category'), db_index=True)

    # Visibility window of the listing itself (validated in clean() to lie
    # inside the publishable's own publication window).
    publish_from = models.DateTimeField(_("Start of listing"), db_index=True)
    publish_to = models.DateTimeField(_("End of listing"), null=True, blank=True)

    commercial = models.BooleanField(_("Commercial"), default=False,
        help_text=_("Check this if the listing is of a commercial content."))

    objects = ListingManager()

    class Meta:
        app_label = 'core'
        verbose_name = _('Listing')
        verbose_name_plural = _('Listings')

    def __str__(self):
        try:
            return ugettext('%(pub)s listed in %(cat)s') % {'pub': self.publishable, 'cat': self.category}
        # Fix: was a bare ``except:`` which also swallowed SystemExit and
        # KeyboardInterrupt; a broken FK (e.g. deleted publishable) must not
        # crash admin listings, but control-flow exceptions should propagate.
        except Exception:
            return ugettext('Broken listing')

    def clean(self):
        """Validate that the listing window lies inside the publication window."""
        if not self.publishable_id:
            return
        if self.publish_from and self.publish_from < self.publishable.publish_from:
            raise ValidationError(_('A publishable cannot be listed before it\'s published.'))
        if self.publishable.publish_to:
            if not self.publish_to or self.publish_to > self.publishable.publish_to:
                raise ValidationError(_('A publishable cannot be listed longer than it\'s published.'))

    def get_absolute_url(self, domain=False):
        " Proxy to the listed publishable's URL. "
        return self.publishable.get_absolute_url(domain)

    def get_domain_url(self):
        return self.get_absolute_url(domain=True)
@python_2_unicode_compatible
class Related(models.Model):
    """
    Related objects - model for recording related ``Publishable`` objects.

    An example would be two articles sharing a similar topic. When something
    like this happens, a ``Related`` instance connecting the objects should
    be created.
    """
    publishable = CachedForeignKey(Publishable, verbose_name=_('Publishable'))

    # Generic FK to the related object (any content type).
    related_ct = ContentTypeForeignKey(verbose_name=_('Content type'))
    related_id = models.IntegerField(_('Object ID'))
    related = CachedGenericForeignKey('related_ct', 'related_id')

    objects = RelatedManager()

    class Meta:
        app_label = 'core'
        verbose_name = _('Related')
        verbose_name_plural = _('Related')

    def __str__(self):
        # NOTE(review): unlike Listing.__str__ this interpolates ``_`` (likely
        # the lazy translation callable) instead of ugettext -- verify the
        # result is always a plain string under str().
        return _('%(pub)s relates to %(rel)s') % {'pub': self.publishable, 'rel': self.related}
| |
# -*- coding: utf-8 -*-
import hmac
import json
import time
import unittest
from base64 import b64encode, b64decode
from aiorest_ws.auth.token.managers import JSONWebTokenManager
from aiorest_ws.auth.token.exceptions import ParsingTokenException, \
InvalidSignatureException, TokenNotBeforeException, TokenExpiredException
class JSONWebTokenManagerTestCase(unittest.TestCase):
    """Tests for ``JSONWebTokenManager``: encoding, signing and verification."""

    @classmethod
    def setUpClass(cls):
        # One shared manager is enough; the tests never mutate its config.
        cls.json_manager = JSONWebTokenManager()

    def _generate_token_timestamp(self, delta):
        # Helper: current UNIX time in seconds shifted by ``delta`` seconds.
        current_time_in_seconds = int(time.time())
        token_timestamp = current_time_in_seconds + delta
        return token_timestamp

    def test_encode_data(self):
        # _encode_data is expected to be base64(json(data)).
        data = {'key': 'value'}
        utf8_data = json.dumps(data).encode('utf-8')
        encoded_data = b64encode(utf8_data).decode('utf-8')
        self.assertEqual(self.json_manager._encode_data(data), encoded_data)

    def test_decode_data(self):
        # _decode_data is the inverse: json(base64-decoded string).
        encoded_data = 'eyJrZXkiOiAidmFsdWUifQ=='  # {'key': 'value'}
        decoded_data = b64decode(encoded_data).decode('utf-8')
        data = json.loads(decoded_data)
        self.assertEqual(self.json_manager._decode_data(encoded_data), data)

    def test_generate_header(self):
        encoded_header = self.json_manager._encode_data(
            {"typ": "JWT", "alg": self.json_manager.HASH_ALGORITHM}
        )
        self.assertEqual(self.json_manager._generate_header(), encoded_header)

    def test_generate_payload(self):
        data = {'key': 'value'}
        encoded_data = self.json_manager._encode_data(data)
        self.assertEqual(
            self.json_manager._generate_payload(data),
            encoded_data
        )

    def test_generate_signature(self):
        # Recompute the HMAC over "<header>.<payload>" by hand and compare
        # against the manager's signature.
        data = {'key': 'value'}
        header = self.json_manager._generate_header()
        payload = self.json_manager._generate_payload(data)
        key = self.json_manager.SECRET_KEY.encode('utf-8')
        data = "{0}.{1}".format(header, payload).encode('utf-8')
        hash_func = self.json_manager.HASH_FUNCTIONS[
            self.json_manager.HASH_ALGORITHM
        ]
        hmac_obj = hmac.new(key, data, digestmod=hash_func)
        digest = hmac_obj.hexdigest().encode('utf-8')
        generated_signature = b64encode(digest).decode('utf-8')
        self.assertEqual(
            self.json_manager._generate_signature(header, payload),
            generated_signature
        )

    def test_used_reserved_keys_1(self):
        # Every key here is a JWT-reserved claim, so all are reported.
        user_data = {
            'iss': None,
            'sub': None,
            'aud': None,
            'exp': None,
        }
        self.assertEqual(
            self.json_manager._used_reserved_keys(user_data),
            set(user_data.keys())
        )

    def test_used_reserved_keys_2(self):
        # A custom key is not reserved, so the result is empty.
        user_data = {'my_field': 'value'}
        self.assertEqual(
            self.json_manager._used_reserved_keys(user_data),
            set({})
        )

    def test_check_token_with_timestamp_lesser_current_time(self):
        token = {'exp': self._generate_token_timestamp(-5)}
        self.assertTrue(self.json_manager._check_token_timestamp(token, 'exp'))

    def test_check_token_with_timestamp_greater_current_time(self):
        token = {'exp': self._generate_token_timestamp(5)}
        self.assertFalse(
            self.json_manager._check_token_timestamp(token, 'exp')
        )

    def test_check_token_without_defined_key(self):
        # Missing key is treated as "no constraint" -> False.
        token = {}
        self.assertFalse(
            self.json_manager._check_token_timestamp(token, 'exp')
        )

    def test_is_invalid_signature_return_true(self):
        data = {'key': 'value'}
        header = self.json_manager._generate_header()
        payload = self.json_manager._generate_payload(data)
        signature = self.json_manager._generate_signature(header, payload)
        # Corrupt the first character so the signature no longer matches.
        signature = '=' + signature[1:]
        self.assertTrue(
            self.json_manager._is_invalid_signature(header, payload, signature)
        )

    def test_is_invalid_signature_return_false(self):
        data = {'key': 'value'}
        header = self.json_manager._generate_header()
        payload = self.json_manager._generate_payload(data)
        signature = self.json_manager._generate_signature(header, payload)
        self.assertFalse(
            self.json_manager._is_invalid_signature(header, payload, signature)
        )

    def test_is_not_be_accepted_return_true(self):
        token = {'nbf': self._generate_token_timestamp(-5)}
        self.assertTrue(self.json_manager._is_not_be_accepted(token))

    def test_is_not_be_accepted_return_false(self):
        token = {'nbf': self._generate_token_timestamp(5)}
        self.assertFalse(self.json_manager._is_not_be_accepted(token))

    def test_is_not_be_accepted_return_false_when_no_nfb_key(self):
        token = {}
        self.assertFalse(self.json_manager._is_not_be_accepted(token))

    def test_is_expired_token_return_true(self):
        token = {'exp': self._generate_token_timestamp(-5)}
        self.assertTrue(self.json_manager._is_expired_token(token))

    def test_is_expired_token_return_false(self):
        token = {'exp': self._generate_token_timestamp(5)}
        self.assertFalse(self.json_manager._is_expired_token(token))

    def test_is_expired_token_return_false_when_no_exp_key(self):
        token = {}
        self.assertFalse(self.json_manager._is_expired_token(token))

    def test_set_reserved_attribute(self):
        # 'exp'/'nbf' values are relative offsets converted to absolute
        # timestamps; all other reserved claims are stored verbatim.
        token_attributes = {
            'iss': 'test',
            'sub': 'test',
            'aud': 'test',
            'exp': 5,
            'nbf': 5,
            'ait': 'test',
            'jti': 'test',
        }
        token = {}
        for attribute, value in token_attributes.items():
            self.json_manager.set_reserved_attribute(token, attribute, value)
            if attribute in ['exp', 'nbf']:
                # NOTE(review): the expected timestamp is computed after the
                # call -- this can flake if the clock crosses a second
                # boundary in between.
                timestamp = self._generate_token_timestamp(value)
                self.assertEqual(token[attribute], timestamp)
            else:
                self.assertEqual(token[attribute], value)

    def test_set_reserved_attribute_for_none_reserved_attribute(self):
        # Non-reserved attribute names are ignored entirely.
        token = {}
        self.json_manager.set_reserved_attribute(token, 'test_attr', None)
        self.assertEqual(token, {})

    def test_generate(self):
        # Build the expected "<header>.<payload>.<signature>" token manually.
        test_data = {'key': 'value'}
        self.json_manager.set_reserved_attribute(test_data, 'iss', 'test')
        header = self.json_manager._generate_header()
        payload = self.json_manager._generate_payload(test_data)
        signature = self.json_manager._generate_signature(header, payload)
        token = "{0}.{1}.{2}".format(header, payload, signature)

        test_token_data = {'key': 'value'}
        self.assertEqual(
            self.json_manager.generate(test_token_data, iss='test'),
            token
        )

    def test_verify(self):
        data = {'key': 'value'}
        token = self.json_manager.generate(data)
        self.assertEqual(self.json_manager.verify(token), data)

    def test_raised_parsing_token_exception_in_verify(self):
        # A trailing '.' yields four parts instead of three.
        data = {'key': 'value'}
        token = self.json_manager.generate(data) + '.'
        self.assertRaises(
            ParsingTokenException,
            self.json_manager.verify, token
        )

    def test_raised_invalid_signature_exception_in_verify(self):
        data = {'key': 'value'}
        token = self.json_manager.generate(data)
        header, payload, signature = token.split('.')
        signature = '=' + signature[1:]
        token = "{0}.{1}.{2}".format(header, payload, signature)
        self.assertRaises(
            InvalidSignatureException,
            self.json_manager.verify, token
        )

    def test_raised_token_not_before_exception_in_verify(self):
        data = {'key': 'value'}
        token = self.json_manager.generate(data, nbf=-5)
        self.assertRaises(
            TokenNotBeforeException,
            self.json_manager.verify, token
        )

    def test_raised_token_expired_exception_in_verify(self):
        data = {'key': 'value'}
        token = self.json_manager.generate(data, exp=-5)
        self.assertRaises(
            TokenExpiredException,
            self.json_manager.verify, token
        )
| |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The main CANINE model and related functions."""
from typing import Optional, Sequence, Text
import dataclasses
from language.canine import bert_modeling
from language.canine import config_utils
from language.canine import local_attention
from language.canine import tensor_contracts as tc
import tensorflow.compat.v1 as tf
# Support up to 16 hash functions.
_PRIMES = [
31, 43, 59, 61, 73, 97, 103, 113, 137, 149, 157, 173, 181, 193, 211, 223
]
# While this should generally match `sys.maxunicode`, we want to provide this
# as a constant to avoid architecture/system-dependent array overruns.
LARGEST_CODEPOINT = 0x10ffff # Decimal: 1,114,111
@dataclasses.dataclass
class CanineModelConfig(config_utils.Config):
  """Configuration for `CanineModel`."""

  # Character config:
  # How many character positions are merged into one "molecule" position.
  downsampling_rate: int = 4
  # Kernel width of the conv used when upsampling molecules back to chars.
  upsampling_kernel_size: int = 4
  # Number of hash functions for character hash embeddings; must not exceed
  # len(_PRIMES) (16).
  num_hash_functions: int = 8
  num_hash_buckets: int = 16384
  local_transformer_stride: int = 128  # Good TPU/XLA memory alignment.

  # Vanilla BERT config:
  hidden_size: int = 768
  num_hidden_layers: int = 12
  num_attention_heads: int = 12
  intermediate_size: int = 3072
  hidden_act: Text = "gelu"
  hidden_dropout_prob: float = 0.1
  attention_probs_dropout_prob: float = 0.1
  type_vocab_size: int = 16
  max_positions: int = 16384
  initializer_range: float = 0.02
# Contract: both operands must be rank-3 [batch, seq, dim] with dims matching
# those of `a` (no silent broadcasting past the shape check).
@tc.contract(
    tc.Require("a", shape=["batch", "seq", "dim"]),
    tc.Require("b", shape=["batch", "seq", "dim"]),
    tc.NamedDim("batch", "a", 0),
    tc.NamedDim("seq", "a", 1),
    tc.NamedDim("dim", "a", 2))
def _safe_add(a: tf.Tensor, b: tf.Tensor) -> tf.Tensor:
  """Elementwise sum of two [batch, seq, dim] tensors, shape-checked."""
  return a + b
def _is_valid_codepoint(codepoints: tf.Tensor) -> tf.Tensor:
  """Elementwise check that values lie in [0, LARGEST_CODEPOINT]."""
  non_negative = codepoints >= 0
  within_unicode_range = codepoints <= LARGEST_CODEPOINT
  return tf.logical_and(non_negative, within_unicode_range)
class CanineModel:
"""Main model for CANINE. See constructor for details."""
  def __init__(self,
               config: CanineModelConfig,
               atom_input_ids: tf.Tensor,
               atom_input_mask: tf.Tensor,
               atom_segment_ids: tf.Tensor,
               is_training: bool,
               final_seq_char_positions: Optional[tf.Tensor] = None):
    """Creates a `CanineModel`.

    This interface mirrors the `BertModel` class from the public BERT code, but
    abstracts away what type of input is passed (tokens, characters, etc.).

    A config file can be loaded like so:
    ```
    config = CanineModelConfig.from_json_file("/path/to.json")
    ```

    Args:
      config: Instance of `CanineModelConfig`.
      atom_input_ids: <int32>[batch_size, atom_seq_len] Vocabulary ids of the
        inputs.
      atom_input_mask: <int32>[batch_size, atom_seq_len] Indicates which input
        ids are non-padding.
      atom_segment_ids: <int32>[batch_size, atom_seq_len] Indicates the type of
        each feature. For a traditional BERT model with two segments, this would
        contain segment ids (0 and 1).
      is_training: Are we training? If not, disable dropout.
      final_seq_char_positions: Optional indices within each character sequence
        to be predicted by MLM. If specified, causes `get_sequence_output` to
        return only those positions, and, more importantly, when using a
        transformer for the `final_char_encoding`, only those sequence positions
        will be used as query positions for the transformer, giving a
        substantial boost in pre-training speed.
        <int32>[batch_size, max_predictions_per_seq]
    """
    self.config: CanineModelConfig = config
    self._is_training: bool = is_training

    # The final character encoder emits either all char positions or only the
    # requested MLM prediction positions; record which length applies.
    if final_seq_char_positions is not None:
      batch_size, predictions_len = bert_modeling.get_shape_list(
          final_seq_char_positions)
      self._final_char_seq_length: tf.Tensor = predictions_len
    else:
      batch_size, char_seq_length = bert_modeling.get_shape_list(atom_input_ids)
      self._final_char_seq_length: tf.Tensor = char_seq_length
    self._batch_size = batch_size

    config.validate()

    # NOTE: this mutates the caller-supplied config object to zero out
    # dropout at inference time.
    if not is_training:
      config.hidden_dropout_prob = 0.0
      config.attention_probs_dropout_prob = 0.0

    batch_size, char_seq_length = bert_modeling.get_shape_list(atom_input_ids)
    del batch_size  # Unused.

    # `molecule_seq_length`: scalar int.
    molecule_seq_length = char_seq_length // config.downsampling_rate

    # Create attention masks...
    # `char_attention_mask`: <float>[batch, char_seq, char_seq]
    char_attention_mask = bert_modeling.create_attention_mask_from_input_mask(
        atom_input_ids, atom_input_mask)
    # ...for attending from deep BERT molecule stack back to initial characters:
    # `molecule_to_char_attention_mask`: <float>[batch, molecule_seq, char_seq]
    molecule_to_char_attention_mask = self.downsample_attention_mask(
        char_attention_mask, config.downsampling_rate, dim=-2)
    # ...for self-attention within deep BERT molecule stack:
    # `molecule_attention_mask`: <float>[batch, molecule_seq, molecule_seq]
    molecule_attention_mask = self.downsample_attention_mask(
        molecule_to_char_attention_mask, config.downsampling_rate, dim=-1)

    # The following lines have dimensions: <float>[batch, char_seq, char_dim].
    input_char_embedddings = self._embed_chars(
        codepoints=atom_input_ids, segment_ids=atom_segment_ids)

    # Contextualize character embeddings.
    input_char_encoding = self._encode_initial_chars(input_char_embedddings,
                                                     char_attention_mask)

    # Downsample chars to molecules.
    # The following lines have dimensions: [batch, molecule_seq, molecule_dim].
    # In this transformation, we change the dimensionality from `char_dim` to
    # `molecule_dim`, but do *NOT* add a resnet connection. Instead, we rely on
    # the resnet connections (a) from the final char transformer stack back into
    # the original char transformer stack and (b) the resnet connections from
    # the final char transformer stack back into the deep BERT stack of
    # molecules.
    #
    # Empirically, it is critical to use a powerful enough transformation here:
    # mean pooling causes training to diverge with huge gradient norms in this
    # region of the model; using a convolution here resolves this issue. From
    # this, it seems that molecules and characters require a very different
    # feature space; intuitively, this makes sense.
    with tf.variable_scope("initial_char_encoder"):
      init_molecule_encoding = self._chars_to_molecules(
          input_char_encoding,
          expected_molecule_seq_length=molecule_seq_length)

    # Deep BERT stack over molecules; all layer outputs are kept.
    bert_layers: Sequence[tf.Tensor] = self._bert_stack(
        molecules_in=init_molecule_encoding,
        attention_mask=molecule_attention_mask)
    bert_molecule_encoding = bert_layers[-1]

    self.final_char_encoding = self._encode_final_chars(
        input_char_encoding,
        char_attention_mask=char_attention_mask,
        full_molecules=bert_molecule_encoding,
        final_seq_char_positions=final_seq_char_positions)

    # For pooling (sequence-level tasks), we use only the output of the deep
    # BERT stack since we would end up with reduced dimensionality at each
    # character position.
    self.pooled = self._pool(bert_molecule_encoding)

    self.molecule_seq_length = molecule_seq_length
    self.downsampled_layers = bert_layers
  @tc.contract(
      tc.Require(
          "codepoints", shape=["batch", "char_seq"], dtype=tf.int32),
      tc.RequireTrue(_is_valid_codepoint, tensors=["codepoints"],
                     error="Expected `codepoints` to contain valid Unicode "
                     "codepoints."),
      tc.Ensure(
          tc.RESULT,
          dtype=tf.float32,
          shape=["batch", "char_seq", "char_dim"]),
      tc.NamedDim("batch", "codepoints", 0),
      tc.NamedDim("char_seq", "codepoints", 1),
      tc.NamedDim("char_dim", value_of="self.config.hidden_size"))
  def _embed_chars(self, codepoints: tf.Tensor,
                   segment_ids: tf.Tensor) -> tf.Tensor:
    """Lookup character embeddings given integer Unicode codepoints.

    Uses hash-bucket embeddings (no explicit character vocabulary), then adds
    BERT-style segment and position embeddings plus dropout.
    """
    with tf.variable_scope("char_embeddings"):
      embed_seq = self._embed_hash_buckets(
          ids=codepoints,
          embedding_size=self.config.hidden_size,
          num_hashes=self.config.num_hash_functions,
          num_buckets=self.config.num_hash_buckets,
          initializer_range=self.config.initializer_range)
      # Dropout only during training.
      dropout_prob = (
          self.config.hidden_dropout_prob if self._is_training else 0.0)
      return bert_modeling.embedding_postprocessor(
          input_tensor=embed_seq,
          use_token_type=True,
          token_type_ids=segment_ids,
          token_type_vocab_size=self.config.type_vocab_size,
          token_type_embedding_name="segment_embeddings",
          use_position_embeddings=True,
          position_embedding_name="char_position_embeddings",
          initializer_range=self.config.initializer_range,
          max_position_embeddings=self.config.max_positions,
          dropout_prob=dropout_prob)
  @tc.contract(
      tc.Require("char_embed_seq", shape=["batch", "char_seq", "char_dim"]),
      tc.Ensure(tc.RESULT, shape=["batch", "char_seq", "char_dim"]),
      tc.NamedDim("batch", "char_embed_seq", 0),
      tc.NamedDim("char_seq", "char_embed_seq", 1),
      tc.NamedDim("char_dim", "char_embed_seq", 2))
  def _encode_initial_chars(self, char_embed_seq: tf.Tensor,
                            char_attention_mask: tf.Tensor) -> tf.Tensor:
    """Encode characters using shallow/low dim transformer.

    A single local-attention transformer layer: attention is chunked with
    width and stride `local_transformer_stride` rather than full seq-to-seq.
    """
    with tf.variable_scope("initial_char_encoder"):
      return local_attention.local_transformer_model(
          input_tensor=char_embed_seq,
          attention_mask=char_attention_mask,
          hidden_size=self.config.hidden_size,
          num_hidden_layers=1,
          num_attention_heads=self.config.num_attention_heads,
          intermediate_size=self.config.intermediate_size,
          intermediate_act_fn=bert_modeling.get_activation(
              self.config.hidden_act),
          hidden_dropout_prob=self.config.hidden_dropout_prob,
          attention_probs_dropout_prob=(
              self.config.attention_probs_dropout_prob),
          initializer_range=self.config.initializer_range,
          always_attend_to_first_position=False,
          first_position_attends_to_all=False,
          attend_from_chunk_width=self.config.local_transformer_stride,
          attend_from_chunk_stride=self.config.local_transformer_stride,
          attend_to_chunk_width=self.config.local_transformer_stride,
          attend_to_chunk_stride=self.config.local_transformer_stride)
  @tc.contract(
      tc.Require("char_encoding", shape=["batch", "char_seq", "char_dim"]),
      tc.Ensure(
          tc.RESULT,
          shape=["batch", "molecule_seq", "molecule_dim"]),
      tc.NamedDim("batch", "char_encoding", 0),
      tc.NamedDim("char_seq", "char_encoding", 1),
      tc.NamedDim("char_dim", "char_encoding", 2),
      tc.NamedDim("molecule_seq", value_of="expected_molecule_seq_length"),
      tc.NamedDim("molecule_dim", value_of="self.config.hidden_size"))
  def _chars_to_molecules(
      self,
      char_encoding: tf.Tensor,
      expected_molecule_seq_length: tf.Tensor) -> tf.Tensor:
    """Convert char seq to initial molecule seq.

    A strided conv pools `downsampling_rate` characters per molecule; the
    char [CLS] encoding is re-inserted as molecule position 0 and the last
    conv output is truncated to keep the sequence length unchanged.
    """
    del expected_molecule_seq_length  # Used by contract only.
    with tf.variable_scope("initial_char_encoder/chars_to_molecules"):
      downsampled = tf.layers.conv1d(
          inputs=char_encoding,
          filters=self.config.hidden_size,
          kernel_size=self.config.downsampling_rate,
          strides=self.config.downsampling_rate,
          padding="valid",
          activation=bert_modeling.get_activation(self.config.hidden_act),
          name="conv")

      # `cls_encoding`: [batch, 1, hidden_size]
      cls_encoding = char_encoding[:, 0:1, :]

      # Truncate the last molecule in order to reserve a position for [CLS].
      # Often, the last position is never used (unless we completely fill the
      # text buffer). This is important in order to maintain alignment on TPUs
      # (i.e. a multiple of 128).
      downsampled_truncated = downsampled[:, 0:-1, :]

      # We also keep [CLS] as a separate sequence position since we always
      # want to reserve a position (and the model capacity that goes along
      # with that) in the deep BERT stack.
      # `result`: [batch, molecule_seq, molecule_dim]
      result = tf.concat([cls_encoding, downsampled_truncated], axis=1)

      return bert_modeling.layer_norm(result)
  @tc.contract(
      tc.Require("molecules_in", shape=["batch", "seq", "dim"]),
      tc.Require("attention_mask", shape=["batch", "seq", "seq"]),
      tc.Ensure(tc.RESULT, tuple_index=0, shape=["batch", "seq", "dim"]),
      tc.NamedDim("batch", "molecules_in", 0),
      tc.NamedDim("seq", "molecules_in", 1),
      tc.NamedDim("dim", "molecules_in", 2))
  def _bert_stack(self, molecules_in: tf.Tensor,
                  attention_mask: tf.Tensor) -> Sequence[tf.Tensor]:
    """Encode the molecules using a deep transformer stack.

    Returns the output of every layer (`do_return_all_layers=True`); callers
    typically take the last element as the final encoding.
    """
    with tf.variable_scope("bert"):
      return bert_modeling.transformer_model(
          input_tensor=molecules_in,
          attention_mask=attention_mask,
          hidden_size=self.config.hidden_size,
          num_hidden_layers=self.config.num_hidden_layers,
          num_attention_heads=self.config.num_attention_heads,
          intermediate_size=self.config.intermediate_size,
          intermediate_act_fn=bert_modeling.get_activation(
              self.config.hidden_act),
          hidden_dropout_prob=self.config.hidden_dropout_prob,
          attention_probs_dropout_prob=self.config.attention_probs_dropout_prob,
          initializer_range=self.config.initializer_range,
          do_return_all_layers=True)
  @tc.contract(
      tc.Require("molecules", shape=["batch", "molecule_seq", "molecule_dim"]),
      tc.Require("char_seq_length", dtype=tf.int32, rank=0),
      tc.Ensure(tc.RESULT, shape=["batch", "char_seq", "molecule_dim"]),
      tc.NamedDim("batch", "molecules", 0),
      tc.NamedDim("molecule_seq", "molecules", 1),
      tc.NamedDim("molecule_dim", "molecules", 2),
      tc.NamedDim("char_seq", value_of="char_seq_length"))
  def _repeat_molecules(self, molecules: tf.Tensor,
                        char_seq_length: tf.Tensor) -> tf.Tensor:
    """Repeats molecules to make them the same length as the char sequence.

    The [CLS] molecule at position 0 is dropped before repeating (it has no
    corresponding character span); the last molecule is repeated extra times
    to cover both the remainder and the slot lost to that truncation.
    """
    rate = self.config.downsampling_rate

    molecules_without_extra_cls = molecules[:, 1:, :]
    # `repeated`: [batch_size, almost_char_seq_len, molecule_hidden_size]
    repeated = tf.repeat(molecules_without_extra_cls, repeats=rate, axis=-2)

    # So far, we've repeated the elements sufficient for any `char_seq_length`
    # that's a multiple of `downsampling_rate`. Now we account for the last
    # n elements (n < `downsampling_rate`), i.e. the remainder of floor
    # division. We do this by repeating the last molecule a few extra times.
    last_molecule = molecules[:, -1:, :]
    remainder_length = tf.floormod(char_seq_length, rate)
    remainder_repeated = tf.repeat(
        last_molecule,
        # +1 molecule to compensate for truncation.
        repeats=remainder_length + rate,
        axis=-2)

    # `repeated`: [batch_size, char_seq_len, molecule_hidden_size]
    return tf.concat([repeated, remainder_repeated], axis=-2)
  @tc.contract(
      tc.Require(
          "final_char_input_seq", shape=["batch", "char_seq", "init_char_dim"]),
      tc.Require(
          "char_attention_mask",
          dtype=tf.float32,
          shape=["batch", "char_seq", "char_seq"]),
      tc.Require(
          "full_molecules", shape=["batch", "molecule_seq", "molecule_dim"]),
      tc.Ensure(tc.RESULT, shape=["batch", "final_char_seq", "final_char_dim"]),
      tc.NamedDim("batch", "final_char_input_seq", 0),
      tc.NamedDim("char_seq", "final_char_input_seq", 1),
      tc.NamedDim("final_char_seq", value_of="self._final_char_seq_length"),
      tc.NamedDim("init_char_dim", "final_char_input_seq", 2),
      tc.NamedDim("final_char_dim", value_of="self.config.hidden_size"),
      tc.NamedDim("molecule_seq", "full_molecules", 1),
      tc.NamedDim("molecule_dim", "full_molecules", 2))
  def _encode_final_chars(
      self, final_char_input_seq: tf.Tensor, char_attention_mask: tf.Tensor,
      full_molecules: tf.Tensor,
      final_seq_char_positions: Optional[tf.Tensor]) -> tf.Tensor:
    """Run a shallow/low-dim transformer to get a final character encoding."""

    # `final_char_input_seq` is a projected version of the deep molecule BERT
    # stack with slice-wise resnet connections.
    with tf.variable_scope("final_char_encoder"):
      # `upsampled`: [batch_size, char_seq_len, hidden_size]
      upsampled = self._upsample_molecules_to_chars(final_char_input_seq,
                                                    full_molecules)
      if final_seq_char_positions is not None:
        # Limit transformer query seq and attention mask to these character
        # positions to greatly reduce the compute cost. Typically, this is just
        # done for the MLM training task.
        # `final_seq_char_query`: [batch, final_seq_char_len, char_dim]
        final_seq_char_query = tf.gather(
            upsampled, final_seq_char_positions, batch_dims=1)
        char_attention_mask = tf.gather(
            char_attention_mask, final_seq_char_positions, batch_dims=1)
      else:
        final_seq_char_query = upsampled
      # Single transformer layer: queries are the (possibly gathered)
      # positions, keys/values are the full upsampled sequence.
      return bert_modeling.transformer_model(
          input_tensor=final_seq_char_query,
          input_kv_tensor=upsampled,
          attention_mask=char_attention_mask,
          hidden_size=self.config.hidden_size,
          num_hidden_layers=1,
          num_attention_heads=self.config.num_attention_heads,
          intermediate_size=self.config.intermediate_size,
          intermediate_act_fn=bert_modeling.get_activation(
              self.config.hidden_act),
          hidden_dropout_prob=self.config.hidden_dropout_prob,
          attention_probs_dropout_prob=(
              self.config.attention_probs_dropout_prob),
          initializer_range=self.config.initializer_range)
  @tc.contract(
      tc.Require(
          "final_char_input_seq", shape=["batch", "char_seq", "init_char_dim"]),
      tc.Require(
          "full_molecules", shape=["batch", "molecule_seq", "molecule_dim"]),
      tc.Ensure(tc.RESULT, shape=["batch", "char_seq", "final_char_dim"]),
      tc.NamedDim("batch", "final_char_input_seq", 0),
      tc.NamedDim("char_seq", "final_char_input_seq", 1),
      tc.NamedDim("init_char_dim", "final_char_input_seq", 2),
      tc.NamedDim("final_char_dim", value_of="self.config.hidden_size"),
      tc.NamedDim("molecule_seq", "full_molecules", 1),
      tc.NamedDim("molecule_dim", "full_molecules", 2))
  def _upsample_molecules_to_chars(self, final_char_input_seq: tf.Tensor,
                                   full_molecules: tf.Tensor) -> tf.Tensor:
    """Run a shallow/low-dim transformer to get a final character encoding.

    Each molecule is repeated back to character length, concatenated with the
    initial char encoding, and projected with a stride-1 conv + layer norm.
    """
    _, char_seq_length, _ = bert_modeling.get_shape_list(final_char_input_seq)

    # `repeated_molecules`: [batch_size, char_seq_len, molecule_hidden_size]
    repeated_molecules = self._repeat_molecules(
        full_molecules, char_seq_length=char_seq_length)
    # `concat`:
    # [batch_size, char_seq_len, molecule_hidden_size+char_hidden_final]
    concat = tf.concat([final_char_input_seq, repeated_molecules], axis=-1)

    # `upsampled`: [batch_size, char_seq_len, hidden_size]
    upsampled = tf.layers.conv1d(
        inputs=concat,
        filters=self.config.hidden_size,
        kernel_size=self.config.upsampling_kernel_size,
        strides=1,
        padding="same",
        activation=bert_modeling.get_activation(self.config.hidden_act),
        name="conv")
    upsampled = bert_modeling.layer_norm(upsampled)
    if self._is_training:
      upsampled = bert_modeling.dropout(upsampled,
                                        self.config.hidden_dropout_prob)
    return upsampled
@tc.contract(
    tc.Require("seq_to_pool",
               shape=["batch", tc.Unchecked("seq"), "hidden_size"]),
    tc.Ensure(tc.RESULT, shape=["batch", "hidden_size"]),
    tc.NamedDim("batch", "seq_to_pool", 0),
    tc.NamedDim("hidden_size", "seq_to_pool", 2))
def _pool(self, seq_to_pool: tf.Tensor) -> tf.Tensor:
    """Grab the [CLS] molecule for use in classification tasks."""
    # Collapse [batch, seq, hidden] to [batch, hidden] by taking the first
    # sequence position (assumed pre-trained as a summary, as in vanilla
    # BERT), then apply the standard dense+tanh pooler projection.
    with tf.variable_scope("pooler"):
        cls_vector = tf.squeeze(seq_to_pool[:, 0:1, :], axis=1)
        projection = tf.layers.dense(
            cls_vector,
            self.config.hidden_size,
            activation=tf.tanh,
            kernel_initializer=bert_modeling.create_initializer(
                self.config.initializer_range))
        return projection
@tc.contract(
    tc.Require("char_attention_mask", dtype=tf.float32,
               shape=["batch", tc.Unchecked("seq"), tc.Unchecked("seq")]),
    tc.Ensure(tc.RESULT, dtype=tf.float32,
              shape=["batch", tc.Unchecked("seq"), tc.Unchecked("seq")]),
    tc.NamedDim("batch", "char_attention_mask", 0))
def downsample_attention_mask(self,
                              char_attention_mask: tf.Tensor,
                              downsampling_rate: int,
                              dim: int = -1) -> tf.Tensor:
    """Downsample one dimension of an attention mask.

    Max-pools the mask along `dim` with window and stride equal to
    `downsampling_rate`, so a pooled position is attendable (1.0) iff any
    character position it covers is attendable.

    Args:
      char_attention_mask: <float>[batch, seq, seq] attention mask.
      downsampling_rate: Pooling window size and stride along `dim`.
      dim: Which of the two sequence dimensions to downsample
        (-1 means the last dimension; otherwise 1 or 2).

    Returns:
      <float>[batch, seq', seq''] mask where dimension `dim` has been
      shrunk by a factor of `downsampling_rate`.
    """
    perm = None
    if dim != -1:
        # Build a permutation that moves the requested dimension to the
        # end, because the pooling below always acts on the last axis.
        ndims = 3
        perm = list(range(ndims))
        # Swap desired dimension with last dimension at beginning/end of
        # function.
        perm[dim], perm[-1] = perm[-1], perm[dim]
    if perm is not None:
        char_attention_mask = tf.transpose(char_attention_mask, perm)
    # `poolable_char_mask`: <float>[batch, char_seq, char_seq, 1]
    poolable_char_mask = tf.expand_dims(char_attention_mask, axis=-1)
    # `poolable_char_mask`: <float>[batch, from_seq, to_seq, 1]
    # With a [1, rate] window/stride, only the `to_seq` (W) axis is pooled.
    pooled_molecule_mask = tf.nn.max_pool2d(
        input=poolable_char_mask,
        ksize=[1, downsampling_rate],
        strides=[1, downsampling_rate],
        padding="VALID")
    # `molecule_attention_mask`: <float>[batch, from_seq, to_seq]
    molecule_attention_mask = tf.squeeze(pooled_molecule_mask, axis=-1)
    if perm is not None:
        # The swap permutation is its own inverse, so reusing `perm`
        # restores the original dimension order.
        molecule_attention_mask = tf.transpose(molecule_attention_mask, perm)
    return molecule_attention_mask
def _hash_bucket_tensors(self, ids: tf.Tensor, num_hashes: int,
                         num_buckets: int) -> Sequence[tf.Tensor]:
    """Converts ids to hash bucket ids via multiple hashing.

    Args:
      ids: The codepoints or other IDs to be hashed.
      num_hashes: The number of hash functions to use.
      num_buckets: The number of hash buckets (i.e. embeddings in each table).

    Returns:
      A sequence of tensors, each of which is the hash bucket IDs from one
      hash function.
    """
    if num_hashes > len(_PRIMES):
        raise ValueError(f"`num_hashes` must be <= {len(_PRIMES)}")
    # One multiplicative hash per prime; `ids + 1` keeps ID 0 from landing
    # in bucket 0 under every hash function.
    return [((ids + 1) * prime) % num_buckets
            for prime in _PRIMES[:num_hashes]]
@tc.contract(
    tc.Require("ids", dtype=tf.int32, shape=["batch", "seq"]),
    tc.Ensure(tc.RESULT, dtype=tf.float32, shape=["batch", "seq", "dim"]),
    tc.NamedDim("batch", "ids", 0),
    tc.NamedDim("seq", "ids", 1),
    tc.NamedDim("dim", value_of="embedding_size"))
def _embed_hash_buckets(self, ids: tf.Tensor, embedding_size: int,
                        num_hashes: int, num_buckets: int,
                        initializer_range: int) -> tf.Tensor:
    """Converts IDs (e.g. codepoints) into embeddings via multiple hashing.

    Args:
      ids: The codepoints or other IDs to be hashed.
      embedding_size: The dimensionality of the returned embeddings.
      num_hashes: The number of hash functions to use.
      num_buckets: The number of hash buckets (i.e. embeddings in each table).
      initializer_range: Maximum absolute value for initial weights.

    Returns:
      The codepoint embeddings.

    Raises:
      ValueError: If `embedding_size` is not divisible by `num_hashes`.
    """
    # Each hash function owns an equal-width slice of the final embedding.
    if embedding_size % num_hashes != 0:
        raise ValueError(f"Expected `embedding_size` ({embedding_size}) % "
                         f"`num_hashes` ({num_hashes}) == 0")
    shard_embedding_size = embedding_size // num_hashes
    hash_bucket_tensors = self._hash_bucket_tensors(
        ids, num_hashes=num_hashes, num_buckets=num_buckets)
    embedding_shards = []
    for i, hash_bucket_ids in enumerate(hash_bucket_tensors):
        # One separate embedding table per hash function; the per-table
        # lookups are concatenated along the feature axis below.
        embedding_table = tf.get_variable(
            name=f"embeddings/HashBucketCodepointEmbedder_{i}",
            shape=[num_buckets, shard_embedding_size],
            initializer=bert_modeling.create_initializer(initializer_range))
        shard_embeddings = tf.nn.embedding_lookup(embedding_table,
                                                  hash_bucket_ids)
        embedding_shards.append(shard_embeddings)
    return tf.concat(embedding_shards, axis=-1)
@tc.contract(
    tc.Ensure(tc.RESULT, tuple_index=-1, dtype=tf.float32,
              shape=["batch", "downsampled_seq", "hidden_size"]),
    tc.NamedDim("batch", value_of="self._batch_size"),
    tc.NamedDim("downsampled_seq", value_of="self.molecule_seq_length"),
    tc.NamedDim("hidden_size", value_of="self.config.hidden_size"))
def get_downsampled_layers(self) -> Sequence[tf.Tensor]:
    """Gets the per-layer molecule (downsampled) encodings.

    Returns one tensor per transformer layer, each of shape
    [batch, molecule_seq, hidden_size] (per the contract above).
    """
    # Sanity check: one stored activation per configured hidden layer.
    assert len(self.downsampled_layers) == self.config.num_hidden_layers
    return self.downsampled_layers
@tc.contract(
    tc.Ensure(tc.RESULT, dtype=tf.float32,
              shape=["batch", "char_seq", "hidden_size"]),
    tc.NamedDim("batch", value_of="self._batch_size"),
    tc.NamedDim("char_seq", value_of="self._final_char_seq_length"),
    tc.NamedDim("hidden_size",
                value_of="self.config.hidden_size"))
def get_sequence_output(self) -> tf.Tensor:
    """Gets a sequence representation, one position per character."""
    # Final per-character encoding produced by the character encoder stage.
    return self.final_char_encoding
@tc.contract(
    tc.Ensure(tc.RESULT, dtype=tf.float32, shape=["batch", "hidden_size"]),
    tc.NamedDim("batch", value_of="self._batch_size"),
    tc.NamedDim("hidden_size", value_of="self.config.hidden_size"))
def get_pooled_output(self) -> tf.Tensor:
    """Gets a single sequence representation for classification."""
    # `self.pooled` is the [CLS]-style summary vector produced by `_pool`.
    return self.pooled
| |
from __future__ import division, print_function, absolute_import
import numpy as np
import scipy.linalg
from scipy.sparse import csc_matrix
from scipy.optimize._trustregion_constr.projections \
import projections, orthogonality
from numpy.testing import (TestCase, assert_array_almost_equal,
assert_array_equal, assert_array_less,
assert_raises, assert_equal, assert_,
run_module_suite, assert_allclose, assert_warns,
dec)
import pytest
import sys
import platform
try:
from sksparse.cholmod import cholesky_AAt
sksparse_available = True
available_sparse_methods = ("NormalEquation", "AugmentedSystem")
except ImportError:
import warnings
sksparse_available = False
available_sparse_methods = ("AugmentedSystem",)
available_dense_methods = ('QRFactorization', 'SVDFactorization')
class TestProjections(TestCase):
def test_nullspace_and_least_squares_sparse(self):
A_dense = np.array([[1, 2, 3, 4, 0, 5, 0, 7],
[0, 8, 7, 0, 1, 5, 9, 0],
[1, 0, 0, 0, 0, 1, 2, 3]])
At_dense = A_dense.T
A = csc_matrix(A_dense)
test_points = ([1, 2, 3, 4, 5, 6, 7, 8],
[1, 10, 3, 0, 1, 6, 7, 8],
[1.12, 10, 0, 0, 100000, 6, 0.7, 8])
for method in available_sparse_methods:
Z, LS, _ = projections(A, method)
for z in test_points:
# Test if x is in the null_space
x = Z.matvec(z)
assert_array_almost_equal(A.dot(x), 0)
# Test orthogonality
assert_array_almost_equal(orthogonality(A, x), 0)
# Test if x is the least square solution
x = LS.matvec(z)
x2 = scipy.linalg.lstsq(At_dense, z)[0]
assert_array_almost_equal(x, x2)
def test_iterative_refinements_sparse(self):
A_dense = np.array([[1, 2, 3, 4, 0, 5, 0, 7],
[0, 8, 7, 0, 1, 5, 9, 0],
[1, 0, 0, 0, 0, 1, 2, 3]])
A = csc_matrix(A_dense)
test_points = ([1, 2, 3, 4, 5, 6, 7, 8],
[1, 10, 3, 0, 1, 6, 7, 8],
[1.12, 10, 0, 0, 100000, 6, 0.7, 8],
[1, 0, 0, 0, 0, 1, 2, 3+1e-10])
for method in available_sparse_methods:
Z, LS, _ = projections(A, method, orth_tol=1e-18, max_refin=100)
for z in test_points:
# Test if x is in the null_space
x = Z.matvec(z)
atol = 1e-13 * abs(x).max()
err = abs(A.dot(x)).max()
assert_allclose(A.dot(x), 0, atol=atol)
# Test orthogonality
assert_allclose(orthogonality(A, x), 0, atol=1e-13)
def test_rowspace_sparse(self):
A_dense = np.array([[1, 2, 3, 4, 0, 5, 0, 7],
[0, 8, 7, 0, 1, 5, 9, 0],
[1, 0, 0, 0, 0, 1, 2, 3]])
A = csc_matrix(A_dense)
test_points = ([1, 2, 3],
[1, 10, 3],
[1.12, 10, 0])
for method in available_sparse_methods:
_, _, Y = projections(A, method)
for z in test_points:
# Test if x is solution of A x = z
x = Y.matvec(z)
assert_array_almost_equal(A.dot(x), z)
# Test if x is in the return row space of A
A_ext = np.vstack((A_dense, x))
assert_equal(np.linalg.matrix_rank(A_dense),
np.linalg.matrix_rank(A_ext))
def test_nullspace_and_least_squares_dense(self):
A = np.array([[1, 2, 3, 4, 0, 5, 0, 7],
[0, 8, 7, 0, 1, 5, 9, 0],
[1, 0, 0, 0, 0, 1, 2, 3]])
At = A.T
test_points = ([1, 2, 3, 4, 5, 6, 7, 8],
[1, 10, 3, 0, 1, 6, 7, 8],
[1.12, 10, 0, 0, 100000, 6, 0.7, 8])
for method in available_dense_methods:
Z, LS, _ = projections(A, method)
for z in test_points:
# Test if x is in the null_space
x = Z.matvec(z)
assert_array_almost_equal(A.dot(x), 0)
# Test orthogonality
assert_array_almost_equal(orthogonality(A, x), 0)
# Test if x is the least square solution
x = LS.matvec(z)
x2 = scipy.linalg.lstsq(At, z)[0]
assert_array_almost_equal(x, x2)
def test_compare_dense_and_sparse(self):
D = np.diag(range(1, 101))
A = np.hstack([D, D, D, D])
A_sparse = csc_matrix(A)
np.random.seed(0)
Z, LS, Y = projections(A)
Z_sparse, LS_sparse, Y_sparse = projections(A_sparse)
for k in range(20):
z = np.random.normal(size=(400,))
assert_array_almost_equal(Z.dot(z), Z_sparse.dot(z))
assert_array_almost_equal(LS.dot(z), LS_sparse.dot(z))
x = np.random.normal(size=(100,))
assert_array_almost_equal(Y.dot(x), Y_sparse.dot(x))
def test_compare_dense_and_sparse2(self):
D1 = np.diag([-1.7, 1, 0.5])
D2 = np.diag([1, -0.6, -0.3])
D3 = np.diag([-0.3, -1.5, 2])
A = np.hstack([D1, D2, D3])
A_sparse = csc_matrix(A)
np.random.seed(0)
Z, LS, Y = projections(A)
Z_sparse, LS_sparse, Y_sparse = projections(A_sparse)
for k in range(1):
z = np.random.normal(size=(9,))
assert_array_almost_equal(Z.dot(z), Z_sparse.dot(z))
assert_array_almost_equal(LS.dot(z), LS_sparse.dot(z))
x = np.random.normal(size=(3,))
assert_array_almost_equal(Y.dot(x), Y_sparse.dot(x))
def test_iterative_refinements_dense(self):
A = np.array([[1, 2, 3, 4, 0, 5, 0, 7],
[0, 8, 7, 0, 1, 5, 9, 0],
[1, 0, 0, 0, 0, 1, 2, 3]])
test_points = ([1, 2, 3, 4, 5, 6, 7, 8],
[1, 10, 3, 0, 1, 6, 7, 8],
[1, 0, 0, 0, 0, 1, 2, 3+1e-10])
for method in available_dense_methods:
Z, LS, _ = projections(A, method, orth_tol=1e-18, max_refin=10)
for z in test_points:
# Test if x is in the null_space
x = Z.matvec(z)
assert_array_almost_equal(A.dot(x), 0, decimal=14)
# Test orthogonality
assert_array_almost_equal(orthogonality(A, x), 0, decimal=16)
def test_rowspace_dense(self):
A = np.array([[1, 2, 3, 4, 0, 5, 0, 7],
[0, 8, 7, 0, 1, 5, 9, 0],
[1, 0, 0, 0, 0, 1, 2, 3]])
test_points = ([1, 2, 3],
[1, 10, 3],
[1.12, 10, 0])
for method in available_dense_methods:
_, _, Y = projections(A, method)
for z in test_points:
# Test if x is solution of A x = z
x = Y.matvec(z)
assert_array_almost_equal(A.dot(x), z)
# Test if x is in the return row space of A
A_ext = np.vstack((A, x))
assert_equal(np.linalg.matrix_rank(A),
np.linalg.matrix_rank(A_ext))
class TestOrthogonality(TestCase):
    """Tests that `orthogonality` reports ~0 for vectors that are
    (numerically) orthogonal to the rows of A."""

    def test_dense_matrix(self):
        A = np.array([[1, 2, 3, 4, 0, 5, 0, 7],
                      [0, 8, 7, 0, 1, 5, 9, 0],
                      [1, 0, 0, 0, 0, 1, 2, 3]])
        test_vectors = ([-1.98931144, -1.56363389,
                         -0.84115584, 2.2864762,
                         5.599141, 0.09286976,
                         1.37040802, -0.28145812],
                        [697.92794044, -4091.65114008,
                         -3327.42316335, 836.86906951,
                         99434.98929065, -1285.37653682,
                         -4109.21503806, 2935.29289083])
        test_expected_orth = (0, 0)
        # Each vector lies in null(A), so its orthogonality measure against
        # the rows of A should vanish regardless of the vector's scale.
        for x, orth in zip(test_vectors, test_expected_orth):
            assert_array_almost_equal(orthogonality(A, x), orth)

    def test_sparse_matrix(self):
        A = np.array([[1, 2, 3, 4, 0, 5, 0, 7],
                      [0, 8, 7, 0, 1, 5, 9, 0],
                      [1, 0, 0, 0, 0, 1, 2, 3]])
        A = csc_matrix(A)
        test_vectors = ([-1.98931144, -1.56363389,
                         -0.84115584, 2.2864762,
                         5.599141, 0.09286976,
                         1.37040802, -0.28145812],
                        [697.92794044, -4091.65114008,
                         -3327.42316335, 836.86906951,
                         99434.98929065, -1285.37653682,
                         -4109.21503806, 2935.29289083])
        test_expected_orth = (0, 0)
        # Same vectors as the dense case; the sparse code path must agree.
        for x, orth in zip(test_vectors, test_expected_orth):
            assert_array_almost_equal(orthogonality(A, x), orth)
| |
from sympy import (log, sqrt, Rational as R, Symbol, I, exp, pi, S,
cos, sin, Mul, Pow, O)
from sympy.simplify.radsimp import expand_numer
from sympy.simplify.simplify import expand
from sympy.core.function import expand_multinomial, expand_power_base
from sympy.core.compatibility import range
from sympy.utilities.pytest import raises
from sympy.utilities.randtest import verify_numerically
from sympy.abc import x, y, z
def test_expand_no_log():
    """expand(log=False) must distribute products/powers but leave the
    log terms themselves unexpanded."""
    assert (
        (1 + log(x**4))**2).expand(log=False) == 1 + 2*log(x**4) + log(x**4)**2
    assert ((1 + log(x**4))*(1 + log(x**3))).expand(
        log=False) == 1 + log(x**4) + log(x**3) + log(x**4)*log(x**3)
def test_expand_no_multinomial():
    """expand(multinomial=False) distributes the product but keeps
    integer powers of sums unexpanded."""
    assert ((1 + x)*(1 + (1 + x)**4)).expand(multinomial=False) == \
        1 + x + (1 + x)**4 + x*(1 + x)**4
def test_expand_negative_integer_powers():
    """Negative integer powers of sums expand into 1/(expanded positive
    power); multinomial=False suppresses that expansion."""
    expr = (x + y)**(-2)
    assert expr.expand() == 1 / (2*x*y + x**2 + y**2)
    assert expr.expand(multinomial=False) == (x + y)**(-2)
    expr = (x + y)**(-3)
    assert expr.expand() == 1 / (3*x*x*y + 3*x*y*y + x**3 + y**3)
    assert expr.expand(multinomial=False) == (x + y)**(-3)
    # Mixed positive/negative powers combine before expansion.
    expr = (x + y)**(2) * (x + y)**(-4)
    assert expr.expand() == 1 / (2*x*y + x**2 + y**2)
    assert expr.expand(multinomial=False) == (x + y)**(-2)
def test_expand_non_commutative():
    """Expansion with non-commutative symbols must preserve operand order
    and only pull out commutative factors."""
    A = Symbol('A', commutative=False)
    B = Symbol('B', commutative=False)
    C = Symbol('C', commutative=False)
    a = Symbol('a')
    b = Symbol('b')
    i = Symbol('i', integer=True)
    n = Symbol('n', negative=True)
    m = Symbol('m', negative=True)
    p = Symbol('p', polar=True)
    # `np` here means "non-polar", not numpy.
    np = Symbol('p', polar=False)
    assert (C*(A + B)).expand() == C*A + C*B
    assert (C*(A + B)).expand() != A*C + B*C
    assert ((A + B)**2).expand() == A**2 + A*B + B*A + B**2
    assert ((A + B)**3).expand() == (A**2*B + B**2*A + A*B**2 + B*A**2 +
                                     A**3 + B**3 + A*B*A + B*A*B)
    # issue 6219
    assert ((a*A*B*A**-1)**2).expand() == a**2*A*B**2/A
    # Note that (a*A*B*A**-1)**2 is automatically converted to a**2*(A*B*A**-1)**2
    assert ((a*A*B*A**-1)**2).expand(deep=False) == a**2*(A*B*A**-1)**2
    assert ((a*A*B*A**-1)**2).expand() == a**2*(A*B**2*A**-1)
    assert ((a*A*B*A**-1)**2).expand(force=True) == a**2*A*B**2*A**(-1)
    assert ((a*A*B)**2).expand() == a**2*A*B*A*B
    assert ((a*A)**2).expand() == a**2*A**2
    assert ((a*A*B)**i).expand() == a**i*(A*B)**i
    assert ((a*A*(B*(A*B/A)**2))**i).expand() == a**i*(A*B*A*B**2/A)**i
    # issue 6558
    assert (A*B*(A*B)**-1).expand() == A*B*(A*B)**-1
    assert ((a*A)**i).expand() == a**i*A**i
    assert ((a*A*B*A**-1)**3).expand() == a**3*A*B**3/A
    assert ((a*A*B*A*B/A)**3).expand() == \
        a**3*A*B*(A*B**2)*(A*B**2)*A*B*A**(-1)
    assert ((a*A*B*A*B/A)**-3).expand() == \
        a**-3*(A*B*(A*B**2)*(A*B**2)*A*B*A**(-1))**-1
    assert ((a*b*A*B*A**-1)**i).expand() == a**i*b**i*(A*B/A)**i
    assert ((a*(a*b)**i)**i).expand() == a**i*a**(i**2)*b**(i**2)
    e = Pow(Mul(a, 1/a, A, B, evaluate=False), S(2), evaluate=False)
    assert e.expand() == A*B*A*B
    assert sqrt(a*(A*b)**i).expand() == sqrt(a*b**i*A**i)
    assert (sqrt(-a)**a).expand() == sqrt(-a)**a
    # Sign extraction depends on the assumptions (negative/polar) above.
    assert expand((-2*n)**(i/3)) == 2**(i/3)*(-n)**(i/3)
    assert expand((-2*n*m)**(i/a)) == (-2)**(i/a)*(-n)**(i/a)*(-m)**(i/a)
    assert expand((-2*a*p)**b) == 2**b*p**b*(-a)**b
    assert expand((-2*a*np)**b) == 2**b*(-a*np)**b
    assert expand(sqrt(A*B)) == sqrt(A*B)
    assert expand(sqrt(-2*a*b)) == sqrt(2)*sqrt(-a*b)
def test_expand_radicals():
    """Powers of radicals expand only in the integer part of the exponent;
    the fractional remainder stays as the unexpanded radical factor."""
    a = (x + y)**R(1, 2)
    assert (a**1).expand() == a
    assert (a**3).expand() == x*a + y*a
    assert (a**5).expand() == x**2*a + 2*x*y*a + y**2*a
    assert (1/a**1).expand() == 1/a
    assert (1/a**3).expand() == 1/(x*a + y*a)
    assert (1/a**5).expand() == 1/(x**2*a + 2*x*y*a + y**2*a)
    # Cube root: the residual factor can be a or a**2.
    a = (x + y)**R(1, 3)
    assert (a**1).expand() == a
    assert (a**2).expand() == a**2
    assert (a**4).expand() == x*a + y*a
    assert (a**5).expand() == x*a**2 + y*a**2
    assert (a**7).expand() == x**2*a + 2*x*y*a + y**2*a
def test_expand_modulus():
    """expand(modulus=m) reduces coefficients mod m; the modulus must be a
    nonzero integer."""
    assert ((x + y)**11).expand(modulus=11) == x**11 + y**11
    assert ((x + sqrt(2)*y)**11).expand(modulus=11) == x**11 + 10*sqrt(2)*y**11
    assert (x + y/2).expand(modulus=1) == y/2
    raises(ValueError, lambda: ((x + y)**11).expand(modulus=0))
    raises(ValueError, lambda: ((x + y)**11).expand(modulus=x))
def test_issue_5743():
    """Regression: x*sqrt(x + y) times (1 + [x*]sqrt(x + y)) must expand the
    radical product instead of leaving it nested (sympy issue 5743)."""
    assert (x*sqrt(
        x + y)*(1 + sqrt(x + y))).expand() == x**2 + x*y + x*sqrt(x + y)
    assert (x*sqrt(
        x + y)*(1 + x*sqrt(x + y))).expand() == x**3 + x**2*y + x*sqrt(x + y)
def test_expand_frac():
    """frac/numer/denom flags select which part of a fraction is expanded."""
    assert expand((x + y)*y/x/(x + 1), frac=True) == \
        (x*y + y**2)/(x**2 + x)
    assert expand((x + y)*y/x/(x + 1), numer=True) == \
        (x*y + y**2)/(x*(x + 1))
    assert expand((x + y)*y/x/(x + 1), denom=True) == \
        y*(x + y)/(x**2 + x)
    # With multinomial=False the numerator's power stays unexpanded.
    eq = (x + 1)**2/y
    assert expand_numer(eq, multinomial=False) == eq
def test_issue_6121():
    """Regression: complex expansion of this expression must terminate
    (sympy issue 6121; it previously recursed forever)."""
    eq = -I*exp(-3*I*pi/4)/(4*pi**(S(3)/2)*sqrt(x))
    assert eq.expand(complex=True)  # does not give oo recursion
def test_expand_power_base():
    """expand_power_base splits (a*b)**e into a**e*b**e only when valid for
    the assumptions, or unconditionally with force=True."""
    assert expand_power_base((x*y*z)**4) == x**4*y**4*z**4
    # Symbolic exponent: not split without force (branch-cut safety).
    assert expand_power_base((x*y*z)**x).is_Pow
    assert expand_power_base((x*y*z)**x, force=True) == x**x*y**x*z**x
    assert expand_power_base((x*(y*z)**2)**3) == x**3*y**6*z**6
    assert expand_power_base((sin((x*y)**2)*y)**z).is_Pow
    assert expand_power_base(
        (sin((x*y)**2)*y)**z, force=True) == sin((x*y)**2)**z*y**z
    # deep=True expands inside function arguments instead.
    assert expand_power_base(
        (sin((x*y)**2)*y)**z, deep=True) == (sin(x**2*y**2)*y)**z
    assert expand_power_base(exp(x)**2) == exp(2*x)
    assert expand_power_base((exp(x)*exp(y))**2) == exp(2*x)*exp(2*y)
    assert expand_power_base(
        (exp((x*y)**z)*exp(y))**2) == exp(2*(x*y)**z)*exp(2*y)
    assert expand_power_base((exp((x*y)**z)*exp(
        y))**2, deep=True, force=True) == exp(2*x**z*y**z)*exp(2*y)
    assert expand_power_base((exp(x)*exp(y))**z).is_Pow
    assert expand_power_base(
        (exp(x)*exp(y))**z, force=True) == exp(x)**z*exp(y)**z
def test_expand_arit():
    """Core arithmetic expansion: products, integer powers, series terms,
    power_base/power_exp flags, trig expansion, and a performance check."""
    a = Symbol("a")
    b = Symbol("b", positive=True)
    c = Symbol("c")
    p = R(5)
    e = (a + b)*c
    assert e == c*(a + b)
    assert (e.expand() - a*c - b*c) == R(0)
    e = (a + b)*(a + b)
    assert e == (a + b)**2
    assert e.expand() == 2*a*b + a**2 + b**2
    e = (a + b)*(a + b)**R(2)
    assert e == (a + b)**3
    assert e.expand() == 3*b*a**2 + 3*a*b**2 + a**3 + b**3
    assert e.expand() == 3*b*a**2 + 3*a*b**2 + a**3 + b**3
    e = (a + b)*(a + c)*(b + c)
    assert e == (a + c)*(a + b)*(b + c)
    assert e.expand() == 2*a*b*c + b*a**2 + c*a**2 + b*c**2 + a*c**2 + c*b**2 + a*b**2
    e = (a + R(1))**p
    assert e == (1 + a)**5
    assert e.expand() == 1 + 5*a + 10*a**2 + 10*a**3 + 5*a**4 + a**5
    e = (a + b + c)*(a + c + p)
    assert e == (5 + a + c)*(a + b + c)
    assert e.expand() == 5*a + 5*b + 5*c + 2*a*c + b*c + a*b + a**2 + c**2
    x = Symbol("x")
    s = exp(x*x) - 1
    # Expansion must preserve the O() term of a series.
    e = s.nseries(x, 0, 3)/x**2
    assert e.expand() == 1 + x**2/2 + O(x**4)
    e = (x*(y + z))**(x*(y + z))*(x + y)
    assert e.expand(power_exp=False, power_base=False) == x*(x*y + x*
                    z)**(x*y + x*z) + y*(x*y + x*z)**(x*y + x*z)
    assert e.expand(power_exp=False, power_base=False, deep=False) == x* \
        (x*(y + z))**(x*(y + z)) + y*(x*(y + z))**(x*(y + z))
    e = (x*(y + z))**z
    assert e.expand(power_base=True, mul=True, deep=True) in [x**z*(y +
                    z)**z, (x*y + x*z)**z]
    assert ((2*y)**z).expand() == 2**z*y**z
    # Positive/negative assumptions control how signs split out of bases.
    p = Symbol('p', positive=True)
    assert sqrt(-x).expand().is_Pow
    assert sqrt(-x).expand(force=True) == I*sqrt(x)
    assert ((2*y*p)**z).expand() == 2**z*p**z*y**z
    assert ((2*y*p*x)**z).expand() == 2**z*p**z*(x*y)**z
    assert ((2*y*p*x)**z).expand(force=True) == 2**z*p**z*x**z*y**z
    assert ((2*y*p*-pi)**z).expand() == 2**z*pi**z*p**z*(-y)**z
    assert ((2*y*p*-pi*x)**z).expand() == 2**z*pi**z*p**z*(-x*y)**z
    n = Symbol('n', negative=True)
    m = Symbol('m', negative=True)
    assert ((-2*x*y*n)**z).expand() == 2**z*(-n)**z*(x*y)**z
    assert ((-2*x*y*n*m)**z).expand() == 2**z*(-m)**z*(-n)**z*(-x*y)**z
    # issue 5482
    assert sqrt(-2*x*n) == sqrt(2)*sqrt(-n)*sqrt(x)
    # issue 5605 (2)
    assert (cos(x + y)**2).expand(trig=True) in [
        (-sin(x)*sin(y) + cos(x)*cos(y))**2,
        sin(x)**2*sin(y)**2 - 2*sin(x)*sin(y)*cos(x)*cos(y) + cos(x)**2*cos(y)**2
    ]
    # Check that this isn't too slow
    x = Symbol('x')
    W = 1
    for i in range(1, 21):
        W = W * (x - i)
    W = W.expand()
    assert W.has(-1672280820*x**15)
def test_power_expand():
    """Test for Pow.expand()"""
    a = Symbol('a')
    b = Symbol('b')
    p = (a + b)**2
    assert p.expand() == a**2 + b**2 + 2*a*b
    p = (1 + 2*(1 + a))**2
    assert p.expand() == 9 + 4*(a**2) + 12*a
    # Sums in the exponent split into a product of powers.
    p = 2**(a + b)
    assert p.expand() == 2**a*2**b
    # ...but not for non-commutative exponent symbols.
    A = Symbol('A', commutative=False)
    B = Symbol('B', commutative=False)
    assert (2**(A + B)).expand() == 2**(A + B)
    assert (A**(a + b)).expand() != A**(a + b)
def test_issues_5919_6830():
    """Regressions for sympy issues 5919 (rational-function expansion) and
    6830 (expand_multinomial with commutative and non-commutative bases)."""
    # issue 5919
    n = -1 + 1/x
    z = n/x/(-n)**2 - 1/n/x
    assert expand(z) == 1/(x**2 - 2*x + 1) - 1/(x - 2 + 1/x) - 1/(-x + 1)
    # issue 6830
    p = (1 + x)**2
    assert expand_multinomial((1 + x*p)**2) == (
        x**2*(x**4 + 4*x**3 + 6*x**2 + 4*x + 1) + 2*x*(x**2 + 2*x + 1) + 1)
    assert expand_multinomial((1 + (y + x)*p)**2) == (
        2*((x + y)*(x**2 + 2*x + 1)) + (x**2 + 2*x*y + y**2)*
        (x**4 + 4*x**3 + 6*x**2 + 4*x + 1) + 1)
    # Same expansions with a non-commutative base symbol.
    A = Symbol('A', commutative=False)
    p = (1 + A)**2
    assert expand_multinomial((1 + x*p)**2) == (
        x**2*(1 + 4*A + 6*A**2 + 4*A**3 + A**4) + 2*x*(1 + 2*A + A**2) + 1)
    assert expand_multinomial((1 + (y + x)*p)**2) == (
        (x + y)*(1 + 2*A + A**2)*2 + (x**2 + 2*x*y + y**2)*
        (1 + 4*A + 6*A**2 + 4*A**3 + A**4) + 1)
    assert expand_multinomial((1 + (y + x)*p)**3) == (
        (x + y)*(1 + 2*A + A**2)*3 + (x**2 + 2*x*y + y**2)*(1 + 4*A +
        6*A**2 + 4*A**3 + A**4)*3 + (x**3 + 3*x**2*y + 3*x*y**2 + y**3)*(1 + 6*A
        + 15*A**2 + 20*A**3 + 15*A**4 + 6*A**5 + A**6) + 1)
    # unevaluate powers
    eq = (Pow((x + 1)*((A + 1)**2), 2, evaluate=False))
    # - in this case the base is not an Add so no further
    #   expansion is done
    assert expand_multinomial(eq) == \
        (x**2 + 2*x + 1)*(1 + 4*A + 6*A**2 + 4*A**3 + A**4)
    # - but here, the expanded base *is* an Add so it gets expanded
    eq = (Pow(((A + 1)**2), 2, evaluate=False))
    assert expand_multinomial(eq) == 1 + 4*A + 6*A**2 + 4*A**3 + A**4
    # coverage
    def ok(a, b, n):
        # Numeric spot-check that expansion preserves the value.
        e = (a + I*b)**n
        return verify_numerically(e, expand_multinomial(e))
    for a in [2, S.Half]:
        for b in [3, S(1)/3]:
            for n in range(2, 6):
                assert ok(a, b, n)
    # O() terms absorb the higher-order products.
    assert expand_multinomial((x + 1 + O(z))**2) == \
        1 + 2*x + x**2 + O(z)
    assert expand_multinomial((x + 1 + O(z))**3) == \
        1 + 3*x + 3*x**2 + x**3 + O(z)
    assert expand_multinomial(3**(x + y + 3)) == 27*3**(x + y)
def test_expand_log():
    """log expansion over a positive symbol collapses the combination of
    log terms to zero."""
    t = Symbol('t', positive=True)
    # after first expansion, -2*log(2) + log(4); then 0 after second
    assert expand(log(t**2) - log(t**2/4) - 2*log(2)) == 0
| |
# -*- coding: utf-8 -*-
# Copyright 2018 Sebastian Semper, Christoph Wagner
# https://www.tu-ilmenau.de/it-ems/
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from .Algorithm import Algorithm
from ..Matrix import Matrix
class ISTA(Algorithm):
    r"""
    Iterative Soft Thresholding Algorithm

    **Definition and Interface**:
    For a given matrix :math:`A \in \mathbb{C}^{m \times N}` with
    :math:`m \ll N` and a vector :math:`b \in \mathbb{C}^m` we approximately
    solve

    .. math::
        \min\limits_{ x \in \mathbb{C}^N}\Vert{ A \cdot x - b}\Vert^2_2 +
        \lambda \cdot \Vert x \Vert_1,

    where :math:`\lambda > 0` is a regularization parameter to steer the
    trade-off between data fidelity and sparsity of the solution.

    >>> # import the packages
    >>> import numpy.linalg as npl
    >>> import numpy as np
    >>> import fastmat as fm
    >>> import fastmat.algorithms as fma
    >>> # define the dimensions and the sparsity
    >>> n, k = 512, 3
    >>> # define the sampling positions
    >>> t = np.linspace(0, 20 * np.pi, n)
    >>> # construct the convolution matrix
    >>> c = np.cos(2 * t) * np.exp(-t ** 2)
    >>> C = fm.Circulant(c)
    >>> # create the ground truth
    >>> x = np.zeros(n)
    >>> x[np.random.choice(range(n), k, replace=0)] = 1
    >>> b = C * x
    >>> # reconstruct it
    >>> ista = fma.ISTA(C, numLambda=0.005, numMaxSteps=100)
    >>> y = ista.process(b)
    >>> # test if they are close in the
    >>> # domain of C
    >>> print(npl.norm(C * y - b))

    We solve a sparse deconvolution problem, where the atoms are harmonics
    windowed by a gaussian envelope. The ground truth :math:`x` is built out
    of three pulses at arbitrary locations.

    .. note::
        The proper choice of :math:`\lambda` is crucial for good performance
        of this algorithm, but this is not an easy task. Unfortunately we are
        not in the place here to give you a rule of thumb what to do, since
        it highly depends on the application at hand. Again, consult [1]_ for
        any further considerations of this matter.

    .. [1] Amir Beck, Marc Teboulle, "A Fast Iterative Shrinkage-Thresholding
           Algorithm for Linear Inverse Problems", SIAM Journal on Imaging
           Sciences, 2009, Vol. 2, No. 1 : pp. 183-202

    Parameters
    ----------
    fmatA : fm.Matrix
        the system matrix
    arrB : np.ndarray
        the measurement vector
    numLambda : float, optional
        the thresholding parameter; default is 0.1
    numMaxSteps : int, optional
        maximum number of steps; default is 100

    Returns
    -------
    np.ndarray
        solution array
    """

    def __init__(self, fmatA, **kwargs):
        # check the must-have parameters
        if not isinstance(fmatA, Matrix):
            raise TypeError("fmatA must be a fastmat matrix")
        self.fmatA = fmatA
        # set default parameters (and create attributes)
        self.numLambda = 0.1
        self.numMaxSteps = 100
        # initialize callbacks
        # NOTE(review): `_process` also invokes `self.cbTrace`, which is not
        # initialized here -- presumably provided by the `Algorithm` base
        # class; confirm before relying on it.
        self.cbStep = None
        # Update with extra arguments
        self.updateParameters(**kwargs)

    def softThreshold(self, arrX, numAlpha):
        r"""
        Do a soft thresholding step.

        Shrinks each entry of `arrX` towards zero by `numAlpha` in magnitude
        (entries with magnitude below `numAlpha` become exactly zero), while
        preserving the complex phase of each entry.
        """
        # arrM - positive part of arrX - numAlpha
        # arrX - vector to be thresholded
        # numAlpha - thresholding threshold
        # The factor arrM / (arrM + numAlpha) equals
        # max(|x| - alpha, 0) / |x| wherever arrM > 0, and 0 elsewhere.
        self.arrM = np.maximum(np.abs(arrX) - numAlpha, 0)
        return np.multiply((self.arrM / (self.arrM + numAlpha)), arrX)

    def _process(self, arrB):
        # fmatA - input system matrix
        # arrB - input data vector (measurements)
        # numLambda - balancing parameter in optimization problem
        #             between data fidelity and sparsity
        # numMaxSteps - maximum number of steps to run
        # numL - step size during the conjugate gradient step
        if arrB.ndim > 2 or arrB.ndim < 1:
            raise ValueError("Only n x m arrays are supported for ISTA")
        # Normalize 1-D input to a single-column 2-D array.
        if arrB.ndim == 1:
            self.arrB = arrB.reshape((-1, 1))
        else:
            self.arrB = arrB
        if self.numMaxSteps <= 0:
            raise ValueError("ISTA would like to do at least one step for you")
        # calculate the largest singular value to get the right step size
        self.numL = 1.0 / (self.fmatA.largestSingularValue ** 2)
        # Start from the all-zero estimate, promoted to at least float32.
        self.arrX = np.zeros(
            (self.fmatA.numCols, self.arrB.shape[1]),
            dtype=np.promote_types(np.float32, self.arrB.dtype)
        )
        # start iterating
        for self.numStep in range(self.numMaxSteps):
            # do the gradient step and threshold
            self.arrStep = self.arrX - self.numL * self.fmatA.backward(
                self.fmatA.forward(self.arrX) - self.arrB
            )
            self.arrX = self.softThreshold(
                self.arrStep, self.numL * self.numLambda * 0.5
            )
            self.handleCallback(self.cbStep)
            self.handleCallback(self.cbTrace)
        # return the unthresholded values for all non-zero support elements
        self.arrResult = np.where(self.arrX != 0, self.arrStep, self.arrX)
        return self.arrResult

    @staticmethod
    def _getTest():
        # Test harness wiring for the fastmat inspection framework.
        from ..inspect import TEST, dynFormat, arrSparseTestDist
        from ..Product import Product
        from ..Hadamard import Hadamard
        from ..Matrix import Matrix

        def testISTA(test):
            # prepare vectors
            numCols = test[TEST.NUM_COLS]
            test[TEST.REFERENCE] = test[TEST.ALG_MATRIX].reference()
            # Build a sparse ground truth, measure it, and recover it.
            test[TEST.RESULT_REF] = np.hstack([
                arrSparseTestDist(
                    (numCols, 1),
                    dtype=test[TEST.DATATYPE],
                    density=1. * test['numK'] / numCols
                ).toarray()
                for nn in range(test[TEST.DATACOLS])
            ])
            test[TEST.RESULT_INPUT] = test[TEST.ALG_MATRIX].array.dot(
                test[TEST.RESULT_REF]
            )
            test[TEST.RESULT_OUTPUT] = test[TEST.INSTANCE].process(
                test[TEST.RESULT_INPUT]
            )

        return {
            TEST.ALGORITHM: {
                'order'          : 6,
                TEST.NUM_ROWS    : (lambda param: 3 * param['order']),
                TEST.NUM_COLS    : (lambda param: 2 ** param['order']),
                'numK'           : 'order',
                'lambda'         : 1.,
                'maxSteps'       : 10,
                TEST.ALG_MATRIX  : lambda param:
                    Product(Matrix(np.random.uniform(
                        -100, 100, (getattr(param, TEST.NUM_COLS),
                                    getattr(param, TEST.NUM_COLS))).astype(
                                        param['typeA'])),
                            Hadamard(param.order),
                            typeExpansion=param['typeA']),
                'typeA'          : TEST.Permutation(TEST.ALLTYPES),

                TEST.OBJECT      : ISTA,
                TEST.INITARGS    : [TEST.ALG_MATRIX],
                TEST.INITKWARGS  : {
                    'numLambda'      : 'lambda',
                    'numMaxSteps'    : 'maxSteps'
                },

                TEST.DATAALIGN   : TEST.ALIGNMENT.DONTCARE,
                TEST.INIT_VARIANT: TEST.IgnoreFunc(testISTA),

                'strTypeA'       : (lambda param: TEST.TYPENAME[param['typeA']]),
                TEST.NAMINGARGS: dynFormat(
                    "(%dx%d)*Hadamard(%s)[%s]",
                    TEST.NUM_ROWS,
                    TEST.NUM_COLS,
                    'order',
                    'strTypeA'
                ),

                # matrix inversion always expands data type to floating-point
                TEST.TYPE_PROMOTION: np.float32,
                TEST.CHECK_PROXIMITY: False
            },
        }

    @staticmethod
    def _getBenchmark():
        # No benchmark targets defined for this algorithm.
        return {}
| |
from SimPEG import Mesh, Regularization, Maps, Utils, EM
from SimPEG.EM.Static import DC
import numpy as np
import matplotlib.pyplot as plt
#%matplotlib inline
import copy
import pandas as pd
from scipy.sparse import csr_matrix, spdiags, dia_matrix,diags
from scipy.sparse.linalg import spsolve
from scipy.stats import norm,multivariate_normal
import sys
path ="../pymatsolver/"
path = "../../../Documents/pymatsolver/"
sys.path.append(path)
from pymatsolver import PardisoSolver
from scipy.interpolate import LinearNDInterpolator, interp1d
from sklearn.mixture import GaussianMixture
from SimPEG import DataMisfit, Regularization, Optimization, InvProblem, Directives, Inversion
import SimPEG
import scipy.sparse as sp
# 2D model: tensor-mesh construction for a DC resistivity example.
csx, csy, csz = 0.25,0.25,0.25
# Number of core cells in each direction
ncx, ncz = 123,41
# Number of padding cells to add in each direction
npad = 12
# Vectors of cell lengths in each direction
# Each entry is a (width, count, expansion) triple; negative expansion pads
# towards decreasing coordinates, positive towards increasing ones.
hx = [(csx,npad, -1.5),(csx,ncx),(csx,npad, 1.5)]
hz= [(csz,npad,-1.5),(csz,ncz)]
# Create mesh
# x0="CN": presumably centered in x with the top boundary at z=0 (SimPEG
# origin convention) -- verify against the SimPEG TensorMesh docs.
mesh = Mesh.TensorMesh([hx, hz],x0="CN")
# Map mesh coordinates from local to UTM coordinates
#mesh.x0[2] = mesh.x0[2]-mesh.vectorCCz[-npad-1]
mesh.x0[1] = mesh.x0[1]+csz/2.
#mesh.x0[0] = mesh.x0[0]+csx/2.
#mesh.plotImage(np.ones(mesh.nC)*np.nan, grid=True)
#mesh.plotImage(np.ones(mesh.nC)*np.nan, grid=True)
#plt.gca().set_xlim([-20,20])
#plt.gca().set_ylim([-15,0])
#mesh.plotGrid()
#plt.gca().set_aspect('equal')
#plt.show()
# NOTE: Python 2 print statement -- this script predates Python 3.
print "Mesh Size: ", mesh.nC
# Model Creation
# Log-conductivity values: background, a conductive sphere, a resistive
# sphere, and an (unused here, extent 0) overburden layer.
lnsig_air = 1e-8;
x0,z0, r0 = -6., -4., 3.
x1,z1, r1 = 6., -4., 3.
ln_sigback = -5.
ln_sigc = -3.
ln_sigr = -7.
noisemean = 0.
noisevar = 0.0
overburden_extent = 0.
ln_over = -4.
#m = (lnsig_background)*np.ones(mesh.nC);
#mu =np.ones(mesh.nC);
# Background model plus a Gaussian perturbation (variance 0. here, so the
# perturbation is identically zero).
mtrue = ln_sigback*np.ones(mesh.nC) + norm(noisemean,noisevar).rvs(mesh.nC)
# Overburden layer: with overburden_extent == 0. this selects no cells.
overb = (mesh.gridCC[:,1] >-overburden_extent) & (mesh.gridCC[:,1]<=0)
mtrue[overb] = ln_over*np.ones_like(mtrue[overb])+ norm(noisemean,noisevar).rvs(np.prod((mtrue[overb]).shape))
# Conductive sphere centered at (x0, z0) with radius r0.
csph = (np.sqrt((mesh.gridCC[:,1]-z0)**2.+(mesh.gridCC[:,0]-x0)**2.))< r0
mtrue[csph] = ln_sigc*np.ones_like(mtrue[csph]) + norm(noisemean,noisevar).rvs(np.prod((mtrue[csph]).shape))
#Define the sphere limit
# Resistive sphere centered at (x1, z1) with radius r1.
rsph = (np.sqrt((mesh.gridCC[:,1]-z1)**2.+(mesh.gridCC[:,0]-x1)**2.))< r1
mtrue[rsph] = ln_sigr*np.ones_like(mtrue[rsph]) + norm(noisemean,noisevar).rvs(np.prod((mtrue[rsph]).shape))
mtrue = Utils.mkvc(mtrue);
mesh.plotGrid()
plt.gca().set_xlim([-10,10])
plt.gca().set_ylim([-10,0])
# Extract the core region (without padding) for plotting/inversion.
xyzlim = np.r_[[[-10.,10.],[-10.,1.]]]
actind, meshCore = Utils.meshutils.ExtractCoreMesh(xyzlim,mesh)
# NOTE(review): `normed=True` is deprecated/removed in modern matplotlib
# (use density=True); left unchanged in this Python 2 era script.
plt.hist(mtrue[actind],bins =50,normed=True);
fig0 = plt.figure()
ax0 = fig0.add_subplot(111)
mm = meshCore.plotImage(mtrue[actind],ax = ax0)
plt.colorbar(mm[0])
ax0.set_aspect("equal")
#plt.show()
def getCylinderPoints(xc,zc,r):
    """Return a closed polyline tracing the circle of radius ``r`` centred at
    (``xc``, ``zc``) as an (N, 2) array of [x, z] points.

    The outline is sampled as an upper arc (left to right) followed by a
    lower arc (right to left); the first point is repeated at the end so the
    curve closes when plotted.
    """
    step = r / 10.
    xs_up = np.arange(-r, r + step, step)      # left-to-right sweep
    xs_down = np.arange(r, -r - step, -step)   # right-to-left sweep
    # On the circle, z = zc +/- sqrt(r^2 - x^2); shift x from 0 to xc.
    upper = np.vstack([xs_up + xc * np.ones_like(xs_up),
                       np.sqrt(-xs_up**2. + r**2.) + zc]).T[0:-1, :]
    lower = np.vstack([xs_down + xc * np.ones_like(xs_down),
                       -np.sqrt(-xs_down**2. + r**2.) + zc]).T[0:-1, :]
    # Concatenate both arcs and append the first point to close the loop.
    return np.vstack([upper, lower, upper[0, :]])
# Outlines of the two cylinders for later plotting.
# NOTE(review): the first call uses z1 rather than z0 — harmless here since
# z0 == z1 == -4., but verify the intent if the depths ever differ.
cylinderPoints0 = getCylinderPoints(x0,z1,r0)
cylinderPoints1 = getCylinderPoints(x1,z1,r1)
# Gradient array 1 2D: build 23 dipole sources sharing a fixed receiver spread
srclist = []
nSrc = 23
lines = 1
ylines = np.r_[0.]
xlines = np.r_[0.]
z = 0.
#xline
for k in range(lines):
    for i in range(nSrc):
        if i<=11:
            # First half: A electrode fixed at the left, B sweeps right
            locA = np.r_[-14.+1., z]
            locB = np.r_[-8.+2.*i-1., z]
            #M = np.c_[np.arange(-12.,-12+2*(i+1),2),np.ones(i+1)*z]
            #N = np.c_[np.arange(-10.,-10+2*(i+1),2),np.ones(i+1)*z]
            # Fixed 12-electrode receiver spread at the surface (z = 0)
            M = np.c_[np.arange(-12.,10+1,2),np.ones(12)*z]
            N = np.c_[np.arange(-10.,12+1,2),np.ones(12)*z]
            rx = DC.Rx.Dipole(M,N)
            src= DC.Src.Dipole([rx],locA,locB)
            srclist.append(src)
            #print locA,locB,"\n",[M,N],"\n"
            #rx = DC.Rx.Dipole(-M,-N)
            #src= DC.Src.Dipole([rx],-locA,-locB)
            #srclist.append(src)
            #print -locA,-locB,"\n",[-M,-N],"\n"
        else:
            # Second half: A sweeps right, B electrode fixed at the right
            locA = np.r_[-14.+2*(i-11)+1., z]
            locB = np.r_[14.-1.,z]
            #M = np.c_[np.arange(locA[0]+1.,12.,2),np.ones(nSrc-i)*z]
            #N = np.c_[np.arange(locA[0]+3.,14.,2),np.ones(nSrc-i)*z]
            M = np.c_[np.arange(-12.,10+1,2),np.ones(12)*z]
            N = np.c_[np.arange(-10.,12+1,2),np.ones(12)*z]
            rx = DC.Rx.Dipole(M,N)
            src= DC.Src.Dipole([rx],locA,locB)
            srclist.append(src)
            #print "line2",locA,locB,"\n",[M,N],"\n"
            #rx = DC.Rx.Dipole(-M,-N)
            #src= DC.Src.Dipole([rx],-locA,-locB)
            #srclist.append(src)
# Pair the DC problem with the survey; model is exp-mapped log-conductivity.
mapping = Maps.ExpMap(mesh)
survey = DC.Survey(srclist)
problem = DC.Problem3D_CC(mesh, sigmaMap=mapping)
problem.pair(survey)
problem.Solver = PardisoSolver
# Synthetic "observed" data: noise-free forward model of the true model,
# with 5% assumed standard deviation and a small floor.
survey.dobs = survey.dpred(mtrue)
survey.std = 0.05*np.ones_like(survey.dobs)
survey.eps = 1e-5*np.linalg.norm(survey.dobs)
dmisAll = DataMisfit.l2_DataMisfit(survey)
print '# of data: ', survey.dobs.shape
class SimultaneousSrc(DC.Src.BaseSrc):
    """
    Dipole source

    Compressed/simultaneous source whose right-hand side is a weighted
    combination QW of the individual source RHS columns.
    """
    # Class-level defaults; instances receive QW (and Q, W) through **kwargs.
    # NOTE(review): presumably SimPEG's BaseSrc assigns known kwargs as
    # instance attributes, which is how self.QW gets set — confirm.
    QW = None
    Q = None
    W = None
    def __init__(self, rxList,Q,W, **kwargs):
        # NOTE(review): the Q and W parameters are accepted but never stored
        # here; only **kwargs (e.g. QW=...) reach BaseSrc.__init__ — verify.
        SimPEG.Survey.BaseSrc.__init__(self, rxList, **kwargs)
    def eval(self, prob):
        # The right-hand side used by the problem for this source.
        return self.QW
class SimultaneousRx(DC.Rx.BaseRx):
    """
    SimultaneousRx receiver

    Receiver whose projection matrix P is supplied directly via ``locs``
    (one row per datum), instead of being built from electrode locations.
    """
    def __init__(self, locs, rxType='phi', **kwargs):
        # We may not need this ...
        SimPEG.Survey.BaseRx.__init__(self, locs, rxType)
    @property
    def nD(self):
        """Number of data in the receiver."""
        # One datum per row of the stored projection matrix.
        return self.locs.shape[0]
        # Not sure why ...
        # return int(self.locs[0].size / 2)
    def getP(self, mesh, Gloc):
        # ``locs`` already holds the projection matrix; return it as-is.
        return self.locs
# Build the common receiver dipole geometry once and extract its projection
# matrix P (cell centers -> measured potential differences); P is then fed
# to every SimultaneousRx below.
P = []
M = np.c_[np.arange(-12.,10+1,2),np.ones(12)*z]
N = np.c_[np.arange(-10.,12+1,2),np.ones(12)*z]
rx = DC.Rx.Dipole(M,N)
P = rx.getP(mesh,'CC')
from SimPEG.Maps import IdentityMap
from scipy.fftpack import dct,idct
class DCTMap(IdentityMap):
    """SimPEG map applying a 2-D orthonormal type-II DCT to a model vector,
    reshaped onto the (nCx, nCy) mesh grid in Fortran order."""
    def __init__(self, mesh=None, nP=None, **kwargs):
        super(DCTMap, self).__init__(mesh=mesh, nP=nP, **kwargs)
    def _transform(self, m):
        # Separable 2-D DCT: transform along axis 0, then axis 1.
        return Utils.mkvc(dct(dct(m.reshape(self.mesh.nCx,self.mesh.nCy,order = 'F'), axis=0,norm = 'ortho'), axis=1,norm = 'ortho'))
    def deriv(self, m, v=None):
        # The DCT is linear, so the derivative applied to v is the DCT of v.
        if v is not None:
            return dct(dct(v.reshape(self.mesh.nCx,self.mesh.nCy,order = 'F'), axis=0,norm = 'ortho'), axis=1,norm = 'ortho')
        else:
            # NOTE(review): prints (Python 2) and implicitly returns None.
            print "not implemented"
    def inverse(self, m):
        # With norm='ortho' the transform is orthonormal: inverse is the IDCT.
        return Utils.mkvc(idct(idct(m.reshape(self.mesh.nCx,self.mesh.nCy,order = 'F'), axis=0,norm = 'ortho'), axis=1,norm = 'ortho'))
class iDCTMap(IdentityMap):
    """Inverse counterpart of DCTMap: applies the 2-D orthonormal IDCT
    (model vector reshaped to (nCx, nCy) in Fortran order)."""
    def __init__(self, mesh, nP=None, **kwargs):
        super(iDCTMap, self).__init__(mesh=mesh, nP=nP, **kwargs)
    def _transform(self, m):
        # Separable 2-D inverse DCT: axis 0, then axis 1.
        return Utils.mkvc(idct(idct(m.reshape(self.mesh.nCx,self.mesh.nCy,order = 'F'), axis=0,norm = 'ortho'), axis=1,norm = 'ortho'))
    def deriv(self, m, v=None):
        # Linear map: derivative applied to v is the IDCT of v.
        if v is not None:
            return idct(idct(v.reshape(self.mesh.nCx,self.mesh.nCy,order = 'F'), axis=0,norm = 'ortho'), axis=1,norm = 'ortho')
        else:
            # NOTE(review): prints (Python 2) and implicitly returns None.
            print "not implemented"
    def inverse(self, m):
        # Orthonormal transform: inverse of the IDCT is the forward DCT.
        return Utils.mkvc(dct(dct(m.reshape(self.mesh.nCx,self.mesh.nCy,order = 'F'), axis=0,norm = 'ortho'), axis=1,norm = 'ortho'))
idctmap = iDCTMap(mesh)
dctmap = DCTMap(mesh)
import spgl1
# Parameter for SPGL1 iterations
nits = 10
# Starting model: homogeneous background (log-conductivity -5)
mdct = (-5.)*np.ones_like(mtrue)
it = 0
#phi_d_normal = np.load('../phid_normal.npy')
#ratio = np.r_[6.5,phi_d_normal[0:-1]/phi_d_normal[1:]]
# Target misfit-reduction ratio per outer iteration (bounded by min_progress)
ratio = 10.*np.ones(nits)
min_progress = 1.2
xlist = []
# Parameters for W (random source-combination matrix)
nsubSrc = 5
InnerIt = 1
dmisfitsub = []
dmisfitall = []
dmisfitall.append(dmisAll.eval(mdct)/survey.nD)
# Initialize Random Source: compress survey.nSrc sources into nsubSrc
# random weighted combinations.
W = np.random.randn(survey.nSrc,nsubSrc)
#problem.unpair()
#roblem.pair(survey)
Q = problem.getRHS()
sub = problem.getRHS().dot(W)
rx_r = SimultaneousRx(locs=P)
srcList_r = []
for isrc in range(sub.shape[1]):
    src_r = SimultaneousSrc([rx_r], Q=Q[:,isrc],W=W[:,isrc],QW =Q.dot(W)[:,isrc])
    srcList_r.append(src_r)
# Reduced survey built from the compressed simultaneous sources.
survey_r = DC.Survey(srcList_r)
problem.unpair()
problem.pair(survey_r)
d = survey_r.dpred(mtrue)
survey_r.dobs = d
survey_r.std = np.ones_like(d)*0.05
survey_r.eps = 1e-5*np.linalg.norm(survey_r.dobs)
dmis = DataMisfit.l2_DataMisfit(survey_r)
dmisfitsub.append(dmis.eval(mdct)/survey_r.nD)
# Re-pair with the full survey to report the overall misfit.
problem.unpair()
problem.pair(survey)
print "end iteration: ",it, '; Overall Normalized Misfit: ', dmisAll.eval(mdct)/survey.nD
# Outer loop: sparse (BPDN) model updates in the DCT domain until the full
# normalized misfit drops below 0.5 or the iteration budget is spent.
while (dmisAll.eval(mdct)/survey.nD)>0.5 and it<nits:
    problem.unpair()
    problem.pair(survey_r)
    # Jacobian operator in the DCT domain, in the two-mode callable form
    # expected by spgl1 (mode 1: forward J*x; otherwise: adjoint J'*x).
    def JS(x,mode):
        if mode == 1:
            return problem.Jvec(mdct,idctmap*x)
        else:
            return dctmap*problem.Jtvec(mdct,x)
    # Residual of the reduced survey for the current model.
    b = survey_r.dpred(mdct)-survey_r.dpred(mtrue)
    print "# of data: ", b.shape
    opts = spgl1.spgSetParms({'iterations':100, 'verbosity':2})
    # Allowed residual after this update: shrink the misfit by ratio[it],
    # but require at least min_progress improvement.
    sigtol = np.linalg.norm(b)/np.maximum(ratio[it],min_progress)
    #tautol = 20000.
    x,resid,grad,info = spgl1.spg_bpdn(JS, b, sigma = sigtol,options=opts)
    #x,resid,grad,info = spgl1.spg_lasso(JS,b,tautol,opts)
    # Sanity check: the step must decrease the reduced-survey misfit.
    assert dmis.eval(mdct) > dmis.eval(mdct - idctmap*x)
    mdct = mdct - idctmap*x
    xlist.append(x)
    it +=1
    print "end iteration: ",it, '; Subsample Normalized Misfit: ', dmis.eval(mdct)/survey_r.nD
    dmisfitsub.append(dmis.eval(mdct)/survey_r.nD)
    problem.unpair()
    problem.pair(survey)
    dmisfitall.append(dmisAll.eval(mdct)/survey.nD)
    print "Dmisfit compared to full dataset: ",dmisAll.eval(mdct)/survey.nD
    # Periodically redraw the random combination matrix W and rebuild the
    # reduced survey (every InnerIt outer iterations).
    if np.mod(it,InnerIt) ==0:
        W = np.random.randn(survey.nSrc,nsubSrc)
        print 'update W'
        #problem.unpair()
        #roblem.pair(survey)
        Q = problem.getRHS()
        sub = problem.getRHS().dot(W)
        rx_r = SimultaneousRx(locs=P)
        srcList_r = []
        for isrc in range(sub.shape[1]):
            src_r = SimultaneousSrc([rx_r], Q=Q[:,isrc],W=W[:,isrc],QW =Q.dot(W)[:,isrc])
            srcList_r.append(src_r)
        survey_r = DC.Survey(srcList_r)
        problem.unpair()
        problem.pair(survey_r)
        dmis = DataMisfit.l2_DataMisfit(survey_r)
        d = survey_r.dpred(mtrue)
        survey_r.dobs = d
        survey_r.std = np.ones_like(d)*0.05
        survey_r.eps = 1e-5*np.linalg.norm(survey_r.dobs)
        print "end Update W; iteration: ",it, '; New Subsample Normalized Misfit: ', dmis.eval(mdct)/survey_r.nD
        problem.unpair()
        problem.pair(survey)
# Persist convergence curves and the final model, then plot the result.
np.save('./dmisfitsub.npy',dmisfitsub)
np.save('./dmisfitall.npy',dmisfitall)
np.save('./mfinal.npy',mdct)
np.savez('./xlist.npz',xlist)
mm = mesh.plotImage(mdct)
plt.colorbar(mm[0])
plt.gca().set_xlim([-10.,10.])
plt.gca().set_ylim([-10.,0.])
plt.plot(cylinderPoints0[:,0],cylinderPoints0[:,1], linestyle = 'dashed', color='k')
plt.plot(cylinderPoints1[:,0],cylinderPoints1[:,1], linestyle = 'dashed', color='k')
plt.show()
| |
import getopt
import json
import logging
import os
import sys
import time

from data import datasets
from easyminercenter.lib.api import *
logging.basicConfig(stream=sys.stdout, level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
# Default configuration; every value below can be overridden via CLI options
# parsed in the "params" region that follows.
API_URL = ''
API_KEY = ''
USE_AUTO_CONF_SUPP = False   # let the backend pick confidence/support
USE_CBA = False              # use CBA-based rule pruning
MAX_RULES_COUNT = 10000      # upper bound on mined rules per task
MIN_CONFIDENCE = 0.5         # minimum rule confidence, interval [0;1]
MIN_SUPPORT = 0.01           # minimum rule support, interval [0;1]
MAX_RULE_LENGTH = 0          # 0 = unlimited, otherwise >= 2
RESULTS_DIRECTORY = ""       # resolved to ../../results when left empty
CONTINUE_PREVIOUS_RUN = False
# region params
# Parse CLI options into the module-level configuration constants above.
try:
    opts, args = getopt.getopt(sys.argv[1:], "", ["auto_conf_supp", "cba", "min_conf=", "min_supp=", "max_rule_length=", "max_rules_count=", "api_key=", "api_url=", "output=", "continue"])
except getopt.GetoptError as err:
    # print help information and exit:
    print(str(err))  # will print something like "option -a not recognized"
    sys.exit(2)
for option, value in opts:
    if option == "--auto_conf_supp":
        # flag options accept an empty value or "1"
        if value == "" or value == 1 or value == "=1":
            USE_AUTO_CONF_SUPP = True
    elif option == "--cba":
        if value == "" or value == 1 or value == "=1":
            USE_CBA = True
    elif option == "--continue":
        if value == "" or value == 1 or value == "=1":
            CONTINUE_PREVIOUS_RUN = True
    elif option == "--min_conf":
        # str.isdigit() would wrongly reject decimal values such as "0.5";
        # parse with float() and validate the [0;1] interval instead
        try:
            value = float(value)
        except ValueError:
            raise Exception("Param min_conf has to have numeric value from interval [0;1]")
        if value > 1 or value < 0:
            raise Exception("Param min_conf has to have numeric value from interval [0;1]")
        MIN_CONFIDENCE = value
    elif option == "--min_supp":
        try:
            value = float(value)
        except ValueError:
            raise Exception("Param min_supp has to have numeric value from interval [0;1]")
        if value > 1 or value < 0:
            raise Exception("Param min_supp has to have numeric value from interval [0;1]")
        MIN_SUPPORT = value
    elif option == "--max_rule_length":
        # allowed values: 0 (unlimited) or integers >= 2
        try:
            value = int(value)
        except ValueError:
            raise Exception("Param max_rule_length has to have integer value 0 or higher than 2")
        if value < 0 or (value > 0 and value < 2):
            raise Exception("Param max_rule_length has to have integer value 0 or higher than 2")
        MAX_RULE_LENGTH = value
    elif option == "--max_rules_count":
        # NOTE: raising a plain string is a TypeError in Python 3;
        # raise a proper Exception instead
        try:
            value = int(value)
        except ValueError:
            raise Exception("Param max_rules_count has to have integer value higher than 0")
        if value < 1:
            raise Exception("Param max_rules_count has to have integer value higher than 0")
        MAX_RULES_COUNT = value
    elif option == "--api_key":
        API_KEY = value
    elif option == "--api_url":
        API_URL = value
    elif option == "--output":
        RESULTS_DIRECTORY = value
# endregion params
# region check if results directory is writable
if RESULTS_DIRECTORY == "":
    # default results directory: <this file's dir>/../../results
    RESULTS_DIRECTORY = os.path.dirname(os.path.realpath(__file__))
    if not RESULTS_DIRECTORY.endswith(os.sep):
        RESULTS_DIRECTORY += os.sep
    RESULTS_DIRECTORY += ".." + os.sep + ".." + os.sep + "results"
if RESULTS_DIRECTORY.endswith(os.sep):
    # strip the trailing separator from the directory path; str.rstrip
    # returns a new string, so the result must be assigned back (the
    # original call discarded it, making the strip a no-op)
    RESULTS_DIRECTORY = RESULTS_DIRECTORY.rstrip(os.sep)
if not os.path.isdir(RESULTS_DIRECTORY):
    raise Exception("Results directory does not exist: " + RESULTS_DIRECTORY)
logging.info("RESULTS DIRECTORY: " + os.path.realpath(RESULTS_DIRECTORY))
# endregion check if results directory is writable
# Load the list of datasets to evaluate and connect to the EasyMiner API.
datasets_list = datasets.get_all()
if API_KEY:
    api = Api(API_URL, API_KEY)
else:
    api = Api(API_URL)
# register a new user (when no API key was supplied)
api.register_new_user()
# verify the access configuration
api.check_user_access()
def slowdown_counter():
    """
    Function for slowdown effect (using counter processed_datasets_count)
    :return:
    """
    global processed_datasets_count
    # Fast path: just bump the counter until the threshold is crossed.
    if processed_datasets_count <= 10:
        processed_datasets_count += 1
        return
    # slowdown the evaluation script due to backend server load
    logging.info('--slow down--')
    time.sleep(10)
    processed_datasets_count = 0
def get_results_filename(dataset_name: str, fold_id: int = -1, file_type: str = ".evalResult.json") -> str:
    """
    Build the path of the file used to store classification results.
    :param dataset_name: str
    :param fold_id: int appended as "_<id>" when >= 0
    :param file_type: str file-name suffix
    :return: str
    """
    fold_suffix = "_" + str(fold_id) if fold_id >= 0 else ""
    return RESULTS_DIRECTORY + os.sep + dataset_name + fold_suffix + file_type
# region remove current result files
# On a fresh run, delete per-fold result files and both summary files left
# over from any previous evaluation.
if not CONTINUE_PREVIOUS_RUN:
    for dataset in datasets_list:
        dataset_name = dataset['name']
        for fold_id in range(0, 10):
            fold_results_filename = get_results_filename(dataset_name, fold_id)
            if os.path.isfile(fold_results_filename):
                os.remove(fold_results_filename)
                logging.info("removed file: "+os.path.basename(fold_results_filename))
    results_csv_filename=get_results_filename(dataset_name="_results", file_type=".summary.csv")
    if os.path.isfile(results_csv_filename):
        os.remove(results_csv_filename)
        logging.info("removed file: " + os.path.basename(results_csv_filename))
    results_json_filename = get_results_filename(dataset_name="_results", file_type=".summary.json")
    if os.path.isfile(results_json_filename):
        os.remove(results_json_filename)
        logging.info("removed file: " + os.path.basename(results_json_filename))
# endregion remove current result files
# region evaluation of individual datasets
# For every dataset and every fold: build a miner on the train split, mine
# rules, score them on the test split and save the raw scorer result.
processed_datasets_count=0
for dataset in datasets_list:
    for fold_id in range(0, 10):
        dataset_name = dataset['name']
        target_variable = dataset['target_variable']
        # when continuing a previous run, skip folds that already have results
        if CONTINUE_PREVIOUS_RUN and os.path.isfile(get_results_filename(dataset_name, fold_id)):
            # fold_id is an int; str() is required for the concatenation
            # (the original raised TypeError here)
            logging.info("SKIPPING: " + dataset_name + " " + str(fold_id))
            continue
        # maximum number of attempts for this fold
        repeat_count = 3
        while repeat_count>0:
            repeat_count -= 1
            try:
                # create dataset and miner
                datasource_id = api.create_datasource(dataset_name, fold_id, TYPE_TRAIN)
                miner_id = api.create_miner(datasource_id, dataset_name + str(fold_id))
                # preprocess attributes
                attributes_map = api.preprocess_fields_each_one(miner_id)
                # create and run task; pass the configured thresholds so the
                # --min_conf/--min_supp CLI options actually take effect
                # (they were previously hard-coded to 0.5/0.01)
                task_id = api.create_task(miner_id=miner_id, attributes_map=attributes_map, max_rules_count=MAX_RULES_COUNT,
                                          use_cba=USE_CBA, auto_conf_supp=USE_AUTO_CONF_SUPP, max_rule_length=MAX_RULE_LENGTH,
                                          im_conf=MIN_CONFIDENCE, im_supp=MIN_SUPPORT, target_column_name=dataset['target_variable'])
                api.run_task(task_id)
                # create test datasource and run scorer
                test_datasource_id = api.create_datasource(dataset_name, fold_id, TYPE_TEST)
                scorer_result = api.run_scorer(task_id, test_datasource_id)
                # guard against an empty / zero-only scorer response
                if not (int(scorer_result["rowCount"]) > 0):
                    raise Exception("Invalid scorer values, or accuracy is null!")
                # save the classification result into its working file
                # (context manager guarantees the handle is closed)
                with open(get_results_filename(dataset_name, fold_id), "w") as output_file:
                    output_file.write(json.dumps(scorer_result))
                slowdown_counter()
                break
            except Exception as e:
                if repeat_count>0:
                    # log and slow down before the next retry
                    logging.exception(e)
                    time.sleep(30)
                else:
                    raise e
# endregion evaluation of individual datasets
# region processing of results
# Aggregate the per-fold scorer files into one CSV line and one JSON record
# per dataset.
output_csv_file=open(get_results_filename(dataset_name="_results",file_type=".summary.csv"),"w")
output_csv_file.write("dataset;AVG rule count;test rows;true positives;false positives;uncovered;AVG accuracy;AVG of accuracies\n");
output_results = {}
for dataset in datasets_list:
    rule_count = 0
    row_count = 0
    correct = 0
    incorrect = 0
    unclassified = 0
    accuracy_avg = 0
    dataset_name = dataset['name']
    # region check that result files exist for all folds of this dataset
    process_dataset = True
    for fold_id in range(0, 10):
        if not os.path.isfile(get_results_filename(dataset_name, fold_id)):
            process_dataset = False
    if (not process_dataset):
        logging.error('Dataset skipped [not all results available]: ' + dataset_name)
        continue
    # endregion check that result files exist for all folds of this dataset
    # Sum counters over the 10 folds; accuracy_avg accumulates per-fold
    # accuracies and is divided by 10 below.
    for fold_id in range(0, 10):
        fold_results_file = open(get_results_filename(dataset_name, fold_id), "r")
        fold_results = json.load(fold_results_file)
        fold_results_file.close()
        fold_results_data_correct = int(fold_results["correct"])
        fold_results_data_row_count = int(fold_results["rowCount"])
        rule_count += int(fold_results["task"]["rulesCount"])
        row_count += fold_results_data_row_count
        correct += fold_results_data_correct
        incorrect += int(fold_results["incorrect"])
        unclassified += int(fold_results["unclassified"])
        accuracy_avg += (fold_results_data_correct / fold_results_data_row_count)
    output_csv_file.write(dataset_name + ";"
                          + str(rule_count / 10) + ";"
                          + str(row_count) + ";"
                          + str(correct) + ";"
                          + str(incorrect) + ";"
                          + str(unclassified) + ";"
                          + str(correct / row_count) + ";"
                          + str(accuracy_avg / 10)
                          + "\n")
    output_results[dataset_name] = {
        # NOTE(review): despite the key name this is the TOTAL rule count
        # over 10 folds (the CSV above writes rule_count / 10) — confirm
        # which value downstream consumers expect.
        "avg_rule_count": rule_count,
        "row_count": row_count,
        "correct": correct,
        "incorrect": incorrect,
        "unclassified": unclassified,
        "accuracy": str(correct / row_count),
        "avg_of_accuracies": str(accuracy_avg / 10)
    }
output_csv_file.close()
output_json_file = open(get_results_filename(dataset_name="_results",file_type=".summary.json"), "w")
output_json_file.write(json.dumps(output_results))
output_json_file.close()
logging.info('EVALUATION FINISHED - result are in: ' + os.path.realpath(RESULTS_DIRECTORY))
# endregion processing of results
| |
"""
The merger of argparse and cmd goes here. This holds the main base class
used by all commands.
"""
import collections
import functools
import inspect
import itertools
import re
import shlex
from . import supplement
from .. import completer, layout, eventing, session, paging
def parse_docstring(entity):
    """ Extract a sanitized (title, details) pair from an entity's docstring,
    git style: the first non-blank line is the title and the remaining lines
    are the details.  Both values fall back to None when absent. """
    raw = inspect.getdoc(entity)
    if not raw:
        return None, None
    lines = raw.splitlines(keepends=True)
    # Drop a leading blank line before the title, if any.
    if lines and not lines[0].strip():
        del lines[0]
    title = None
    if lines:
        title = lines.pop(0).strip() or None
    # Drop the separator blank line between title and body.
    if lines and not lines[0].strip():
        del lines[0]
    desc = ''.join(lines).rstrip() or None
    return title, desc
class Command(eventing.Eventer):
""" The primary encapsulation for a shellish command. Each command or
subcommand should be an instance of this class. The docstring for sub-
classes is used in --help output for this command. """
name = None
title = None
desc = None
use_pager = False
ArgumentParser = supplement.ShellishParser
ArgumentFormatter = supplement.ShellishHelpFormatter
Session = session.Session
completion_excludes = {'--help'}
arg_label_fmt = '__command[%d]__'
env_scrub_re = '[^\w\s\-_]' # chars scrubed from env vars
env_flatten_re = '[\s\-_]+' # chars converted to underscores
def setup_args(self, parser):
""" Subclasses should provide any setup for their parsers here. """
pass
def prerun(self, args):
""" Hook to do something prior to invocation. """
pass
def postrun(self, args, result=None, exc=None):
""" Hook to do something following invocation. """
pass
def run(self, args):
""" Primary entry point for command exec. """
self.argparser.print_help()
raise SystemExit(1)
def __init__(self, parent=None, title=None, desc=None, name=None, run=None,
prerun=None, postrun=None, **context):
self.add_events(['prerun', 'postrun', 'setup_args', 'precomplete',
'postcomplete'])
if name:
self.name = name
if self.name is None:
raise RuntimeError("Command missing `name` attribute")
if title is not None:
self.title = title
if desc is not None:
self.desc = desc
if self.title is None and self.desc is None and \
type(self) is not Command:
self.title, self.desc = parse_docstring(self)
if run is not None:
self.run = run
if prerun is not None:
self.prerun = prerun
if postrun is not None:
self.postrun = postrun
self.subcommands = collections.OrderedDict()
self.default_subcommand = None
self.session = None
self.context_keys = set()
self._autoenv_actions = set()
self.inject_context(context)
self.subparsers = None
self.argparser = self.create_argparser()
self.parent = parent
self.setup_args(self.argparser)
self.fire_event('setup_args', self.argparser)
def get_or_create_session(self):
if self.session is None:
self.attach_session()
return self.session
def parse_args(self, argv=None):
""" Return an argparse.Namespace of the argv string or sys.argv if
argv is None. """
arg_input = shlex.split(argv) if argv is not None else None
self.get_or_create_session()
return self.argparser.parse_args(arg_input)
def __call__(self, args=None, argv=None):
""" If a subparser is present and configured we forward invocation to
the correct subcommand method. If all else fails we call run(). """
session = self.get_or_create_session()
if args is None:
args = self.parse_args(argv)
commands = self.get_commands_from(args)
if self.subparsers:
try:
command = commands[self.depth]
except IndexError:
pass
else:
return command(args)
if self.default_subcommand:
parser = self.default_subcommand.argparser
parser.parse_args([], namespace=args)
return self(args) # retry
return session.execute(self, args)
def __getitem__(self, item):
return self.subcommands[item]
def get_pager_spec(self):
""" Find the best pager settings for this command. If the user has
specified overrides in the INI config file we prefer those. """
self_config = self.get_config()
pagercmd = self_config.get('pager')
istty = self_config.getboolean('pager_istty')
core_config = self.get_config('core')
if pagercmd is None:
pagercmd = core_config.get('pager')
if istty is None:
istty = core_config.get('pager_istty')
return {
"pagercmd": pagercmd,
"istty": istty
}
def run_wrap(self, args):
""" Wrap some standard protocol around a command's run method. This
wrapper should generally never capture exceptions. It can look at
them and do things but prerun and postrun should always be symmetric.
Any exception suppression should happen in the `session.execute`. """
self.fire_event('prerun', args)
self.prerun(args)
try:
if self.session.allow_pager and self.use_pager:
desc = 'Command\: %s' % '-'.join(self.prog.split())
with paging.pager_redirect(desc, **self.get_pager_spec()):
result = self.run(args)
else:
result = self.run(args)
except (SystemExit, Exception) as e:
self.postrun(args, exc=e)
self.fire_event('postrun', args, exc=e)
raise e
else:
self.postrun(args, result=result)
self.fire_event('postrun', args, result=result)
return result
def config_section(self):
""" The string used in a .<ROOT>_config file section. Usually this
is just the full prog name for a command minus the root command. """
names = []
cmd = self
while cmd.parent:
names.append(cmd.name)
cmd = cmd.parent
if not names:
return self.name
else:
return ' '.join(reversed(names))
def default_config(self):
""" Can be overridden to provide a 1 level deep dictionary of config
values. Theses values are optionally overridden by the end-user via
the session's load_config routine, that essentially looks for an INI
file where the `[section]` is the `.prog` value for this command. """
return {}
def get_config(self, section=None):
""" Return the merged end-user configuration for this command or a
specific section if set in `section`. """
config = self.session.config
section = self.config_section() if section is None else section
try:
return config[section]
except KeyError:
config.add_section(section)
return config[section]
@property
def parent(self):
return self._parent
@parent.setter
def parent(self, parent):
""" Copy context from the parent into this instance as well as
adjusting or depth value to indicate where we exist in a command
tree. """
self._parent = parent
if parent:
pctx = dict((x, getattr(parent, x)) for x in parent.context_keys)
self.inject_context(pctx)
self.depth = parent.depth + 1
for command in self.subcommands.values():
command.parent = self # bump.
else:
self.depth = 0
def find_root(self):
""" Traverse parent refs to top. """
cmd = self
while cmd.parent:
cmd = cmd.parent
return cmd
def inject_context(self, __context_dict__=None, **context):
""" Map context dict to this instance as attributes and keep note of
the keys being set so we can pass this along to any subcommands. """
context = context or __context_dict__
self.context_keys |= set(context.keys())
for key, value in context.items():
setattr(self, key, value)
for command in self.subcommands.values():
command.inject_context(context)
@property
def prog(self):
return self.argparser.prog
@prog.setter
def prog(self, prog):
""" Update ourself and any of our subcommands. """
self.argparser.prog = prog
fmt = '%s %%s' % prog if prog else '%s'
for command in self.subcommands.values():
command.prog = fmt % command.name
# Rebind autoenv vars with recomputed key.
for action in self._autoenv_actions:
self.argparser.unbind_env(action)
self.argparser.bind_env(action, self._make_autoenv(action))
@property
def depth(self):
return self._depth
@depth.setter
def depth(self, value):
""" Update ourself and any of our subcommands. """
for command in self.subcommands.values():
command.depth = value + 1
del command.argparser._defaults[self.arg_label_fmt % self._depth]
command.argparser._defaults[self.arg_label_fmt % value] = command
self._depth = value
def add_argument(self, *args, parser=None, autoenv=False, env=None,
complete=None, **kwargs):
""" Allow cleaner action supplementation. Autoenv will generate an
environment variable to be usable as a defaults setter based on the
command name and the dest property of the action. """
if parser is None:
parser = self.argparser
action = parser.add_argument(*args, **kwargs)
if autoenv:
if env is not None:
raise TypeError('Arguments `env` and `autoenv` are mutually '
'exclusive')
env = self._make_autoenv(action)
if env:
self.argparser.bind_env(action, env)
if autoenv:
self._autoenv_actions.add(action)
if complete:
action.complete = complete
return action
def _make_autoenv(self, action):
""" Generate a suitable env variable for this action. This is
dependant on our subcommand hierarchy. Review the prog setter for
details. """
env = ('%s_%s' % (self.prog, action.dest)).upper()
env = re.sub(self.env_scrub_re, '', env.strip())
env = re.sub(self.env_flatten_re, '_', env)
if re.match('^[0-9]', env):
# Handle leading numbers.
env = '_%s' % env
return env
def add_file_argument(self, *args, mode='r', buffering=1,
filetype_options=None, **kwargs):
""" Add a tab-completion safe FileType argument. This argument
differs from a normal argparse.FileType based argument in that the
value is a factory function that returns a file handle instead of
providing an already open file handle. There are various reasons
why this is a better approach but it is also required to avoid
erroneous creation of files with shellish tab completion. """
type_ = supplement.SafeFileType(mode=mode, bufsize=buffering,
**filetype_options or {})
return self.add_argument(*args, type=type_, **kwargs)
def add_table_arguments(self, *args, parser=None, **kwargs):
if parser is None:
parser = self.argparser
return layout.Table.attach_arguments(parser, *args, **kwargs)
def create_argparser(self):
""" Factory for arg parser. Can be overridden as long as it returns
an ArgParser compatible instance. """
if self.desc:
if self.title:
fulldesc = '%s\n\n%s' % (self.title, self.desc)
else:
fulldesc = self.desc
else:
fulldesc = self.title
return self.ArgumentParser(command=self, prog=self.name,
description=fulldesc)
def attach_session(self):
""" Create a session and inject it as context for this command and any
subcommands. """
assert self.session is None
root = self.find_root()
session = self.Session(root)
root.inject_context(session=session)
return session
def complete(self, text, line, begin, end):
""" Get and format completer choices. Note that all internal calls to
completer functions must use [frozen]set()s. """
self.fire_event('precomplete', text, line, begin, end)
choices = self._complete(text, line, begin, end)
choices -= self.completion_excludes
sz = len(choices)
if sz == 1:
# XXX: This is pretty bad logic here. In reality we need much
# more complicated escape handling and imbalanced quote support.
return set(shlex.quote(x) for x in choices)
elif sz > 2:
# We don't need the sentinel choice to prevent completion
# when there is already more than 1 choice.
choices -= {completer.ActionCompleter.sentinel}
self.fire_event('postcomplete', choices)
return choices
def _complete(self, text, line, begin, end):
""" Do naive argument parsing so the completer has better ability to
understand expansion rules. """
line = line[:end] # Ignore characters following the cursor.
fullargs = self.split_line(line)[1:]
args = fullargs[:]
options = self.deep_scan_parser(self.argparser)
# Walk into options tree if subcommands are detected.
last_subcommand = None
while True:
for key, completers in options.items():
if key in args and hasattr(completers[0], 'items'):
args.remove(key)
last_subcommand = key
options = completers[0]
break
else:
break
if text == last_subcommand:
# We have to specially catch the case where the last argument is
# the key used to find our subparser. More specifically when the
# cursor is not preceded by a space too, as this prevents the
# completion routines from continuing. The simplest way without
# complicating the algo for coming up with our options list is to
# simply shortcut the completer by returning a single item.
# Subsequent tabs will work normally.
return {text}
# Look for incomplete actions.
choices = set(x for x in options
if x is not None and x.startswith(text))
arg_buf = []
pos_args = []
trailing_action = None
# The slice below skips the last arg if it is 'active'.
for x in reversed(args[:-1 if text else None]):
if x in options:
action = options[x][0]
action.consume(arg_buf)
pos_args.extend(arg_buf)
del arg_buf[:]
if action.full:
choices -= {action.key}
if not trailing_action:
trailing_action = action
if not action.full:
if action.reached_min:
choices |= action(self, text, fullargs)
choices -= {action.key}
else:
choices = action(self, text, fullargs)
break
else:
arg_buf.insert(0, x)
pos_args.extend(arg_buf)
# Feed any remaining arguments in the buffer to positionals so long as
# there isn't a trailing action that can still consume.
if None in options and (not trailing_action or trailing_action.full):
for x_action in options[None]:
x_action.consume(pos_args)
if not x_action.reached_min:
choices = x_action(self, text, fullargs)
break
elif not x_action.full:
choices |= x_action(self, text, fullargs)
return choices
def split_line(self, line):
""" Try to do pure shlex.split unless it can't parse the line. In that
case we trim the input line until shlex can split the args and tack the
unparsable portion on as the last argument. """
remainder = []
while True:
try:
args = shlex.split(line)
except ValueError:
remainder.append(line[-1])
line = line[:-1]
else:
if remainder:
args.append(''.join(reversed(remainder)))
return args
@functools.lru_cache()
def deep_scan_parser(self, parser):
results = collections.defaultdict(list)
for x in parser._actions:
ac = completer.ActionCompleter(x)
if ac.subparsers:
for key, xx in ac.subparsers.items():
results[key].append(self.deep_scan_parser(xx))
else:
results[ac.key].append(ac)
return results
def get_commands_from(self, args):
""" We have to code the key names for each depth. This method scans
for each level and returns a list of the command arguments. """
commands = []
for i in itertools.count(0):
try:
commands.append(getattr(args, self.arg_label_fmt % i))
except AttributeError:
break
return commands
def add_subcommand(self, command, default=False):
    """Register *command* (instance or class) as a subcommand of this one.

    When *default* is True the command also becomes the fallback used when
    no explicit subcommand argument is given; only one default is allowed.
    Raises TypeError for unnamed commands and ValueError on duplicate names
    or a second default.
    """
    if isinstance(command, type):
        # A class was given; instantiate it on the caller's behalf.
        command = command()
    command.parent = self
    if command.name is None:
        raise TypeError('Cannot add unnamed command: %s' % command)
    if command.name in self.subcommands:
        raise ValueError('Command name already added: %s' % command.name)
    if not self.subparsers:
        # Lazily create the argparse subparsers group on first use.
        desc = 'Provide a subcommand argument to perform an operation.'
        addsub = self.argparser.add_subparsers
        self.subparsers = addsub(title='subcommands', description=desc,
                                 metavar='COMMAND')
    if default:
        if self.default_subcommand:
            raise ValueError("Default subcommand already exists.")
        self.default_subcommand = command
    # Help text mirrors the command title and flags the default choice.
    if command.title is not None:
        help_fmt = '%s (default)' if default else '%s'
        help = help_fmt % command.title
    else:
        help = '(default)' if default else ''
    command.prog = '%s %s' % (self.prog, command.name)
    # Stash the command object itself as the parsed value for this depth so
    # parsing results can be mapped back to command instances.
    command.argparser._defaults[self.arg_label_fmt % self.depth] = command
    # NOTE(review): the lines below poke argparse private internals
    # (_ChoicesPseudoAction, _choices_actions, _name_parser_map) to attach
    # an existing parser without add_parser(); fragile across Python
    # versions -- confirm against the targeted argparse release.
    action = self.subparsers._ChoicesPseudoAction(command.name, (), help)
    self.subparsers._choices_actions.append(action)
    self.subparsers._name_parser_map[command.name] = command.argparser
    self.subcommands[command.name] = command
def remove_subcommand(self, command=None, name=None):
    """Unregister a subcommand, given either the command object or its name.

    Undoes the argparse bookkeeping performed by add_subcommand() and
    detaches the command from its session/parent.  Raises TypeError when
    neither argument is supplied and KeyError for an unknown name.
    """
    if name is None:
        if command is None:
            raise TypeError('A command or name is required')
        name = command.name
    # Re-fetch by name so the tracked instance (not a stale one) is removed.
    command = self.subcommands.pop(name)
    del self.subparsers._name_parser_map[name]
    # Locate the pseudo-action that add_subcommand() installed for the help
    # listing; for/else raises if it is unexpectedly missing.
    for action in self.subparsers._choices_actions:
        if action.dest == name:
            break
    else:
        raise RuntimeError("Subparser action not found for subcommand")
    self.subparsers._choices_actions.remove(action)
    command.session = None
    command.parent = None
| |
#!/usr/bin/python
#
# Copyright (c) 2011 The SaveCoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
# Seconds to back off before retrying after an RPC failure (see Miner.iterate).
ERR_SLEEP = 15
# Default upper bound on nonces scanned per getwork request; retuned at runtime.
MAX_NONCE = 1000000L
# Global config mapping, filled from the key=value config file in __main__.
settings = {}
# Debug pretty-printer (not used in this chunk).
pp = pprint.PrettyPrinter(indent=4)
class SaveCoinRPC:
    """Minimal JSON-RPC 1.1 client for a savecoind HTTP endpoint (Python 2)."""
    # Starting request id; the '+=' in rpc() rebinds it per-instance.
    OBJID = 1
    def __init__(self, host, port, username, password):
        # HTTP Basic auth header built from the RPC credentials.
        authpair = "%s:%s" % (username, password)
        self.authhdr = "Basic %s" % (base64.b64encode(authpair))
        # Positional httplib args: strict=False, timeout=30 seconds.
        self.conn = httplib.HTTPConnection(host, port, False, 30)
    def rpc(self, method, params=None):
        """POST one JSON-RPC call; return its result, the server's error
        object, or None when the response is missing or undecodable."""
        self.OBJID += 1
        obj = { 'version' : '1.1',
                'method' : method,
                'id' : self.OBJID }
        if params is None:
            obj['params'] = []
        else:
            obj['params'] = params
        self.conn.request('POST', '/', json.dumps(obj),
                          { 'Authorization' : self.authhdr,
                            'Content-type' : 'application/json' })
        resp = self.conn.getresponse()
        if resp is None:
            print "JSON-RPC: no response"
            return None
        body = resp.read()
        resp_obj = json.loads(body)
        if resp_obj is None:
            print "JSON-RPC: cannot JSON-decode body"
            return None
        # NOTE(review): a server-side error is *returned* to the caller in
        # place of a result rather than raised -- callers must distinguish
        # the two shapes themselves.
        if 'error' in resp_obj and resp_obj['error'] != None:
            return resp_obj['error']
        if 'result' not in resp_obj:
            print "JSON-RPC: no result in object"
            return None
        return resp_obj['result']
    def getblockcount(self):
        """Convenience wrapper for the 'getblockcount' RPC."""
        return self.rpc('getblockcount')
    def getwork(self, data=None):
        """Fetch new work, or submit a solved block when *data* is given."""
        return self.rpc('getwork', data)
def uint32(x):
    """Truncate *x* to an unsigned 32-bit value (Python 2 long)."""
    return x & 0xffffffffL
def bytereverse(x):
    """Reverse the byte order of the 32-bit value *x* (endian swap)."""
    # Each byte is shifted to its mirrored position; uint32() masks off the
    # overflow bits produced by the left shifts.
    return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
                    (((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
    """Return *in_buf* with every 32-bit word byte-swapped (word order kept)."""
    swapped = []
    offset = 0
    total = len(in_buf)
    while offset < total:
        # Unpack one native-endian 32-bit word, swap its bytes, repack it.
        (value,) = struct.unpack('@I', in_buf[offset:offset + 4])
        swapped.append(struct.pack('@I', bytereverse(value)))
        offset += 4
    return ''.join(swapped)
def wordreverse(in_buf):
    """Return *in_buf* with its sequence of 4-byte words in reverse order."""
    words = [in_buf[i:i + 4] for i in range(0, len(in_buf), 4)]
    return ''.join(reversed(words))
class Miner:
    """Brute-force getwork miner: scan nonces and submit winning shares."""
    def __init__(self, id):
        self.id = id
        # Nonces to scan per getwork; retuned by iterate() from 'scantime'.
        self.max_nonce = MAX_NONCE
    def work(self, datastr, targetstr):
        """Scan nonces against one getwork job.

        Returns (hashes_done, nonce_bin) where nonce_bin is the packed
        winning nonce, or None when the scan range is exhausted without a
        sub-target hash.
        """
        # decode work data hex string to binary
        static_data = datastr.decode('hex')
        static_data = bufreverse(static_data)
        # the first 76b of 80b do not change
        blk_hdr = static_data[:76]
        # decode 256-bit target value
        targetbin = targetstr.decode('hex')
        targetbin = targetbin[::-1]  # byte-swap and dword-swap
        targetbin_str = targetbin.encode('hex')
        target = long(targetbin_str, 16)
        # pre-hash first 76b of block header
        static_hash = hashlib.sha256()
        static_hash.update(blk_hdr)
        # NOTE(review): if self.max_nonce were 0 the final return would hit
        # an unbound 'nonce'; iterate() keeps it positive today -- confirm
        # before reusing this method elsewhere.
        for nonce in xrange(self.max_nonce):
            # encode 32-bit nonce value
            nonce_bin = struct.pack("<I", nonce)
            # hash final 4b, the nonce value
            hash1_o = static_hash.copy()
            hash1_o.update(nonce_bin)
            hash1 = hash1_o.digest()
            # sha256 hash of sha256 hash
            hash_o = hashlib.sha256()
            hash_o.update(hash1)
            hash = hash_o.digest()
            # quick test for winning solution: high 32 bits zero?
            if hash[-4:] != '\0\0\0\0':
                continue
            # convert binary hash to 256-bit Python long
            hash = bufreverse(hash)
            hash = wordreverse(hash)
            hash_str = hash.encode('hex')
            l = long(hash_str, 16)
            # proof-of-work test: hash < target
            if l < target:
                print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
                return (nonce + 1, nonce_bin)
            else:
                print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
                # return (nonce + 1, nonce_bin)
        return (nonce + 1, None)
    def submit_work(self, rpc, original_data, nonce_bin):
        """Splice the winning nonce into the work data and send it upstream."""
        nonce_bin = bufreverse(nonce_bin)
        nonce = nonce_bin.encode('hex')
        # The nonce occupies hex chars [152:160) of the hex-encoded data.
        solution = original_data[:152] + nonce + original_data[160:256]
        param_arr = [ solution ]
        result = rpc.getwork(param_arr)
        print time.asctime(), "--> Upstream RPC result:", result
    def iterate(self, rpc):
        """Fetch one getwork job, scan it, and adapt the scan range.

        On RPC failure or a malformed job, sleep ERR_SLEEP and return.
        """
        work = rpc.getwork()
        if work is None:
            time.sleep(ERR_SLEEP)
            return
        if 'data' not in work or 'target' not in work:
            time.sleep(ERR_SLEEP)
            return
        time_start = time.time()
        (hashes_done, nonce_bin) = self.work(work['data'],
                                             work['target'])
        time_end = time.time()
        time_diff = time_end - time_start
        # Re-aim max_nonce so one scan takes roughly settings['scantime']
        # seconds at the measured hash rate, capped just below 2**32.
        self.max_nonce = long(
            (hashes_done * settings['scantime']) / time_diff)
        if self.max_nonce > 0xfffffffaL:
            self.max_nonce = 0xfffffffaL
        if settings['hashmeter']:
            print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
                self.id, hashes_done,
                (hashes_done / 1000.0) / time_diff)
        if nonce_bin is not None:
            self.submit_work(rpc, work['data'], nonce_bin)
    def loop(self):
        """Mine forever against the RPC endpoint from global settings."""
        rpc = SaveCoinRPC(settings['host'], settings['port'],
                          settings['rpcuser'], settings['rpcpass'])
        # NOTE(review): the constructor never returns None, so this guard
        # is effectively dead code.
        if rpc is None:
            return
        while True:
            self.iterate(rpc)
def miner_thread(id):
    """Process target: construct one Miner for *id* and mine forever."""
    worker = Miner(id)
    worker.loop()
if __name__ == '__main__':
    # Entry point: read a key=value config file, apply defaults, then spawn
    # one mining process per configured thread.
    if len(sys.argv) != 2:
        print "Usage: pyminer.py CONFIG-FILE"
        sys.exit(1)
    f = open(sys.argv[1])
    for line in f:
        # skip comment lines
        m = re.search('^\s*#', line)
        if m:
            continue
        # parse key=value lines
        m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
        if m is None:
            continue
        settings[m.group(1)] = m.group(2)
    f.close()
    # Defaults for everything except the mandatory RPC credentials.
    if 'host' not in settings:
        settings['host'] = '127.0.0.1'
    if 'port' not in settings:
        settings['port'] = 8332
    if 'threads' not in settings:
        settings['threads'] = 1
    if 'hashmeter' not in settings:
        settings['hashmeter'] = 0
    if 'scantime' not in settings:
        settings['scantime'] = 30L
    if 'rpcuser' not in settings or 'rpcpass' not in settings:
        print "Missing username and/or password in cfg file"
        sys.exit(1)
    # Config values arrive as strings; normalize the numeric ones.
    settings['port'] = int(settings['port'])
    settings['threads'] = int(settings['threads'])
    settings['hashmeter'] = int(settings['hashmeter'])
    settings['scantime'] = long(settings['scantime'])
    # One OS process per mining thread (multiprocessing sidesteps the GIL).
    thr_list = []
    for thr_id in range(settings['threads']):
        p = Process(target=miner_thread, args=(thr_id,))
        p.start()
        thr_list.append(p)
        time.sleep(1)  # stagger threads
    print settings['threads'], "mining threads started"
    print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
    try:
        for thr_proc in thr_list:
            thr_proc.join()
    except KeyboardInterrupt:
        pass
    print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
| |
# -*- coding: utf-8 -*-
"""
Created on Mon May 29 11:14:44 2017
@author: ning
"""
import pandas as pd
import os
import numpy as np
from collections import Counter
from imblearn.combine import SMOTETomek
from sklearn.ensemble import RandomForestClassifier
from imblearn.over_sampling import SMOTE#,RandomOverSampler
from imblearn.under_sampling import RandomUnderSampler
from imblearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
# Locate the directory holding eegPipelineFunctions.py, then the data root,
# preferring the desktop ('D:') paths and falling back to the laptop paths.
# The excepts are narrowed from bare 'except:' to OSError -- os.chdir raises
# OSError (or a subclass) on a missing/invalid path, and a bare except would
# also swallow KeyboardInterrupt/SystemExit.
try:
    function_dir = 'D:\\NING - spindle\\Spindle_by_Graphical_Features'
    os.chdir(function_dir)
except OSError:
    function_dir = 'C:\\Users\\ning\\OneDrive\\python works\\Spindle_by_Graphical_Features'
    os.chdir(function_dir)
import eegPipelineFunctions
try:
    file_dir = 'D:\\NING - spindle\\training set\\road_trip\\'
    # file_dir = 'D:\\NING - spindle\\training set\\road_trip_more_channels\\'
    os.chdir(file_dir)
except OSError:
    file_dir = 'C:\\Users\\ning\\Downloads\\road_trip\\'
    # file_dir = 'C:\\Users\\ning\\Downloads\\road_trip_more_channels\\'
    os.chdir(file_dir)
################################### Random forest #################################
# Pipeline: SMOTE+Tomek-link resampling, standardization, then a 50-tree RF.
clf = make_pipeline(SMOTETomek(random_state=12345,kind_smote='borderline2'),
                    StandardScaler(),
                    RandomForestClassifier(n_estimators=50,random_state=12345,criterion='gini',))
#                    class_weight={1:1/(1-ratio)}))
# Each cross_validation_report call scores one feature set ('signal', 'graph',
# or 'combine') and presumably returns a DataFrame (to_csv is called below) --
# TODO confirm against eegPipelineFunctions.
# NOTE(review): the 'signal' run passes ratio 0.5 and the pipeline object,
# while 'graph'/'combine' pass 0 and the string 'RF' -- confirm the asymmetry
# is intentional.
signal_features_indivisual_results,graph_features_indivisual_results,combine_features_indivisual_results={},{},{}
signal_features_indivisual_results = eegPipelineFunctions.cross_validation_report(signal_features_indivisual_results,0.5,
                                                                                  clf_=clf,file_dir=file_dir,compute='signal')
graph_features_indivisual_results = eegPipelineFunctions.cross_validation_report(graph_features_indivisual_results,0,
                                                                                 clf_='RF',file_dir=file_dir,compute='graph')
combine_features_indivisual_results = eegPipelineFunctions.cross_validation_report(combine_features_indivisual_results,0,
                                                                                   clf_='RF',file_dir=file_dir,compute='combine')
signal_features_indivisual_results.to_csv(file_dir+'individual_signal_feature_RF.csv',index=False)
graph_features_indivisual_results.to_csv(file_dir+'individual_graph_feature_RF.csv',index=False)
combine_features_indivisual_results.to_csv(file_dir+'individual_combine_feature_RF.csv',index=False)
################################### xgb #################################
# Same three feature sets scored with the 'xgb' classifier preset.
signal_features_indivisual_results,graph_features_indivisual_results,combine_features_indivisual_results={},{},{}
signal_features_indivisual_results = eegPipelineFunctions.cross_validation_report(signal_features_indivisual_results,0,
                                                                                  clf_='xgb',file_dir=file_dir,compute='signal')
graph_features_indivisual_results = eegPipelineFunctions.cross_validation_report(graph_features_indivisual_results,0,
                                                                                 clf_='xgb',file_dir=file_dir,compute='graph')
combine_features_indivisual_results = eegPipelineFunctions.cross_validation_report(combine_features_indivisual_results,0,
                                                                                   clf_='xgb',file_dir=file_dir,compute='combine')
signal_features_indivisual_results.to_csv(file_dir+'individual_signal_feature_xgb.csv',index=False)
graph_features_indivisual_results.to_csv(file_dir+'individual_graph_feature_xgb.csv',index=False)
combine_features_indivisual_results.to_csv(file_dir+'individual_combine_feature_xgb.csv',index=False)
################################### support vector machine ###########################
# SVM run; n_estimators=1 is forwarded to the helper -- meaning unclear from
# here, presumably an ensemble size of one. TODO confirm.
signal_features_indivisual_results,graph_features_indivisual_results,combine_features_indivisual_results={},{},{}
signal_features_indivisual_results = eegPipelineFunctions.cross_validation_report(signal_features_indivisual_results,0,
                                                                                  clf_='svm',file_dir=file_dir,compute='signal',n_estimators=1)
graph_features_indivisual_results = eegPipelineFunctions.cross_validation_report(graph_features_indivisual_results,0,
                                                                                 clf_='svm',file_dir=file_dir,compute='graph',n_estimators=1)
combine_features_indivisual_results = eegPipelineFunctions.cross_validation_report(combine_features_indivisual_results,0,
                                                                                   clf_='svm',file_dir=file_dir,compute='combine',n_estimators=1)
signal_features_indivisual_results.to_csv(file_dir+'individual_signal_feature_svm.csv',index=False)
graph_features_indivisual_results.to_csv(file_dir+'individual_graph_feature_svm.csv',index=False)
combine_features_indivisual_results.to_csv(file_dir+'individual_combine_feature_svm.csv',index=False)
################################## logistic regression ##################################################
signal_features_indivisual_results,graph_features_indivisual_results,combine_features_indivisual_results={},{},{}
signal_features_indivisual_results = eegPipelineFunctions.cross_validation_report(signal_features_indivisual_results,0,
                                                                                  clf_='logistic',file_dir=file_dir,compute='signal')
graph_features_indivisual_results = eegPipelineFunctions.cross_validation_report(graph_features_indivisual_results,0,
                                                                                 clf_='logistic',file_dir=file_dir,compute='graph')
combine_features_indivisual_results = eegPipelineFunctions.cross_validation_report(combine_features_indivisual_results,0,
                                                                                   clf_='logistic',file_dir=file_dir,compute='combine')
signal_features_indivisual_results.to_csv(file_dir+'individual_signal_feature_logistic.csv',index=False)
graph_features_indivisual_results.to_csv(file_dir+'individual_graph_feature_logistic.csv',index=False)
combine_features_indivisual_results.to_csv(file_dir+'individual_combine_feature_logistic.csv',index=False)
################################### knn #################################
# kNN run; n_estimators=15 is presumably repurposed as the neighbor count --
# TODO confirm against eegPipelineFunctions.
signal_features_indivisual_results,graph_features_indivisual_results,combine_features_indivisual_results={},{},{}
signal_features_indivisual_results = eegPipelineFunctions.cross_validation_report(signal_features_indivisual_results,0,
                                                                                  clf_='knn',file_dir=file_dir,compute='signal',n_estimators=15)
graph_features_indivisual_results = eegPipelineFunctions.cross_validation_report(graph_features_indivisual_results,0,
                                                                                 clf_='knn',file_dir=file_dir,compute='graph',n_estimators=15)
combine_features_indivisual_results = eegPipelineFunctions.cross_validation_report(combine_features_indivisual_results,0,
                                                                                   clf_='knn',file_dir=file_dir,compute='combine',n_estimators=15)
signal_features_indivisual_results.to_csv(file_dir+'individual_signal_feature_knn.csv',index=False)
graph_features_indivisual_results.to_csv(file_dir+'individual_graph_feature_knn.csv',index=False)
combine_features_indivisual_results.to_csv(file_dir+'individual_combine_feature_knn.csv',index=False)
################################ TPOT ############################################
#from sklearn.pipeline import make_pipeline, make_union
#from sklearn.decomposition import PCA
#from sklearn.neighbors import KNeighborsClassifier
#from sklearn.preprocessing import FunctionTransformer,Normalizer
#from sklearn.naive_bayes import BernoulliNB,GaussianNB
#from copy import copy
#from sklearn.svm import LinearSVC
#from sklearn.ensemble import GradientBoostingClassifier, VotingClassifier
#from sklearn.linear_model import LogisticRegression
#from sklearn.tree import DecisionTreeClassifier
#from sklearn.feature_selection import VarianceThreshold,SelectPercentile, f_classif
#clfs_graph = {1.5:make_pipeline(
# make_union(VotingClassifier([("est", GradientBoostingClassifier(max_depth=1,
# max_features=0.2,
# min_samples_leaf=5,
# min_samples_split=2,
# n_estimators=100,
# subsample=0.45))]),
# FunctionTransformer(copy)),
# LogisticRegression(C=0.5)),
# 2.0:LogisticRegression(C=0.1, dual=False),
# 2.5:LinearSVC(C=0.001, loss="hinge", penalty="l2", tol=0.1),
# 3.0:make_pipeline(make_union(
# Normalizer(norm="max"),
# FunctionTransformer(copy)),
# KNeighborsClassifier(n_neighbors=95, p=1)),
# 3.5:LogisticRegression(C=5.0),
# 4.0:make_pipeline(make_union(VotingClassifier([("est", BernoulliNB(alpha=100.0,
# fit_prior=True))]),
# FunctionTransformer(copy)),
# LogisticRegression(C=0.5, penalty="l2")),
# 4.5:LogisticRegression(),
# 5.0:LogisticRegression(C=0.5, dual=False, penalty="l2")}
#
#clfs_signal = {1.5:make_pipeline(VarianceThreshold(threshold=0.5),
# DecisionTreeClassifier(criterion="entropy",
# max_depth=3,
# min_samples_leaf=8,
# min_samples_split=6)),
# 2.0:make_pipeline(make_union(VotingClassifier([("est", GaussianNB())]), FunctionTransformer(copy)),
# LinearSVC(C=15.0, loss="hinge", penalty="l2", tol=0.01)),
# 2.5:LogisticRegression(C=0.01, dual=True),
# 3.0:LinearSVC(dual=True, loss="hinge", penalty="l2", tol=0.001),
# 3.5:LinearSVC(C=1.0, dual=True, loss="hinge", penalty="l2", tol=0.1),
# 4.0:make_pipeline(VarianceThreshold(threshold=0.9),
# make_union(VotingClassifier([("est", GaussianNB())]),
# FunctionTransformer(copy)),
# KNeighborsClassifier(n_neighbors=100, p=1)),
# 4.5:make_pipeline(SelectPercentile(score_func=f_classif, percentile=33),
# LogisticRegression(C=20.0, dual=False)),
# 5.0:make_pipeline(PCA(iterated_power=7, svd_solver="randomized"),GaussianNB())}
#
#signal_features_indivisual_results = {'subject':[],'day':[],'epoch_length':[],
# 'auc_score_mean':[],'auc_score_std':[],
# 'fpr':[],'tpr':[],
# 'precision':[],'recall':[],
# 'precision_mean':[],'precision_std':[],
# 'recall_mean':[],'recall_std':[],
# 'area_under_precision_recall':[],
# 'matthews_corrcoef_mean':[],
# 'matthews_corrcoef_std':[]}
#graph_features_indivisual_results = {'subject':[],'day':[],'epoch_length':[],
# 'auc_score_mean':[],'auc_score_std':[],
# 'fpr':[],'tpr':[],
# 'precision':[],'recall':[],
# 'precision_mean':[],'precision_std':[],
# 'recall_mean':[],'recall_std':[],
# 'area_under_precision_recall':[],
# 'matthews_corrcoef_mean':[],
# 'matthews_corrcoef_std':[]}
#combine_features_indivisual_results = {'subject':[],'day':[],'epoch_length':[],
# 'auc_score_mean':[],'auc_score_std':[],
# 'fpr':[],'tpr':[],
# 'precision':[],'recall':[],
# 'precision_mean':[],'precision_std':[],
# 'recall_mean':[],'recall_std':[],
# 'area_under_precision_recall':[],
# 'matthews_corrcoef_mean':[],
# 'matthews_corrcoef_std':[]}
#for directory_1 in [f for f in os.listdir(file_dir) if ('epoch_length' in f)]:
# sub_dir = file_dir + directory_1 + '\\'
# epoch_length = directory_1.split(' ')[1]
# os.chdir(sub_dir)
# #signal_features_indivisual_results[directory_1],graph_features_indivisual_results[directory_1]={},{}
# #df_cc, df_pli, df_plv, df_signal,df_graph = [],[],[],[],[]
# for sub_fold in os.listdir(sub_dir):
# sub_fold_dir = sub_dir + sub_fold + '\\'
# os.chdir(sub_fold_dir)
# sub = sub_fold[:-4]
# day = sub_fold[4:][-4:]
# print(sub,day,epoch_length)
#
# cc_features, pli_features, plv_features, signal_features = [pd.read_csv(f) for f in os.listdir(sub_fold_dir) if ('csv' in f)]
# #df_cc.append(cc_features)
# #df_pli.append(pli_features)
# #df_plv.append(plv_features)
# label = cc_features['label']
# cc_features = eegPipelineFunctions.get_real_part(cc_features)
# pli_features = eegPipelineFunctions.get_real_part(pli_features)
# plv_features = eegPipelineFunctions.get_real_part(plv_features)
# cc_features.columns = ['cc_'+name for name in cc_features]
# pli_features.columns = ['pli_'+name for name in pli_features]
# plv_features.columns = ['plv_'+name for name in plv_features]
# cc_features = cc_features.drop('cc_label',1)
# pli_features = pli_features.drop('pli_label',1)
# plv_features = plv_features.drop('plv_label',1)
# df_combine = pd.concat([cc_features,pli_features,plv_features],axis=1)
# df_combine['label']=label
# df_two = pd.concat([cc_features, pli_features, plv_features, signal_features],axis=1)
# try:
# signal_temp = eegPipelineFunctions.cross_validation_with_clfs(signal_features,clf_=clfs_signal[float(epoch_length)])
# graph_temp = eegPipelineFunctions.cross_validation_with_clfs(df_combine,clf_=clfs_graph[float(epoch_length)])
# two_temp = eegPipelineFunctions.cross_validation_with_clfs(df_two,clf_=clfs_graph[float(epoch_length)])
# auc_score,fpr,tpr,precision,recall,precision_scores,recall_scores,average_scores,MCC=signal_temp
# signal_features_indivisual_results['auc_score_mean'].append(np.nanmean(auc_score))
# signal_features_indivisual_results['auc_score_std'].append(np.std(auc_score))
# signal_features_indivisual_results['fpr'].append(fpr)
# signal_features_indivisual_results['tpr'].append(tpr)
# signal_features_indivisual_results['precision'].append(precision)
# signal_features_indivisual_results['recall'].append(recall)
# signal_features_indivisual_results['precision_mean'].append(np.nanmean(precision_scores))
# signal_features_indivisual_results['precision_std'].append(np.std(precision_scores))
# signal_features_indivisual_results['recall_mean'].append(np.nanmean(recall_scores))
# signal_features_indivisual_results['recall_std'].append(np.std(recall_scores))
# signal_features_indivisual_results['area_under_precision_recall'].append(average_scores)
# signal_features_indivisual_results['matthews_corrcoef_mean'].append(np.nanmean(MCC))
# signal_features_indivisual_results['matthews_corrcoef_std'].append(np.nanstd(MCC))
# signal_features_indivisual_results['subject'].append(sub)
# signal_features_indivisual_results['day'].append(int(day[-1]))
# signal_features_indivisual_results['epoch_length'].append(float(epoch_length))
# print(sub_fold,Counter(label),'signal:%.2f +/-%.2f'%(np.nanmean(auc_score),np.std(auc_score)))
# auc_score,fpr,tpr,precision,recall,precision_scores,recall_scores,average_scores,MCC=graph_temp
# graph_features_indivisual_results['auc_score_mean'].append(np.nanmean(auc_score))
# graph_features_indivisual_results['auc_score_std'].append(np.std(auc_score))
# graph_features_indivisual_results['fpr'].append(fpr)
# graph_features_indivisual_results['tpr'].append(tpr)
# graph_features_indivisual_results['precision'].append(precision)
# graph_features_indivisual_results['recall'].append(recall)
# graph_features_indivisual_results['precision_mean'].append(np.nanmean(precision_scores))
# graph_features_indivisual_results['precision_std'].append(np.std(precision_scores))
# graph_features_indivisual_results['recall_mean'].append(np.nanmean(recall_scores))
# graph_features_indivisual_results['recall_std'].append(np.std(recall_scores))
# graph_features_indivisual_results['area_under_precision_recall'].append(average_scores)
# graph_features_indivisual_results['matthews_corrcoef_mean'].append(np.nanmean(MCC))
# graph_features_indivisual_results['matthews_corrcoef_std'].append(np.nanstd(MCC))
# graph_features_indivisual_results['subject'].append(sub)
# graph_features_indivisual_results['day'].append(int(day[-1]))
# graph_features_indivisual_results['epoch_length'].append(float(epoch_length))
# print(sub_fold,Counter(label),'graph:%.2f +/-%.2f'%(np.nanmean(auc_score),np.std(auc_score)))
# auc_score,fpr,tpr,precision,recall,precision_scores,recall_scores,average_scores,MCC=two_temp
# combine_features_indivisual_results['auc_score_mean'].append(np.nanmean(auc_score))
# combine_features_indivisual_results['auc_score_std'].append(np.std(auc_score))
# combine_features_indivisual_results['fpr'].append(fpr)
# combine_features_indivisual_results['tpr'].append(tpr)
# combine_features_indivisual_results['precision'].append(precision)
# combine_features_indivisual_results['recall'].append(recall)
# combine_features_indivisual_results['precision_mean'].append(np.nanmean(precision_scores))
# combine_features_indivisual_results['precision_std'].append(np.std(precision_scores))
# combine_features_indivisual_results['recall_mean'].append(np.nanmean(recall_scores))
# combine_features_indivisual_results['recall_std'].append(np.std(recall_scores))
# combine_features_indivisual_results['area_under_precision_recall'].append(average_scores)
# combine_features_indivisual_results['matthews_corrcoef_mean'].append(np.nanmean(MCC))
# combine_features_indivisual_results['matthews_corrcoef_std'].append(np.nanstd(MCC))
# combine_features_indivisual_results['subject'].append(sub)
# combine_features_indivisual_results['day'].append(int(day[-1]))
# combine_features_indivisual_results['epoch_length'].append(float(epoch_length))
# print(sub_fold,Counter(label),'signal:%.2f +/-%.2f'%(np.nanmean(auc_score),np.std(auc_score)))
# except:
# print(sub_fold,Counter(label),'not enough samples')
#signal_features_indivisual_results = pd.DataFrame(signal_features_indivisual_results)
#graph_features_indivisual_results = pd.DataFrame(graph_features_indivisual_results)
#combine_features_indivisual_results = pd.DataFrame(combine_features_indivisual_results)
#signal_features_indivisual_results.to_csv(file_dir+'individual_signal_feature_TPOT.csv',index=False)
#graph_features_indivisual_results.to_csv(file_dir+'individual_graph_feature_TPOT.csv',index=False)
#combine_features_indivisual_results.to_csv(file_dir+'individual_combine_feature_TPOT.csv',index=False)
#signal_features_indivisual_results = {'subject':[],'day':[],'epoch_length':[],
# 'auc_score_mean':[],'auc_score_std':[],
# 'fpr':[],'tpr':[],
# 'precision':[],'recall':[],
# 'precision_mean':[],'precision_std':[],
# 'recall_mean':[],'recall_std':[],
# 'area_under_precision_recall':[]}
#graph_features_indivisual_results = {'subject':[],'day':[],'epoch_length':[],
# 'auc_score_mean':[],'auc_score_std':[],
# 'fpr':[],'tpr':[],
# 'precision':[],'recall':[],
# 'precision_mean':[],'precision_std':[],
# 'recall_mean':[],'recall_std':[],
# 'area_under_precision_recall':[]}
#for directory_1 in [f for f in os.listdir(file_dir) if ('epoch_length' in f)]:
# sub_dir = file_dir + directory_1 + '\\'
# epoch_length = directory_1.split(' ')[1]
# os.chdir(sub_dir)
# #signal_features_indivisual_results[directory_1],graph_features_indivisual_results[directory_1]={},{}
# #df_cc, df_pli, df_plv, df_signal,df_graph = [],[],[],[],[]
# for sub_fold in os.listdir(sub_dir):
# sub_fold_dir = sub_dir + sub_fold + '\\'
# os.chdir(sub_fold_dir)
# sub = sub_fold[:-4]
# day = sub_fold[4:][-4:]
# print(sub,day,epoch_length)
#
# cc_features, pli_features, plv_features, signal_features = [pd.read_csv(f) for f in os.listdir(sub_fold_dir) if ('csv' in f)]
# #df_cc.append(cc_features)
# #df_pli.append(pli_features)
# #df_plv.append(plv_features)
# label = cc_features['label']
# cc_features = eegPipelineFunctions.get_real_part(cc_features)
# pli_features = eegPipelineFunctions.get_real_part(pli_features)
# plv_features = eegPipelineFunctions.get_real_part(plv_features)
# cc_features.columns = ['cc_'+name for name in cc_features]
# pli_features.columns = ['pli_'+name for name in pli_features]
# plv_features.columns = ['plv_'+name for name in plv_features]
# cc_features = cc_features.drop('cc_label',1)
# pli_features = pli_features.drop('pli_label',1)
# plv_features = plv_features.drop('plv_label',1)
# df_combine = pd.concat([cc_features,pli_features,plv_features],axis=1)
# df_combine['label']=label
# try:
# signal_temp = eegPipelineFunctions.cross_validation_with_clfs(signal_features,clf_='logistic')
# graph_temp = eegPipelineFunctions.cross_validation_with_clfs(df_combine,clf_='logistic')
# auc_score,fpr,tpr,precision,recall,precision_scores,recall_scores,average_scores=signal_temp
# signal_features_indivisual_results['auc_score_mean'].append(np.nanmean(auc_score))
# signal_features_indivisual_results['auc_score_std'].append(np.std(auc_score))
# signal_features_indivisual_results['fpr'].append(fpr)
# signal_features_indivisual_results['tpr'].append(tpr)
# signal_features_indivisual_results['precision'].append(precision)
# signal_features_indivisual_results['recall'].append(recall)
# signal_features_indivisual_results['precision_mean'].append(np.nanmean(precision_scores))
# signal_features_indivisual_results['precision_std'].append(np.std(precision_scores))
# signal_features_indivisual_results['recall_mean'].append(np.nanmean(recall_scores))
# signal_features_indivisual_results['recall_std'].append(np.std(recall_scores))
# signal_features_indivisual_results['area_under_precision_recall'].append(average_scores)
# signal_features_indivisual_results['subject'].append(sub)
# signal_features_indivisual_results['day'].append(int(day[-1]))
# signal_features_indivisual_results['epoch_length'].append(float(epoch_length))
# print(sub_fold,Counter(label),'signal:%.2f +/-%.2f'%(np.nanmean(auc_score),np.std(auc_score)))
# auc_score,fpr,tpr,precision,recall,precision_scores,recall_scores,average_scores=graph_temp
# graph_features_indivisual_results['auc_score_mean'].append(np.nanmean(auc_score))
# graph_features_indivisual_results['auc_score_std'].append(np.std(auc_score))
# graph_features_indivisual_results['fpr'].append(fpr)
# graph_features_indivisual_results['tpr'].append(tpr)
# graph_features_indivisual_results['precision'].append(precision)
# graph_features_indivisual_results['recall'].append(recall)
# graph_features_indivisual_results['precision_mean'].append(np.nanmean(precision_scores))
# graph_features_indivisual_results['precision_std'].append(np.std(precision_scores))
# graph_features_indivisual_results['recall_mean'].append(np.nanmean(recall_scores))
# graph_features_indivisual_results['recall_std'].append(np.std(recall_scores))
# graph_features_indivisual_results['area_under_precision_recall'].append(average_scores)
# graph_features_indivisual_results['subject'].append(sub)
# graph_features_indivisual_results['day'].append(int(day[-1]))
# graph_features_indivisual_results['epoch_length'].append(float(epoch_length))
# print(sub_fold,Counter(label),'graph:%.2f +/-%.2f'%(np.nanmean(auc_score),np.std(auc_score)))
# except:
# print(sub_fold,Counter(label),'not enough samples')
#signal_features_indivisual_results = pd.DataFrame(signal_features_indivisual_results)
#graph_features_indivisual_results = pd.DataFrame(graph_features_indivisual_results)
#signal_features_indivisual_results.to_csv(file_dir+'individual_signal_feature_regression.csv',index=False)
#graph_features_indivisual_results.to_csv(file_dir+'individual_graph_feature_regression.csv',index=False)
#
#
#signal_features_indivisual_results = {'subject':[],'day':[],'epoch_length':[],
# 'auc_score_mean':[],'auc_score_std':[],
# 'fpr':[],'tpr':[],
# 'precision':[],'recall':[],
# 'precision_mean':[],'precision_std':[],
# 'recall_mean':[],'recall_std':[],
# 'area_under_precision_recall':[]}
#graph_features_indivisual_results = {'subject':[],'day':[],'epoch_length':[],
# 'auc_score_mean':[],'auc_score_std':[],
# 'fpr':[],'tpr':[],
# 'precision':[],'recall':[],
# 'precision_mean':[],'precision_std':[],
# 'recall_mean':[],'recall_std':[],
# 'area_under_precision_recall':[]}
#combine_features_indivisual_results = {'subject':[],'day':[],'epoch_length':[],
# 'auc_score_mean':[],'auc_score_std':[],
# 'fpr':[],'tpr':[],
# 'precision':[],'recall':[],
# 'precision_mean':[],'precision_std':[],
# 'recall_mean':[],'recall_std':[],
# 'area_under_precision_recall':[]}
#for directory_1 in [f for f in os.listdir(file_dir) if ('epoch_length' in f)]:
# sub_dir = file_dir + directory_1 + '\\'
# epoch_length = directory_1.split(' ')[1]
# os.chdir(sub_dir)
# #signal_features_indivisual_results[directory_1],graph_features_indivisual_results[directory_1]={},{}
# #df_cc, df_pli, df_plv, df_signal,df_graph = [],[],[],[],[]
# for sub_fold in os.listdir(sub_dir):
# sub_fold_dir = sub_dir + sub_fold + '\\'
# os.chdir(sub_fold_dir)
# sub = sub_fold[:-4]
# day = sub_fold[4:][-4:]
# print(sub,day,epoch_length)
# cc_features, pli_features, plv_features, signal_features = [pd.read_csv(f) for f in os.listdir(sub_fold_dir) if ('csv' in f)]
# #df_cc.append(cc_features)
# #df_pli.append(pli_features)
# #df_plv.append(plv_features)
# label = cc_features['label']
# cc_features = eegPipelineFunctions.get_real_part(cc_features)
# pli_features = eegPipelineFunctions.get_real_part(pli_features)
# plv_features = eegPipelineFunctions.get_real_part(plv_features)
# cc_features.columns = ['cc_'+name for name in cc_features]
# pli_features.columns = ['pli_'+name for name in pli_features]
# plv_features.columns = ['plv_'+name for name in plv_features]
# cc_features = cc_features.drop('cc_label',1)
# pli_features = pli_features.drop('pli_label',1)
# plv_features = plv_features.drop('plv_label',1)
# df_combine = pd.concat([cc_features,pli_features,plv_features],axis=1)
# df_combine['label']=label
# df_two = pd.concat([cc_features, pli_features, plv_features, signal_features],axis=1)
# try:
# signal_temp = eegPipelineFunctions.cross_validation_with_clfs(signal_features,clf_='RF')
# graph_temp = eegPipelineFunctions.cross_validation_with_clfs(df_combine,clf_='RF')
# two_temp = eegPipelineFunctions.cross_validation_with_clfs(df_two,clf_='RF')
# auc_score,fpr,tpr,precision,recall,precision_scores,recall_scores,average_scores,MCC=signal_temp
# signal_features_indivisual_results['auc_score_mean'].append(np.nanmean(auc_score))
# signal_features_indivisual_results['auc_score_std'].append(np.std(auc_score))
# signal_features_indivisual_results['fpr'].append(fpr)
# signal_features_indivisual_results['tpr'].append(tpr)
# signal_features_indivisual_results['precision'].append(precision)
# signal_features_indivisual_results['recall'].append(recall)
# signal_features_indivisual_results['precision_mean'].append(np.nanmean(precision_scores))
# signal_features_indivisual_results['precision_std'].append(np.std(precision_scores))
# signal_features_indivisual_results['recall_mean'].append(np.nanmean(recall_scores))
# signal_features_indivisual_results['recall_std'].append(np.std(recall_scores))
# signal_features_indivisual_results['area_under_precision_recall'].append(average_scores)
# signal_features_indivisual_results['subject'].append(sub)
# signal_features_indivisual_results['day'].append(int(day[-1]))
# signal_features_indivisual_results['epoch_length'].append(float(epoch_length))
# print(sub_fold,Counter(label),'signal:%.2f +/-%.2f'%(np.nanmean(MCC),np.std(MCC)))
# auc_score,fpr,tpr,precision,recall,precision_scores,recall_scores,average_scores=graph_temp
# graph_features_indivisual_results['auc_score_mean'].append(np.nanmean(auc_score))
# graph_features_indivisual_results['auc_score_std'].append(np.std(auc_score))
# graph_features_indivisual_results['fpr'].append(fpr)
# graph_features_indivisual_results['tpr'].append(tpr)
# graph_features_indivisual_results['precision'].append(precision)
# graph_features_indivisual_results['recall'].append(recall)
# graph_features_indivisual_results['precision_mean'].append(np.nanmean(precision_scores))
# graph_features_indivisual_results['precision_std'].append(np.std(precision_scores))
# graph_features_indivisual_results['recall_mean'].append(np.nanmean(recall_scores))
# graph_features_indivisual_results['recall_std'].append(np.std(recall_scores))
# graph_features_indivisual_results['area_under_precision_recall'].append(average_scores)
# graph_features_indivisual_results['subject'].append(sub)
# graph_features_indivisual_results['day'].append(int(day[-1]))
# graph_features_indivisual_results['epoch_length'].append(float(epoch_length))
# print(sub_fold,Counter(label),'graph:%.2f +/-%.2f'%(np.nanmean(auc_score),np.std(auc_score)))
# auc_score,fpr,tpr,precision,recall,precision_scores,recall_scores,average_scores=two_temp
# combine_features_indivisual_results['auc_score_mean'].append(np.nanmean(auc_score))
# combine_features_indivisual_results['auc_score_std'].append(np.std(auc_score))
# combine_features_indivisual_results['fpr'].append(fpr)
# combine_features_indivisual_results['tpr'].append(tpr)
# combine_features_indivisual_results['precision'].append(precision)
# combine_features_indivisual_results['recall'].append(recall)
# combine_features_indivisual_results['precision_mean'].append(np.nanmean(precision_scores))
# combine_features_indivisual_results['precision_std'].append(np.std(precision_scores))
# combine_features_indivisual_results['recall_mean'].append(np.nanmean(recall_scores))
# combine_features_indivisual_results['recall_std'].append(np.std(recall_scores))
# combine_features_indivisual_results['area_under_precision_recall'].append(average_scores)
# combine_features_indivisual_results['subject'].append(sub)
# combine_features_indivisual_results['day'].append(int(day[-1]))
# combine_features_indivisual_results['epoch_length'].append(float(epoch_length))
# print(sub_fold,Counter(label),'signal:%.2f +/-%.2f'%(np.nanmean(auc_score),np.std(auc_score)))
# except:
# print(sub_fold,Counter(label),'not enough samples')
#signal_features_indivisual_results = pd.DataFrame(signal_features_indivisual_results)
#graph_features_indivisual_results = pd.DataFrame(graph_features_indivisual_results)
#signal_features_indivisual_results.to_csv(file_dir+'individual_signal_feature_RF.csv',index=False)
#graph_features_indivisual_results.to_csv(file_dir+'individual_graph_feature_RF.csv',index=False)
##pickle.dump(signal_features_indivisual_results,open(file_dir+'individual_signal_feature_RF.p','wb'))
##pickle.dump(graph_features_indivisual_results,open(file_dir+'individual_graph_feature_RF.p','wb'))
#
#
#signal_features_indivisual_results = {'subject':[],'day':[],'epoch_length':[],
# 'auc_score_mean':[],'auc_score_std':[],
# 'fpr':[],'tpr':[],
# 'precision':[],'recall':[],
# 'precision_mean':[],'precision_std':[],
# 'recall_mean':[],'recall_std':[],
# 'area_under_precision_recall':[]}
#graph_features_indivisual_results = {'subject':[],'day':[],'epoch_length':[],
# 'auc_score_mean':[],'auc_score_std':[],
# 'fpr':[],'tpr':[],
# 'precision':[],'recall':[],
# 'precision_mean':[],'precision_std':[],
# 'recall_mean':[],'recall_std':[],
# 'area_under_precision_recall':[]}
#for directory_1 in [f for f in os.listdir(file_dir) if ('epoch_length' in f)]:
# sub_dir = file_dir + directory_1 + '\\'
# epoch_length = directory_1.split(' ')[1]
# os.chdir(sub_dir)
# #signal_features_indivisual_results[directory_1],graph_features_indivisual_results[directory_1]={},{}
# #df_cc, df_pli, df_plv, df_signal,df_graph = [],[],[],[],[]
# for sub_fold in os.listdir(sub_dir):
# sub_fold_dir = sub_dir + sub_fold + '\\'
# os.chdir(sub_fold_dir)
# sub = sub_fold[:-4]
# day = sub_fold[4:][-4:]
# print(sub,day,epoch_length)
#
# cc_features, pli_features, plv_features, signal_features = [pd.read_csv(f) for f in os.listdir(sub_fold_dir) if ('csv' in f)]
# #df_cc.append(cc_features)
# #df_pli.append(pli_features)
# #df_plv.append(plv_features)
# label = cc_features['label']
# cc_features = eegPipelineFunctions.get_real_part(cc_features)
# pli_features = eegPipelineFunctions.get_real_part(pli_features)
# plv_features = eegPipelineFunctions.get_real_part(plv_features)
# cc_features.columns = ['cc_'+name for name in cc_features]
# pli_features.columns = ['pli_'+name for name in pli_features]
# plv_features.columns = ['plv_'+name for name in plv_features]
# cc_features = cc_features.drop('cc_label',1)
# pli_features = pli_features.drop('pli_label',1)
# plv_features = plv_features.drop('plv_label',1)
# df_combine = pd.concat([cc_features,pli_features,plv_features],axis=1)
# df_combine['label']=label
# try:
# signal_temp = eegPipelineFunctions.cross_validation_with_clfs(signal_features,clf_='svm')
# graph_temp = eegPipelineFunctions.cross_validation_with_clfs(df_combine,clf_='svm')
# auc_score,fpr,tpr,precision,recall,precision_scores,recall_scores,average_scores=signal_temp
# signal_features_indivisual_results['auc_score_mean'].append(np.nanmean(auc_score))
# signal_features_indivisual_results['auc_score_std'].append(np.std(auc_score))
# signal_features_indivisual_results['fpr'].append(fpr)
# signal_features_indivisual_results['tpr'].append(tpr)
# signal_features_indivisual_results['precision'].append(precision)
# signal_features_indivisual_results['recall'].append(recall)
# signal_features_indivisual_results['precision_mean'].append(np.nanmean(precision_scores))
# signal_features_indivisual_results['precision_std'].append(np.std(precision_scores))
# signal_features_indivisual_results['recall_mean'].append(np.nanmean(recall_scores))
# signal_features_indivisual_results['recall_std'].append(np.std(recall_scores))
# signal_features_indivisual_results['area_under_precision_recall'].append(average_scores)
# signal_features_indivisual_results['subject'].append(sub)
# signal_features_indivisual_results['day'].append(int(day[-1]))
# signal_features_indivisual_results['epoch_length'].append(float(epoch_length))
# print(sub_fold,Counter(label),'signal:%.2f +/-%.2f'%(np.nanmean(auc_score),np.std(auc_score)))
# auc_score,fpr,tpr,precision,recall,precision_scores,recall_scores,average_scores=graph_temp
# graph_features_indivisual_results['auc_score_mean'].append(np.nanmean(auc_score))
# graph_features_indivisual_results['auc_score_std'].append(np.std(auc_score))
# graph_features_indivisual_results['fpr'].append(fpr)
# graph_features_indivisual_results['tpr'].append(tpr)
# graph_features_indivisual_results['precision'].append(precision)
# graph_features_indivisual_results['recall'].append(recall)
# graph_features_indivisual_results['precision_mean'].append(np.nanmean(precision_scores))
# graph_features_indivisual_results['precision_std'].append(np.std(precision_scores))
# graph_features_indivisual_results['recall_mean'].append(np.nanmean(recall_scores))
# graph_features_indivisual_results['recall_std'].append(np.std(recall_scores))
# graph_features_indivisual_results['area_under_precision_recall'].append(average_scores)
# graph_features_indivisual_results['subject'].append(sub)
# graph_features_indivisual_results['day'].append(int(day[-1]))
# graph_features_indivisual_results['epoch_length'].append(float(epoch_length))
# print(sub_fold,Counter(label),'graph:%.2f +/-%.2f'%(np.nanmean(auc_score),np.std(auc_score)))
# except:
# print(sub_fold,Counter(label),'not enough samples')
#signal_features_indivisual_results = pd.DataFrame(signal_features_indivisual_results)
#graph_features_indivisual_results = pd.DataFrame(graph_features_indivisual_results)
#signal_features_indivisual_results.to_csv(file_dir+'individual_signal_feature_svm.csv',index=False)
#graph_features_indivisual_results.to_csv(file_dir+'individual_graph_feature_svm.csv',index=False)
| |
# import subprocess
from pyglet.sprite import Sprite
from functions import *
from main import logger
import random
class Mob(Sprite):
    """Base class for all enemies.

    Holds combat stats (hp, spd, bounty, def_type), follows the grid path
    toward the goal, and manages debuffs, stalling and death effects.
    """
    def __init__(self, game, variant="YAY", debug=False):
        super(Mob, self).__init__(
            game.window.textures["mob1Q"],
            batch=game.window.batches["mobs"]
        )
        self.move_type = "normal"
        self.variant = variant
        self.hp = 14.0
        self.def_type = 0  # 0 Normal, 1 Magic, 2 Chaos
        self.spd = 1.0
        self.bounty = 1  # Gold awarded for killing mob
        self.spawn(game)
    def spawn(self, game):
        """Place the mob at the grid start and initialise movement state."""
        self.g = game
        self.debug = game.debug
        self.id = self.g.mob_count
        self.g.mob_count += 1
        s = game.grid.start
        self.x_offset = random.randrange(  # Offset for drawing position
            -self.g.squaresize // 8,
            self.g.squaresize // 8
        )
        self.y_offset = random.randrange(
            -self.g.squaresize // 8,
            self.g.squaresize // 8
        )
        self.x = game.window.getWindowPos(s[0], s[1])[0]
        self.y = game.window.getWindowPos(s[0], s[1])[1]  # Drawing position
        self.rx = self.x
        self.ry = self.y  # Real position, which is used in game logic
        self.state = "alive"
        self.hp_max = self.hp
        self.orig_spd = self.spd
        self.slow_cd = None
        self.lastpoint = None
        self.stall_timer = None
        self.debuff_list = []
        self.currentpoint = s
        self.point = 0  # Index into self.path
        self.path = False
        if self.move_type == "flying":
            self.path = game.grid.getPath(self.currentpoint, flying=True)
        if not self.path:
            self.path = game.grid.path
        try:
            self.targetpoint = self.path[1]
        except IndexError:
            logger.debug("Targetpoint not found in path, setting it to 0,0!!")
            self.targetpoint = (0, 0)
        logger.debug(
            "Spawning mob ID{2}: {0}hp, {1}spd".format(
                self.hp_max, self.spd, self.id
            )
        )
    def setDebuff(self, d_type, **kwargs):
        """Attach a debuff of d_type ("slow", "poison" or "stun").

        Expected keyword arguments: slow=<percent>, time=<seconds> for
        "slow"; dmg=<amount>, time=<seconds> for "poison"; time=<seconds>
        for "stun".
        """
        debuff = None
        # Fixed: the old code read kwargs.items()[0][1]/[1][1], which fails
        # on Python 3 (dict views are not subscriptable) and relies on
        # unspecified dict ordering, so slow/time could be swapped.
        if d_type == "slow":
            debuff = Debuff(self, d_type, kwargs["time"], slow=kwargs["slow"])
        elif d_type == "poison":
            debuff = Debuff(self, d_type, kwargs["time"], dmg=kwargs["dmg"])
        elif d_type == "stun":
            debuff = Debuff(self, d_type, kwargs["time"])
        if debuff:
            debuff.update()
            self.debuff_list.append(debuff)
        else:
            logger.debug("Could not set debuff for type {0}".format(d_type))
    def updateOffset(self):
        """Snap the drawing and real positions to the current grid point."""
        s = self.currentpoint
        self.x, self.y = self.g.window.getWindowPos(s[0], s[1])
        self.rx, self.ry = self.x, self.y
    def updatePos(self, dt=0):
        """Advance toward targetpoint; queue for re-pathing when the target
        is no longer on a walkable cell of the current path."""
        if (
            not self.stall_timer and (self not in self.g.pf_queue)
            and self.spd > 0.0
        ):
            points = self.path
            tp = self.targetpoint
            if tp in points and tp in self.g.grid.w_grid:
                targetpos = self.g.window.getWindowPos(tp[0], tp[1])
                if get_dist(targetpos[0], targetpos[1], self.rx, self.ry) < 2:
                    # Close enough: snap to the next waypoint.
                    self.lastpoint = self.currentpoint
                    self.currentpoint = self.targetpoint
                    if self.currentpoint == self.g.grid.goal:
                        logger.info("Mob reached goal.")
                        self.state = "reached_goal"
                    else:
                        self.point += 1
                        try:
                            self.targetpoint = points[self.point]
                        except IndexError:
                            # Path exhausted short of the goal; re-path.
                            self.g.pf_queue.append(self)
                        logger.debug(
                            "Reached pos {0}, new target is {1}".format(
                                self.currentpoint, self.targetpoint
                            )
                        )
                else:
                    if (self not in self.g.pf_queue):
                        rads = get_angle(
                            self.rx, self.ry,
                            targetpos[0], targetpos[1]
                        )
                        # NOTE(review): speed is offset by dt, not scaled by
                        # it, so movement is frame-rate dependent unless dt
                        # is always 0 -- confirm this is intended.
                        self.rx = self.rx + (self.spd + dt) * math.cos(rads)
                        self.ry = self.ry - (self.spd + dt) * math.sin(rads)
                        self.x = self.rx + self.x_offset
                        self.y = self.ry + self.y_offset
            else:
                if (self not in self.g.pf_queue):
                    logger.debug("Need to recalculate mob route.")
                    self.g.pf_queue.append(self)
    def updateTarget(self):
        """Re-attach the mob to the current global path after a grid change,
        generating a private path only when the global one is unreachable."""
        if not self.state == "stalled":
            logger.debug("Updating target for mob {0}".format(self.id))
            logger.debug("currentpoint: {0}".format(self.currentpoint))
            logger.debug("target_pos: {0}".format(self.targetpoint))
            logger.debug("point: {0}".format(self.point))
            self.point = 0
            g = self.g.grid
            share = False
            if self.targetpoint in g.path:
                # Still on the global path; just advance the index.
                self.path = g.path
                self.point = g.path.index(self.targetpoint)
                try:
                    self.targetpoint = g.path[self.point + 1]
                except IndexError:
                    logger.debug("Target point out of range, panick!")
                    self.targetpoint = g.goal
                share = True
            else:
                genpath = True
                share = True
                # Try to rejoin the global path at an adjacent cell first.
                for p in g.path:
                    if (
                        abs(self.targetpoint[0] - p[0]) <= 1 and
                        abs(self.targetpoint[1] - p[1]) <= 1
                    ):
                        if self.targetpoint in get_diagonal(
                            g.w_grid, p[0], p[1]
                        ):
                            genpath = False
                            share = True
                            self.targetpoint = p
                            break
                        elif (
                            self.targetpoint in get_neighbors(
                                g.w_grid, p[0], p[1]
                            )
                        ):
                            genpath = False
                            share = True
                            self.targetpoint = p
                            break
                if genpath:
                    logger.debug(
                        "Mob {0} had to generate new path.".format(self.id)
                    )
                    newpath = g.getPath(self.currentpoint)
                    if newpath:
                        self.path = newpath
                        if len(newpath) > 1:
                            self.targetpoint = newpath[1]
                        else:
                            self.targetpoint = newpath[0]
                    # if pathfinding is not successfull, stall for a second
                    else:
                        logger.debug("Mob is stalling!")
                        self.state = "stalled"
                        self.stall_timer = self.g.window.fps * 2
                else:
                    logger.debug(
                        "New path was nearby, mob {0} rejoined it.".format(
                            self.id
                        )
                    )
                    self.path = g.path
                    self.point = g.path.index(self.targetpoint) - 1
            # Shares path if mobs nearby is awaiting update
            if share and not self.state == "stalled":
                # Fixed: iterate a copy -- the loop removes entries from
                # pf_queue, and mutating a list while iterating it skips
                # the element after each removal.
                for m in list(self.g.pf_queue):
                    if m.id != self.id:
                        if m.currentpoint == self.currentpoint:
                            m.point = self.point
                            m.targetpoint = self.targetpoint
                            m.path = self.path
                            self.g.pf_queue.remove(m)
                            logger.debug("Shared path with nearby mob.")
    def kill(self):
        """Remove the mob from play: blood particles, gold reward, SFX."""
        logger.debug("Mob {0} died at x:{1}, y:{2}".format(
            self.id, self.x, self.y
        )
        )
        for _ in range(3):  # Spawn three blood splats
            x = self.x + random.randrange(-8, 8)
            y = self.y + random.randrange(-8, 8)
            self.g.window.blood_fx.addParticle(
                x, y, (1, 0.1, 0.1, 1)
            )
        self.debuff_list = []
        if self in self.g.pf_queue:
            self.g.pf_queue.remove(self)
        self.g.gold += self.bounty
        self.g.mobs.remove(self)
        self.g.window.playSFX("splat", 0.7)
    def updateState(self):
        """Per-frame state machine: death, goal, stalling and debuff upkeep."""
        self.debug = self.g.debug
        if self.state == "dead":
            self.kill()
        elif self.state == "reached_goal":
            logger.info("You are leaking!")
            if self in self.g.pf_queue:
                self.g.pf_queue.remove(self)
            self.debuff_list = []
            self.g.mobs.remove(self)
            self.g.leaking()
        elif self.state == "stalled":
            if self.stall_timer > 0:
                self.stall_timer -= 1
            else:
                self.stall_timer = None
                self.state = "alive"
                self.updateTarget()
        else:  # If none of the states apply
            slowed, stunned = False, False
            # Fixed: iterate a copy -- Debuff.update() removes expired
            # debuffs from debuff_list, which would skip the next entry.
            for d in list(self.debuff_list):
                if d.d_type == "slow":
                    slowed = True
                elif d.d_type == "stun":
                    stunned = True
                d.update()
            if stunned:
                # Fixed: was "self.speed = 0.0", a typo that created an
                # unused attribute instead of stopping the mob.
                self.spd = 0.0
            elif not slowed:
                self.spd = self.orig_spd
class Mob1W(Mob):
    # Tougher, slightly slower variant of the basic mob.
    def __init__(self, game, variant="YAY", debug=False):
        # NOTE(review): super(Mob, self) resolves past Mob to
        # Sprite.__init__, i.e. Mob.__init__ is skipped so this subclass
        # sets its own stats before spawn() -- confirm this is deliberate.
        super(Mob, self).__init__(
            game.window.textures["mob1W"],
            batch=game.window.batches["mobs"]
        )
        self.variant = variant
        self.move_type = "normal"
        self.hp = 35.0
        self.def_type = 0  # 0 Normal, 1 Magic, 2 Chaos
        self.spd = 0.9
        self.bounty = 3
        self.spawn(game)
class Mob1E(Mob):
    # High-hp, slow variant.
    def __init__(self, game, variant="YAY", debug=False):
        # NOTE(review): super(Mob, self) skips Mob.__init__ and calls
        # Sprite.__init__ directly -- apparently deliberate; confirm.
        super(Mob, self).__init__(
            game.window.textures["mob1E"],
            batch=game.window.batches["mobs"]
        )
        self.move_type = "normal"
        self.variant = variant
        self.hp = 180.0
        self.def_type = 0  # 0 Normal, 1 Magic, 2 Chaos
        self.spd = 0.6
        self.bounty = 10
        self.spawn(game)
class Mob1R(Mob):
    # Fast variant (highest base speed of the normal mobs).
    def __init__(self, game, variant="YAY", debug=False):
        # NOTE(review): super(Mob, self) skips Mob.__init__ and calls
        # Sprite.__init__ directly -- apparently deliberate; confirm.
        super(Mob, self).__init__(
            game.window.textures["mob1R"],
            batch=game.window.batches["mobs"]
        )
        self.move_type = "normal"
        self.variant = variant
        self.hp = 80.0
        self.def_type = 0  # 0 Normal, 1 Magic, 2 Chaos
        self.spd = 1.8
        self.bounty = 10
        self.spawn(game)
class Mob1A(Mob):
    # Heavy variant: 300 hp at below-average speed.
    def __init__(self, game, variant="YAY", debug=False):
        # NOTE(review): super(Mob, self) skips Mob.__init__ and calls
        # Sprite.__init__ directly -- apparently deliberate; confirm.
        super(Mob, self).__init__(
            game.window.textures["mob1A"],
            batch=game.window.batches["mobs"]
        )
        self.move_type = "normal"
        self.variant = variant
        self.hp = 300.0
        self.def_type = 0  # 0 Normal, 1 Magic, 2 Chaos
        self.spd = 0.9
        self.bounty = 18
        self.spawn(game)
class Mob1S(Mob):
    # Heavy and fast variant: same hp as Mob1A but quicker.
    def __init__(self, game, variant="YAY", debug=False):
        # NOTE(review): super(Mob, self) skips Mob.__init__ and calls
        # Sprite.__init__ directly -- apparently deliberate; confirm.
        super(Mob, self).__init__(
            game.window.textures["mob1S"],
            batch=game.window.batches["mobs"]
        )
        self.move_type = "normal"
        self.variant = variant
        self.hp = 300.0
        self.def_type = 0  # 0 Normal, 1 Magic, 2 Chaos
        self.spd = 1.25
        self.bounty = 20
        self.spawn(game)
class Mob1D(Mob):
    # 400-hp variant at base speed.
    def __init__(self, game, variant="YAY", debug=False):
        # NOTE(review): super(Mob, self) skips Mob.__init__ and calls
        # Sprite.__init__ directly -- apparently deliberate; confirm.
        super(Mob, self).__init__(
            game.window.textures["mob1D"],
            batch=game.window.batches["mobs"]
        )
        # NOTE(review): redundant -- spawn() sets state to "alive" again.
        self.state = "alive"
        self.move_type = "normal"
        self.variant = variant
        self.hp = 400.0
        self.def_type = 0  # 0 Normal, 1 Magic, 2 Chaos
        self.spd = 1.0
        self.bounty = 25
        self.spawn(game)
class Mob1F(Mob):
    """Flying mob: paths over the full grid (fullgrid) instead of the
    walkable grid, and is never re-pathed by the pathfinding queue."""
    def __init__(self, game, variant="YAY", debug=False):
        # super(Mob, self) skips Mob.__init__ and calls Sprite.__init__
        # directly so this subclass sets its own stats before spawn().
        super(Mob, self).__init__(
            game.window.textures["mob1F"],
            batch=game.window.batches["flying_mobs"]
        )
        # Adds this mob to the batch with towers
        self.move_type = "flying"
        self.variant = variant
        self.hp = 120.0
        self.def_type = 0  # 0 Normal, 1 Magic, 2 Chaos
        self.spd = 0.9
        self.bounty = 12
        self.spawn(game)
    def updatePos(self, dt=0):
        """Move toward targetpoint; flying mobs may cross any grid cell."""
        if not self.stall_timer and (self not in self.g.pf_queue):
            points = self.path
            tp = self.targetpoint
            if tp in points and tp in self.g.grid.fullgrid:
                targetpos = self.g.window.getWindowPos(tp[0], tp[1])
                if get_dist(targetpos[0], targetpos[1], self.rx, self.ry) < 2:
                    self.lastpoint = self.currentpoint
                    self.currentpoint = self.targetpoint
                    if self.currentpoint == self.g.grid.goal:
                        logger.debug("Mob reached goal.")
                        self.state = "reached_goal"
                    else:
                        self.point += 1
                        try:
                            self.targetpoint = points[self.point]
                        except IndexError:
                            # Fixed: the parent guards this lookup; an
                            # exhausted path now heads for the goal instead
                            # of crashing the update loop.
                            self.targetpoint = self.g.grid.goal
                        logger.debug(
                            "Reached pos {0}, new target is {1}".format(
                                self.currentpoint, self.targetpoint
                            )
                        )
                else:
                    if (self not in self.g.pf_queue):
                        rads = get_angle(
                            self.rx, self.ry,
                            targetpos[0], targetpos[1]
                        )
                        self.rx = self.rx + (self.spd + dt) * math.cos(rads)
                        self.ry = self.ry - (self.spd + dt) * math.sin(rads)
                        self.x = self.rx + self.x_offset
                        self.y = self.ry + self.y_offset
        elif (self in self.g.pf_queue):
            self.g.pf_queue.remove(self)  # Never calculate path for flying
    def updateTarget(self):
        """Re-sync with the stored path; flying mobs keep their own path."""
        if not self.state == "stalled":
            logger.debug("Updating target for flying mob.")
            self.point = 0
            if self.targetpoint in self.path:
                self.point = self.path.index(self.targetpoint)
                try:
                    self.targetpoint = self.path[self.point + 1]
                except IndexError:
                    logger.debug("Target point out of range, panick!")
                    # Fixed: "g" was an undefined name here (NameError on
                    # any out-of-range target); use the grid via self.g.
                    self.targetpoint = self.g.grid.goal
class Mob1Z(Mob):
    # Very tanky, very slow variant.
    def __init__(self, game, variant="YAY", debug=False):
        # NOTE(review): super(Mob, self) skips Mob.__init__ and calls
        # Sprite.__init__ directly -- apparently deliberate; confirm.
        super(Mob, self).__init__(
            game.window.textures["mob1Z"],
            batch=game.window.batches["mobs"]
        )
        self.move_type = "normal"
        self.variant = variant
        self.hp = 900.0
        self.def_type = 0  # 0 Normal, 1 Magic, 2 Chaos
        self.spd = 0.5
        self.bounty = 35
        self.spawn(game)
class Mob1X(Mob):
    # 800-hp variant at moderate speed.
    def __init__(self, game, variant="YAY", debug=False):
        # NOTE(review): super(Mob, self) skips Mob.__init__ and calls
        # Sprite.__init__ directly -- apparently deliberate; confirm.
        super(Mob, self).__init__(
            game.window.textures["mob1X"],
            batch=game.window.batches["mobs"]
        )
        self.move_type = "normal"
        self.variant = variant
        self.hp = 800.0
        self.def_type = 0  # 0 Normal, 1 Magic, 2 Chaos
        self.spd = 0.8
        self.bounty = 40
        self.spawn(game)
class Mob1C(Mob):
    # 900-hp variant at base speed.
    def __init__(self, game, variant="YAY", debug=False):
        # NOTE(review): super(Mob, self) skips Mob.__init__ and calls
        # Sprite.__init__ directly -- apparently deliberate; confirm.
        super(Mob, self).__init__(
            game.window.textures["mob1C"],
            batch=game.window.batches["mobs"]
        )
        self.move_type = "normal"
        self.variant = variant
        self.hp = 900.0
        self.def_type = 0  # 0 Normal, 1 Magic, 2 Chaos
        self.spd = 1.0
        self.bounty = 50
        self.spawn(game)
class Mob1V(Mob):
    # Boss-tier variant: highest hp and bounty, slow.
    def __init__(self, game, variant="YAY", debug=False):
        # NOTE(review): super(Mob, self) skips Mob.__init__ and calls
        # Sprite.__init__ directly -- apparently deliberate; confirm.
        super(Mob, self).__init__(
            game.window.textures["mob1V"],
            batch=game.window.batches["mobs"]
        )
        self.move_type = "normal"
        self.variant = variant
        self.hp = 2000.0
        self.def_type = 0  # 0 Normal, 1 Magic, 2 Chaos
        self.spd = 0.5
        self.bounty = 75
        self.spawn(game)
class Debuff:
    """A timed status effect (slow / poison / stun) applied to a mob.

    `time` is given in seconds and is converted to frames using the game
    window's fps.  The owner's updateState() calls update() once per frame;
    on expiry the owner's speed is restored and the debuff removes itself
    from owner.debuff_list.
    """
    def __init__(self, owner, d_type, time, **kwargs):
        self.owner = owner
        self.d_type = d_type
        self.time = time * self.owner.g.window.fps  # remaining frames
        # Fixed: read effect parameters by keyword name.  The old code
        # indexed kwargs.items()[0][1], which fails on Python 3 and relies
        # on unspecified dict ordering.
        if self.d_type == "slow":
            self.slow = kwargs["slow"]  # slow percentage (0-100)
        elif self.d_type == "poison":
            self.dmg = kwargs["dmg"]    # damage applied once per second
        elif self.d_type == "stun":
            pass
    def update(self):
        """Tick once per frame; restore speed and detach when expired."""
        if self.time > 0:
            self.doEffect()
            self.time -= 1
        else:
            self.owner.spd = self.owner.orig_spd
            self.owner.debuff_list.remove(self)
    def doEffect(self):
        """Apply this frame's effect to the owner."""
        if self.d_type == "slow":
            # Reduce speed by self.slow percent of the original, but never
            # raise it above a stronger slow already in effect.
            newspeed = ((100 - self.slow) * self.owner.orig_spd / 100.0)
            if newspeed <= self.owner.spd:
                if newspeed < 0.0:
                    newspeed = 0.0
                self.owner.spd = newspeed
        elif self.d_type == "poison":
            if self.time % self.owner.g.window.fps == 0:  # once per second
                if self.owner.hp > 0:
                    self.owner.hp -= self.dmg
                    self.owner.g.window.puff_fx.addParticle(
                        self.owner.x + random.randrange(-8, 9),
                        self.owner.y + random.randrange(-6, 7),
                        (0.55, 1, 0.45, 0.5)
                    )
                    self.owner.g.window.skull_fx.addParticle(
                        self.owner.x + random.randrange(0, 12),
                        self.owner.y + random.randrange(0, 12),
                        (0.10, 0.3, 0.10, 0.8),
                        velocity=(
                            random.randrange(-6, 6),
                            random.randrange(8, 24),
                            0
                        )
                    )
                else:
                    self.owner.state = "dead"
        elif self.d_type == "stun":
            # Fixed precedence: "time % fps // 2" parsed as
            # "(time % fps) // 2", which fired on two consecutive frames
            # each second; a half-second interval was clearly intended.
            if self.time % (self.owner.g.window.fps // 2) == 0:
                self.owner.g.window.lightning_fx.addParticle(
                    self.owner.x, self.owner.y, (0.7, 0.8, 0.8, 0.7)
                )
            self.owner.spd = 0.0
| |
import io, os, subprocess, wave
import math, audioop, collections
import json
from urllib2 import Request, urlopen
class AudioSource:
    """Abstract base for audio inputs, used via the context-manager protocol.

    Concrete sources (Microphone, WavFile) expose SAMPLE_WIDTH, RATE,
    CHANNELS, CHUNK and a readable .stream while inside the with-block.
    """
    def __init__(self):
        raise NotImplementedError("Abstract Class")
    def __enter__(self):
        raise NotImplementedError("Abstract Class")
    def __exit__(self, exc_type, exc_value, traceback):
        raise NotImplementedError("Abstract Class")
# PyAudio is optional: Microphone is only defined when it is installed.
try:
    import pyaudio
    class Microphone(AudioSource):
        """AudioSource that records from a system microphone via PyAudio."""
        def __init__(self, device_index = None):
            # None selects the system default input device.
            self.device_index = device_index
            self.format = pyaudio.paInt16
            self.SAMPLE_WIDTH = pyaudio.get_sample_size(self.format)
            self.RATE = 44100  # samples per second
            self.CHANNELS = 1  # mono
            self.CHUNK = 2205  # frames per buffer (50 ms at 44.1 kHz)
            self.audio = None
            self.stream = None
        def __enter__(self):
            # Open the stream lazily so construction has no side effects.
            self.audio = pyaudio.PyAudio()
            self.stream = self.audio.open(
                input_device_index = self.device_index,
                format = self.format,
                rate = self.RATE,
                channels = self.CHANNELS,
                frames_per_buffer = self.CHUNK,
                input = True,
            )
            return self
        def __exit__(self, exc_type, exc_value, traceback):
            self.stream.stop_stream()
            self.stream.close()
            self.stream = None
            self.audio.terminate()
except ImportError:
    # Without pyaudio only file-based sources (WavFile) are available.
    pass
class WavFile(AudioSource):
    """AudioSource backed by a mono WAV file (path or open file object)."""
    def __init__(self, filename_or_fileobject):
        self.stream = None
        if isinstance(filename_or_fileobject, str):
            # Got a path: the file is opened lazily in __enter__.
            self.filename = filename_or_fileobject
        else:
            # Got an already-open file-like object.
            self.filename = None
            self.wav_file = filename_or_fileobject
    def __enter__(self):
        if self.filename:
            self.wav_file = open(self.filename, "rb")
        reader = wave.open(self.wav_file, "rb")
        self.wav_reader = reader
        self.SAMPLE_WIDTH = reader.getsampwidth()
        self.RATE = reader.getframerate()
        self.CHANNELS = reader.getnchannels()
        assert self.CHANNELS == 1  # only mono input is supported
        self.CHUNK = 4096
        self.stream = WavFile.WavStream(reader)
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        # Only close the file if we opened it ourselves.
        if self.filename:
            self.wav_file.close()
        self.stream = None
    class WavStream(object):
        """Minimal read-only stream adapter over a wave reader."""
        def __init__(self, wav_reader):
            self.wav_reader = wav_reader
        def read(self, size = -1):
            reader = self.wav_reader
            if size == -1:
                # Read everything that is left.
                size = reader.getnframes()
            return reader.readframes(size)
class AudioData(object):
    """Value object pairing a sample rate with encoded audio bytes."""
    def __init__(self, rate, data):
        self.data = data  # encoded audio payload
        self.rate = rate  # samples per second
class Recognizer(AudioSource):
    """Capture audio from an AudioSource and transcribe it with the Google
    Speech API (v2); audio is FLAC-encoded before upload.

    energy_threshold: RMS level above which a buffer counts as speech.
    pause_threshold:  seconds of quiet that end a phrase in listen().
    quiet_duration:   seconds of leading quiet retained in listen().
    """
    def __init__(self, language="en-US", key="AIzaSyBOti4mM-6x9WDnZIjIeyEU21OpBXqWBgw"):
        self.key = key
        self.language = language
        self.energy_threshold = 100
        self.pause_threshold = 0.2
        self.quiet_duration = 0.2
    def samp2flac(self, source, frame_data):
        """Wrap raw frames in an in-memory WAV and convert them to FLAC."""
        with io.BytesIO() as wav_file:
            wavWriter = wave.open(wav_file, "wb")
            try:
                wavWriter.setsampwidth(source.SAMPLE_WIDTH)
                wavWriter.setnchannels(source.CHANNELS)
                wavWriter.setframerate(source.RATE)
                wavWriter.writeframes(frame_data)
            finally:
                wavWriter.close()
            wav_data = wav_file.getvalue()
        # Fixed: the PATH lookup was computed and then ignored in favour of
        # a hard-coded /usr/local/bin/flac; that path is now only a
        # fallback.  (Unused platform/path locals removed.)
        converter = shExists("flac") or "/usr/local/bin/flac"
        # Argument list with shell=False avoids spawning a shell.
        cmd = subprocess.Popen(
            [converter, "--stdout", "--totally-silent", "--best", "-"],
            stdin=subprocess.PIPE, stdout=subprocess.PIPE
        )
        flacData, stderr = cmd.communicate(wav_data)
        return flacData
    def record(self, source, duration = 2):
        """Record up to `duration` seconds from `source`; return AudioData."""
        assert isinstance(source, AudioSource) and source.stream
        frames = io.BytesIO()
        seconds_per_buffer = (source.CHUNK + 0.0) / source.RATE
        elapsed_time = 0
        while True:
            elapsed_time += seconds_per_buffer
            if duration and elapsed_time > duration: break
            buffer = source.stream.read(source.CHUNK)
            if len(buffer) == 0: break  # end of stream
            frames.write(buffer)
        frameData = frames.getvalue()
        frames.close()
        return AudioData(source.RATE, self.samp2flac(source, frameData))
    def listen(self, source, timeout = None):
        """Wait for speech on `source`, record until a pause, return AudioData.

        Raises TimeoutError when no speech starts within `timeout` seconds.
        NOTE(review): TimeoutError is a builtin on Python 3 only -- on
        Python 2 this raise would itself be a NameError; confirm target.
        """
        assert isinstance(source, AudioSource) and source.stream
        frames = collections.deque()
        assert self.pause_threshold >= self.quiet_duration >= 0
        seconds_per_buffer = (source.CHUNK + 0.0) / source.RATE
        pause_buffer_count = int(math.ceil(self.pause_threshold / seconds_per_buffer))
        quiet_buffer_count = int(math.ceil(self.quiet_duration / seconds_per_buffer))
        elapsed_time = 0
        # Phase 1: wait for the energy to rise above the threshold, keeping
        # only the trailing quiet_buffer_count buffers of leading quiet.
        while True:
            elapsed_time += seconds_per_buffer
            if timeout and elapsed_time > timeout:
                raise TimeoutError("listening timed out")
            buffer = source.stream.read(source.CHUNK)
            if len(buffer) == 0: break
            frames.append(buffer)
            energy = audioop.rms(buffer, source.SAMPLE_WIDTH)
            if energy > self.energy_threshold:
                break
            if len(frames) > quiet_buffer_count:
                frames.popleft()
        # Phase 2: keep recording until the pause is long enough.
        pause_count = 0
        while True:
            buffer = source.stream.read(source.CHUNK)
            if len(buffer) == 0: break
            frames.append(buffer)
            energy = audioop.rms(buffer, source.SAMPLE_WIDTH)
            if energy > self.energy_threshold:
                pause_count = 0
            else:
                pause_count += 1
            if pause_count > pause_buffer_count:
                break
        # Trim the trailing quiet down to quiet_buffer_count buffers.
        for i in range(quiet_buffer_count, pause_buffer_count): frames.pop()
        frame_data = b"".join(list(frames))
        return AudioData(source.RATE, self.samp2flac(source, frame_data))
    def recognize(self, audio_data, show_all = False):
        """POST the FLAC audio to the speech API and return the result.

        Returns the best transcript, or with show_all=True a list of
        {"text", "confidence"} dicts.  Raises KeyError when the server
        cannot be reached and LookupError when nothing intelligible
        came back.
        """
        assert isinstance(audio_data, AudioData)
        url = "http://www.google.com/speech-api/v2/recognize?client=chromium&lang=%s&key=%s" % (self.language, self.key)
        self.request = Request(url, data = audio_data.data, headers = {"Content-Type": "audio/x-flac; rate=%s" % audio_data.rate})
        try:
            response = urlopen(self.request)
        except Exception:
            # Fixed: narrowed from a bare "except:" so KeyboardInterrupt
            # and SystemExit are no longer swallowed.
            raise KeyError("Server wouldn't respond (invalid key or quota has been maxed out)")
        response_text = response.read().decode("utf-8")
        # The API streams one JSON document per line; the last non-empty
        # "result" entry wins.
        actual_result = []
        for line in response_text.split("\n"):
            if not line: continue
            result = json.loads(line)["result"]
            if len(result) != 0:
                actual_result = result[0]
        if "alternative" not in actual_result:
            raise LookupError("Speech is unintelligible")
        if not show_all:
            for prediction in actual_result["alternative"]:
                if "confidence" in prediction:
                    return prediction["transcript"]
            raise LookupError("Speech is unintelligible")
        spoken_text = []
        # A single alternative without a confidence field counts as certain.
        default_confidence = 0
        if len(actual_result["alternative"])==1: default_confidence = 1
        for prediction in actual_result["alternative"]:
            if "confidence" in prediction:
                spoken_text.append({"text":prediction["transcript"],"confidence":prediction["confidence"]})
            else:
                spoken_text.append({"text":prediction["transcript"],"confidence":default_confidence})
        return spoken_text
def shExists(pgm):
    """Return the full path of executable `pgm` found on PATH, else None.

    Robustness fixes: tolerates an unset PATH (os.getenv could return
    None, which would crash .split) and returns None explicitly instead
    of falling off the end of the function.
    """
    path = os.getenv('PATH') or ''
    for p in path.split(os.path.pathsep):
        candidate = os.path.join(p, pgm)
        if os.path.exists(candidate) and os.access(candidate, os.X_OK):
            return candidate
    return None
| |
import re
from flaskapp.meta import mail
from flaskapp.models import User
from . import WebsiteTestCase, users
class AuthTests(WebsiteTestCase):
    def test_create_account(self):
        """End-to-end check of the account-creation form: page render,
        invalid input, and a successful signup that logs the user in."""
        # test form page
        resp = self.client.get(self.url_for('auth.create_account'))
        self.assertEqual(resp.status_code, 200)
        self.assertTrue(b'<legend>Create a new account</legend>' \
            in resp.data)
        email = users['userA']['email']
        passwd = users['userA']['password']
        # test bad email
        # NOTE(review): the bad-email response below is never asserted on --
        # presumably an email-validation message should be checked here.
        resp = self.create_account('barack', passwd)
        # test bad password confirmation
        resp = self.create_account(email, passwd, 'a')
        self.assertTrue(b'Passwords must match' in resp.data)
        # test good data: redirect home, and the email shows on the page
        resp = self.create_account(email, passwd, passwd)
        self.assertEqual(resp.status_code, 302)
        self.assertEqual(resp.headers['Location'],
                         self.url_for('content.home'))
        resp = self.client.get(self.url_for('content.home'))
        self.assertTrue(email in resp.data.decode('utf-8'))
def test_logout(self):
email = users['userA']['email']
passwd = users['userA']['password']
self.create_account(email, passwd, passwd)
resp = self.client.get(self.url_for('content.home'))
self.assertTrue(email in resp.data.decode('utf-8'))
resp = self.logout()
self.assertEqual(resp.status_code, 302)
resp = self.client.get(self.url_for('content.home'))
self.assertFalse(email in resp.data.decode('utf-8'))
def test_login(self):
# test form page
resp = self.client.get(self.url_for('auth.login'))
self.assertEqual(resp.status_code, 200)
self.assertTrue(b'<legend>Log in to your account</legend>' \
in resp.data)
email = users['userA']['email']
passwd = users['userA']['password']
# test invalid login
resp = self.login(email, passwd)
self.assertTrue(b'Email and password must match' in resp.data)
# create account
self.create_account(email, passwd)
resp = self.client.get(self.url_for('content.home'))
self.assertTrue(email in resp.data.decode('utf-8'))
self.logout()
# test valid login
resp = self.login(email, passwd)
self.assertEqual(resp.status_code, 302)
self.assertEqual(resp.headers['Location'],
self.url_for('content.home'))
resp = self.client.get(self.url_for('content.home'))
self.assertTrue(email in resp.data.decode('utf-8'))
def test_forgot(self):
    """Password-reset request emails a working reset link."""
    email = users['userA']['email']
    passwd = users['userA']['password']
    self.create_account(email, passwd)
    self.logout()
    # the reset-request form renders
    resp = self.client.get(self.url_for('auth.forgot'))
    self.assertIn(b'<legend>Reset your password</legend>', resp.data)
    # an unknown address is rejected
    resp = self.client.post(self.url_for('auth.forgot'),
                            data=dict(email='doesntexist@example.com'))
    self.assertIn(b'not registered', resp.data)
    # a known address sends exactly one reset mail
    with mail.record_messages() as outbox:
        resp = self.client.post(self.url_for('auth.forgot'),
                                data=dict(email=email))
        self.assertIn(b'Success', resp.data)
        self.assertEqual(len(outbox), 1)
        self.assertEqual(outbox[0].subject, 'Password Reset Request')
        # pull the reset link out of the mail body
        match = re.search('/auth/reset-password.*$', outbox[0].body)
        reset_url = match.group(0)
    # the emailed key leads to the new-password form
    resp = self.client.get(reset_url)
    self.assertIn(b'Choose a new password', resp.data)
def test_reset_password(self):
    """The reset link validates its key/email pair and sets a new password."""
    email = users['userA']['email']
    passwd = users['userA']['password']
    self.create_account(email, passwd)
    self.logout()
    # request a reset link via email
    with mail.record_messages() as outbox:
        resp = self.client.post(self.url_for('auth.forgot'),
                                data=dict(email=email))
        self.assertIn(b'Success', resp.data)
        self.assertEqual(len(outbox), 1)
        self.assertEqual(outbox[0].subject, 'Password Reset Request')
        match = re.search('/auth/reset-password.*$', outbox[0].body)
        reset_url = match.group(0)
    # a request without key/email is a 400
    resp = self.client.get(self.url_for('auth.reset_password'))
    self.assertIn(b'Error', resp.data)
    self.assertEqual(resp.status_code, 400)
    # a bogus key is rejected
    resp = self.client.get(self.url_for('auth.reset_password', key='badkey'))
    self.assertIn(b'Error', resp.data)
    # a valid key paired with the wrong email is rejected
    tampered_url = re.sub('email=.*?&|$', '', reset_url) + 'email=bademail'
    resp = self.client.get(tampered_url)
    self.assertIn(b'Error', resp.data)
    # the untouched link renders the form
    resp = self.client.get(reset_url)
    self.assertEqual(resp.status_code, 200)
    # submit a new password
    new_password = 'newpasswd'
    resp = self.client.post(reset_url, data=dict(
        password=new_password, password_confirm=new_password))
    self.assertEqual(resp.status_code, 200)
    self.assertIn(b'Success', resp.data)
    # the user is now logged in
    resp = self.client.get('/')
    self.assertIn(email, resp.data.decode('utf-8'))
    # and can log back in with the new password
    self.logout()
    self.login(email, new_password)
    resp = self.client.get('/')
    self.assertIn(email, resp.data.decode('utf-8'))
def test_email_verification_request(self):
    """Verification mail is sent and its link flips User.is_verified."""
    email = users['userA']['email']
    passwd = users['userA']['password']
    self.create_account(email, passwd)
    # a fresh account starts out unverified
    with self.app.app_context():
        user = User.query.filter(User.email == email).first()
        self.assertEqual(user.is_verified, False)
    # the request form renders
    resp = self.client.get(self.url_for('auth.email_verification_request'))
    self.assertIn(b'<legend>Send verification request</legend>', resp.data)
    # posting the form sends exactly one verification mail
    with mail.record_messages() as outbox:
        resp = self.client.post(
            self.url_for('auth.email_verification_request'))
        self.assertIn(b'Success', resp.data)
        self.assertEqual(len(outbox), 1)
        self.assertEqual(outbox[0].subject,
                         'Flaskapp Account: Please Confirm Email')
        # pull the verification link out of the mail body
        match = re.search('/auth/verify-email.*$', outbox[0].body)
        verify_url = match.group(0)
    # a request without key/email is a 400
    resp = self.client.get(self.url_for('auth.verify_email'))
    self.assertIn(b'Error', resp.data)
    self.assertEqual(resp.status_code, 400)
    # a bogus key is rejected
    resp = self.client.get(self.url_for('auth.verify_email', key='badkey'))
    self.assertIn(b'Error', resp.data)
    # a valid key paired with the wrong email is rejected
    tampered_url = re.sub('email=.*?&|$', '', verify_url) + 'email=bademail'
    resp = self.client.get(tampered_url)
    self.assertIn(b'Error', resp.data)
    # the untouched link verifies the account
    resp = self.client.get(verify_url)
    self.assertIn(b'Your email has been verified', resp.data)
    # the flag is now persisted on the user record
    with self.app.app_context():
        user = User.query.filter(User.email == email).first()
        self.assertEqual(user.is_verified, True)
| |
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
The implementations of the hdfs clients. The hadoop cli client and the
snakebite client.
"""
from luigi.target import FileAlreadyExists
from luigi.contrib.hdfs.config import load_hadoop_cmd
from luigi.contrib.hdfs import abstract_client as hdfs_abstract_client
from luigi.contrib.hdfs import config as hdfs_config
from luigi.contrib.hdfs import error as hdfs_error
import logging
import subprocess
import datetime
import os
import re
import warnings
logger = logging.getLogger('luigi-interface')
def create_hadoopcli_client():
    """
    Given that we want one of the hadoop cli clients (unlike snakebite),
    this one will return the right one.

    :raises ValueError: if the configured Hadoop version is unknown.
    """
    version = hdfs_config.get_configured_hadoop_version()
    if version == "cdh4":
        return HdfsClient()
    elif version == "cdh3":
        return HdfsClientCdh3()
    elif version == "apache1":
        return HdfsClientApache1()
    else:
        # Bug fix: the two implicitly-concatenated string literals were
        # missing a separating space, producing "versionconfiguration".
        raise ValueError("Error: Unknown version specified in Hadoop version "
                         "configuration parameter")
class HdfsClient(hdfs_abstract_client.HdfsFileSystem):
    """
    This client uses Apache 2.x syntax for file system commands, which also matched CDH4.
    """

    recursive_listdir_cmd = ['-ls', '-R']

    @staticmethod
    def call_check(command):
        """
        Run *command*, returning its stdout.

        :raises hdfs_error.HDFSCliError: if the command exits non-zero.
        """
        p = subprocess.Popen(command, stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE, close_fds=True,
                             universal_newlines=True)
        stdout, stderr = p.communicate()
        if p.returncode != 0:
            raise hdfs_error.HDFSCliError(command, p.returncode, stdout, stderr)
        return stdout

    def exists(self, path):
        """
        Use ``hadoop fs -stat`` to check file existence.
        """
        cmd = load_hadoop_cmd() + ['fs', '-stat', path]
        logger.debug('Running file existence check: %s',
                     subprocess.list2cmdline(cmd))
        p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE, close_fds=True,
                             universal_newlines=True)
        stdout, stderr = p.communicate()
        if p.returncode == 0:
            return True
        else:
            # Only a "No such file or directory" error means "does not
            # exist"; any other failure is surfaced as an exception.
            not_found_pattern = "^.*No such file or directory$"
            not_found_re = re.compile(not_found_pattern)
            for line in stderr.split('\n'):
                if not_found_re.match(line):
                    return False
            raise hdfs_error.HDFSCliError(cmd, p.returncode, stdout, stderr)

    def rename(self, path, dest):
        """
        Move *path* (a single path or a list of paths) to *dest*, creating
        the destination's parent directory first if it does not exist.
        """
        parent_dir = os.path.dirname(dest)
        if parent_dir != '' and not self.exists(parent_dir):
            self.mkdir(parent_dir)
        if not isinstance(path, (list, tuple)):
            path = [path]
        else:
            warnings.warn("Renaming multiple files at once is not atomic.",
                          stacklevel=2)
        self.call_check(load_hadoop_cmd() + ['fs', '-mv'] + path + [dest])

    def remove(self, path, recursive=True, skip_trash=False):
        """Delete *path*, optionally recursively and/or bypassing the trash."""
        if recursive:
            cmd = load_hadoop_cmd() + ['fs', '-rm', '-r']
        else:
            cmd = load_hadoop_cmd() + ['fs', '-rm']
        if skip_trash:
            cmd = cmd + ['-skipTrash']
        cmd = cmd + [path]
        self.call_check(cmd)

    def chmod(self, path, permissions, recursive=False):
        """Change the permissions of *path*."""
        if recursive:
            cmd = load_hadoop_cmd() + ['fs', '-chmod', '-R', permissions, path]
        else:
            cmd = load_hadoop_cmd() + ['fs', '-chmod', permissions, path]
        self.call_check(cmd)

    def chown(self, path, owner, group, recursive=False):
        """
        Change owner and/or group of *path*; passing ``None`` for either
        leaves that part empty in the ``owner:group`` argument.
        """
        if owner is None:
            owner = ''
        if group is None:
            group = ''
        ownership = "%s:%s" % (owner, group)
        if recursive:
            cmd = load_hadoop_cmd() + ['fs', '-chown', '-R', ownership, path]
        else:
            cmd = load_hadoop_cmd() + ['fs', '-chown', ownership, path]
        self.call_check(cmd)

    def count(self, path):
        """
        Return a dict with ``content_size``, ``dir_count`` and
        ``file_count`` for *path*, as reported by ``hadoop fs -count``.
        """
        cmd = load_hadoop_cmd() + ['fs', '-count', path]
        stdout = self.call_check(cmd)
        results = None
        for line in stdout.split('\n'):
            # Skip JVM warnings and blank lines surrounding the output.
            if line.startswith("OpenJDK 64-Bit Server VM warning") or line.startswith("It's highly recommended") or not line:
                continue
            # Bug fix: split the individual output line, not the whole
            # stdout -- splitting stdout made the 4-tuple unpack fail
            # whenever warning lines were present in the output.
            (dir_count, file_count, content_size, ppath) = line.split()
            results = {'content_size': content_size, 'dir_count': dir_count,
                       'file_count': file_count}
        return results

    def copy(self, path, destination):
        """Copy a file within HDFS."""
        self.call_check(load_hadoop_cmd() + ['fs', '-cp', path, destination])

    def put(self, local_path, destination):
        """Upload a local file into HDFS."""
        self.call_check(load_hadoop_cmd() + ['fs', '-put', local_path, destination])

    def get(self, path, local_destination):
        """Download an HDFS file to the local filesystem."""
        self.call_check(load_hadoop_cmd() + ['fs', '-get', path, local_destination])

    def getmerge(self, path, local_destination, new_line=False):
        """Merge the files under *path* into one local file, optionally
        inserting a newline between them (``-nl``)."""
        if new_line:
            cmd = load_hadoop_cmd() + ['fs', '-getmerge', '-nl', path, local_destination]
        else:
            cmd = load_hadoop_cmd() + ['fs', '-getmerge', path, local_destination]
        self.call_check(cmd)

    def mkdir(self, path, parents=True, raise_if_exists=False):
        """
        Create a directory, with ``-p`` when *parents* is set.

        :raises FileAlreadyExists: if *path* exists and raise_if_exists is set.
        """
        if parents and raise_if_exists:
            raise NotImplementedError("HdfsClient.mkdir can't raise with -p")
        try:
            cmd = (load_hadoop_cmd() + ['fs', '-mkdir'] +
                   (['-p'] if parents else []) +
                   [path])
            self.call_check(cmd)
        except hdfs_error.HDFSCliError as ex:
            if "File exists" in ex.stderr:
                if raise_if_exists:
                    raise FileAlreadyExists(ex.stderr)
            else:
                raise

    def listdir(self, path, ignore_directories=False, ignore_files=False,
                include_size=False, include_type=False, include_time=False,
                recursive=False):
        """
        Yield the entries under *path*. With any ``include_*`` flag set,
        tuples of ``(path, extra...)`` are yielded instead of bare paths.
        """
        if not path:
            path = "."  # default to current/home catalog
        if recursive:
            cmd = load_hadoop_cmd() + ['fs'] + self.recursive_listdir_cmd + [path]
        else:
            cmd = load_hadoop_cmd() + ['fs', '-ls', path]
        lines = self.call_check(cmd).split('\n')
        for line in lines:
            if not line:
                continue
            elif line.startswith('OpenJDK 64-Bit Server VM warning') or line.startswith('It\'s highly recommended') or line.startswith('Found'):
                continue  # "hadoop fs -ls" outputs "Found %d items" as its first line
            elif ignore_directories and line[0] == 'd':
                continue
            elif ignore_files and line[0] == '-':
                continue
            data = line.split(' ')
            # Renamed from ``file`` to avoid shadowing the builtin.
            entry = data[-1]
            size = int(data[-4])
            line_type = line[0]
            extra_data = ()
            if include_size:
                extra_data += (size,)
            if include_type:
                extra_data += (line_type,)
            if include_time:
                time_str = '%sT%s' % (data[-3], data[-2])
                modification_time = datetime.datetime.strptime(time_str,
                                                               '%Y-%m-%dT%H:%M')
                extra_data += (modification_time,)
            if len(extra_data) > 0:
                yield (entry,) + extra_data
            else:
                yield entry

    def touchz(self, path):
        """Create a zero-length file at *path*."""
        self.call_check(load_hadoop_cmd() + ['fs', '-touchz', path])
class HdfsClientCdh3(HdfsClient):
    """
    This client uses CDH3 syntax for file system commands.
    """

    def mkdir(self, path):
        """
        No -p switch, so this will fail creating ancestors.
        """
        try:
            self.call_check(load_hadoop_cmd() + ['fs', '-mkdir', path])
        except hdfs_error.HDFSCliError as ex:
            # Translate an "already there" failure; re-raise anything else.
            if "File exists" in ex.stderr:
                raise FileAlreadyExists(ex.stderr)
            raise

    def remove(self, path, recursive=True, skip_trash=False):
        """Delete *path* via the CDH3 ``-rmr`` / ``-rm`` commands."""
        flag = '-rmr' if recursive else '-rm'
        cmd = load_hadoop_cmd() + ['fs', flag]
        if skip_trash:
            cmd.append('-skipTrash')
        cmd.append(path)
        self.call_check(cmd)
class HdfsClientApache1(HdfsClientCdh3):
    """
    This client uses Apache 1.x syntax for file system commands,
    which are similar to CDH3 except for the file existence check.
    """

    recursive_listdir_cmd = ['-lsr']

    def exists(self, path):
        """Use ``hadoop fs -test -e``; the exit code encodes existence."""
        cmd = load_hadoop_cmd() + ['fs', '-test', '-e', path]
        process = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE, close_fds=True)
        out, err = process.communicate()
        if process.returncode == 0:
            return True
        if process.returncode == 1:
            return False
        raise hdfs_error.HDFSCliError(cmd, process.returncode, out, err)
| |
from contextlib import contextmanager
import gc
import os
import os.path
from alembic.config import Config
from alembic import command
from maxminddb.const import MODE_AUTO
from sqlalchemy import (
event,
inspect,
)
from sqlalchemy.schema import (
DropTable,
MetaData,
Table,
)
from webtest import TestApp
from ichnaea.api.locate.searcher import (
configure_country_searcher,
configure_position_searcher,
)
from ichnaea.async.app import celery_app
from ichnaea.async.config import (
init_worker,
shutdown_worker,
)
from ichnaea.cache import configure_redis
from ichnaea.config import DummyConfig
from ichnaea.constants import GEOIP_CITY_ACCURACY
from ichnaea.db import configure_db
from ichnaea.geocalc import country_max_radius
from ichnaea.geoip import configure_geoip
from ichnaea.http import configure_http_session
from ichnaea.log import (
configure_raven,
configure_stats,
DebugRavenClient,
DebugStatsClient,
)
from ichnaea.models import _Model, ApiKey
from ichnaea.webapp.config import main
# Make the newer unittest APIs available under Python 2.6 (via unittest2).
try:
from unittest2 import TestCase # NOQA
except ImportError:
from unittest import TestCase
# Locations of the bundled GeoIP test databases.
TEST_DIRECTORY = os.path.dirname(__file__)
DATA_DIRECTORY = os.path.join(TEST_DIRECTORY, 'data')
GEOIP_TEST_FILE = os.path.join(DATA_DIRECTORY, 'GeoIP2-City-Test.mmdb')
# A database of a different type -- presumably used to exercise error
# handling when a non-City DB is configured; TODO confirm against callers.
GEOIP_BAD_FILE = os.path.join(
    DATA_DIRECTORY, 'GeoIP2-Connection-Type-Test.mmdb')

# Connection URIs from the environment; Redis falls back to a local
# instance, the SQL URI has no default.
SQLURI = os.environ.get('SQLURI')
REDIS_URI = os.environ.get('REDIS_URI', 'redis://localhost:6379/1')

# Module-global holder for the per-test default DB session; populated by
# DBTestCase.setUp and cleared again in its tearDown.
SESSION = {}

# Some test-data constants
TEST_CONFIG = DummyConfig({
    'assets': {
        'bucket': 'localhost.bucket',
        'url': 'http://127.0.0.1:7001/static/',
    },
    'export:test': {
        'url': None,
        'skip_keys': 'export',
        'batch': '3',
    },
    'export:internal': {
        'url': 'internal://',
        'metadata': 'true',
        'batch': '0',
    },
    'import:ocid': {
        'url': 'http://127.0.0.1:9/downloads/',
        'apikey': 'xxxxxxxx-yyyy-xxxx-yyyy-xxxxxxxxxxxx',
    },
    'locate:fallback': {
        'url': 'http://127.0.0.1:9/?api',
        'ratelimit': '10',
        'ratelimit_expire': '60',
        'ratelimit_interval': '5',
        'cache_expire': '60',
    },
})

# Expected lookup results for addresses contained in the GeoIP test
# database, keyed by a human-readable location name.
GEOIP_DATA = {
    'London': {
        'city': True,
        'country_code': 'GB',
        'country_name': 'United Kingdom',
        'ip': '81.2.69.192',
        'latitude': 51.5142,
        'longitude': -0.0931,
        'accuracy': GEOIP_CITY_ACCURACY,
    },
    'Bhutan': {
        'city': False,
        'country_code': 'BT',
        'country_name': 'Bhutan',
        'ip': '67.43.156.1',
        'latitude': 27.5,
        'longitude': 90.5,
        'accuracy': country_max_radius('BT'),
    },
}

# Great Britain coordinates and network codes -- presumably shared
# defaults for location tests; verify against the test callers.
GB_LAT = 51.5
GB_LON = -0.1
GB_MCC = 234
GB_MNC = 30
def _make_db(uri=SQLURI):
    """Return a configured database wrapper for *uri* (defaults to $SQLURI)."""
    db = configure_db(uri)
    return db
def _make_redis(uri=REDIS_URI):
    """Return a configured Redis client for *uri* (defaults to $REDIS_URI)."""
    client = configure_redis(uri)
    return client
def _make_app(app_config=TEST_CONFIG,
              _db_rw=None, _db_ro=None, _http_session=None, _geoip_db=None,
              _raven_client=None, _redis_client=None, _stats_client=None,
              _country_searcher=None, _position_searcher=None):
    """Build the WSGI app with injected test doubles, wrapped in a TestApp."""
    application = main(
        app_config,
        _db_rw=_db_rw,
        _db_ro=_db_ro,
        _geoip_db=_geoip_db,
        _http_session=_http_session,
        _raven_client=_raven_client,
        _redis_client=_redis_client,
        _stats_client=_stats_client,
        _country_searcher=_country_searcher,
        _position_searcher=_position_searcher,
    )
    return TestApp(application)
class LogTestCase(TestCase):
    """Base TestCase wiring up debug Raven and Stats clients.

    Subclasses get ``cls.raven_client`` / ``cls.stats_client`` plus
    helpers to assert on the messages captured during a test.
    """

    @classmethod
    def setUpClass(cls):
        super(LogTestCase, cls).setUpClass()
        # Use a debug configuration
        cls.raven_client = configure_raven(
            None, transport='sync', _client=DebugRavenClient())
        cls.stats_client = configure_stats(
            None, _client=DebugStatsClient(tag_support=True))

    @classmethod
    def tearDownClass(cls):
        super(LogTestCase, cls).tearDownClass()
        del cls.raven_client
        del cls.stats_client

    def setUp(self):
        super(LogTestCase, self).setUp()
        # Raven messages not matched by check_raven during the test.
        self._unexpected_errors = []

    def tearDown(self):
        super(LogTestCase, self).tearDown()
        # Fail if any captured exception was not expected, then reset
        # both debug clients for the next test.
        self.assert_no_unexpected_raven_errors()
        del self._unexpected_errors
        self.raven_client._clear()
        self.stats_client._clear()

    def find_stats_messages(self, msg_type, msg_name,
                            msg_value=None, msg_tags=(), _client=None):
        """Parse the captured statsd messages and return those matching.

        ``msg_type`` is one of counter/timer/gauge/histogram/meter/set;
        ``msg_value`` and ``msg_tags``, when given, narrow the match.
        """
        data = {
            'counter': [],
            'timer': [],
            'gauge': [],
            'histogram': [],
            'meter': [],
            'set': [],
        }
        if _client is None:
            client = self.stats_client
        else:
            client = _client
        for msg in client.msgs:
            tags = ()
            # Tags, when present, are appended after a "|#" separator.
            if '|#' in msg:
                parts = msg.split('|#')
                tags = parts[-1].split(',')
                msg = parts[0]
            # Remaining format: "<name>:<value>|<suffix>"; the suffix
            # encodes the metric type.
            suffix = msg.split('|')[-1]
            name, value = msg.split('|')[0].split(':')
            value = int(value)
            if suffix == 'g':
                data['gauge'].append((name, value, tags))
            elif suffix == 'ms':
                data['timer'].append((name, value, tags))
            elif suffix.startswith('c'):
                # prefix match -- counter suffixes may carry trailing
                # data; TODO confirm the exact wire format.
                data['counter'].append((name, value, tags))
            elif suffix == 'h':
                data['histogram'].append((name, value, tags))
            elif suffix == 'm':
                data['meter'].append((name, value, tags))
            elif suffix == 's':
                data['set'].append((name, value, tags))
        result = []
        for msg in data.get(msg_type):
            if msg[0] == msg_name:
                if msg_value is None or msg[1] == msg_value:
                    if not msg_tags or msg[2] == msg_tags:
                        result.append((msg[0], msg[1], msg[2]))
        return result

    def check_raven(self, expected=None):
        """Checks the raven message stream looking for the expected messages.

        The expected argument should be a list of either names or tuples.
        If it is a tuple, it should be a tuple of name and an expected count.

        The names are matched via startswith against the captured exception
        messages.
        """
        msgs = self.raven_client.msgs
        found_msgs = [msg['message'] for msg in msgs]
        matched_msgs = []
        if expected is None:
            expected = ()
        for exp in expected:
            count = 1
            name = exp
            if isinstance(exp, tuple):
                name, count = exp
            matches = [found for found in found_msgs if found.startswith(name)]
            matched_msgs.extend(matches)
            self.assertEqual(len(matches), count, found_msgs)
        # Remember everything captured but not expected; tearDown
        # asserts (via assert_no_unexpected_raven_errors) it is empty.
        self._unexpected_errors = [msg for msg in msgs
                                   if msg['message'] not in matched_msgs]

    def assert_no_unexpected_raven_errors(self):
        """Fail if check_raven left any unmatched captured exceptions."""
        self.assertEqual(len(self._unexpected_errors), 0,
                         self._unexpected_errors)

    def check_stats(self, _client=None, total=None, **kw):
        """Checks a partial specification of messages to be found in
        the stats message stream.

        Keyword arguments map a message type to a list of predicates;
        each predicate is a name, or a tuple of (name, count),
        (name, count, value) or (name, count, value, tags).
        """
        if _client is None:
            client = self.stats_client
        else:
            client = _client
        if total is not None:
            self.assertEqual(total, len(client.msgs), client.msgs)
        for (msg_type, preds) in kw.items():
            for pred in preds:
                match = 1
                value = None
                tags = ()
                if isinstance(pred, str):
                    name = pred
                elif isinstance(pred, tuple):
                    if len(pred) == 2:
                        (name, match) = pred
                        # A list in the second slot means tags, not a count.
                        if isinstance(match, list):
                            tags = match
                            match = 1
                    elif len(pred) == 3:
                        (name, match, value) = pred
                        # A list in the third slot means tags, not a value.
                        if isinstance(value, list):
                            tags = value
                            value = None
                    elif len(pred) == 4:
                        (name, match, value, tags) = pred
                    else:
                        raise TypeError('wanted 2, 3 or 4 tuple, got %s'
                                        % type(pred))
                else:
                    raise TypeError('wanted str or tuple, got %s'
                                    % type(pred))
                msgs = self.find_stats_messages(
                    msg_type, name, value, tags, _client=client)
                if isinstance(match, int):
                    self.assertEqual(match, len(msgs),
                                     msg='%s %s not found' % (msg_type, name))
class DBTestCase(LogTestCase):
    """Wraps each test in rolled-back transactions on rw and ro sessions."""
    # Inspired by a blog post:
    # http://sontek.net/blog/detail/writing-tests-for-pyramid-and-sqlalchemy

    # Name of the attribute exposed as ``self.session`` / SESSION['default'].
    default_session = 'db_rw_session'
    # When True, setUp installs statement-capturing listeners on both
    # connections (see setup_db_event_tracking).
    track_connection_events = False

    @contextmanager
    def db_call_checker(self):
        """Temporarily track SQL statements; yields ``check_db_calls``."""
        try:
            self.setup_db_event_tracking()
            yield self.check_db_calls
        finally:
            self.teardown_db_event_tracking()

    def check_db_calls(self, rw=None, ro=None):
        """Assert the number of statements issued per connection role."""
        if rw is not None:
            events = self.db_events['rw']['calls']
            self.assertEqual(len(events), rw, events)
        if ro is not None:
            events = self.db_events['ro']['calls']
            self.assertEqual(len(events), ro, events)

    def reset_db_event_tracking(self):
        # Fresh per-connection storage for captured calls and the
        # listener functions that must be removed later.
        self.db_events = {
            'rw': {'calls': [], 'handler': None},
            'ro': {'calls': [], 'handler': None},
        }

    def setup_db_event_tracking(self):
        """Attach before_cursor_execute listeners to both connections."""
        self.reset_db_event_tracking()

        def scoped_conn_event_handler(calls):
            # Bind the capture list so each connection gets its own handler.
            def conn_event_handler(**kw):
                calls.append((kw['statement'], kw['parameters']))
            return conn_event_handler

        rw_handler = scoped_conn_event_handler(self.db_events['rw']['calls'])
        self.db_events['rw']['handler'] = rw_handler
        event.listen(self.rw_conn, 'before_cursor_execute',
                     rw_handler, named=True)

        ro_handler = scoped_conn_event_handler(self.db_events['ro']['calls'])
        self.db_events['ro']['handler'] = ro_handler
        event.listen(self.ro_conn, 'before_cursor_execute',
                     ro_handler, named=True)

    def teardown_db_event_tracking(self):
        # Remove the listeners registered in setup_db_event_tracking.
        event.remove(self.ro_conn, 'before_cursor_execute',
                     self.db_events['ro']['handler'])
        event.remove(self.rw_conn, 'before_cursor_execute',
                     self.db_events['rw']['handler'])
        self.reset_db_event_tracking()

    def setUp(self):
        """Open per-test connections/transactions and bind sessions to them."""
        super(DBTestCase, self).setUp()
        self.rw_conn = self.db_rw.engine.connect()
        self.rw_trans = self.rw_conn.begin()
        self.db_rw.session_factory.configure(bind=self.rw_conn)
        self.db_rw_session = self.db_rw.session()
        self.ro_conn = self.db_ro.engine.connect()
        self.ro_trans = self.ro_conn.begin()
        self.db_ro.session_factory.configure(bind=self.ro_conn)
        self.db_ro_session = self.db_ro.session()
        # set up a default session
        default_session = getattr(self, self.default_session)
        setattr(self, 'session', default_session)
        SESSION['default'] = default_session
        if self.track_connection_events:
            self.setup_db_event_tracking()

    def tearDown(self):
        """Roll everything back so each test leaves the database untouched."""
        super(DBTestCase, self).tearDown()
        if self.track_connection_events:
            self.teardown_db_event_tracking()
        del SESSION['default']
        del self.session
        # Tear down ro then rw; within each role: rollback, close the
        # session, unbind the factory, close transaction and connection.
        self.ro_trans.rollback()
        self.db_ro_session.close()
        del self.db_ro_session
        self.db_ro.session_factory.configure(bind=None)
        self.ro_trans.close()
        del self.ro_trans
        self.ro_conn.close()
        del self.ro_conn
        self.rw_trans.rollback()
        self.db_rw_session.close()
        del self.db_rw_session
        self.db_rw.session_factory.configure(bind=None)
        self.rw_trans.close()
        del self.rw_trans
        self.rw_conn.close()
        del self.rw_conn

    @classmethod
    def setUpClass(cls):
        super(DBTestCase, cls).setUpClass()
        # Separate engine/pool per role; both created via _make_db().
        cls.db_rw = _make_db()
        cls.db_ro = _make_db()

    @classmethod
    def tearDownClass(cls):
        super(DBTestCase, cls).tearDownClass()
        cls.db_rw.engine.pool.dispose()
        del cls.db_rw
        cls.db_ro.engine.pool.dispose()
        del cls.db_ro

    @classmethod
    def setup_tables(cls, engine):
        """Create all model tables and stamp the alembic head revision."""
        with engine.connect() as conn:
            trans = conn.begin()
            _Model.metadata.create_all(engine)
            # Now stamp the latest alembic version
            alembic_cfg = Config()
            alembic_cfg.set_section_option('alembic',
                                           'script_location',
                                           'alembic')
            alembic_cfg.set_section_option('alembic',
                                           'sqlalchemy.url',
                                           str(engine.url))
            command.stamp(alembic_cfg, 'head')
            trans.commit()

    @classmethod
    def cleanup_tables(cls, engine):
        """Drop every table currently present in the database."""
        # reflect and delete all tables, not just those known to
        # our current code version / models
        metadata = MetaData()
        inspector = inspect(engine)
        tables = []
        with engine.connect() as conn:
            trans = conn.begin()
            for t in inspector.get_table_names():
                tables.append(Table(t, metadata))
            for t in tables:
                conn.execute(DropTable(t))
            trans.commit()
class HTTPTestCase(object):
    """Mixin providing a shared outbound HTTP session per test class."""

    @classmethod
    def setUpClass(cls):
        super(HTTPTestCase, cls).setUpClass()
        # A single-connection session shared by all tests of the class.
        cls.http_session = configure_http_session(size=1)

    @classmethod
    def tearDownClass(cls):
        super(HTTPTestCase, cls).tearDownClass()
        cls.http_session.close()
        del cls.http_session
class GeoIPTestCase(LogTestCase):
    """Mixin opening the bundled GeoIP test database once per class."""

    geoip_data = GEOIP_DATA

    @classmethod
    def _open_db(cls, filename=None, mode=MODE_AUTO):
        """Open *filename* (default: the City test DB) via configure_geoip."""
        db_file = GEOIP_TEST_FILE if filename is None else filename
        return configure_geoip(
            db_file, mode=mode, raven_client=cls.raven_client)

    @classmethod
    def setUpClass(cls):
        super(GeoIPTestCase, cls).setUpClass()
        cls.geoip_db = cls._open_db()

    @classmethod
    def tearDownClass(cls):
        super(GeoIPTestCase, cls).tearDownClass()
        del cls.geoip_db
class RedisTestCase(LogTestCase):
    """Mixin providing a Redis client; flushes the test DB after each test."""

    @classmethod
    def setUpClass(cls):
        super(RedisTestCase, cls).setUpClass()
        cls.redis_client = _make_redis()

    @classmethod
    def tearDownClass(cls):
        super(RedisTestCase, cls).tearDownClass()
        cls.redis_client.connection_pool.disconnect()
        del cls.redis_client

    def tearDown(self):
        super(RedisTestCase, self).tearDown()
        # Leave a clean Redis DB behind for the next test.
        self.redis_client.flushdb()
class ConnectionTestCase(DBTestCase, GeoIPTestCase,
                         HTTPTestCase, RedisTestCase):
    # Aggregate mixin combining database, GeoIP, HTTP session and
    # Redis fixtures; adds no behavior of its own.
    pass
class APITestCase(ConnectionTestCase):
    """Adds configured country/position searchers to the connection fixtures."""

    @classmethod
    def setUpClass(cls):
        super(APITestCase, cls).setUpClass()
        factories = (
            ('country_searcher', configure_country_searcher),
            ('position_searcher', configure_position_searcher),
        )
        for attr_name, factory in factories:
            setattr(cls, attr_name, factory(
                TEST_CONFIG,
                geoip_db=cls.geoip_db, raven_client=cls.raven_client,
                redis_client=cls.redis_client, stats_client=cls.stats_client))

    @classmethod
    def tearDownClass(cls):
        super(APITestCase, cls).tearDownClass()
        del cls.country_searcher
        del cls.position_searcher
class AppTestCase(APITestCase):
    """Full web-app harness; tests default to the read-only session."""

    default_session = 'db_ro_session'

    @classmethod
    def setUpClass(cls):
        super(AppTestCase, cls).setUpClass()
        fixtures = dict(
            _db_rw=cls.db_rw,
            _db_ro=cls.db_ro,
            _http_session=cls.http_session,
            _geoip_db=cls.geoip_db,
            _raven_client=cls.raven_client,
            _redis_client=cls.redis_client,
            _stats_client=cls.stats_client,
            _country_searcher=cls.country_searcher,
            _position_searcher=cls.position_searcher,
        )
        cls.app = _make_app(app_config=TEST_CONFIG, **fixtures)

    @classmethod
    def tearDownClass(cls):
        super(AppTestCase, cls).tearDownClass()
        del cls.app
class CeleryTestCase(ConnectionTestCase):
    """Harness running tasks against the shared celery app."""

    @classmethod
    def setUpClass(cls):
        super(CeleryTestCase, cls).setUpClass()
        cls.celery_app = celery_app
        worker_fixtures = dict(
            _db_rw=cls.db_rw,
            _geoip_db=cls.geoip_db,
            _raven_client=cls.raven_client,
            _redis_client=cls.redis_client,
            _stats_client=cls.stats_client,
        )
        init_worker(celery_app, TEST_CONFIG, **worker_fixtures)

    @classmethod
    def tearDownClass(cls):
        super(CeleryTestCase, cls).tearDownClass()
        shutdown_worker(celery_app)
        del cls.celery_app
class CeleryAppTestCase(AppTestCase, CeleryTestCase):
    # Combined web-app + celery harness; unlike AppTestCase alone
    # (read-only), tests here default to the read-write session.
    default_session = 'db_rw_session'
def setup_package(module):
    """Package-level setup: rebuild all tables and seed the API keys."""
    # look for memory leaks
    gc.set_debug(gc.DEBUG_UNCOLLECTABLE)
    # make sure all models are imported
    from ichnaea.models import base  # NOQA
    from ichnaea.models import content  # NOQA
    db = _make_db()
    DBTestCase.cleanup_tables(db.engine)
    DBTestCase.setup_tables(db.engine)
    # always add a test API key
    session = db.session()
    session.add(ApiKey(valid_key='test', log=True, shortname='test'))
    session.add(ApiKey(valid_key='export', log=False, shortname='export'))
    session.commit()
    session.close()
    db.engine.pool.dispose()
def teardown_package(module):
    """Report any uncollectable objects gathered by the GC debug hooks."""
    if not gc.garbage:
        return
    print('Uncollectable objects found:')
    for leaked in gc.garbage:
        print(leaked)
| |
"""Alexa entity adapters."""
import logging
from typing import TYPE_CHECKING, List
from homeassistant.components import (
alarm_control_panel,
alert,
automation,
binary_sensor,
camera,
cover,
fan,
group,
image_processing,
input_boolean,
input_number,
light,
lock,
media_player,
scene,
script,
sensor,
switch,
timer,
vacuum,
)
from homeassistant.components.climate import const as climate
from homeassistant.const import (
ATTR_DEVICE_CLASS,
ATTR_SUPPORTED_FEATURES,
ATTR_UNIT_OF_MEASUREMENT,
CLOUD_NEVER_EXPOSED_ENTITIES,
CONF_NAME,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
__version__,
)
from homeassistant.core import HomeAssistant, State, callback
from homeassistant.helpers import network
from homeassistant.util.decorator import Registry
from .capabilities import (
Alexa,
AlexaBrightnessController,
AlexaCameraStreamController,
AlexaCapability,
AlexaChannelController,
AlexaColorController,
AlexaColorTemperatureController,
AlexaContactSensor,
AlexaDoorbellEventSource,
AlexaEndpointHealth,
AlexaEqualizerController,
AlexaEventDetectionSensor,
AlexaInputController,
AlexaLockController,
AlexaModeController,
AlexaMotionSensor,
AlexaPercentageController,
AlexaPlaybackController,
AlexaPlaybackStateReporter,
AlexaPowerController,
AlexaPowerLevelController,
AlexaRangeController,
AlexaSceneController,
AlexaSecurityPanelController,
AlexaSeekController,
AlexaSpeaker,
AlexaStepSpeaker,
AlexaTemperatureSensor,
AlexaThermostatController,
AlexaTimeHoldController,
AlexaToggleController,
)
from .const import CONF_DESCRIPTION, CONF_DISPLAY_CATEGORIES
if TYPE_CHECKING:
from .config import AbstractConfig
_LOGGER = logging.getLogger(__name__)

# Registry that decorated adapter classes register themselves into.
ENTITY_ADAPTERS = Registry()

# str.translate table mapping these characters to None, i.e. deleting
# them -- presumably characters Alexa does not accept in names.
TRANSLATION_TABLE = dict.fromkeys(map(ord, r"}{\/|\"()[]+~!><*%"), None)
class DisplayCategory:
    """Possible display categories for Discovery response.

    https://developer.amazon.com/docs/device-apis/alexa-discovery.html#display-categories
    """

    # Describes a combination of devices set to a specific state, when the
    # state change must occur in a specific order. For example, a "watch
    # Netflix" scene might require the: 1. TV to be powered on & 2. Input set
    # to HDMI1. Applies to Scenes
    ACTIVITY_TRIGGER = "ACTIVITY_TRIGGER"

    # Indicates a device that emits pleasant odors and masks unpleasant odors in interior spaces.
    AIR_FRESHENER = "AIR_FRESHENER"

    # Indicates a device that improves the quality of air in interior spaces.
    AIR_PURIFIER = "AIR_PURIFIER"

    # Indicates a smart device in an automobile, such as a dash camera.
    AUTO_ACCESSORY = "AUTO_ACCESSORY"

    # Indicates a security device with video or photo functionality.
    CAMERA = "CAMERA"

    # Indicates a religious holiday decoration that often contains lights.
    CHRISTMAS_TREE = "CHRISTMAS_TREE"

    # Indicates a device that makes coffee.
    COFFEE_MAKER = "COFFEE_MAKER"

    # Indicates a non-mobile computer, such as a desktop computer.
    COMPUTER = "COMPUTER"

    # Indicates an endpoint that detects and reports contact.
    CONTACT_SENSOR = "CONTACT_SENSOR"

    # Indicates a door.
    DOOR = "DOOR"

    # Indicates a doorbell.
    DOORBELL = "DOORBELL"

    # Indicates a window covering on the outside of a structure.
    EXTERIOR_BLIND = "EXTERIOR_BLIND"

    # Indicates a fan.
    FAN = "FAN"

    # Indicates a game console, such as Microsoft Xbox or Nintendo Switch
    GAME_CONSOLE = "GAME_CONSOLE"

    # Indicates a garage door.
    # Garage doors must implement the ModeController interface to open and close the door.
    GARAGE_DOOR = "GARAGE_DOOR"

    # Indicates a wearable device that transmits audio directly into the ear.
    HEADPHONES = "HEADPHONES"

    # Indicates a smart-home hub.
    HUB = "HUB"

    # Indicates a window covering on the inside of a structure.
    INTERIOR_BLIND = "INTERIOR_BLIND"

    # Indicates a laptop or other mobile computer.
    LAPTOP = "LAPTOP"

    # Indicates light sources or fixtures.
    LIGHT = "LIGHT"

    # Indicates a microwave oven.
    MICROWAVE = "MICROWAVE"

    # Indicates a mobile phone.
    MOBILE_PHONE = "MOBILE_PHONE"

    # Indicates an endpoint that detects and reports motion.
    MOTION_SENSOR = "MOTION_SENSOR"

    # Indicates a network-connected music system.
    MUSIC_SYSTEM = "MUSIC_SYSTEM"

    # Indicates a network router.
    NETWORK_HARDWARE = "NETWORK_HARDWARE"

    # An endpoint that cannot be described in one of the other categories.
    OTHER = "OTHER"

    # Indicates an oven cooking appliance.
    OVEN = "OVEN"

    # Indicates a non-mobile phone, such as landline or an IP phone.
    PHONE = "PHONE"

    # Indicates a device that prints.
    PRINTER = "PRINTER"

    # Indicates a network router.
    ROUTER = "ROUTER"

    # Describes a combination of devices set to a specific state, when the
    # order of the state change is not important. For example a bedtime scene
    # might include turning off lights and lowering the thermostat, but the
    # order is unimportant. Applies to Scenes
    SCENE_TRIGGER = "SCENE_TRIGGER"

    # Indicates a projector screen.
    SCREEN = "SCREEN"

    # Indicates a security panel.
    SECURITY_PANEL = "SECURITY_PANEL"

    # Indicates a security system.
    SECURITY_SYSTEM = "SECURITY_SYSTEM"

    # Indicates an electric cooking device that sits on a countertop, cooks at low temperatures,
    # and is often shaped like a cooking pot.
    SLOW_COOKER = "SLOW_COOKER"

    # Indicates an endpoint that locks.
    SMARTLOCK = "SMARTLOCK"

    # Indicates modules that are plugged into an existing electrical outlet.
    # Can control a variety of devices.
    SMARTPLUG = "SMARTPLUG"

    # Indicates the endpoint is a speaker or speaker system.
    SPEAKER = "SPEAKER"

    # Indicates a streaming device such as Apple TV, Chromecast, or Roku.
    STREAMING_DEVICE = "STREAMING_DEVICE"

    # Indicates in-wall switches wired to the electrical system. Can control a
    # variety of devices.
    SWITCH = "SWITCH"

    # Indicates a tablet computer.
    TABLET = "TABLET"

    # Indicates endpoints that report the temperature only.
    TEMPERATURE_SENSOR = "TEMPERATURE_SENSOR"

    # Indicates endpoints that control temperature, stand-alone air
    # conditioners, or heaters with direct temperature control.
    THERMOSTAT = "THERMOSTAT"

    # Indicates the endpoint is a television.
    TV = "TV"

    # Indicates a vacuum cleaner.
    VACUUM_CLEANER = "VACUUM_CLEANER"

    # Indicates a network-connected wearable device, such as an Apple Watch, Fitbit, or Samsung Gear.
    WEARABLE = "WEARABLE"
def generate_alexa_id(entity_id: str) -> str:
    """Return the alexa ID for an entity ID."""
    # Swap the domain separator, then strip disallowed characters.
    alexa_id = entity_id.replace(".", "#")
    return alexa_id.translate(TRANSLATION_TABLE)
class AlexaEntity:
    """An adaptation of an entity, expressed in Alexa's terms.

    The API handlers should manipulate entities only through this interface.
    """

    def __init__(self, hass: HomeAssistant, config: "AbstractConfig", entity: State):
        """Initialize Alexa Entity."""
        self.hass = hass
        self.config = config
        self.entity = entity
        # Per-entity user configuration, empty dict when none was provided.
        self.entity_conf = config.entity_config.get(entity.entity_id, {})

    @property
    def entity_id(self):
        """Return the Entity ID."""
        return self.entity.entity_id

    def friendly_name(self):
        """Return the Alexa API friendly name."""
        configured_name = self.entity_conf.get(CONF_NAME, self.entity.name)
        return configured_name.translate(TRANSLATION_TABLE)

    def description(self):
        """Return the Alexa API description."""
        base = self.entity_conf.get(CONF_DESCRIPTION) or self.entity_id
        return f"{base} via Home Assistant".translate(TRANSLATION_TABLE)

    def alexa_id(self):
        """Return the Alexa API entity id."""
        return generate_alexa_id(self.entity.entity_id)

    def display_categories(self):
        """Return a list of display categories."""
        conf = self.config.entity_config.get(self.entity.entity_id, {})
        # A user-configured category always wins over the adapter default.
        if CONF_DISPLAY_CATEGORIES in conf:
            return [conf[CONF_DISPLAY_CATEGORIES]]
        return self.default_display_categories()

    def default_display_categories(self):
        """Return a list of default display categories.

        This can be overridden by the user in the Home Assistant configuration.
        See also DisplayCategory.
        """
        raise NotImplementedError

    def get_interface(self, capability) -> AlexaCapability:
        """Return the given AlexaInterface.

        Raises _UnsupportedInterface.
        """

    def interfaces(self) -> List[AlexaCapability]:
        """Return a list of supported interfaces.

        Used for discovery. The list should contain AlexaInterface instances.
        If the list is empty, this entity will not be discovered.
        """
        raise NotImplementedError

    def serialize_properties(self):
        """Yield each supported property in API format."""
        for interface in self.interfaces():
            if interface.properties_proactively_reported():
                yield from interface.serialize_properties()

    def serialize_discovery(self):
        """Serialize the entity for discovery."""
        discovery = {
            "displayCategories": self.display_categories(),
            "cookie": {},
            "endpointId": self.alexa_id(),
            "friendlyName": self.friendly_name(),
            "description": self.description(),
            "manufacturerName": "Home Assistant",
            "additionalAttributes": {
                "manufacturer": "Home Assistant",
                "model": self.entity.domain,
                "softwareVersion": __version__,
                "customIdentifier": self.entity_id,
            },
        }
        config_locale = self.config.locale
        serialized_capabilities = []
        for interface in self.interfaces():
            if config_locale not in interface.supported_locales:
                continue
            try:
                serialized_capabilities.append(interface.serialize_discovery())
            except Exception:  # pylint: disable=broad-except
                # One broken capability must not abort discovery of the rest.
                _LOGGER.exception(
                    "Error serializing %s discovery for %s", interface.name(), self.entity
                )
        discovery["capabilities"] = serialized_capabilities
        return discovery
@callback
def async_get_entities(hass, config) -> List[AlexaEntity]:
    """Return all entities that are supported by Alexa.

    Skips entities on the cloud never-expose list, entities whose domain
    has no registered adapter, and entities exposing no interfaces.
    """
    entities = []
    for state in hass.states.async_all():
        if state.entity_id in CLOUD_NEVER_EXPOSED_ENTITIES:
            continue
        if state.domain not in ENTITY_ADAPTERS:
            continue
        alexa_entity = ENTITY_ADAPTERS[state.domain](hass, config, state)
        # An entity with no interfaces would never be discoverable.
        # any() stops at the first yielded interface instead of
        # materializing the whole list just to test for emptiness.
        if not any(True for _ in alexa_entity.interfaces()):
            continue
        entities.append(alexa_entity)
    return entities
@ENTITY_ADAPTERS.register(alert.DOMAIN)
@ENTITY_ADAPTERS.register(automation.DOMAIN)
@ENTITY_ADAPTERS.register(group.DOMAIN)
@ENTITY_ADAPTERS.register(input_boolean.DOMAIN)
class GenericCapabilities(AlexaEntity):
    """A generic, on/off device.

    The choice of last resort.
    """

    def default_display_categories(self):
        """Return the display categories for this entity."""
        # Automations are surfaced as activity triggers; everything else
        # gets the catch-all category.
        category = (
            DisplayCategory.ACTIVITY_TRIGGER
            if self.entity.domain == automation.DOMAIN
            else DisplayCategory.OTHER
        )
        return [category]

    def interfaces(self):
        """Yield the supported interfaces."""
        capabilities = [AlexaPowerController(self.entity)]
        capabilities.append(AlexaEndpointHealth(self.hass, self.entity))
        capabilities.append(Alexa(self.hass))
        return capabilities
@ENTITY_ADAPTERS.register(switch.DOMAIN)
class SwitchCapabilities(AlexaEntity):
    """Class to represent Switch capabilities."""

    def default_display_categories(self):
        """Return the display categories for this entity."""
        # Outlets get their own Alexa category; anything else is a switch.
        device_class = self.entity.attributes.get(ATTR_DEVICE_CLASS)
        category = (
            DisplayCategory.SMARTPLUG
            if device_class == switch.DEVICE_CLASS_OUTLET
            else DisplayCategory.SWITCH
        )
        return [category]

    def interfaces(self):
        """Yield the supported interfaces."""
        capabilities = [AlexaPowerController(self.entity)]
        capabilities.append(AlexaEndpointHealth(self.hass, self.entity))
        capabilities.append(Alexa(self.hass))
        return capabilities
@ENTITY_ADAPTERS.register(climate.DOMAIN)
class ClimateCapabilities(AlexaEntity):
    """Class to represent Climate capabilities."""

    def default_display_categories(self):
        """Return the display categories for this entity."""
        return [DisplayCategory.THERMOSTAT]

    def interfaces(self):
        """Yield the supported interfaces."""
        # Only devices that have an "off" HVAC mode can be powered off,
        # so only those get a power controller.
        hvac_modes = self.entity.attributes.get(climate.ATTR_HVAC_MODES, [])
        if climate.HVAC_MODE_OFF in hvac_modes:
            yield AlexaPowerController(self.entity)
        yield AlexaThermostatController(self.hass, self.entity)
        yield AlexaTemperatureSensor(self.hass, self.entity)
        yield AlexaEndpointHealth(self.hass, self.entity)
        yield Alexa(self.hass)
@ENTITY_ADAPTERS.register(cover.DOMAIN)
class CoverCapabilities(AlexaEntity):
    """Class to represent Cover capabilities."""

    def default_display_categories(self):
        """Return the display categories for this entity."""
        device_class = self.entity.attributes.get(ATTR_DEVICE_CLASS)
        garage_like = (cover.DEVICE_CLASS_GARAGE, cover.DEVICE_CLASS_GATE)
        interior = (
            cover.DEVICE_CLASS_BLIND,
            cover.DEVICE_CLASS_SHADE,
            cover.DEVICE_CLASS_CURTAIN,
        )
        exterior = (
            cover.DEVICE_CLASS_WINDOW,
            cover.DEVICE_CLASS_AWNING,
            cover.DEVICE_CLASS_SHUTTER,
        )
        if device_class in garage_like:
            return [DisplayCategory.GARAGE_DOOR]
        if device_class == cover.DEVICE_CLASS_DOOR:
            return [DisplayCategory.DOOR]
        if device_class in interior:
            return [DisplayCategory.INTERIOR_BLIND]
        if device_class in exterior:
            return [DisplayCategory.EXTERIOR_BLIND]
        return [DisplayCategory.OTHER]

    def interfaces(self):
        """Yield the supported interfaces."""
        device_class = self.entity.attributes.get(ATTR_DEVICE_CLASS)
        # Garage doors and gates are not exposed as on/off devices.
        if device_class not in (cover.DEVICE_CLASS_GARAGE, cover.DEVICE_CLASS_GATE):
            yield AlexaPowerController(self.entity)
        feature_bits = self.entity.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
        position_instance = f"{cover.DOMAIN}.{cover.ATTR_POSITION}"
        # Fine-grained positioning beats plain open/close when both exist.
        if feature_bits & cover.SUPPORT_SET_POSITION:
            yield AlexaRangeController(self.entity, instance=position_instance)
        elif feature_bits & (cover.SUPPORT_CLOSE | cover.SUPPORT_OPEN):
            yield AlexaModeController(self.entity, instance=position_instance)
        if feature_bits & cover.SUPPORT_SET_TILT_POSITION:
            yield AlexaRangeController(self.entity, instance=f"{cover.DOMAIN}.tilt")
        yield AlexaEndpointHealth(self.hass, self.entity)
        yield Alexa(self.hass)
@ENTITY_ADAPTERS.register(light.DOMAIN)
class LightCapabilities(AlexaEntity):
    """Class to represent Light capabilities."""

    def default_display_categories(self):
        """Return the display categories for this entity."""
        return [DisplayCategory.LIGHT]

    def interfaces(self):
        """Yield the supported interfaces."""
        yield AlexaPowerController(self.entity)
        feature_bits = self.entity.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
        # Map each optional feature bit to its controller, in a fixed order.
        optional_controllers = (
            (light.SUPPORT_BRIGHTNESS, AlexaBrightnessController),
            (light.SUPPORT_COLOR, AlexaColorController),
            (light.SUPPORT_COLOR_TEMP, AlexaColorTemperatureController),
        )
        for feature_bit, controller in optional_controllers:
            if feature_bits & feature_bit:
                yield controller(self.entity)
        yield AlexaEndpointHealth(self.hass, self.entity)
        yield Alexa(self.hass)
@ENTITY_ADAPTERS.register(fan.DOMAIN)
class FanCapabilities(AlexaEntity):
    """Class to represent Fan capabilities."""

    def default_display_categories(self):
        """Return the display categories for this entity."""
        return [DisplayCategory.FAN]

    def interfaces(self):
        """Yield the supported interfaces."""
        yield AlexaPowerController(self.entity)
        feature_bits = self.entity.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
        if feature_bits & fan.SUPPORT_SET_SPEED:
            # Speed is exposed via percentage, power level and range
            # controllers alike.
            yield AlexaPercentageController(self.entity)
            yield AlexaPowerLevelController(self.entity)
            yield AlexaRangeController(
                self.entity, instance=f"{fan.DOMAIN}.{fan.ATTR_SPEED}"
            )
        if feature_bits & fan.SUPPORT_OSCILLATE:
            yield AlexaToggleController(
                self.entity, instance=f"{fan.DOMAIN}.{fan.ATTR_OSCILLATING}"
            )
        if feature_bits & fan.SUPPORT_DIRECTION:
            yield AlexaModeController(
                self.entity, instance=f"{fan.DOMAIN}.{fan.ATTR_DIRECTION}"
            )
        yield AlexaEndpointHealth(self.hass, self.entity)
        yield Alexa(self.hass)
@ENTITY_ADAPTERS.register(lock.DOMAIN)
class LockCapabilities(AlexaEntity):
    """Class to represent Lock capabilities."""

    def default_display_categories(self):
        """Return the display categories for this entity."""
        return [DisplayCategory.SMARTLOCK]

    def interfaces(self):
        """Yield the supported interfaces."""
        capabilities = [AlexaLockController(self.entity)]
        capabilities.append(AlexaEndpointHealth(self.hass, self.entity))
        capabilities.append(Alexa(self.hass))
        return capabilities
@ENTITY_ADAPTERS.register(media_player.const.DOMAIN)
class MediaPlayerCapabilities(AlexaEntity):
    """Class to represent MediaPlayer capabilities."""

    def default_display_categories(self):
        """Return the display categories for this entity."""
        device_class = self.entity.attributes.get(ATTR_DEVICE_CLASS)
        if device_class == media_player.DEVICE_CLASS_SPEAKER:
            return [DisplayCategory.SPEAKER]
        return [DisplayCategory.TV]

    def interfaces(self):
        """Yield the supported interfaces."""
        yield AlexaPowerController(self.entity)
        feature_bits = self.entity.attributes.get(ATTR_SUPPORTED_FEATURES, 0)

        # Absolute volume control takes precedence over stepped volume.
        if feature_bits & media_player.const.SUPPORT_VOLUME_SET:
            yield AlexaSpeaker(self.entity)
        elif feature_bits & media_player.const.SUPPORT_VOLUME_STEP:
            yield AlexaStepSpeaker(self.entity)

        playback_bits = (
            media_player.const.SUPPORT_PLAY
            | media_player.const.SUPPORT_PAUSE
            | media_player.const.SUPPORT_STOP
            | media_player.const.SUPPORT_NEXT_TRACK
            | media_player.const.SUPPORT_PREVIOUS_TRACK
        )
        if feature_bits & playback_bits:
            yield AlexaPlaybackController(self.entity)
            yield AlexaPlaybackStateReporter(self.entity)

        if feature_bits & media_player.const.SUPPORT_SEEK:
            yield AlexaSeekController(self.entity)

        if feature_bits & media_player.SUPPORT_SELECT_SOURCE:
            source_list = self.entity.attributes.get(
                media_player.const.ATTR_INPUT_SOURCE_LIST, []
            )
            # Only expose the controller when at least one source survives
            # Alexa's input-name filtering.
            if AlexaInputController.get_valid_inputs(source_list):
                yield AlexaInputController(self.entity)

        if feature_bits & media_player.const.SUPPORT_PLAY_MEDIA:
            yield AlexaChannelController(self.entity)

        if feature_bits & media_player.const.SUPPORT_SELECT_SOUND_MODE:
            sound_modes = self.entity.attributes.get(
                media_player.const.ATTR_SOUND_MODE_LIST, []
            )
            if AlexaInputController.get_valid_inputs(sound_modes):
                yield AlexaEqualizerController(self.entity)

        yield AlexaEndpointHealth(self.hass, self.entity)
        yield Alexa(self.hass)
@ENTITY_ADAPTERS.register(scene.DOMAIN)
class SceneCapabilities(AlexaEntity):
    """Class to represent Scene capabilities."""

    def description(self):
        """Return the Alexa API description, tagged as a scene."""
        base_description = super().description()
        # Only append the marker when the word is not already present.
        if "scene" in base_description.casefold():
            return base_description
        return f"{base_description} (Scene)"

    def default_display_categories(self):
        """Return the display categories for this entity."""
        return [DisplayCategory.SCENE_TRIGGER]

    def interfaces(self):
        """Yield the supported interfaces."""
        # Scenes are activate-only (supports_deactivation=False).
        return [
            AlexaSceneController(self.entity, supports_deactivation=False),
            Alexa(self.hass),
        ]
@ENTITY_ADAPTERS.register(script.DOMAIN)
class ScriptCapabilities(AlexaEntity):
    """Class to represent Script capabilities."""

    def default_display_categories(self):
        """Return the display categories for this entity."""
        return [DisplayCategory.ACTIVITY_TRIGGER]

    def interfaces(self):
        """Yield the supported interfaces."""
        # Unlike scenes, scripts can also be deactivated.
        scene_controller = AlexaSceneController(
            self.entity, supports_deactivation=True
        )
        return [scene_controller, Alexa(self.hass)]
@ENTITY_ADAPTERS.register(sensor.DOMAIN)
class SensorCapabilities(AlexaEntity):
    """Class to represent Sensor capabilities."""

    def default_display_categories(self):
        """Return the display categories for this entity."""
        # Although there are other kinds of sensors, all but temperature
        # sensors are currently ignored.
        return [DisplayCategory.TEMPERATURE_SENSOR]

    def interfaces(self):
        """Yield the supported interfaces."""
        # Sensors without a temperature unit expose no interfaces at all
        # and are therefore never discovered.
        unit = self.entity.attributes.get(ATTR_UNIT_OF_MEASUREMENT)
        if unit in (TEMP_FAHRENHEIT, TEMP_CELSIUS):
            yield AlexaTemperatureSensor(self.hass, self.entity)
            yield AlexaEndpointHealth(self.hass, self.entity)
            yield Alexa(self.hass)
@ENTITY_ADAPTERS.register(binary_sensor.DOMAIN)
class BinarySensorCapabilities(AlexaEntity):
    """Class to represent BinarySensor capabilities."""

    # Internal sensor-type tags returned by get_type().
    TYPE_CONTACT = "contact"
    TYPE_MOTION = "motion"
    TYPE_PRESENCE = "presence"

    def default_display_categories(self):
        """Return a list of display categories.

        Returns None for device classes that are not mapped to a sensor
        type, matching the previous implicit-None behavior.
        """
        sensor_type = self.get_type()
        # Compare strings by equality, not identity: `is` only worked
        # here because the same class attributes were returned, which is
        # fragile and an anti-pattern.
        if sensor_type == self.TYPE_CONTACT:
            return [DisplayCategory.CONTACT_SENSOR]
        if sensor_type == self.TYPE_MOTION:
            return [DisplayCategory.MOTION_SENSOR]
        if sensor_type == self.TYPE_PRESENCE:
            return [DisplayCategory.CAMERA]
        return None

    def interfaces(self):
        """Yield the supported interfaces."""
        sensor_type = self.get_type()
        if sensor_type == self.TYPE_CONTACT:
            yield AlexaContactSensor(self.hass, self.entity)
        elif sensor_type == self.TYPE_MOTION:
            yield AlexaMotionSensor(self.hass, self.entity)
        elif sensor_type == self.TYPE_PRESENCE:
            yield AlexaEventDetectionSensor(self.hass, self.entity)
        # Yield additional interfaces based on the display category the
        # user configured for this entity.
        entity_conf = self.config.entity_config.get(self.entity.entity_id, {})
        if CONF_DISPLAY_CATEGORIES in entity_conf:
            display_category = entity_conf[CONF_DISPLAY_CATEGORIES]
            if display_category == DisplayCategory.DOORBELL:
                yield AlexaDoorbellEventSource(self.entity)
            elif display_category == DisplayCategory.CONTACT_SENSOR:
                yield AlexaContactSensor(self.hass, self.entity)
            elif display_category == DisplayCategory.MOTION_SENSOR:
                yield AlexaMotionSensor(self.hass, self.entity)
            elif display_category == DisplayCategory.CAMERA:
                yield AlexaEventDetectionSensor(self.hass, self.entity)
        yield AlexaEndpointHealth(self.hass, self.entity)
        yield Alexa(self.hass)

    def get_type(self):
        """Return the type of binary sensor, or None when unrecognized."""
        # Hoist the attribute lookup: the original fetched it up to
        # three times per call.
        device_class = self.entity.attributes.get(ATTR_DEVICE_CLASS)
        if device_class in (
            binary_sensor.DEVICE_CLASS_DOOR,
            binary_sensor.DEVICE_CLASS_GARAGE_DOOR,
            binary_sensor.DEVICE_CLASS_OPENING,
            binary_sensor.DEVICE_CLASS_WINDOW,
        ):
            return self.TYPE_CONTACT
        if device_class == binary_sensor.DEVICE_CLASS_MOTION:
            return self.TYPE_MOTION
        if device_class == binary_sensor.DEVICE_CLASS_PRESENCE:
            return self.TYPE_PRESENCE
        return None
@ENTITY_ADAPTERS.register(alarm_control_panel.DOMAIN)
class AlarmControlPanelCapabilities(AlexaEntity):
    """Class to represent Alarm capabilities."""

    def default_display_categories(self):
        """Return the display categories for this entity."""
        return [DisplayCategory.SECURITY_PANEL]

    def interfaces(self):
        """Yield the supported interfaces."""
        # Panels that require a code to arm expose no interfaces at all.
        code_arm_required = self.entity.attributes.get("code_arm_required")
        if not code_arm_required:
            yield AlexaSecurityPanelController(self.hass, self.entity)
            yield AlexaEndpointHealth(self.hass, self.entity)
            yield Alexa(self.hass)
@ENTITY_ADAPTERS.register(image_processing.DOMAIN)
class ImageProcessingCapabilities(AlexaEntity):
    """Class to represent image_processing capabilities."""

    def default_display_categories(self):
        """Return the display categories for this entity."""
        return [DisplayCategory.CAMERA]

    def interfaces(self):
        """Yield the supported interfaces."""
        yield from (
            AlexaEventDetectionSensor(self.hass, self.entity),
            AlexaEndpointHealth(self.hass, self.entity),
            Alexa(self.hass),
        )
@ENTITY_ADAPTERS.register(input_number.DOMAIN)
class InputNumberCapabilities(AlexaEntity):
    """Class to represent input_number capabilities."""

    def default_display_categories(self):
        """Return the display categories for this entity."""
        return [DisplayCategory.OTHER]

    def interfaces(self):
        """Yield the supported interfaces."""
        range_instance = f"{input_number.DOMAIN}.{input_number.ATTR_VALUE}"
        yield AlexaRangeController(self.entity, instance=range_instance)
        yield AlexaEndpointHealth(self.hass, self.entity)
        yield Alexa(self.hass)
@ENTITY_ADAPTERS.register(timer.DOMAIN)
class TimerCapabilities(AlexaEntity):
    """Class to represent Timer capabilities."""

    def default_display_categories(self):
        """Return the display categories for this entity."""
        return [DisplayCategory.OTHER]

    def interfaces(self):
        """Yield the supported interfaces."""
        yield AlexaTimeHoldController(self.entity, allow_remote_resume=True)
        yield AlexaPowerController(self.entity)
        # Bug fix: Alexa() must receive the hass instance, not the entity
        # state object — every other adapter in this module passes
        # self.hass here.
        yield Alexa(self.hass)
@ENTITY_ADAPTERS.register(vacuum.DOMAIN)
class VacuumCapabilities(AlexaEntity):
    """Class to represent vacuum capabilities."""

    def default_display_categories(self):
        """Return the display categories for this entity."""
        return [DisplayCategory.VACUUM_CLEANER]

    def interfaces(self):
        """Yield the supported interfaces."""
        feature_bits = self.entity.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
        # A power controller requires some way to start (turn on / start)
        # and some way to stop (turn off / return home).
        can_start = feature_bits & (vacuum.SUPPORT_TURN_ON | vacuum.SUPPORT_START)
        can_stop = feature_bits & (
            vacuum.SUPPORT_TURN_OFF | vacuum.SUPPORT_RETURN_HOME
        )
        if can_start and can_stop:
            yield AlexaPowerController(self.entity)
        if feature_bits & vacuum.SUPPORT_FAN_SPEED:
            yield AlexaRangeController(
                self.entity, instance=f"{vacuum.DOMAIN}.{vacuum.ATTR_FAN_SPEED}"
            )
        if feature_bits & vacuum.SUPPORT_PAUSE:
            yield AlexaTimeHoldController(
                self.entity,
                allow_remote_resume=bool(feature_bits & vacuum.SUPPORT_START),
            )
        yield AlexaEndpointHealth(self.hass, self.entity)
        yield Alexa(self.hass)
@ENTITY_ADAPTERS.register(camera.DOMAIN)
class CameraCapabilities(AlexaEntity):
    """Class to represent Camera capabilities."""

    def default_display_categories(self):
        """Return the display categories for this entity."""
        return [DisplayCategory.CAMERA]

    def interfaces(self):
        """Yield the supported interfaces."""
        # Guard clause: a camera that fails the requirements exposes
        # nothing and is therefore never discovered.
        if not self._check_requirements():
            return
        feature_bits = self.entity.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
        if feature_bits & camera.SUPPORT_STREAM:
            yield AlexaCameraStreamController(self.entity)
        yield AlexaEndpointHealth(self.hass, self.entity)
        yield Alexa(self.hass)

    def _check_requirements(self):
        """Check the hass URL for HTTPS scheme."""
        if "stream" not in self.hass.config.components:
            _LOGGER.debug(
                "%s requires stream component for AlexaCameraStreamController",
                self.entity_id,
            )
            return False
        try:
            # Camera streams require an external HTTPS URL on a
            # standard port.
            network.get_url(
                self.hass,
                allow_internal=False,
                allow_ip=False,
                require_ssl=True,
                require_standard_port=True,
            )
        except network.NoURLAvailableError:
            _LOGGER.debug(
                "%s requires HTTPS for AlexaCameraStreamController", self.entity_id
            )
            return False
        return True
| |
from unittest import TestCase
import json, requests
from jsonschema import validate
import socket
import unittest
# Local IP of the machine running the tests. NOTE(review): currently
# unused — all requests below target the loopback URLBASE instead;
# confirm whether remote testing via this address is still intended.
ipServer = socket.gethostbyname(socket.gethostname())
# Port the SOBA API server is expected to listen on.
port = "10000"
# Base URL of the API under test (loopback only).
URLBASE = "http://127.0.0.1:" + port
# Common path prefix of the occupants resource.
URISOBA = "/api/soba/v1/occupants"
# Minimal jsonschema fragments reused across the endpoint checks.
stringTemplate = {"type": "string"}
numberTemplate = {"type": "number"}
# Number of repetitions for each endpoint request.
N = 1
class APITest(TestCase):
    """Integration tests for the SOBA occupants REST API.

    Each private helper exercises one endpoint of a live server at
    URLBASE and validates the JSON response against a jsonschema
    template. The server must be running before these tests execute.
    """

    # jsonschema fragment for an {x, y} coordinate pair.
    POINT_TEMPLATE = {
        "type": "object",
        "properties": {
            "x": {"type": "number"},
            "y": {"type": "number"},
        },
        "required": ["x", "y"],
    }

    # jsonschema fragment for a movement (orientation + speed).
    MOVEMENT_TEMPLATE = {
        "type": "object",
        "properties": {
            "orientation": {"type": "string"},
            "speed": {"type": "number"},
        },
        "required": ["orientation", "speed"],
    }

    # jsonschema fragment for the avatar object returned by the write
    # endpoints (PUT /occupants/{id}, POST /occupants/{id}/position).
    AVATAR_TEMPLATE = {
        "type": "object",
        "properties": {
            "avatar": {
                "type": "object",
                "properties": {
                    "position": POINT_TEMPLATE,
                    "id": {"type": "number"},
                },
                "required": ["position", "id"],
            }
        },
        "required": ["avatar"],
    }

    # Headers used by the write endpoints.
    JSON_HEADERS = {'Content-Type': "application/json", 'Accept': "application/json"}

    def _get_json(self, path=""):
        """GET the occupants resource at *path* and return decoded JSON."""
        response = requests.get(URLBASE + URISOBA + path)
        payload = response.json()
        print("Response: ", payload)
        return payload

    def _check_occupant_list(self):
        """GET /occupants returns an array of numeric occupant ids."""
        template = {
            "type": "object",
            "properties": {"occupants": {"type": "array"}},
            "required": ["occupants"],
        }
        for _ in range(N):
            payload = self._get_json()
            validate(payload, template)
            for occupant_id in payload["occupants"]:
                validate(occupant_id, numberTemplate)

    def _check_id_mapping(self, path, value_template, validate_top=False):
        """GET *path* returns a mapping of numeric-string ids to values
        matching *value_template*."""
        for _ in range(N):
            payload = self._get_json(path)
            if validate_top:
                validate(payload, {"type": "object"})
            for key, value in payload.items():
                validate(key, stringTemplate)
                # Keys are serialized as strings but must parse as numbers.
                validate(int(key), numberTemplate)
                validate(value, value_template)

    def _check_occupant_detail(self):
        """GET /occupants/{id} returns the full occupant description."""
        template = {
            "type": "object",
            "properties": {
                "occupant": {
                    "type": "object",
                    "properties": {
                        "state": {"type": "string"},
                        "fov": {"type": "array"},
                        "unique_id": {"type": "string"},
                        "movement": self.MOVEMENT_TEMPLATE,
                        "position": self.POINT_TEMPLATE,
                    },
                    "required": ["state", "fov", "unique_id", "movement", "position"],
                }
            },
            "required": ["occupant"],
        }
        for _ in range(N):
            payload = self._get_json("/" + str(0))
            validate(payload, template)
            # unique_id is a string holding a number.
            validate(int(payload['occupant']['unique_id']), numberTemplate)
            for point in payload['occupant']['fov']:
                validate(point, self.POINT_TEMPLATE)

    def _check_wrapped(self, path, key, value_template):
        """GET *path* returns {*key*: value} with value matching template."""
        template = {
            "type": "object",
            "properties": {key: value_template},
            "required": [key],
        }
        for _ in range(N):
            payload = self._get_json(path)
            validate(payload, template)

    def _check_fov(self):
        """GET /occupants/{id}/fov returns a list of {x, y} points."""
        template = {
            "type": "object",
            "properties": {"fov": {"type": "array"}},
            "required": ["fov"],
        }
        for _ in range(N):
            payload = self._get_json("/" + str(0) + "/fov")
            validate(payload, template)
            for point in payload['fov']:
                validate(point, self.POINT_TEMPLATE)

    def _check_write(self, method, path, body):
        """Send *body* via *method* to *path*; expect an avatar response."""
        for _ in range(N):
            response = method(
                URLBASE + URISOBA + path, json=body, headers=self.JSON_HEADERS
            )
            payload = response.json()
            print("Response: ", payload)
            validate(payload, self.AVATAR_TEMPLATE)

    def test_general_gets(self, *args, **kwargs):
        """Run every read endpoint check, then the write endpoints."""
        print('Testing {}'.format('GET /api/soba/v1/occupants'))
        self._check_occupant_list()
        print('Testing {}'.format('GET /api/soba/v1/occupants/movements'))
        self._check_id_mapping("/movements", self.MOVEMENT_TEMPLATE, validate_top=True)
        print('Testing {}'.format('GET /api/soba/v1/occupants/positions'))
        self._check_id_mapping("/positions", self.POINT_TEMPLATE)
        print('Testing {}'.format('GET /api/soba/v1/occupants/states'))
        self._check_id_mapping("/states", stringTemplate)
        print('Testing {}'.format('GET /api/soba/v1/occupants/{id}'))
        self._check_occupant_detail()
        print('Testing {}'.format('GET /api/soba/v1/occupants/{id}/movement'))
        self._check_wrapped("/" + str(0) + "/movement", "movement", self.MOVEMENT_TEMPLATE)
        print('Testing {}'.format('GET /api/soba/v1/occupants/{id}/position'))
        self._check_wrapped("/" + str(0) + "/position", "position", self.POINT_TEMPLATE)
        print('Testing {}'.format('GET /api/soba/v1/occupants/{id}/state'))
        self._check_wrapped("/" + str(0) + "/state", "state", stringTemplate)
        print('Testing {}'.format('GET /api/soba/v1/occupants/{id}/fov'))
        self._check_fov()
        print('Testing {}'.format('PUT /api/soba/v1/occupants/{id}'))
        self._check_write(requests.put, "/" + str(0), {"x": 10, "y": 10})
        print('Testing {}'.format('POST /api/soba/v1/occupants/{id}/position'))
        # NOTE(review): the original deliberately POSTs to id 100000;
        # preserved — confirm whether a non-existent id is intended here.
        self._check_write(requests.post, "/" + str(100000) + "/position", {"x": 11, "y": 11})
| |
from __future__ import print_function
import json
import os
import sys
import unittest
from django.core.exceptions import ImproperlyConfigured
from environ import Env, Path, REDIS_DRIVER
class BaseTests(unittest.TestCase):
URL = 'http://www.google.com/'
POSTGRES = 'postgres://uf07k1:wegauwhg@ec2-107-21-253-135.compute-1.amazonaws.com:5431/d8r82722'
MYSQL = 'mysql://bea6eb0:69772142@us-cdbr-east.cleardb.com/heroku_97681?reconnect=true'
MYSQLGIS = 'mysqlgis://user:password@127.0.0.1/some_database'
SQLITE = 'sqlite:////full/path/to/your/database/file.sqlite'
ORACLE_TNS = 'oracle://user:password@sid/'
ORACLE = 'oracle://user:password@host:1521/sid'
MEMCACHE = 'memcache://127.0.0.1:11211'
REDIS = 'rediscache://127.0.0.1:6379:1?client_class=django_redis.client.DefaultClient&password=secret'
EMAIL = 'smtps://user@domain.com:password@smtp.example.com:587'
JSON = dict(one='bar', two=2, three=33.44)
DICT = dict(foo='bar', test='on')
PATH = '/home/dev'
@classmethod
def generateData(cls):
return dict(STR_VAR='bar',
INT_VAR='42',
FLOAT_VAR='33.3',
FLOAT_COMMA_VAR='33,3',
FLOAT_STRANGE_VAR1='123,420,333.3',
FLOAT_STRANGE_VAR2='123.420.333,3',
BOOL_TRUE_VAR='1',
BOOL_TRUE_VAR2='True',
BOOL_FALSE_VAR='0',
BOOL_FALSE_VAR2='False',
PROXIED_VAR='$STR_VAR',
INT_LIST='42,33',
INT_TUPLE='(42,33)',
STR_LIST_WITH_SPACES=' foo, bar',
EMPTY_LIST='',
DICT_VAR='foo=bar,test=on',
DATABASE_URL=cls.POSTGRES,
DATABASE_MYSQL_URL=cls.MYSQL,
DATABASE_MYSQL_GIS_URL=cls.MYSQLGIS,
DATABASE_SQLITE_URL=cls.SQLITE,
DATABASE_ORACLE_URL=cls.ORACLE,
DATABASE_ORACLE_TNS_URL=cls.ORACLE_TNS,
CACHE_URL=cls.MEMCACHE,
CACHE_REDIS=cls.REDIS,
EMAIL_URL=cls.EMAIL,
URL_VAR=cls.URL,
JSON_VAR=json.dumps(cls.JSON),
PATH_VAR=cls.PATH)
def setUp(self):
self._old_environ = os.environ
os.environ = Env.ENVIRON = self.generateData()
self.env = Env()
def tearDown(self):
os.environ = self._old_environ
def assertTypeAndValue(self, type_, expected, actual):
self.assertEqual(type_, type(actual))
self.assertEqual(expected, actual)
class EnvTests(BaseTests):
def test_not_present_with_default(self):
self.assertEqual(3, self.env('not_present', default=3))
def test_not_present_without_default(self):
self.assertRaises(ImproperlyConfigured, self.env, 'not_present')
def test_str(self):
self.assertTypeAndValue(str, 'bar', self.env('STR_VAR'))
self.assertTypeAndValue(str, 'bar', self.env.str('STR_VAR'))
def test_int(self):
self.assertTypeAndValue(int, 42, self.env('INT_VAR', cast=int))
self.assertTypeAndValue(int, 42, self.env.int('INT_VAR'))
def test_int_with_none_default(self):
self.assertTrue(self.env('NOT_PRESENT_VAR', cast=int, default=None) is None)
def test_float(self):
self.assertTypeAndValue(float, 33.3, self.env('FLOAT_VAR', cast=float))
self.assertTypeAndValue(float, 33.3, self.env.float('FLOAT_VAR'))
self.assertTypeAndValue(float, 33.3, self.env('FLOAT_COMMA_VAR', cast=float))
self.assertTypeAndValue(float, 123420333.3, self.env('FLOAT_STRANGE_VAR1', cast=float))
self.assertTypeAndValue(float, 123420333.3, self.env('FLOAT_STRANGE_VAR2', cast=float))
def test_bool_true(self):
self.assertTypeAndValue(bool, True, self.env('BOOL_TRUE_VAR', cast=bool))
self.assertTypeAndValue(bool, True, self.env('BOOL_TRUE_VAR2', cast=bool))
self.assertTypeAndValue(bool, True, self.env.bool('BOOL_TRUE_VAR'))
def test_bool_false(self):
self.assertTypeAndValue(bool, False, self.env('BOOL_FALSE_VAR', cast=bool))
self.assertTypeAndValue(bool, False, self.env('BOOL_FALSE_VAR2', cast=bool))
self.assertTypeAndValue(bool, False, self.env.bool('BOOL_FALSE_VAR'))
def test_proxied_value(self):
self.assertTypeAndValue(str, 'bar', self.env('PROXIED_VAR'))
def test_int_list(self):
self.assertTypeAndValue(list, [42, 33], self.env('INT_LIST', cast=[int]))
self.assertTypeAndValue(list, [42, 33], self.env.list('INT_LIST', int))
def test_int_tuple(self):
self.assertTypeAndValue(tuple, (42, 33), self.env('INT_LIST', cast=(int,)))
self.assertTypeAndValue(tuple, (42, 33), self.env.tuple('INT_LIST', int))
self.assertTypeAndValue(tuple, ('42', '33'), self.env.tuple('INT_LIST'))
def test_str_list_with_spaces(self):
self.assertTypeAndValue(list, [' foo', ' bar'],
self.env('STR_LIST_WITH_SPACES', cast=[str]))
self.assertTypeAndValue(list, [' foo', ' bar'],
self.env.list('STR_LIST_WITH_SPACES'))
def test_empty_list(self):
self.assertTypeAndValue(list, [], self.env('EMPTY_LIST', cast=[int]))
def test_dict_value(self):
self.assertTypeAndValue(dict, self.DICT, self.env.dict('DICT_VAR'))
def test_dict_parsing(self):
self.assertEqual({'a': '1'}, self.env.parse_value('a=1', dict))
self.assertEqual({'a': 1}, self.env.parse_value('a=1', dict(value=int)))
self.assertEqual({'a': ['1', '2', '3']}, self.env.parse_value('a=1,2,3', dict(value=[str])))
self.assertEqual({'a': [1, 2, 3]}, self.env.parse_value('a=1,2,3', dict(value=[int])))
self.assertEqual({'a': 1, 'b': [1.1, 2.2], 'c': 3},
self.env.parse_value('a=1;b=1.1,2.2;c=3', dict(value=int, cast=dict(b=[float]))))
self.assertEqual({'a': "uname", 'c': "http://www.google.com", 'b': True},
self.env.parse_value('a=uname;c=http://www.google.com;b=True', dict(value=str, cast=dict(b=bool))))
def test_url_value(self):
url = self.env.url('URL_VAR')
self.assertEqual(url.__class__, self.env.URL_CLASS)
self.assertEqual(url.geturl(), self.URL)
self.assertEqual(None, self.env.url('OTHER_URL', default=None))
def test_url_encoded_parts(self):
from six.moves import urllib
password_with_unquoted_characters = "#password"
encoded_url = "mysql://user:%s@127.0.0.1:3306/dbname" % urllib.parse.quote(password_with_unquoted_characters)
parsed_url = self.env.db_url_config(encoded_url)
self.assertEqual(parsed_url['PASSWORD'], password_with_unquoted_characters)
def test_db_url_value(self):
pg_config = self.env.db()
self.assertEqual(pg_config['ENGINE'], 'django.db.backends.postgresql_psycopg2')
self.assertEqual(pg_config['NAME'], 'd8r82722')
self.assertEqual(pg_config['HOST'], 'ec2-107-21-253-135.compute-1.amazonaws.com')
self.assertEqual(pg_config['USER'], 'uf07k1')
self.assertEqual(pg_config['PASSWORD'], 'wegauwhg')
self.assertEqual(pg_config['PORT'], 5431)
mysql_config = self.env.db('DATABASE_MYSQL_URL')
self.assertEqual(mysql_config['ENGINE'], 'django.db.backends.mysql')
self.assertEqual(mysql_config['NAME'], 'heroku_97681')
self.assertEqual(mysql_config['HOST'], 'us-cdbr-east.cleardb.com')
self.assertEqual(mysql_config['USER'], 'bea6eb0')
self.assertEqual(mysql_config['PASSWORD'], '69772142')
self.assertEqual(mysql_config['PORT'], '')
mysql_gis_config = self.env.db('DATABASE_MYSQL_GIS_URL')
self.assertEqual(mysql_gis_config['ENGINE'], 'django.contrib.gis.db.backends.mysql')
self.assertEqual(mysql_gis_config['NAME'], 'some_database')
self.assertEqual(mysql_gis_config['HOST'], '127.0.0.1')
self.assertEqual(mysql_gis_config['USER'], 'user')
self.assertEqual(mysql_gis_config['PASSWORD'], 'password')
self.assertEqual(mysql_gis_config['PORT'], '')
oracle_config = self.env.db('DATABASE_ORACLE_TNS_URL')
self.assertEqual(oracle_config['ENGINE'], 'django.db.backends.oracle')
self.assertEqual(oracle_config['NAME'], 'sid')
self.assertEqual(oracle_config['HOST'], '')
self.assertEqual(oracle_config['USER'], 'user')
self.assertEqual(oracle_config['PASSWORD'], 'password')
self.assertFalse('PORT' in oracle_config)
oracle_config = self.env.db('DATABASE_ORACLE_URL')
self.assertEqual(oracle_config['ENGINE'], 'django.db.backends.oracle')
self.assertEqual(oracle_config['NAME'], 'sid')
self.assertEqual(oracle_config['HOST'], 'host')
self.assertEqual(oracle_config['USER'], 'user')
self.assertEqual(oracle_config['PASSWORD'], 'password')
self.assertEqual(oracle_config['PORT'], '1521')
sqlite_config = self.env.db('DATABASE_SQLITE_URL')
self.assertEqual(sqlite_config['ENGINE'], 'django.db.backends.sqlite3')
self.assertEqual(sqlite_config['NAME'], '/full/path/to/your/database/file.sqlite')
def test_cache_url_value(self):
    """Default and named cache URLs expand into complete Django CACHES configs."""
    memcached = self.env.cache_url()
    self.assertEqual(memcached['BACKEND'], 'django.core.cache.backends.memcached.MemcachedCache')
    self.assertEqual(memcached['LOCATION'], '127.0.0.1:11211')
    redis = self.env.cache_url('CACHE_REDIS')
    self.assertEqual(redis['BACKEND'], 'django_redis.cache.RedisCache')
    self.assertEqual(redis['LOCATION'], 'redis://127.0.0.1:6379:1')
    expected_options = {
        'CLIENT_CLASS': 'django_redis.client.DefaultClient',
        'PASSWORD': 'secret',
    }
    self.assertEqual(redis['OPTIONS'], expected_options)
def test_email_url_value(self):
    """EMAIL_URL expands into the full set of Django EMAIL_* settings."""
    config = self.env.email_url()
    expected = {
        'EMAIL_BACKEND': 'django.core.mail.backends.smtp.EmailBackend',
        'EMAIL_HOST': 'smtp.example.com',
        'EMAIL_HOST_PASSWORD': 'password',
        'EMAIL_HOST_USER': 'user@domain.com',
        'EMAIL_PORT': 587,
        'EMAIL_USE_TLS': True,
    }
    for key, value in expected.items():
        self.assertEqual(config[key], value)
def test_json_value(self):
    """JSON_VAR round-trips through Env.json back to the original structure."""
    parsed = self.env.json('JSON_VAR')
    self.assertEqual(self.JSON, parsed)
def test_path(self):
    """PATH_VAR is cast to an environ Path equal to the fixture path."""
    path_value = self.env.path('PATH_VAR')
    self.assertTypeAndValue(Path, Path(self.PATH), path_value)
class FileEnvTests(EnvTests):
    """Re-run all EnvTests with values read from test_env.txt instead of generated data."""

    def setUp(self):
        super(FileEnvTests, self).setUp()
        # Start from an empty environment so only the file contents apply.
        Env.ENVIRON = {}
        self.env = Env()
        here = Path(__file__, is_file=True)
        self.env.read_env(here('test_env.txt'), PATH_VAR=here.__root__)
class SubClassTests(EnvTests):
    """Re-run all EnvTests against an Env subclass carrying a class-level ENVIRON."""

    def setUp(self):
        super(SubClassTests, self).setUp()
        self.CONFIG = self.generateData()

        class LocalEnv(Env):
            # Class bodies can read names from the enclosing function scope.
            ENVIRON = self.CONFIG

        self.env = LocalEnv()

    def test_singleton_environ(self):
        # The subclass must expose the exact same dict object, not a copy.
        self.assertTrue(self.CONFIG is self.env.ENVIRON)
class SchemaEnvTests(BaseTests):
    """Tests for type schemes passed to the Env constructor."""

    def test_schema(self):
        schema = dict(
            INT_VAR=int,
            NOT_PRESENT_VAR=(float, 33.3),
            STR_VAR=str,
            INT_LIST=[int],
            DEFAULT_LIST=([int], [2]),
        )
        env = Env(**schema)
        self.assertTypeAndValue(int, 42, env('INT_VAR'))
        self.assertTypeAndValue(float, 33.3, env('NOT_PRESENT_VAR'))
        self.assertTypeAndValue(str, 'bar', env('STR_VAR'))
        self.assertTypeAndValue(str, 'foo', env('NOT_PRESENT2', default='foo'))
        self.assertTypeAndValue(list, [42, 33], env('INT_LIST'))
        self.assertTypeAndValue(list, [2], env('DEFAULT_LIST'))
        # A cast passed at call time overrides the declared schema.
        self.assertTypeAndValue(str, '42', env('INT_VAR', cast=str))
class DatabaseTestSuite(unittest.TestCase):
    """Tests for Env.db_url_config: parsing DATABASE_URL-style strings into Django DATABASES dicts."""

    def test_postgres_parsing(self):
        """A postgres:// URL maps to the psycopg2 backend with credentials split out."""
        url = 'postgres://uf07k1i6d8ia0v:wegauwhgeuioweg@ec2-107-21-253-135.compute-1.amazonaws.com:5431/d8r82722r2kuvn'
        url = Env.db_url_config(url)
        self.assertEqual(url['ENGINE'], 'django.db.backends.postgresql_psycopg2')
        self.assertEqual(url['NAME'], 'd8r82722r2kuvn')
        self.assertEqual(url['HOST'], 'ec2-107-21-253-135.compute-1.amazonaws.com')
        self.assertEqual(url['USER'], 'uf07k1i6d8ia0v')
        self.assertEqual(url['PASSWORD'], 'wegauwhgeuioweg')
        self.assertEqual(url['PORT'], 5431)

    def test_postgis_parsing(self):
        """A postgis:// URL selects the GeoDjango postgis backend."""
        url = 'postgis://uf07k1i6d8ia0v:wegauwhgeuioweg@ec2-107-21-253-135.compute-1.amazonaws.com:5431/d8r82722r2kuvn'
        url = Env.db_url_config(url)
        self.assertEqual(url['ENGINE'], 'django.contrib.gis.db.backends.postgis')
        self.assertEqual(url['NAME'], 'd8r82722r2kuvn')
        self.assertEqual(url['HOST'], 'ec2-107-21-253-135.compute-1.amazonaws.com')
        self.assertEqual(url['USER'], 'uf07k1i6d8ia0v')
        self.assertEqual(url['PASSWORD'], 'wegauwhgeuioweg')
        self.assertEqual(url['PORT'], 5431)

    def test_mysql_gis_parsing(self):
        """A mysqlgis:// URL selects the GeoDjango mysql backend."""
        url = 'mysqlgis://uf07k1i6d8ia0v:wegauwhgeuioweg@ec2-107-21-253-135.compute-1.amazonaws.com:5431/d8r82722r2kuvn'
        url = Env.db_url_config(url)
        self.assertEqual(url['ENGINE'], 'django.contrib.gis.db.backends.mysql')
        self.assertEqual(url['NAME'], 'd8r82722r2kuvn')
        self.assertEqual(url['HOST'], 'ec2-107-21-253-135.compute-1.amazonaws.com')
        self.assertEqual(url['USER'], 'uf07k1i6d8ia0v')
        self.assertEqual(url['PASSWORD'], 'wegauwhgeuioweg')
        self.assertEqual(url['PORT'], 5431)

    def test_cleardb_parsing(self):
        """A ClearDB-style mysql URL (with query string) parses; the port stays empty."""
        url = 'mysql://bea6eb025ca0d8:69772142@us-cdbr-east.cleardb.com/heroku_97681db3eff7580?reconnect=true'
        url = Env.db_url_config(url)
        self.assertEqual(url['ENGINE'], 'django.db.backends.mysql')
        self.assertEqual(url['NAME'], 'heroku_97681db3eff7580')
        self.assertEqual(url['HOST'], 'us-cdbr-east.cleardb.com')
        self.assertEqual(url['USER'], 'bea6eb025ca0d8')
        self.assertEqual(url['PASSWORD'], '69772142')
        self.assertEqual(url['PORT'], '')

    def test_mysql_no_password(self):
        """A URL without a password yields an empty PASSWORD, not None."""
        url = 'mysql://travis@localhost/test_db'
        url = Env.db_url_config(url)
        self.assertEqual(url['ENGINE'], 'django.db.backends.mysql')
        self.assertEqual(url['NAME'], 'test_db')
        self.assertEqual(url['HOST'], 'localhost')
        self.assertEqual(url['USER'], 'travis')
        self.assertEqual(url['PASSWORD'], '')
        self.assertEqual(url['PORT'], '')

    def test_empty_sqlite_url(self):
        """A bare sqlite:// URL means an in-memory database."""
        url = 'sqlite://'
        url = Env.db_url_config(url)
        self.assertEqual(url['ENGINE'], 'django.db.backends.sqlite3')
        self.assertEqual(url['NAME'], ':memory:')

    def test_memory_sqlite_url(self):
        """sqlite://:memory: is also recognized as an in-memory database."""
        url = 'sqlite://:memory:'
        url = Env.db_url_config(url)
        self.assertEqual(url['ENGINE'], 'django.db.backends.sqlite3')
        self.assertEqual(url['NAME'], ':memory:')

    def test_database_options_parsing(self):
        """Query-string options become CONN_MAX_AGE or entries in OPTIONS."""
        url = 'postgres://user:pass@host:1234/dbname?conn_max_age=600'
        url = Env.db_url_config(url)
        self.assertEqual(url['CONN_MAX_AGE'], 600)
        url = 'mysql://user:pass@host:1234/dbname?init_command=SET storage_engine=INNODB'
        url = Env.db_url_config(url)
        self.assertEqual(url['OPTIONS'], {
            'init_command': 'SET storage_engine=INNODB',
        })

    def test_database_ldap_url(self):
        """An ldap:// URL maps to the ldapdb backend; NAME keeps the full URL."""
        url = 'ldap://cn=admin,dc=nodomain,dc=org:some_secret_password@ldap.nodomain.org/'
        url = Env.db_url_config(url)
        self.assertEqual(url['ENGINE'], 'ldapdb.backends.ldap')
        self.assertEqual(url['HOST'], 'ldap.nodomain.org')
        self.assertEqual(url['PORT'], '')
        self.assertEqual(url['NAME'], 'ldap://ldap.nodomain.org')
        self.assertEqual(url['USER'], 'cn=admin,dc=nodomain,dc=org')
        self.assertEqual(url['PASSWORD'], 'some_secret_password')
class CacheTestSuite(unittest.TestCase):
    """Tests for Env.cache_url_config: parsing CACHE_URL-style strings into Django CACHES configs."""

    def test_memcache_parsing(self):
        url = 'memcache://127.0.0.1:11211'
        url = Env.cache_url_config(url)
        self.assertEqual(url['BACKEND'], 'django.core.cache.backends.memcached.MemcachedCache')
        self.assertEqual(url['LOCATION'], '127.0.0.1:11211')

    def test_memcache_pylib_parsing(self):
        """pymemcache:// selects the PyLibMC memcached backend."""
        url = 'pymemcache://127.0.0.1:11211'
        url = Env.cache_url_config(url)
        self.assertEqual(url['BACKEND'], 'django.core.cache.backends.memcached.PyLibMCCache')
        self.assertEqual(url['LOCATION'], '127.0.0.1:11211')

    def test_memcache_multiple_parsing(self):
        """Comma-separated hosts become a list of LOCATIONs."""
        url = 'memcache://172.19.26.240:11211,172.19.26.242:11212'
        url = Env.cache_url_config(url)
        self.assertEqual(url['BACKEND'], 'django.core.cache.backends.memcached.MemcachedCache')
        self.assertEqual(url['LOCATION'], ['172.19.26.240:11211', '172.19.26.242:11212'])

    def test_memcache_socket_parsing(self):
        """A path-style host is treated as a unix socket LOCATION."""
        url = 'memcache:///tmp/memcached.sock'
        url = Env.cache_url_config(url)
        self.assertEqual(url['BACKEND'], 'django.core.cache.backends.memcached.MemcachedCache')
        self.assertEqual(url['LOCATION'], 'unix:/tmp/memcached.sock')

    def test_dbcache_parsing(self):
        url = 'dbcache://my_cache_table'
        url = Env.cache_url_config(url)
        self.assertEqual(url['BACKEND'], 'django.core.cache.backends.db.DatabaseCache')
        self.assertEqual(url['LOCATION'], 'my_cache_table')

    def test_filecache_parsing(self):
        url = 'filecache:///var/tmp/django_cache'
        url = Env.cache_url_config(url)
        self.assertEqual(url['BACKEND'], 'django.core.cache.backends.filebased.FileBasedCache')
        self.assertEqual(url['LOCATION'], '/var/tmp/django_cache')

    def test_filecache_windows_parsing(self):
        """Windows drive-letter paths survive parsing intact."""
        url = 'filecache://C:/foo/bar'
        url = Env.cache_url_config(url)
        self.assertEqual(url['BACKEND'], 'django.core.cache.backends.filebased.FileBasedCache')
        self.assertEqual(url['LOCATION'], 'C:/foo/bar')

    def test_locmem_parsing(self):
        url = 'locmemcache://'
        url = Env.cache_url_config(url)
        self.assertEqual(url['BACKEND'], 'django.core.cache.backends.locmem.LocMemCache')
        self.assertEqual(url['LOCATION'], '')

    def test_locmem_named_parsing(self):
        url = 'locmemcache://unique-snowflake'
        url = Env.cache_url_config(url)
        self.assertEqual(url['BACKEND'], 'django.core.cache.backends.locmem.LocMemCache')
        self.assertEqual(url['LOCATION'], 'unique-snowflake')

    def test_dummycache_parsing(self):
        url = 'dummycache://'
        url = Env.cache_url_config(url)
        self.assertEqual(url['BACKEND'], 'django.core.cache.backends.dummy.DummyCache')
        self.assertEqual(url['LOCATION'], '')

    def test_redis_parsing(self):
        """Query parameters are upper-cased into OPTIONS for the redis backend."""
        url = 'rediscache://127.0.0.1:6379:1?client_class=django_redis.client.DefaultClient&password=secret'
        url = Env.cache_url_config(url)
        self.assertEqual(url['BACKEND'], REDIS_DRIVER)
        self.assertEqual(url['LOCATION'], 'redis://127.0.0.1:6379:1')
        self.assertEqual(url['OPTIONS'], {
            'CLIENT_CLASS': 'django_redis.client.DefaultClient',
            'PASSWORD': 'secret',
        })

    def test_redis_socket_parsing(self):
        url = 'rediscache:///path/to/socket:1'
        url = Env.cache_url_config(url)
        self.assertEqual(url['BACKEND'], 'django_redis.cache.RedisCache')
        self.assertEqual(url['LOCATION'], 'unix:///path/to/socket:1')

    def test_redis_with_password_parsing(self):
        url = 'rediscache://:redispass@127.0.0.1:6379/0'
        url = Env.cache_url_config(url)
        self.assertEqual(REDIS_DRIVER, url['BACKEND'])
        self.assertEqual(url['LOCATION'], 'redis://:redispass@127.0.0.1:6379/0')

    def test_redis_socket_url(self):
        """redis:// with a socket path keeps credentials and moves db= into OPTIONS."""
        url = 'redis://:redispass@/path/to/socket.sock?db=0'
        url = Env.cache_url_config(url)
        self.assertEqual(REDIS_DRIVER, url['BACKEND'])
        self.assertEqual(url['LOCATION'], 'unix://:redispass@/path/to/socket.sock')
        self.assertEqual(url['OPTIONS'], {
            'DB': 0
        })

    def test_options_parsing(self):
        """timeout= becomes TIMEOUT; other options land upper-cased in OPTIONS."""
        url = 'filecache:///var/tmp/django_cache?timeout=60&max_entries=1000&cull_frequency=0'
        url = Env.cache_url_config(url)
        self.assertEqual(url['BACKEND'], 'django.core.cache.backends.filebased.FileBasedCache')
        self.assertEqual(url['LOCATION'], '/var/tmp/django_cache')
        self.assertEqual(url['TIMEOUT'], 60)
        self.assertEqual(url['OPTIONS'], {
            'MAX_ENTRIES': 1000,
            'CULL_FREQUENCY': 0,
        })

    def test_custom_backend(self):
        """An explicitly supplied backend overrides the scheme-derived one."""
        url = 'memcache://127.0.0.1:5400?foo=option&bars=9001'
        backend = 'django_redis.cache.RedisCache'
        url = Env.cache_url_config(url, backend)
        self.assertEqual(url['BACKEND'], backend)
        self.assertEqual(url['LOCATION'], '127.0.0.1:5400')
        self.assertEqual(url['OPTIONS'], {
            'FOO': 'option',
            'BARS': 9001,
        })
class SearchTestSuite(unittest.TestCase):
    """Tests for Env.search_url_config: parsing SEARCH_URL strings into Haystack connection dicts."""

    # One sample URL per supported Haystack engine.
    solr_url = 'solr://127.0.0.1:8983/solr'
    elasticsearch_url = 'elasticsearch://127.0.0.1:9200/index'
    whoosh_url = 'whoosh:///home/search/whoosh_index'
    xapian_url = 'xapian:///home/search/xapian_index'
    simple_url = 'simple:///'

    def test_solr_parsing(self):
        url = Env.search_url_config(self.solr_url)
        self.assertEqual(url['ENGINE'], 'haystack.backends.solr_backend.SolrEngine')
        self.assertEqual(url['URL'], 'http://127.0.0.1:8983/solr')

    def test_solr_multicore_parsing(self):
        """A core path segment is appended to URL; TIMEOUT comes from the query string."""
        timeout = 360
        index = 'solr_index'
        url = '%s/%s?TIMEOUT=%s' % (self.solr_url, index, timeout)
        url = Env.search_url_config(url)
        self.assertEqual(url['ENGINE'], 'haystack.backends.solr_backend.SolrEngine')
        self.assertEqual(url['URL'], 'http://127.0.0.1:8983/solr/solr_index')
        self.assertEqual(url['TIMEOUT'], timeout)
        self.assertTrue('INDEX_NAME' not in url)
        self.assertTrue('PATH' not in url)

    def test_elasticsearch_parsing(self):
        """Elasticsearch gets INDEX_NAME from the URL path instead of PATH."""
        timeout = 360
        url = '%s?TIMEOUT=%s' % (self.elasticsearch_url, timeout)
        url = Env.search_url_config(url)
        self.assertEqual(url['ENGINE'], 'haystack.backends.elasticsearch_backend.ElasticsearchSearchEngine')
        self.assertTrue('INDEX_NAME' in url.keys())
        self.assertEqual(url['INDEX_NAME'], 'index')
        self.assertTrue('TIMEOUT' in url.keys())
        self.assertEqual(url['TIMEOUT'], timeout)
        self.assertTrue('PATH' not in url)

    def test_whoosh_parsing(self):
        """Whoosh is file-based: PATH from the URL, STORAGE/POST_LIMIT from the query."""
        storage = 'file'  # or ram
        post_limit = 128 * 1024 * 1024
        url = '%s?STORAGE=%s&POST_LIMIT=%s' % (self.whoosh_url, storage, post_limit)
        url = Env.search_url_config(url)
        self.assertEqual(url['ENGINE'], 'haystack.backends.whoosh_backend.WhooshEngine')
        self.assertTrue('PATH' in url.keys())
        self.assertEqual(url['PATH'], '/home/search/whoosh_index')
        self.assertTrue('STORAGE' in url.keys())
        self.assertEqual(url['STORAGE'], storage)
        self.assertTrue('POST_LIMIT' in url.keys())
        self.assertEqual(url['POST_LIMIT'], post_limit)
        self.assertTrue('INDEX_NAME' not in url)

    def test_xapian_parsing(self):
        """Xapian is file-based: PATH from the URL, FLAGS from the query string."""
        flags = 'myflags'
        url = '%s?FLAGS=%s' % (self.xapian_url, flags)
        url = Env.search_url_config(url)
        self.assertEqual(url['ENGINE'], 'haystack.backends.xapian_backend.XapianEngine')
        self.assertTrue('PATH' in url.keys())
        self.assertEqual(url['PATH'], '/home/search/xapian_index')
        self.assertTrue('FLAGS' in url.keys())
        self.assertEqual(url['FLAGS'], flags)
        self.assertTrue('INDEX_NAME' not in url)

    def test_simple_parsing(self):
        url = Env.search_url_config(self.simple_url)
        self.assertEqual(url['ENGINE'], 'haystack.backends.simple_backend.SimpleEngine')
        self.assertTrue('INDEX_NAME' not in url)
        self.assertTrue('PATH' not in url)

    def test_common_args_parsing(self):
        """Engine-independent query params are honored by every engine."""
        excluded_indexes = 'myapp.indexes.A,myapp.indexes.B'
        include_spelling = 1
        batch_size = 100
        params = 'EXCLUDED_INDEXES=%s&INCLUDE_SPELLING=%s&BATCH_SIZE=%s' % (
            excluded_indexes,
            include_spelling,
            batch_size
        )
        for url in [
            self.solr_url,
            self.elasticsearch_url,
            self.whoosh_url,
            self.xapian_url,
            self.simple_url,
        ]:
            url = '?'.join([url, params])
            url = Env.search_url_config(url)
            self.assertTrue('EXCLUDED_INDEXES' in url.keys())
            self.assertTrue('myapp.indexes.A' in url['EXCLUDED_INDEXES'])
            self.assertTrue('myapp.indexes.B' in url['EXCLUDED_INDEXES'])
            self.assertTrue('INCLUDE_SPELLING'in url.keys())
            self.assertTrue(url['INCLUDE_SPELLING'])
            self.assertTrue('BATCH_SIZE' in url.keys())
            self.assertEqual(url['BATCH_SIZE'], 100)
class EmailTests(unittest.TestCase):
    """Tests for Env.email_url_config parsing of email URLs."""

    def test_smtp_parsing(self):
        parsed = Env.email_url_config(
            'smtps://user@domain.com:password@smtp.example.com:587'
        )
        expected = {
            'EMAIL_BACKEND': 'django.core.mail.backends.smtp.EmailBackend',
            'EMAIL_HOST': 'smtp.example.com',
            'EMAIL_HOST_PASSWORD': 'password',
            'EMAIL_HOST_USER': 'user@domain.com',
            'EMAIL_PORT': 587,
            'EMAIL_USE_TLS': True,
        }
        for key, value in expected.items():
            self.assertEqual(parsed[key], value)
class PathTests(unittest.TestCase):
    """Tests for the environ Path helper and its operator overloads."""

    def test_path_class(self):
        """is_file=True makes Path start from the file's directory; '..' walks up."""
        root = Path(__file__, '..', is_file=True)
        root_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '../'))
        self.assertEqual(root(), root_path)
        self.assertEqual(root.__root__, root_path)
        web = root.path('public')
        self.assertEqual(web(), os.path.join(root_path, 'public'))
        self.assertEqual(web('css'), os.path.join(root_path, 'public', 'css'))

    def test_required_path(self):
        """required=True raises ImproperlyConfigured for non-existent paths."""
        self.assertRaises(ImproperlyConfigured, Path, '/not/existing/path/', required=True)
        self.assertRaises(ImproperlyConfigured, Path(__file__), 'not_existing_path', required=True)

    def test_comparison(self):
        """Operator overloads: 'in' is ancestry, ~ is parent, +/- append and strip segments."""
        self.assertTrue(Path('/home') in Path('/'))
        self.assertTrue(Path('/home') not in Path('/other/dir'))
        self.assertTrue(Path('/home') == Path('/home'))
        self.assertTrue(Path('/home') != Path('/home/dev'))
        # String protocol is delegated to str(path).
        self.assertEqual(Path('/home/foo/').rfind('/'), str(Path('/home/foo')).rfind('/'))
        self.assertEqual(Path('/home/foo/').find('/home'), str(Path('/home/foo/')).find('/home'))
        self.assertEqual(Path('/home/foo/')[1], str(Path('/home/foo/'))[1])
        self.assertEqual(~Path('/home'), Path('/'))
        self.assertEqual(Path('/') + 'home', Path('/home'))
        self.assertEqual(Path('/') + '/home/public', Path('/home/public'))
        # Subtracting an int removes that many trailing segments;
        # subtracting a string removes the named trailing segment.
        self.assertEqual(Path('/home/dev/public') - 2, Path('/home'))
        self.assertEqual(Path('/home/dev/public') - 'public', Path('/home/dev'))
        self.assertRaises(TypeError, lambda _: Path('/home/dev/') - 'not int')
def load_suite():
    """Collect every test case class in this module into a single TestSuite."""
    cases = [
        EnvTests, FileEnvTests, SubClassTests, SchemaEnvTests, PathTests,
        DatabaseTestSuite, CacheTestSuite, EmailTests, SearchTestSuite,
    ]
    suite = unittest.TestSuite()
    suite.addTests(unittest.makeSuite(case) for case in cases)
    return suite
if __name__ == "__main__":
    try:
        # "-o" prints the generated fixture data as KEY=value lines
        # (suitable for writing a .env file) instead of running the tests.
        if sys.argv[1] == '-o':
            for key, value in BaseTests.generateData().items():
                print("{0}={1}".format(key, value))
            sys.exit()
    except IndexError:
        # No CLI argument given: fall through and run the full suite.
        pass
    unittest.TextTestRunner().run(load_suite())
| |
from pyglet.gl import *
from plot_mode import PlotMode
from threading import Thread, Event, RLock
from color_scheme import ColorScheme
from sympy.core.basic import S
from time import sleep
class PlotModeBase(PlotMode):
    """
    Intended parent class for plotting
    modes. Provides base functionality
    in conjunction with its parent,
    PlotMode.
    """

    ##
    ## Class-Level Attributes
    ##

    """
    The following attributes are meant
    to be set at the class level, and serve
    as parameters to the plot mode registry
    (in PlotMode). See plot_modes.py for
    concrete examples.
    """

    """
    i_vars
        'x' for Cartesian2D
        'xy' for Cartesian3D
        etc.

    d_vars
        'y' for Cartesian2D
        'r' for Polar
        etc.
    """
    i_vars, d_vars = '', ''

    """
    intervals
        Default intervals for each i_var, and in the
        same order. Specified [min, max, steps].
        No variable can be given (it is bound later).
    """
    intervals = []

    """
    aliases
        A list of strings which can be used to
        access this mode.
        'cartesian' for Cartesian2D and Cartesian3D
        'polar' for Polar
        'cylindrical', 'polar' for Cylindrical

        Note that _init_mode chooses the first alias
        in the list as the mode's primary_alias, which
        will be displayed to the end user in certain
        contexts.
    """
    aliases = []

    """
    is_default
        Whether to set this mode as the default
        for arguments passed to PlotMode() containing
        the same number of d_vars as this mode and
        at most the same number of i_vars.
    """
    is_default = False

    """
    All of the above attributes are defined in PlotMode.
    The following ones are specific to PlotModeBase.
    """

    """
    A list of the render styles. Do not modify.
    """
    # Bit flags: solid is bit 2, wireframe is bit 1, 'both' sets both bits.
    styles = {'wireframe':1, 'solid':2, 'both':3}

    """
    style_override
        Always use this style if not blank.
    """
    style_override = ''

    """
    default_wireframe_color
    default_solid_color
        Can be used when color is None or being calculated.
        Used by PlotCurve and PlotSurface, but not anywhere
        in PlotModeBase.
    """
    default_wireframe_color = (0.85,0.85,0.85)
    default_solid_color = (0.6,0.6,0.9)
    default_rot_preset = 'xy'

    ##
    ## Instance-Level Attributes
    ##

    ## 'Abstract' member functions
    def _get_evaluator(self):
        # Prefer the fast lambdified evaluator; fall back to the slower but
        # more robust sympy subs-based evaluator if lambdification fails.
        if self.use_lambda_eval:
            try:
                e = self._get_lambda_evaluator()
                return e
            except:
                print ("\nWarning: creating lambda evaluator failed. "
                       "Falling back on sympy subs evaluator.")
        return self._get_sympy_evaluator()

    def _get_sympy_evaluator(self):
        raise NotImplementedError()

    def _get_lambda_evaluator(self):
        raise NotImplementedError()

    def _on_calculate_verts(self):
        raise NotImplementedError()

    def _on_calculate_cverts(self):
        raise NotImplementedError()

    ## Base member functions
    def __init__(self, *args, **kwargs):
        self.verts = []
        self.cverts = []
        # Per-axis [min, max, length] triples; start with inverted infinities
        # so any real vertex immediately tightens the bounds.
        self.bounds = [ [S.Infinity,-S.Infinity,0],[S.Infinity,-S.Infinity,0],[S.Infinity,-S.Infinity,0] ]
        self.cbounds = [ [S.Infinity,-S.Infinity,0],[S.Infinity,-S.Infinity,0],[S.Infinity,-S.Infinity,0] ]

        self._draw_lock = RLock()

        # Events flag that a background (color) vertex calculation is running.
        self._calculating_verts = Event()
        self._calculating_cverts = Event()
        self._calculating_verts_pos = 0.0
        self._calculating_verts_len = 0.0
        self._calculating_cverts_pos = 0.0
        self._calculating_cverts_len = 0.0

        self._max_render_stack_size = 3
        # Element 0 (-1) is a permanent "nothing to draw" marker.
        self._draw_wireframe = [-1]
        self._draw_solid = [-1]

        self._style = None
        self._color = None

        self.predraw = []
        self.postdraw = []

        # NOTE(review): self.options is presumably populated by PlotMode's
        # argument parsing before this __init__ runs -- confirm.
        self.use_lambda_eval = self.options.pop('use_sympy_eval', None) is None
        self.style = self.options.pop('style', '')
        self.color = self.options.pop('color', 'rainbow')
        self.bounds_callback = kwargs.pop('bounds_callback', None)

        self._on_calculate()

    def synchronized(f):
        # Decorator: serialize access to shared render state via _draw_lock.
        def w(self, *args, **kwargs):
            self._draw_lock.acquire()
            try:
                r = f(self, *args, **kwargs)
                return r
            finally:
                self._draw_lock.release()
        return w

    @synchronized
    def push_wireframe(self, function):
        """
        Push a function which performs gl commands
        used to build a display list. (The list is
        built outside of the function)
        """
        assert callable(function)
        self._draw_wireframe.append(function)
        if len(self._draw_wireframe) > self._max_render_stack_size:
            del self._draw_wireframe[1] # leave marker element

    @synchronized
    def push_solid(self, function):
        """
        Push a function which performs gl commands
        used to build a display list. (The list is
        built outside of the function)
        """
        assert callable(function)
        self._draw_solid.append(function)
        if len(self._draw_solid) > self._max_render_stack_size:
            del self._draw_solid[1] # leave marker element

    def _create_display_list(self, function):
        # Compile the given draw callable into an OpenGL display list.
        dl = glGenLists(1)
        glNewList(dl, GL_COMPILE)
        function()
        glEndList()
        return dl

    def _render_stack_top(self, render_stack):
        # Stack entries are either -1 (empty marker), a callable that has not
        # been compiled yet, or a (display_list, callable) pair.
        top = render_stack[-1]
        if top == -1:
            return -1 # nothing to display
        elif callable(top):
            dl = self._create_display_list(top)
            render_stack[-1] = (dl, top)
            return dl # display newly added list
        elif len(top) == 2:
            if GL_TRUE == glIsList(top[0]):
                return top[0] # display stored list
            dl = self._create_display_list(top[1])
            render_stack[-1] = (dl, top[1])
            return dl # display regenerated list

    def _draw_solid_display_list(self, dl):
        glPushAttrib(GL_ENABLE_BIT | GL_POLYGON_BIT)
        glPolygonMode(GL_FRONT_AND_BACK, GL_FILL)
        glCallList(dl)
        glPopAttrib()

    def _draw_wireframe_display_list(self, dl):
        glPushAttrib(GL_ENABLE_BIT | GL_POLYGON_BIT)
        glPolygonMode(GL_FRONT_AND_BACK, GL_LINE)
        # Polygon offset keeps the wireframe from z-fighting the solid fill.
        glEnable(GL_POLYGON_OFFSET_LINE)
        glPolygonOffset(-0.005, -50.0)
        glCallList(dl)
        glPopAttrib()

    @synchronized
    def draw(self):
        for f in self.predraw:
            if callable(f): f()
        if self.style_override:
            style = self.styles[self.style_override]
        else:
            style = self.styles[self._style]
        # Draw solid component if style includes solid
        if style & 2:
            dl = self._render_stack_top(self._draw_solid)
            if dl > 0 and GL_TRUE == glIsList(dl):
                self._draw_solid_display_list(dl)
        # Draw wireframe component if style includes wireframe
        if style & 1:
            dl = self._render_stack_top(self._draw_wireframe)
            if dl > 0 and GL_TRUE == glIsList(dl):
                self._draw_wireframe_display_list(dl)
        for f in self.postdraw:
            if callable(f): f()

    def _on_change_color(self, color):
        # Recolor in a background thread so the UI stays responsive.
        Thread(target=self._calculate_cverts).start()

    def _on_calculate(self):
        Thread(target=self._calculate_all).start()

    def _calculate_all(self):
        self._calculate_verts()
        self._calculate_cverts()

    def _calculate_verts(self):
        # Bail out if another vertex calculation is already in progress.
        if self._calculating_verts.isSet(): return
        self._calculating_verts.set()
        try: self._on_calculate_verts()
        finally: self._calculating_verts.clear()
        if callable(self.bounds_callback):
            self.bounds_callback()

    def _calculate_cverts(self):
        if self._calculating_verts.isSet(): return
        while self._calculating_cverts.isSet():
            sleep(0) # wait for previous calculation
        self._calculating_cverts.set()
        try: self._on_calculate_cverts()
        finally: self._calculating_cverts.clear()

    def _get_calculating_verts(self):
        return self._calculating_verts.isSet()

    def _get_calculating_verts_pos(self):
        return self._calculating_verts_pos

    def _get_calculating_verts_len(self):
        return self._calculating_verts_len

    def _get_calculating_cverts(self):
        return self._calculating_cverts.isSet()

    def _get_calculating_cverts_pos(self):
        return self._calculating_cverts_pos

    def _get_calculating_cverts_len(self):
        return self._calculating_cverts_len

    ## Property handlers
    def _get_style(self):
        return self._style

    @synchronized
    def _set_style(self, v):
        if v is None: return
        # NOTE(review): "v is ''" relies on CPython's interning of the empty
        # string literal; "v == ''" would be the safe comparison.
        if v is '':
            # Empty style: pick a default based on the finest interval step count.
            step_max = 0
            for i in self.intervals:
                if i.v_steps is None: continue
                step_max = max([step_max, i.v_steps])
            v = ['both', 'solid'][step_max > 40]
        #try:
        assert v in self.styles
        if v == self._style: return
        self._style = v
        #except Exception, e:
            #raise Exception(("Style change failed. "
                             #"Reason: %s is not a valid "
                             #"style. Use one of %s.") %
                            #(str(v), ', '.join(self.styles.iterkeys())))

    def _get_color(self):
        return self._color

    @synchronized
    def _set_color(self, v):
        try:
            # Normalize plain values / sequences into a ColorScheme instance.
            if v is not None:
                if isinstance(v, (list, tuple)):
                    v = ColorScheme(*v)
                else: v = ColorScheme(v)
            if repr(v) == repr(self._color): return
            self._on_change_color(v)
            self._color = v
        except Exception, e:
            raise Exception(("Color change failed. "
                             "Reason: %s" % (str(e))))

    style = property(_get_style, _set_style)
    color = property(_get_color, _set_color)

    calculating_verts = property(_get_calculating_verts)
    calculating_verts_pos = property(_get_calculating_verts_pos)
    calculating_verts_len = property(_get_calculating_verts_len)
    calculating_cverts = property(_get_calculating_cverts)
    calculating_cverts_pos = property(_get_calculating_cverts_pos)
    calculating_cverts_len = property(_get_calculating_cverts_len)

    ## String representations

    def __str__(self):
        f = ", ".join(str(d) for d in self.d_vars)
        o = "'mode=%s'" % (self.primary_alias)
        return ", ".join([f, o])

    def __repr__(self):
        f = ", ".join(str(d) for d in self.d_vars)
        i = ", ".join(str(i) for i in self.intervals)
        d = [ ( 'mode', self.primary_alias ),
              ( 'color', str(self.color) ),
              ( 'style', str(self.style) ) ]
        o = "'%s'" % (("; ".join("%s=%s" % (k,v)
                                for k,v in d if v != 'None')))
        return ", ".join([f, i, o])
| |
from celery import chain
from waldur_core.core import executors
from waldur_core.core import tasks as core_tasks
from waldur_core.core import utils as core_utils
from waldur_core.structure import executors as structure_executors
from . import models
from .tasks import SetInstanceErredTask
class VolumeCreateExecutor(executors.CreateExecutor):
    """Creates a volume on the backend and polls until it becomes available."""

    @classmethod
    def get_task_signature(cls, volume, serialized_volume, **kwargs):
        create = core_tasks.BackendMethodTask().si(
            serialized_volume, 'create_volume', state_transition='begin_creating'
        )
        wait_available = core_tasks.PollRuntimeStateTask().si(
            serialized_volume,
            backend_pull_method='pull_volume_runtime_state',
            success_state='available',
            erred_state='error',
        ).set(countdown=30)
        return chain(create, wait_available)
class VolumeDeleteExecutor(executors.DeleteExecutor):
    """Deletes the backend volume, or only transitions state if it was never created."""

    @classmethod
    def get_task_signature(cls, volume, serialized_volume, **kwargs):
        if not volume.backend_id:
            # Nothing exists on the backend yet; just flip the local state.
            return core_tasks.StateTransitionTask().si(
                serialized_volume, state_transition='begin_deleting'
            )
        return core_tasks.BackendMethodTask().si(
            serialized_volume, 'delete_volume', state_transition='begin_deleting'
        )
class VolumeDetachExecutor(executors.ActionExecutor):
    """Detaches a volume and polls until it reports 'available' again."""

    @classmethod
    def get_task_signature(cls, volume, serialized_volume, **kwargs):
        detach = core_tasks.BackendMethodTask().si(
            serialized_volume, 'detach_volume', state_transition='begin_updating'
        )
        wait_available = core_tasks.PollRuntimeStateTask().si(
            serialized_volume,
            backend_pull_method='pull_volume_runtime_state',
            success_state='available',
            erred_state='error',
        ).set(countdown=10)
        return chain(detach, wait_available)
class VolumeAttachExecutor(executors.ActionExecutor):
    """Attaches a volume and polls until it reports 'inuse'."""

    @classmethod
    def get_task_signature(cls, volume, serialized_volume, **kwargs):
        attach = core_tasks.BackendMethodTask().si(
            serialized_volume, 'attach_volume', state_transition='begin_updating'
        )
        wait_inuse = core_tasks.PollRuntimeStateTask().si(
            serialized_volume,
            backend_pull_method='pull_volume_runtime_state',
            success_state='inuse',
            erred_state='error',
        ).set(countdown=10)
        return chain(attach, wait_inuse)
class InstanceCreateExecutor(executors.CreateExecutor):
    # Provisions an instance together with its root volume in one celery chain.

    @classmethod
    def get_task_signature(
        cls,
        instance,
        serialized_instance,
        image=None,
        size=None,
        ssh_key=None,
        volume=None,
    ):
        # NOTE(review): image and size default to None but their backend_id is
        # read unconditionally below, so callers must always supply both -- confirm.
        kwargs = {
            'backend_image_id': image.backend_id,
            'backend_size_id': size.backend_id,
        }
        if ssh_key is not None:
            kwargs['ssh_key_uuid'] = ssh_key.uuid.hex
        serialized_volume = core_utils.serialize_instance(volume)
        return chain(
            # Mark the volume as being created alongside the instance.
            core_tasks.StateTransitionTask().si(
                serialized_volume, state_transition='begin_creating'
            ),
            core_tasks.BackendMethodTask().si(
                serialized_instance,
                backend_method='create_instance',
                state_transition='begin_creating',
                **kwargs
            ),
            # Wait until the backend reports the instance as running.
            core_tasks.PollRuntimeStateTask().si(
                serialized_instance,
                backend_pull_method='pull_instance_runtime_state',
                success_state='running',
                erred_state='error',
            ),
            core_tasks.BackendMethodTask().si(
                serialized_volume,
                backend_method='pull_instance_volume',
                success_runtime_state='inuse',
            ),
            core_tasks.BackendMethodTask().si(
                serialized_instance, 'pull_instance_public_ips'
            ),
        )

    @classmethod
    def get_success_signature(cls, instance, serialized_instance, **kwargs):
        # On success, mark both the (first) volume and the instance OK.
        serialized_volume = core_utils.serialize_instance(instance.volume_set.first())
        return chain(
            core_tasks.StateTransitionTask().si(
                serialized_volume, state_transition='set_ok'
            ),
            core_tasks.StateTransitionTask().si(
                serialized_instance, state_transition='set_ok'
            ),
        )

    @classmethod
    def get_failure_signature(cls, instance, serialized_instance, **kwargs):
        # Delegate to a dedicated task that marks the instance erred.
        return SetInstanceErredTask().s(serialized_instance)
class InstanceResizeExecutor(executors.ActionExecutor):
    """Resizes an instance to a new flavor and waits for it to report 'stopped'."""

    @classmethod
    def get_task_signature(cls, instance, serialized_instance, **kwargs):
        size = kwargs.pop('size')
        resize = core_tasks.BackendMethodTask().si(
            serialized_instance,
            backend_method='resize_instance',
            state_transition='begin_updating',
            size_id=size.backend_id,
        )
        wait_stopped = core_tasks.PollRuntimeStateTask().si(
            serialized_instance,
            backend_pull_method='pull_instance_runtime_state',
            success_state='stopped',
            erred_state='error',
        ).set(countdown=30)
        return chain(resize, wait_stopped)

    @classmethod
    def get_success_signature(cls, instance, serialized_instance, **kwargs):
        # Resize only needs to transition the instance back to OK.
        return core_tasks.StateTransitionTask().si(
            serialized_instance, state_transition='set_ok'
        )
class InstanceStopExecutor(executors.ActionExecutor):
    """Stops an instance and waits until the backend reports it as stopped."""

    @classmethod
    def get_task_signature(cls, instance, serialized_instance, **kwargs):
        stop = core_tasks.BackendMethodTask().si(
            serialized_instance, 'stop_instance', state_transition='begin_updating',
        )
        wait_stopped = core_tasks.PollRuntimeStateTask().si(
            serialized_instance,
            backend_pull_method='pull_instance_runtime_state',
            success_state='stopped',
            erred_state='erred',
        )
        return chain(stop, wait_stopped)
class InstanceStartExecutor(executors.ActionExecutor):
    """Starts an instance, waits for 'running', then refreshes its public IPs."""

    @classmethod
    def get_task_signature(cls, instance, serialized_instance, **kwargs):
        start = core_tasks.BackendMethodTask().si(
            serialized_instance,
            'start_instance',
            state_transition='begin_updating',
        )
        wait_running = core_tasks.PollRuntimeStateTask().si(
            serialized_instance,
            backend_pull_method='pull_instance_runtime_state',
            success_state='running',
            erred_state='erred',
        )
        refresh_ips = core_tasks.BackendMethodTask().si(
            serialized_instance, 'pull_instance_public_ips'
        )
        return chain(start, wait_running, refresh_ips)
class InstanceRestartExecutor(executors.ActionExecutor):
    """Reboots an instance and waits until it is running again."""

    @classmethod
    def get_task_signature(cls, instance, serialized_instance, **kwargs):
        reboot = core_tasks.BackendMethodTask().si(
            serialized_instance,
            'reboot_instance',
            state_transition='begin_updating',
        )
        wait_running = core_tasks.PollRuntimeStateTask().si(
            serialized_instance,
            backend_pull_method='pull_instance_runtime_state',
            success_state='running',
            erred_state='erred',
        )
        return chain(reboot, wait_running)
class InstanceDeleteExecutor(executors.DeleteExecutor):
    """Destroys the backend instance and polls until it is fully terminated."""

    @classmethod
    def get_task_signature(cls, instance, serialized_instance, **kwargs):
        if not instance.backend_id:
            # Never provisioned on the backend: only flip the local state.
            return core_tasks.StateTransitionTask().si(
                serialized_instance, state_transition='begin_deleting'
            )
        destroy = core_tasks.BackendMethodTask().si(
            serialized_instance,
            'destroy_instance',
            state_transition='begin_deleting',
        )
        wait_terminated = core_tasks.PollBackendCheckTask().si(
            serialized_instance, backend_check_method='is_instance_terminated'
        )
        return chain(destroy, wait_terminated)
class AWSCleanupExecutor(structure_executors.BaseCleanupExecutor):
    # Maps each AWS model to the executor used to delete it during cleanup.
    # NOTE(review): instances are listed before volumes; presumably
    # BaseCleanupExecutor honors this ordering -- confirm.
    executors = (
        (models.Instance, InstanceDeleteExecutor),
        (models.Volume, VolumeDeleteExecutor),
    )
| |
import json
import logging
import os
import shutil
import time
from subprocess import Popen, PIPE
from urllib import parse
import requests
from flask import session
from son_editor.app.database import db_session, scan_project_dir, sync_project_descriptor
from son_editor.app.exceptions import NotFound, InvalidArgument, NameConflict
from son_editor.impl import usermanagement
from son_editor.models.project import Project
from son_editor.models.workspace import Workspace
from son_editor.util.constants import PROJECT_REL_PATH, Github, REQUIRED_SON_PROJECT_FILES
logger = logging.getLogger(__name__)
def create_oauth_header() -> dict:
    """
    Creates oauth header by providing the access token in the header.

    :return: Header as dict
    """
    token = session['access_token']
    return {'Authorization': 'token {}'.format(token)}
def build_github_delete(owner: str, repo_name: str) -> str:
    """
    Constructs the GitHub API URL used to delete a repository.

    :param owner: Owner of the github repository
    :param repo_name: Repository name
    :return: Full API URL as string
    """
    delete_path = Github.API_DELETE_REPO.format(owner, repo_name)
    return Github.API_URL + delete_path
def is_github(netloc) -> bool:
    """
    Checks whether the given network location belongs to GitHub.

    :param netloc: Network-location part of a URL (e.g. "github.com")
    :return: True if the host is one of the known GitHub domains
    """
    # A direct membership test replaces the redundant
    # `if ...: return True / return False` chain.
    return netloc.lower() in Github.DOMAINS
def git_command(git_args: list, cwd: str = None):
    """
    Runs git with the given arguments and captures its output.

    :param git_args: Arguments for git
    :param cwd: Optional current working directory
    :return: out, error, exitcode
    """
    command = ['git'] + git_args
    process = Popen(command, stdout=PIPE, stderr=PIPE, cwd=cwd)
    out, err = process.communicate()
    return out.decode(), err.decode(), process.returncode
def create_info_dict(out: str = None, err: str = None, exitcode: int = 0) -> dict:
    """
    Creates a dict that holds process information.

    :param out: Captured stdout text
    :param err: Captured stderr text
    :param exitcode: Process exit code (0 means success)
    :return: Dict with packed information.
    """
    # Bug fix: `exitcode is 0` relied on CPython's small-int caching; identity
    # comparison against int literals is not a reliable equality test.
    result_dict = {'success': exitcode == 0}
    # Prioritize the err message over out
    if err:
        result_dict.update({'message': err})
    elif out:
        result_dict.update({'message': out})
    # Only record the exit code when it is non-zero.
    if exitcode:
        result_dict.update({'exitcode': exitcode})
    return result_dict
def get_project(ws_id, pj_id: int, session=None) -> Project:
    """
    Returns a project and raises 404 when the project is not found.

    :param ws_id: Workspace id
    :param pj_id: Project id
    :param session: Optional database session; a fresh one is created if omitted
    :return: Project model
    :raises NotFound: if no project with the given id exists in the workspace
    """
    # Bug fix: the previous default `session=db_session()` was evaluated once
    # at import time, so all callers that omitted the argument silently shared
    # a single session object for the process lifetime.
    if session is None:
        session = db_session()
    project = session.query(Project).join(Workspace) \
        .filter(Workspace.id == ws_id) \
        .filter(Project.id == pj_id).first()
    if not project:
        raise NotFound("Could not find project with id {}".format(pj_id))
    return project
def check_son_validity(project_path: str):
    """
    Checks if the given project path is a valid son project, otherwise it raises
    an exception. Valid means it has a consistent son file structure; no
    semantics are tested.

    :param project_path: Path of the project directory to check
    :raises InvalidArgument: if one or more required project files are missing
    """
    missing_files = []
    files = os.listdir(project_path)
    logger.warning('Files in {}: '.format(project_path))
    for f in files:
        logger.warning('{}'.format(f))
    for file in REQUIRED_SON_PROJECT_FILES:
        if not os.path.isfile(os.path.join(project_path, file)):
            missing_files.append(file)
    missing_files_count = len(missing_files)
    # Project is valid: nothing to report. (`==` replaces the unreliable
    # `is` identity comparison against int literals.)
    if missing_files_count == 0:
        return
    elif missing_files_count == 1:
        # Bug fix: report the missing file itself instead of the loop variable
        # `file`, which is simply the last entry of REQUIRED_SON_PROJECT_FILES.
        result = "The project has no '{}' file".format(missing_files[0])
    else:
        # Bug fix: join the list of names; joining the int count raised TypeError.
        result = "The project has the following missing files: '{}'".format(",".join(missing_files))
    raise InvalidArgument(result)
def get_workspace(ws_id: int) -> Workspace:
    """
    Fetches the workspace model for the given id.

    :param ws_id: Workspace id
    :return: Workspace model
    :raises NotFound: if no workspace with that id exists
    """
    found = db_session().query(Workspace).filter(Workspace.id == ws_id).first()
    if not found:
        raise NotFound("Could not find workspace with id {}".format(ws_id))
    return found
def init(ws_id: int, project_id: int):
    """
    Initializes a git repository in the given project.

    :param ws_id: Workspace id
    :param project_id: Project id
    :return: Info dict describing the result of ``git init``
    """
    project = get_project(ws_id, project_id)
    project_full_path = os.path.join(project.workspace.path, PROJECT_REL_PATH, project.rel_path)
    out, err, exitcode = git_command(['init'], cwd=project_full_path)
    # Bug fix: `==` instead of `is` — identity comparison of ints is unreliable.
    if exitcode == 0:
        # Additionally set repository user information
        setup_git_user_email(project_full_path)
    return create_info_dict(out, err=err, exitcode=exitcode)
def setup_git_user_email(project_full_path: str):
    """Configures git user name, email and push behaviour for the repository."""
    user = usermanagement.get_user(session['user_data']['login'])
    for key, value in (('user.name', user.name),
                       ('user.email', user.email),
                       ('push.default', 'simple')):
        git_command(['config', key, value], cwd=project_full_path)
def commit_and_push(ws_id: int, project_id: int, commit_message: str):
    """
    Stages all changes in the project, commits them and pushes to the remote.

    :param ws_id: Workspace id
    :param project_id: Project id
    :param commit_message: Message used for the commit
    :return: Info dict describing success or failure
    """
    project = get_project(ws_id, project_id)
    project_full_path = os.path.join(project.workspace.path, PROJECT_REL_PATH, project.rel_path)
    logger.warning("Commit and Push files")
    files = os.listdir(project_full_path)
    logger.warning('Files in {}: '.format(project_full_path))
    for f in files:
        logger.warning('{}'.format(f))
    # Stage all modified, added, removed files
    out, err, exitcode = git_command(['add', '-A'], cwd=project_full_path)
    # Bug fix throughout: `!= 0` instead of `is not 0` — identity comparison
    # of ints relies on CPython small-int caching and is unreliable.
    if exitcode != 0:
        return create_info_dict(out, err=err, exitcode=exitcode)
    logger.warning("Add succeeded: {}".format(out))
    # Commit with message
    out, err, exitcode = git_command(['commit', "-m '{}'".format(commit_message)], cwd=project_full_path)
    if exitcode != 0:
        # Undo the staging so the working tree is left as it was on failure.
        git_command(['reset', 'HEAD~1'], cwd=project_full_path)
        return create_info_dict(out, err=err, exitcode=exitcode)
    logger.warning("Commit succeeded: {}".format(out))
    # Push all changes to the repo url
    sout, serr, sexitcode = git_command(['status', '-u'], cwd=project_full_path)
    url_decode = parse.urlparse(project.repo_url)
    # Bug fix: the old message called .format(out) on a string with no
    # placeholder, silently discarding the argument.
    logger.warning("Executed status")
    git_command(['remote', 'rm', 'origin', _get_repo_url(url_decode)], cwd=project_full_path)
    git_command(['remote', 'add', 'origin', _get_repo_url(url_decode)], cwd=project_full_path)
    git_command(['push', '--set-upstream', 'origin', 'master'], cwd=project_full_path)
    git_command(['push', '-u'], cwd=project_full_path)
    # NOTE(review): the exit codes of the push commands above are not checked
    # (the original's final `if exitcode is not 0:` re-tested the *commit*
    # exit code, which is always 0 here, so that branch was dead code).
    logger.warning("Push succeeded: {}".format(out))
    logger.warning("Push out: {}\n err: {} \n exitcode: {}\n repo url: {}".format(out, err, exitcode,
                                                                                  _get_repo_url(url_decode)))
    logger.warning("Status: out: {}\n err: {} \n exitcode: {}\n".format(sout, serr, sexitcode))
    # Success on commit
    return create_info_dict(out)
def create_commit_and_push(ws_id: int, project_id: int, remote_repo_name: str):
    """
    Creates a remote GitHub repository named remote_repo_name and pushes given git project into it.

    :param ws_id: Workspace ID
    :param project_id: Project ID to create and push it
    :param remote_repo_name: Remote repository name
    :return: Info dict from commit_and_push on success
    :raises NameConflict: if a repository with that name already exists on GitHub
    """
    database_session = db_session()
    try:
        project = get_project(ws_id, project_id, database_session)
        # Equivalent of:
        # curl -H "Authorization: token [TOKEN]" -X POST https://api.github.com/user/repos --data '{"name":"repo_name"}'
        repo_data = {'name': remote_repo_name}
        request = requests.post(Github.API_URL + Github.API_CREATE_REPO_REL, json=repo_data,
                                headers=create_oauth_header())
        # Handle non-created responses
        if request.status_code != 201:
            # 422: repository already exists
            if request.status_code == 422:
                raise NameConflict("Repository with name {} already exist on GitHub".format(remote_repo_name))
            raise Exception("Unhandled status_code: {}\n{}".format(request.status_code, request.text))
        # Get git url and commit to db
        # NOTE(review): takes 'svn_url' from the GitHub response as the clone
        # URL — confirm this is intended instead of 'clone_url'.
        data = json.loads(request.text)
        git_url = data['svn_url']
        project.repo_url = git_url
        database_session.commit()
    except Exception:
        # Any failure before the commit leaves the DB unchanged.
        database_session.rollback()
        raise
    # Try to push project
    try:
        # Give github some time to see created repo
        # (dirty hack)
        time.sleep(0.5)
        return commit_and_push(ws_id, project_id, "Initial commit")
    except Exception:
        # Delete newly created repository if commit and push failed.
        # NOTE(review): the delete response is never checked, so a failed
        # cleanup deletion goes unnoticed.
        result = requests.delete(build_github_delete(session['user_data']['login'], remote_repo_name),
                                 headers=create_oauth_header())
        # Reraise
        raise
def delete(ws_id: int, project_id: int, remote_repo_name: str, organization_name: str = None):
    """
    Deletes given project on remote repository

    :param project_id: Project id
    :param ws_id: Workspace of the project
    :param remote_repo_name: Remote repository name
    :param organization_name: Optional parameter to specify the organization / login
    :return: Info dict describing success or failure
    :raises InvalidArgument: if remote_repo_name does not match the project's repo URL
    """
    # Default to the logged-in user as the repository owner.
    if organization_name is None:
        owner = session['user_data']['login']
    else:
        owner = organization_name
    sql_session = db_session()
    project = get_project(ws_id, project_id, sql_session)
    url_decode = parse.urlparse(project.repo_url)
    # Guard: only delete when the caller-supplied name matches the repository
    # actually linked to this project.
    if _repo_name_from_url(url_decode) == remote_repo_name:
        result = _do_delete(owner, remote_repo_name)
        # 204 No Content is GitHub's success response for a deletion.
        if result.status_code == 204:
            project.repo_url = None
            sql_session.commit()
            return create_info_dict("Successfully deleted")
        else:
            sql_session.rollback()
            return create_info_dict(result.text, exitcode=1)
    raise InvalidArgument("The given repo name does not correspond to the remote repository name")
def _do_delete(owner, remote_repo_name):
    """Issues the GitHub API call that deletes the given remote repository."""
    url = build_github_delete(owner, remote_repo_name)
    return requests.delete(url, headers=create_oauth_header())
def diff(ws_id: int, pj_id: int):
    """
    Shows the local changes of the given project.

    :param ws_id: Workspace of the project.
    :param pj_id: Given project to show from.
    :return: Info dict with the diff output or the error
    """
    project = get_project(ws_id, pj_id)
    project_full_path = os.path.join(project.workspace.path, PROJECT_REL_PATH, project.rel_path)
    out, err, exitcode = git_command(['diff'], project_full_path)
    # Bug fix: `==` instead of `is` — identity comparison of ints is unreliable.
    if exitcode == 0:
        return create_info_dict(out)
    return create_info_dict(out, err, exitcode)
def status(ws_id: int, pj_id: int):
    """
    Shows the git status of the repository.

    :param ws_id: Workspace id
    :param pj_id: Project id
    :return: Info dict with the status output or the error
    """
    project = get_project(ws_id, pj_id)
    project_full_path = os.path.join(project.workspace.path, PROJECT_REL_PATH, project.rel_path)
    # Fetch remote changes first so the status reflects the remote state.
    out, err, exitcode = git_command(['remote', 'update'], project_full_path)
    # Bug fix: `==` instead of `is` — identity comparison of ints is unreliable.
    if exitcode == 0:
        # Get the status
        out, err, exitcode = git_command(['status', '-uno', '-u'], project_full_path)
        if exitcode == 0:
            return create_info_dict(out)
    return create_info_dict(out, err, exitcode)
def pull(ws_id: int, project_id: int):
    """
    Pulls data from the project's configured remote repository.

    :param ws_id: Workspace of the project
    :param project_id: Project to pull.
    :return: Info dict with the pull output or the error
    :raises InvalidArgument: if the project has no repo URL configured
    """
    project = get_project(ws_id, project_id)
    project_full_path = os.path.join(project.workspace.path, PROJECT_REL_PATH, project.rel_path)
    # Error handling
    if not os.path.isdir(project_full_path):
        raise Exception("Could not find project directory {}".format(project_full_path))
    if not project.repo_url:
        raise InvalidArgument("Project with id {} is missing the repo attribute".format(project_id))
    # Pull in project directory
    out, err, exitcode = git_command(['pull', project.repo_url], cwd=project_full_path)
    # Bug fix: `!=` instead of `is not` — identity comparison of ints is unreliable.
    if exitcode != 0:
        return create_info_dict(err=err, exitcode=exitcode)
    return create_info_dict(out=out)
def list(ws_id: int):
    """
    Lists the available remote repositories of the logged-in GitHub user.

    :param ws_id: Workspace id (unused in the body; kept for a uniform endpoint signature)
    :return: https://developer.github.com/v3/repos/#response
    """
    # NOTE(review): this function shadows the builtin `list`; renaming it
    # would change the module's public API, so it is only flagged here.
    result = requests.get(Github.API_URL + Github.API_LIST_REPOS.format(session['user_data']['login']),
                          headers=create_oauth_header())
    return json.loads(result.text)
def _repo_name_from_url(url_decode: str):
github_project_name = os.path.split(url_decode.path)[-1]
return github_project_name.replace('.git', '')
def clone(ws_id: int, url: str, name: str = None):
    """
    Clones a repository by url into given workspace.

    :param name: Optional name of the local repository name, otherwise the remote name is taken
    :param ws_id: Destination workspace to clone
    :param url: URL of the source repository
    :return: Info dict with the clone result and the new project id
    :raises NameConflict: if a project with the same name already exists
    :raises NotImplementedError: for non-GitHub hosts
    """
    workspace = get_workspace(ws_id)
    url_decode = parse.urlparse(url)
    if is_github(url_decode.netloc):
        # Take the suffix of url as first name candidate
        github_project_name = name
        if github_project_name is None:
            github_project_name = _repo_name_from_url(url_decode)
        dbsession = db_session()
        pj = dbsession.query(Project).filter(Workspace.id == workspace.id).filter(
            Project.name == github_project_name).first()
        dbsession.commit()
        # Error when the project name in given workspace already exists
        if pj is not None:
            raise NameConflict('A project with name {} already exists'.format(github_project_name))
        project_target_path = os.path.join(workspace.path, PROJECT_REL_PATH, github_project_name)
        logger.info('Cloning from github repo...')
        # If url in GitHub domain, access by token
        url_with_token = _get_repo_url(url_decode)
        out, err, exitcode = git_command(['clone', url_with_token, project_target_path])
        # Bug fix: `!=` instead of `is` — identity comparison of ints is unreliable.
        if exitcode != 0:
            return create_info_dict(err=err, exitcode=exitcode)
        setup_git_user_email(project_target_path)
        # Check if the project is a valid son project
        check_son_validity(project_target_path)
        # Create project and scan it.
        dbsession = db_session()
        try:
            pj = Project(github_project_name, github_project_name, workspace)
            pj.repo_url = url
            sync_project_descriptor(pj)
            dbsession.add(pj)
            scan_project_dir(project_target_path, pj)
            dbsession.commit()
            result = create_info_dict(out=out)
            result["id"] = pj.id
            return result
        except Exception as exc:
            # Roll back the DB and remove the half-imported checkout.
            # Bug fix: the old bare `except:` also swallowed SystemExit and
            # KeyboardInterrupt and discarded the original traceback.
            dbsession.rollback()
            shutil.rmtree(project_target_path)
            raise Exception("Scan project failed") from exc
    # Bug fix: `raise NotImplemented` raises a TypeError in Python 3, because
    # NotImplemented is a constant, not an exception class.
    raise NotImplementedError("Cloning from other is not implemented yet. Only github is supported for now.")
def _get_repo_url(url_decode):
    """Builds a token-authenticated GitHub URL for the parsed repository path."""
    token = session['access_token']
    return 'https://{}@github.com{}'.format(token, url_decode.path)
| |
# coding: utf-8
"""
Qc API
Qc API # noqa: E501
The version of the OpenAPI document: 3.0.0
Contact: cloudsupport@telestream.net
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from telestream_cloud_qc.configuration import Configuration
class MxfOpTest(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    """
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # attribute name -> declared OpenAPI type
    openapi_types = {
        'op': 'OperationalPattern',
        'require_closed': 'bool',
        'require_complete': 'bool',
        'reject_on_error': 'bool',
        'checked': 'bool'
    }
    # attribute name -> JSON key in the API payload
    attribute_map = {
        'op': 'op',
        'require_closed': 'require_closed',
        'require_complete': 'require_complete',
        'reject_on_error': 'reject_on_error',
        'checked': 'checked'
    }
    def __init__(self, op=None, require_closed=None, require_complete=None, reject_on_error=None, checked=None, local_vars_configuration=None):  # noqa: E501
        """MxfOpTest - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration
        self._op = None
        self._require_closed = None
        self._require_complete = None
        self._reject_on_error = None
        self._checked = None
        self.discriminator = None
        # Only assign attributes that were explicitly provided, so that
        # omitted fields stay None and are distinguishable from set ones.
        if op is not None:
            self.op = op
        if require_closed is not None:
            self.require_closed = require_closed
        if require_complete is not None:
            self.require_complete = require_complete
        if reject_on_error is not None:
            self.reject_on_error = reject_on_error
        if checked is not None:
            self.checked = checked
    @property
    def op(self):
        """Gets the op of this MxfOpTest.  # noqa: E501

        :return: The op of this MxfOpTest.  # noqa: E501
        :rtype: OperationalPattern
        """
        return self._op
    @op.setter
    def op(self, op):
        """Sets the op of this MxfOpTest.

        :param op: The op of this MxfOpTest.  # noqa: E501
        :type: OperationalPattern
        """
        self._op = op
    @property
    def require_closed(self):
        """Gets the require_closed of this MxfOpTest.  # noqa: E501

        :return: The require_closed of this MxfOpTest.  # noqa: E501
        :rtype: bool
        """
        return self._require_closed
    @require_closed.setter
    def require_closed(self, require_closed):
        """Sets the require_closed of this MxfOpTest.

        :param require_closed: The require_closed of this MxfOpTest.  # noqa: E501
        :type: bool
        """
        self._require_closed = require_closed
    @property
    def require_complete(self):
        """Gets the require_complete of this MxfOpTest.  # noqa: E501

        :return: The require_complete of this MxfOpTest.  # noqa: E501
        :rtype: bool
        """
        return self._require_complete
    @require_complete.setter
    def require_complete(self, require_complete):
        """Sets the require_complete of this MxfOpTest.

        :param require_complete: The require_complete of this MxfOpTest.  # noqa: E501
        :type: bool
        """
        self._require_complete = require_complete
    @property
    def reject_on_error(self):
        """Gets the reject_on_error of this MxfOpTest.  # noqa: E501

        :return: The reject_on_error of this MxfOpTest.  # noqa: E501
        :rtype: bool
        """
        return self._reject_on_error
    @reject_on_error.setter
    def reject_on_error(self, reject_on_error):
        """Sets the reject_on_error of this MxfOpTest.

        :param reject_on_error: The reject_on_error of this MxfOpTest.  # noqa: E501
        :type: bool
        """
        self._reject_on_error = reject_on_error
    @property
    def checked(self):
        """Gets the checked of this MxfOpTest.  # noqa: E501

        :return: The checked of this MxfOpTest.  # noqa: E501
        :rtype: bool
        """
        return self._checked
    @checked.setter
    def checked(self, checked):
        """Sets the checked of this MxfOpTest.

        :param checked: The checked of this MxfOpTest.  # noqa: E501
        :type: bool
        """
        self._checked = checked
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Recursively convert nested models (anything exposing to_dict),
        # lists and dicts of models into plain Python structures.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, MxfOpTest):
            return False
        # Equality is structural: compare the plain-dict representations.
        return self.to_dict() == other.to_dict()
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, MxfOpTest):
            return True
        return self.to_dict() != other.to_dict()
| |
from typing import List, Set, Tuple, Optional, Union, Dict
from hwt.code import SwitchLogic, Switch
from hwt.hdl.statements.statement import HdlStatement
from hwt.hdl.types.bits import Bits
from hwt.interfaces.std import HandshakeSync
from hwt.math import log2ceil
from hwt.pyUtils.uniqList import UniqList
from hwt.synthesizer.interface import Interface
from hwt.synthesizer.rtlLevel.rtlSignal import RtlSignal
from hwtHls.allocator.architecturalElement import AllocatorArchitecturalElement
from hwtHls.allocator.connectionsOfStage import getIntfSyncSignals, \
setNopValIfNotSet, SignalsOfStages, ConnectionsOfStage
from hwtHls.allocator.time_independent_rtl_resource import TimeIndependentRtlResource
from hwtHls.clk_math import start_clk
from hwtHls.netlist.analysis.fsm import IoFsm
from hwtHls.netlist.nodes.backwardEdge import HlsNetNodeReadBackwardEdge, \
HlsNetNodeWriteBackwardEdge
from hwtHls.netlist.nodes.io import HlsNetNodeWrite, HlsNetNodeRead
from hwtHls.netlist.nodes.node import HlsNetNode
from ipCorePackager.constants import INTF_DIRECTION
class AllocatorFsmContainer(AllocatorArchitecturalElement):
    """
    Container class for FSM allocation objects.

    Allocates RTL for the nodes scheduled into the states of an :class:`IoFsm`:
    a state register, per-state IO muxes and a state-transition switch.
    """
    def __init__(self, parentHls: "HlsPipeline", namePrefix:str, fsm: IoFsm):
        # Collect all netlist nodes from every FSM state into a single list.
        allNodes = UniqList()
        for nodes in fsm.states:
            allNodes.extend(nodes)
        self.fsm = fsm
        clkPeriod = self.normalizedClkPeriod = parentHls.normalizedClkPeriod
        assert fsm.states, fsm
        # First/last clock-cycle indices occupied by any state of this FSM.
        self.fsmEndClk_i = max(fsm.stateClkI.values())
        self.fsmBeginClk_i = min(fsm.stateClkI.values())
        # Inverse of fsm.stateClkI: clock index -> state index.
        self.clkIToStateI = clkIToStateI = {v:k for k, v in fsm.stateClkI.items()}
        stateCons = [ConnectionsOfStage() for _ in fsm.states]
        # Signal lists are indexed per clock cycle; clock cycles without a
        # state get None.
        stageSignals = SignalsOfStages(clkPeriod,
                                       (
                                        stateCons[clkIToStateI[clkI]].signals if clkI in clkIToStateI else None
                                        for clkI in range(self.fsmEndClk_i + 1)
                                       ))
        AllocatorArchitecturalElement.__init__(self, parentHls, namePrefix, allNodes, stateCons, stageSignals)
    def _afterNodeInstantiated(self, n: HlsNetNode, rtl: Optional[TimeIndependentRtlResource]):
        # mark value in register as persistent until the end of FSM
        isTir = isinstance(rtl, TimeIndependentRtlResource)
        if rtl is None or not isTir:
            # Node produced no single TIR; walk all of its instantiated outputs.
            cons = (self.netNodeToRtl[o] for o in n._outputs if o in self.netNodeToRtl)
        elif isTir and rtl.timeOffset == TimeIndependentRtlResource.INVARIANT_TIME:
            # Time-invariant values need no per-state persistence tracking.
            return
        else:
            cons = (rtl,)
        clkPeriod = self.normalizedClkPeriod
        fsmEndClk_i = self.fsmEndClk_i
        for s in cons:
            s: TimeIndependentRtlResource
            assert len(s.valuesInTime) == 1, ("Value must not be used yet because we need to set persistence ranges first.", s)
            if not s.persistenceRanges and s.timeOffset is not TimeIndependentRtlResource.INVARIANT_TIME:
                self.stageSignals.getForTime(s.timeOffset).append(s)
                # value for the first clock behind this clock period and the rest is persistent in this register
                nextClkI = start_clk(s.timeOffset, clkPeriod) + 2
                if nextClkI <= fsmEndClk_i:
                    s.persistenceRanges.append((nextClkI, fsmEndClk_i))
        for dep in n.dependsOn:
            self._afterOutputUsed(dep)
    def connectSync(self, clkI: int, intf: HandshakeSync, intfDir: INTF_DIRECTION):
        # Register an IO interface with the state scheduled in clock cycle clkI.
        try:
            stateI = self.clkIToStateI[clkI]
        except KeyError:
            raise AssertionError("Asking for a sync in an element which is not scheduled in this clk period", self, clkI, self.clkIToStateI)
        con = self.connections[stateI]
        if intfDir == INTF_DIRECTION.MASTER:
            con.outputs.append(intf)
        else:
            assert intfDir == INTF_DIRECTION.SLAVE, intfDir
            con.inputs.append(intf)
        self._initNopValsOfIoForIntf(intf, intfDir)
    def _initNopValsOfIoForIntf(self, intf: Interface, intfDir: INTF_DIRECTION):
        if intfDir == INTF_DIRECTION.MASTER:
            # to prevent latching when interface is not used
            syncSignals = getIntfSyncSignals(intf)
            setNopValIfNotSet(intf, None, syncSignals)
        else:
            assert intfDir == INTF_DIRECTION.SLAVE, (intf, intfDir)
            syncSignals = getIntfSyncSignals(intf)
            # Slave sync signals default to 0 when this FSM does not drive them.
            for s in syncSignals:
                setNopValIfNotSet(s, 0, ())
    def _initNopValsOfIo(self):
        """
        Initialize the nop value which will drive the IO when it is not used.
        """
        for nodes in self.fsm.states:
            for node in nodes:
                if isinstance(node, HlsNetNodeWrite):
                    self._initNopValsOfIoForIntf(node.dst, INTF_DIRECTION.MASTER)
                elif isinstance(node, HlsNetNodeRead):
                    self._initNopValsOfIoForIntf(node.src, INTF_DIRECTION.SLAVE)
    def _detectStateTransitions(self):
        # Backward-edge channels whose read and write both live in this FSM;
        # they can be turned into state-transition conditions.
        localControlReads: UniqList[HlsNetNodeReadBackwardEdge] = UniqList()
        controlToStateI: Dict[Union[HlsNetNodeReadBackwardEdge, HlsNetNodeWriteBackwardEdge], int] = {}
        for stI, nodes in enumerate(self.fsm.states):
            for node in nodes:
                node: HlsNetNode
                if isinstance(node, HlsNetNodeReadBackwardEdge):
                    node: HlsNetNodeReadBackwardEdge
                    if node.associated_write in self.allNodes:
                        localControlReads.append(node)
                        node.associated_write.allocateAsBuffer = False  # allocate as a register because this is just a local control channel
                        controlToStateI[node] = stI
                elif isinstance(node, HlsNetNodeWriteBackwardEdge):
                    if node.associated_read in self.allNodes:
                        controlToStateI[node] = stI
        # Fold each local control channel into the transition table edge
        # write-state -> read-state, OR-ing with any existing condition.
        for r in localControlReads:
            r: HlsNetNodeReadBackwardEdge
            srcStI = controlToStateI[r.associated_write]
            dstStI = controlToStateI[r]
            curTrans = self.fsm.transitionTable[srcStI].get(dstStI, None)
            cond = self.instantiateHlsNetNodeOut(r._outputs[0]).valuesInTime[0].data.next
            if curTrans is not None:
                cond = cond | curTrans
            self.fsm.transitionTable[srcStI][dstStI] = cond
        # detect the state propagation logic and resolve how to replace it with a state bit
        # * state bit will be just stored as a register in this fsm
        # * read will just read this bit
        # * write will set this bit to a value specified in write src if all write conditions are met
        # * if the value written to channel is 1 it means that fsm jumps to the state where associated read is
        #   There could be multiple channels written but the 1 should be written to just 1
        # * Because the control channel is just local it is safe to replace it
        #   However we must keep it in allNodes list so the node is still registered for this element
    def allocateDataPath(self, iea: "InterArchElementNodeSharingAnalysis"):
        """
        Instantiate logic in the states

        :note: This function does not perform efficient register allocations.
            Instead each value is stored in an individual register.
            The register is created when a value (TimeIndependentRtlResource) is
            first used from another state/clock cycle.
        """
        self.interArchAnalysis = iea
        self._detectStateTransitions()
        for (nodes, con) in zip(self.fsm.states, self.connections):
            ioMuxes: Dict[Interface, Tuple[Union[HlsNetNodeRead, HlsNetNodeWrite], List[HdlStatement]]] = {}
            ioSeen: UniqList[Interface] = UniqList()
            for node in nodes:
                node: HlsNetNode
                wasInstantiated = node._outputs and node._outputs[0] not in self.netNodeToRtl
                rtl = node.allocateRtlInstance(self)
                if wasInstantiated:
                    self._afterNodeInstantiated(node, rtl)
                if isinstance(node, HlsNetNodeRead):
                    # Local (register-backed) backward edges need no IO port.
                    if isinstance(node, HlsNetNodeReadBackwardEdge) and not node.associated_write.allocateAsBuffer:
                        continue
                    self._allocateIo(node.src, node, con, ioMuxes, ioSeen, rtl)
                elif isinstance(node, HlsNetNodeWrite):
                    if isinstance(node, HlsNetNodeWriteBackwardEdge) and not node.allocateAsBuffer:
                        con.stDependentDrives.append(rtl)
                        continue
                    self._allocateIo(node.dst, node, con, ioMuxes, ioSeen, rtl)
            for rtl in self._allocateIoMux(ioMuxes, ioSeen):
                con.stDependentDrives.append(rtl)
    def allocateSync(self):
        """Builds the FSM state register and the state-transition switch."""
        fsm = self.fsm
        self._initNopValsOfIo()
        st = self._reg(f"{self.namePrefix}st_{fsm.intf._name}",
                       Bits(log2ceil(len(fsm.states)), signed=False),
                       def_val=0)
        # instantiate control of the FSM
        # used to prevent duplication of registers which are just latching the value
        # without modification through multiple stages
        seenRegs: Set[TimeIndependentRtlResource] = set()
        stateTrans: List[Tuple[RtlSignal, List[HdlStatement]]] = []
        for stI, con in enumerate(self.connections):
            for s in con.signals:
                s: TimeIndependentRtlResource
                # if the value has a register at the end of this stage
                v = s.checkIfExistsInClockCycle(self.fsmBeginClk_i + stI + 1)
                if v is not None and v.is_rlt_register() and not v in seenRegs:
                    con.stDependentDrives.append(v.data.next.drivers[0])
                    seenRegs.add(v)
            unconditionalTransSeen = False
            inStateTrans: List[Tuple[RtlSignal, List[HdlStatement]]] = []
            sync = self._makeSyncNode(con)
            ack = sync.ack()
            for dstSt, c in sorted(fsm.transitionTable[stI].items(), key=lambda x: x[0]):
                assert not unconditionalTransSeen, "If there is an unconditional transition it must be last"
                # Combine the transition condition with the stage's sync ack.
                if isinstance(ack, (bool, int)):
                    c = c & ack
                else:
                    c = ack & c
                if c == 1:
                    unconditionalTransSeen = True
                    inStateTrans.append((c, st(dstSt)))
                else:
                    inStateTrans.append((c, st(dstSt)))
            stateTrans.append((stI, [SwitchLogic(inStateTrans),
                                     con.stDependentDrives,
                                     sync.sync()]))
        return Switch(st).add_cases(stateTrans)
| |
# -*- coding: utf-8 -*-
import numpy as np
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
import pymoskito as pm
class TwoPendulum(pm.PostProcessingModule):
    """
    plot diagrams of all system quantities
    """
    def __init__(self):
        pm.PostProcessingModule.__init__(self)
        return
    def run(self, data):
        """
        Builds the configured plots from a simulation result set.

        :param data: result dict with "results" (time + Solver state matrix)
            and "regime name"; columns 0..5 of the Solver matrix are
            cart position/velocity and the two pendulum angles/velocities
        :return: list of {"name", "figure"} dicts for the GUI
        """
        return_list = []
        # Unpack the solver state trajectory column by column.
        t = data["results"]["time"]
        x0 = data["results"]["Solver"][:, 0]
        x0_vel = data["results"]["Solver"][:, 1]
        phi1 = data["results"]["Solver"][:, 2]
        phi1_vel = data["results"]["Solver"][:, 3]
        phi2 = data["results"]["Solver"][:, 4]
        phi2_vel = data["results"]["Solver"][:, 5]
        val_list = [x0, x0_vel, phi1, phi1_vel, phi2, phi2_vel]
        title_list = ["Position of the cart",
                      "Velocity of the cart",
                      "Angle long pendulum",
                      "Angular velocity long pendulum",
                      "Angle short pendulum",
                      "Angular velocity short pendulum"]
        x_label_list = [r"$x_{0}$ in m",
                        r"$\dot{x}_{0}$ in m/s",
                        r"$\varphi_{1}$ in degree",
                        r"$\dot{\varphi}_{1}$ in degree/s",
                        r"$\varphi_{2}$ in degree",
                        r"$\dot{\varphi}_{2}$ in degree/s"]
        filename_list = ["x0",
                         "x0_vel",
                         "phi1",
                         "phi1_vel",
                         "phi2",
                         "phi2_vel"]
        # Per-quantity single plots; deliberately disabled via `if 0:`
        # (kept as a manual toggle for debugging).
        if 0:
            for idx, val in enumerate(val_list):
                fig = Figure()
                axes = fig.add_subplot(111)
                axes.set_title(r"\textbf{%s}" % title_list[idx])
                axes.plot(t, val, c="k")
                axes.set_xlim(left=0, right=t[-1])
                axes.set_xlabel(r"Time in s")
                axes.set_ylabel(r"%s" % x_label_list[idx])
                axes.grid(True)
                canvas = FigureCanvas(fig)
                plot_name = "_".join([data["regime name"], self.name, filename_list[idx]])
                return_list.append({"name": plot_name, "figure": canvas})
                # save file
                self.write_output_files(result_name="_".join([data["regime name"], filename_list[idx]]),
                                        figure=fig,
                                        output=pm.construct_result_dict(data, output={}))
        #################################################
        # this section is for combined and custom plots
        # plot both angle of the pendulums in one plot
        plot_selection = {"x0_x0_vel": True,
                          "phi1_phi2": True,
                          "V_V_dot": True}
        if plot_selection["x0_x0_vel"]:
            self._logger.info("creating x0_x0_vel plot")
            fig = Figure()
            axes = fig.add_subplot(111)
            axes.plot(t, x0, c="b", label=r"$x_{0}$ in m")
            axes.plot(t, x0_vel, c="r", label=r"$\dot{x}_{0}$ in $\frac{\mathrm{m}}{\mathrm{s}}$")
            axes.set_xlim(left=t[0], right=t[-1])
            axes.set_xlabel(r"Time in s")
            axes.set_ylabel(r"")
            axes.grid(True)
            # NOTE(review): legend() is called twice; the second call with
            # loc=0 overrides the first, making the first redundant.
            axes.legend()
            axes.legend(loc=0)
            canvas = FigureCanvas(fig)
            plot_name = "_".join([data["regime name"], self.name, "x0_x0_vel"])
            return_list.append({"name": plot_name, "figure": canvas})
            # save file
            self.write_output_files(result_name="_".join([data["regime name"], "x0_x0_vel"]),
                                    figure=fig,
                                    output=pm.construct_result_dict(data, output={}))
        if plot_selection["phi1_phi2"]:
            self._logger.info("creating phi1_phi2 plot")
            fig = Figure()
            axes = fig.add_subplot(111)
            # Angles are stored in radians; plot in degrees.
            axes.plot(t, np.rad2deg(phi1), c="k", label=r"$\varphi_{1}$")
            axes.plot(t, np.rad2deg(phi2), c="b", label=r"$\varphi_{2}$")
            axes.set_xlim(left=t[0], right=t[-1])
            axes.set_xlabel(r"Time in s")
            axes.set_ylabel(r"Angle in $^{\circ}$")
            # start, end = axes.get_ylim()
            # fine segmentation
            # axes.set_yticks(np.arange(int(start/30)*30, int(end/30)*30 + 30, 30), minor=True)
            # axes.set_yticks(np.arange(int(start/90)*90, int(end/90)*90 + 90, 90), minor=False)
            # rough segmentation
            # axes.set_yticks(np.arange(int(start/90)*90, int(end/90)*90 + 90, 90), minor=True)
            # axes.set_yticks(np.arange(int(start/180)*180, int(end/180)*180 + 180, 180), minor=False)
            # axes.grid(which="minor", alpha=0.2)
            # axes.grid(which="major", alpha=0.5)
            axes.grid(True)
            axes.legend()
            axes.legend(loc=0)
            canvas = FigureCanvas(fig)
            plot_name = "_".join([data["regime name"], self.name, "phi1_phi2"])
            return_list.append({"name": plot_name, "figure": canvas})
            # save file
            self.write_output_files(result_name="_".join([data["regime name"], "phi1_phi2"]),
                                    figure=fig,
                                    output=pm.construct_result_dict(data, output={}))
        if plot_selection["V_V_dot"]:
            self._logger.debug("creating V_V_dot plot")
            V, V_dot = self.calc_v_and_v_dot(data, val_list)
            fig = Figure()
            axes = fig.add_subplot(111)
            axes.plot(t, V, c="k", label=r"$V$ in $\frac{\mathrm{kg}^{2}\mathrm{m}^{4}}{\mathrm{s}^{4}}$")
            # axes.plot(t, V_dot, c="b", label=r"$\dot{V}$ in $\frac{\mathrm{kg}^{2}\mathrm{m}^{4}}{\mathrm{s}^{5}}$")
            axes.set_xlim(left=t[0], right=t[-1])
            axes.set_xlabel(r"Time in s")
            axes.set_ylabel(r"")
            axes.grid(True)
            axes.legend()
            axes.legend(loc=0)
            canvas = FigureCanvas(fig)
            plot_name = "_".join([data["regime name"], self.name, "V_V_dot"])
            return_list.append({"name": plot_name, "figure": canvas})
            # save file
            self.write_output_files(result_name="_".join([data["regime name"], "V_V_dot"]),
                                    figure=fig,
                                    output=pm.construct_result_dict(data, output={}))
        return return_list
    @staticmethod
    def calc_v_and_v_dot(data, val_list):
        """
        Computes the Lyapunov-like function V and its derivative V_dot from
        the simulation state and the model/controller parameters.

        :param data: result dict carrying "modules" with Model/Controller params
        :param val_list: [x0, x0_vel, phi1, phi1_vel, phi2, phi2_vel] arrays
        :return: [V, V_dot]
        """
        # NOTE(review): if "Controller" is present but the pendulum keys are
        # missing, neither branch assigns V/V_dot and the return raises
        # UnboundLocalError — confirm whether that configuration can occur.
        if "Controller" in data["modules"]:
            if "long pendulum" in data["modules"]["Controller"] and "short pendulum" in data["modules"]["Controller"]:
                # extract parameter to simplify the calculation of V and V_dot
                m0_star = data["modules"]["Model"]["m0*"]
                m1_star = data["modules"]["Model"]["m1*"]
                m2_star = data["modules"]["Model"]["m2*"]
                l1_star = data["modules"]["Model"]["l1*"]
                l2_star = data["modules"]["Model"]["l2*"]
                # d0 = data["modules"]["Model"]["d0"]
                d1 = data["modules"]["Model"]["d1"]
                d2 = data["modules"]["Model"]["d2"]
                J_DP1 = data["modules"]["Model"]["J_DP1"]
                J_DP2 = data["modules"]["Model"]["J_DP2"]
                g = data["modules"]["Model"]["g"]
                k = data["modules"]["Controller"]["k"]
                # calculate point mass model parameter
                l1 = J_DP1/(m1_star*l1_star)
                l2 = J_DP2/(m2_star*l2_star)
                m1 = (m1_star*l1_star)**2/J_DP1
                m2 = (m2_star*l2_star)**2/J_DP2
                m0 = m0_star + (m1_star - m1) + (m2_star - m2)
                w = m1*l1/(m2*l2)
                # extract state from val_list
                x0, x0_vel, phi1, phi1_vel, phi2, phi2_vel = val_list
                # NOTE(review): E0 uses x0**2 (position); a kinetic-energy term
                # would use x0_vel**2 — confirm against the controller paper.
                E0 = 0.5*m0*x0**2
                E1 = 0
                E2 = 0
                # "u"/"o" select the pendulum's downward/upward equilibrium
                # (sign of the potential-energy offset) — TODO confirm.
                if data["modules"]["Controller"]["long pendulum"] == "u":
                    E1 = 0.5*m1*l1**2*phi1_vel**2 + m1*g*l1*(np.cos(phi1) + 1)
                elif data["modules"]["Controller"]["long pendulum"] == "o":
                    E1 = 0.5*m1*l1**2*phi1_vel**2 + m1*g*l1*(np.cos(phi1) - 1)
                if data["modules"]["Controller"]["short pendulum"] == "u":
                    E2 = 0.5*m2*l2**2*phi2_vel**2 + m2*g*l2*(np.cos(phi2) + 1)
                elif data["modules"]["Controller"]["short pendulum"] == "o":
                    E2 = 0.5*m2*l2**2*phi2_vel**2 + m2*g*l2*(np.cos(phi2) - 1)
                G = m0*x0_vel*E0 + m1*l1*phi1_vel*np.cos(phi1)*E1 + m2*l2*phi2_vel*np.cos(phi2)*E2*w**2
                V = 0.5*E0**2 + 0.5*E1**2 + 0.5*E2**2
                V_dot = -k*G**2 - d1*phi1_vel**2*E1 - d2*phi2_vel**2*E2
        else:
            # No controller configured: report zero energy functions.
            V = np.zeros(len(val_list[0]))
            V_dot = np.zeros(len(val_list[0]))
        return [V, V_dot]
class SimulationVerification(pm.PostProcessingModule):
    """
    Verify the simulated long pendulum angle against the analytic
    solution of the linearised model.

    The pendulum equation
    ``phi_dd = g*sin(phi)/l1 - (d1*phi_d)/(m1*l1**2) + cos(phi)*u/l1``
    is linearised around the upper equilibrium ``phi_e = pi`` with
    ``u = 0`` and solved in closed form; simulated and analytic angle
    and angular velocity are plotted for comparison.
    """

    def __init__(self):
        pm.PostProcessingModule.__init__(self)
        return

    def run(self, data):
        """
        Create the verification plot for one simulation regime.

        :param data: result data set of a single regime
        :return: list of dicts with plot ``name`` and ``figure`` canvas
        """
        return_list = []
        t = data["results"]["time"]
        # solver state layout: [x0, x0_vel, phi1, phi1_vel, phi2, phi2_vel];
        # only the long pendulum columns are needed here
        phi1 = data["results"]["Solver"][:, 2]
        phi1_vel = data["results"]["Solver"][:, 3]
        # extract the model parameters of the long pendulum
        m1_star = data["modules"]["Model"]["m1*"]
        l1_star = data["modules"]["Model"]["l1*"]
        d1 = data["modules"]["Model"]["d1"]
        J_DP1 = data["modules"]["Model"]["J_DP1"]
        g = data["modules"]["Model"]["g"]
        # calculate point mass model parameters
        l1 = J_DP1/(m1_star*l1_star)
        m1 = (m1_star*l1_star)**2/J_DP1
        # linearisation point (phi_lin = phi - phi_e)
        phi_e = np.pi
        phi_vel_e = 0
        # characteristic equation lambda**2 + p*lambda + q = 0 (pq formula)
        p = d1/(m1*l1**2)
        q = g/l1
        # conjugate complex pole pair lambda = a +/- j*b
        # NOTE(review): abs() silently assumes the underdamped case
        # (p/2)**2 < q; the cos/sin ansatz below is not valid for an
        # overdamped pendulum -- confirm the parameter range.
        a = -p/2
        b = np.sqrt(abs((p/2)**2 - q))
        # initial values, transformed into the linear coordinates
        phi1_0_lin = phi1[0] - phi_e
        phi1_0_vel_lin = phi1_vel[0] - phi_vel_e
        # constants of phi_lin(t) = exp(a*t)*(C1*cos(b*t) + C2*sin(b*t)):
        # phi_lin(0) = C1 and phi_d_lin(0) = a*C1 + b*C2
        C1 = phi1_0_lin
        # fix: include the initial angular velocity; the previous
        # C2 = -a*phi1_0_lin/b is only correct for phi1_0_vel_lin == 0
        # (phi1_0_vel_lin was computed but never used)
        C2 = (phi1_0_vel_lin - a*phi1_0_lin)/b
        phi1_lin_analytic = np.exp(a*t)*(C1*np.cos(b*t) + C2*np.sin(b*t))
        phi1_vel_lin_analytic = np.exp(a*t)*(a*(C1*np.cos(b*t) + C2*np.sin(b*t)) + b*(-C1*np.sin(b*t) + C2*np.cos(b*t)))
        # re-transformation of phi with phi = phi_lin + phi_e
        phi1_analytic = phi1_lin_analytic + phi_e
        phi1_vel_analytic = phi1_vel_lin_analytic + phi_vel_e
        self._logger.debug("creating plot for verification")
        self._logger.debug("error of phi1 and phi1 analytic: {}".format(sum(abs(phi1-phi1_analytic))))
        self._logger.debug("Parameter of the analytic solution: a={}, b={}, C1={}, C2={}".format(a, b, C1, C2))
        fig = Figure()
        # upper subplot: angle, simulated vs analytic
        axes = fig.add_subplot(211)
        axes.plot(t, np.rad2deg(phi1),
                  c="k",
                  linestyle="-",
                  label=r"$\varphi_{1}$")
        axes.plot(t, np.rad2deg(phi1_analytic),
                  c="red",
                  linestyle="--",
                  linewidth=0.8,
                  label=r"$\varphi_{1,\mathrm{ana}}$")
        axes.set_xlim(left=t[0], right=t[-1])
        axes.set_xlabel(r"Time in s")
        axes.set_ylabel(r"Angle in $^{\circ}$")
        axes.grid()
        # single legend call (the previous duplicate axes.legend() was redundant)
        axes.legend(loc=0)
        # lower subplot: angular velocity, simulated vs analytic
        axes1 = fig.add_subplot(212)
        axes1.plot(t, np.rad2deg(phi1_vel),
                   c="k",
                   linestyle="-",
                   label=r"$\dot{\varphi}_{1}$")
        axes1.plot(t, np.rad2deg(phi1_vel_analytic),
                   c="red",
                   linestyle="--",
                   linewidth=0.8,
                   label=r"$\dot{\varphi}_{1,\mathrm{ana}}$")
        axes1.set_xlim(left=t[0], right=t[-1])
        axes1.set_xlabel(r"Time in s")
        axes1.set_ylabel(r"Angular velocity in $^\circ\hspace{-3pt}/\mathrm{s}$")
        axes1.grid()
        axes1.legend(loc=0)
        canvas = FigureCanvas(fig)
        plot_name = "_".join([data["regime name"], self.name, "phi1"])
        return_list.append({"name": plot_name, "figure": canvas})
        # save file
        self.write_output_files(result_name="_".join([data["regime name"], "phi1"]),
                                figure=fig,
                                output=pm.construct_result_dict(data, output={}))
        return return_list
# parameter of the oscillation differential equation
# D = d1*np.sqrt(g/l1)/(2*m1*l1**2)
# omega0 = np.sqrt(l1/g)

# register both post-processing modules with the processing-module
# registry (pm) so they can be selected by the post-processor
pm.register_processing_module(pm.PostProcessingModule, TwoPendulum)
pm.register_processing_module(pm.PostProcessingModule, SimulationVerification)
| |
import json
from twisted.trial.unittest import TestCase
from twisted.internet import reactor
from twisted.internet.defer import succeed, inlineCallbacks, returnValue
from twisted.internet.endpoints import serverFromString
from twisted.test.proto_helpers import StringTransport
from twisted.web.client import HTTPConnectionPool
from twisted.web.http_headers import Headers
from twisted.internet.task import Clock
from heatherrd.relay import Relay, RelaySite, RelayProtocol, RelayFactory
import treq
from mock import patch, call, Mock
class RelayTest(TestCase):
    """Tests for the heatherrd ``Relay`` HTTP API and websocket protocol."""

    def setUp(self):
        # fresh, non-persistent connection pool per test, cleaned up afterwards
        self.pool = HTTPConnectionPool(reactor, persistent=False)
        self.addCleanup(self.pool.closeCachedConnections)

    @inlineCallbacks
    def mk_relay(self, url='http://www.example.org'):
        """Listen with a Relay on a random local TCP port.

        Fires with ``(base_url, relay)``.
        """
        endpoint = serverFromString(reactor, 'tcp:0')
        relay = Relay(url)
        site = relay.app.resource()
        port = yield endpoint.listen(RelaySite(site))
        url = 'http://127.0.0.1:%s' % (port.getHost().port,)
        self.addCleanup(port.loseConnection)
        returnValue((url, relay))

    def test_auth(self):
        # credentials embedded in the URL are stripped and exposed as .auth
        r = Relay('http://username:password@example.org')
        self.assertEqual(r.url, 'http://example.org')
        self.assertEqual(r.auth, ('username', 'password'))

    def test_no_auth(self):
        r = Relay('http://example.org')
        self.assertEqual(r.url, 'http://example.org')
        self.assertEqual(r.auth, None)

    @inlineCallbacks
    def test_connect_patched(self):
        # POST /connect answers with the login payload of the protocol
        url, r = yield self.mk_relay()
        r.get_protocol = Mock()
        r.get_protocol.return_value = succeed(RelayProtocol({
            'self': {
                'id': 'the-user-id',
                'name': 'thebotuser',
            }
        }))
        response = yield treq.post(
            '%s/connect' % (url,),
            auth=('bot-id', 'bot-token'),
            pool=self.pool)
        data = yield response.json()
        self.assertEqual(data, {
            'self': {
                'id': 'the-user-id',
                'name': 'thebotuser',
            },
        })
        r.get_protocol.assert_called_with(
            bot_id='bot-id', bot_token='bot-token')

    @inlineCallbacks
    def test_disconnect(self):
        # POST /disconnect drops the transport of the cached protocol
        url, r = yield self.mk_relay()
        protocol = RelayProtocol({
            'self': {
                'id': 'the-user-id',
                'name': 'thebotuser',
            }
        })
        protocol.transport = StringTransport()
        protocol.transport.loseConnection = Mock()
        protocol.transport.loseConnection.return_value = None
        r.connections['bot-id'] = protocol
        yield treq.post(
            '%s/disconnect' % (url,),
            auth=('bot-id', 'bot-token'),
            pool=self.pool)
        protocol.transport.loseConnection.assert_called_with()

    @inlineCallbacks
    def test_get_protocol(self):
        _, r = yield self.mk_relay()
        mock_proto = RelayProtocol({
            'self': {
                'id': 'the-user-id',
                'name': 'thebotuser',
            },
        })
        r.rtm_start = Mock()
        r.rtm_start.return_value = succeed(mock_proto)
        protocol = yield r.get_protocol('bot-id', 'bot-token')
        self.assertEqual(protocol, mock_proto)
        # a freshly started protocol gets cached under the bot id
        self.assertEqual(r.connections, {
            'bot-id': mock_proto
        })
        r.rtm_start.assert_called_with('bot-token')

    @inlineCallbacks
    def test_get_protocol_cached(self):
        _, r = yield self.mk_relay()
        r.connections['bot-id'] = 'Cached Protocol Value'
        protocol = yield r.get_protocol('bot-id', 'bot-token')
        self.assertEqual(protocol, 'Cached Protocol Value')

    @inlineCallbacks
    def test_remove_protocol(self):
        _, r = yield self.mk_relay()
        r.connections['foo'] = 'Cached Protocol Value'
        protocol = yield r.remove_protocol('foo')
        self.assertEqual(protocol, 'Cached Protocol Value')
        self.assertEqual(r.connections, {})

    @patch.object(treq, 'post')
    @patch.object(Relay, 'connect_ws')
    @inlineCallbacks
    def test_rtm_start(self, mock_connect_ws, mock_post):
        mock_response = Mock()
        mock_response.json = lambda: succeed({'foo': 'bar'})
        mock_post.return_value = succeed(mock_response)
        mock_connect_ws.return_value = succeed('dummy return value')
        _, r = yield self.mk_relay()
        resp = yield r.rtm_start('token')
        self.assertEqual(resp, 'dummy return value')
        # the JSON body of rtm.start is handed to connect_ws unchanged
        mock_connect_ws.assert_called_with({'foo': 'bar'})

    @patch.object(treq, 'post')
    @inlineCallbacks
    def test_relay(self, mock_post):
        mock_response = Mock()
        # (removed a stray no-op "mock_response.headers" expression here)
        mock_response.headers = Headers({
            'Content-Type': ['application/json']
        })
        mock_response.json = Mock()
        mock_response.json.return_value = succeed([])
        mock_post.return_value = succeed(mock_response)
        _, r = yield self.mk_relay('http://username:password@example.com/foo')
        r.relay('user-id', 'bot-name', {'foo': 'bar'})
        mock_post.assert_called_with(
            'http://example.com/foo',
            auth=('username', 'password'),
            data='{"foo": "bar"}',
            headers={
                'Content-Type': 'application/json',
                'X-Bot-User-Id': 'user-id',
                'X-Bot-User-Name': 'bot-name',
            },
            timeout=2,
            pool=r.pool)

    @patch.object(treq, 'post')
    @inlineCallbacks
    def test_relay_with_inline_response(self, mock_post):
        mock_response = Mock()
        # (removed a stray no-op "mock_response.headers" expression here)
        mock_response.headers = Headers({
            'Content-Type': ['application/json']
        })
        mock_response.json = Mock()
        mock_response.json.return_value = succeed([{
            'text': 'the-outbound-reply'
        }])
        mock_post.return_value = succeed(mock_response)
        _, r = yield self.mk_relay('http://username:password@example.com/foo')
        mock_protocol = Mock()
        mock_protocol.send_message = Mock()
        mock_protocol.send_message.return_value = None
        r.connections['user-id'] = mock_protocol
        yield r.relay('user-id', 'user-name', {'foo': 'bar'})
        # a JSON reply from the relayed-to service is pushed back over RTM
        mock_protocol.send_message.assert_called_with({
            'text': 'the-outbound-reply'
        })

    @inlineCallbacks
    def test_send_rtm(self):
        mock_protocol = Mock()
        mock_protocol.send_message = Mock()
        mock_protocol.send_message.return_value = None
        url, r = yield self.mk_relay()
        r.get_protocol = Mock()
        r.get_protocol.return_value = succeed(mock_protocol)
        yield treq.post(
            '%s/rtm' % (url,),
            data=json.dumps({'foo': 'bar'}),
            auth=('bot-id', 'bot-token'),
            pool=self.pool)
        mock_protocol.send_message.assert_called_with({
            'foo': 'bar'
        })
        r.get_protocol.assert_called_with(bot_id='bot-id',
                                          bot_token='bot-token')

    def test_protocol_relay(self):
        relay = Mock()
        relay.relay = Mock()
        protocol = RelayProtocol({
            'self': {
                'id': 'the-user-id',
                'name': 'thebotuser',
            }
        })
        protocol.relay = relay
        protocol.bot_user_id = 'the-user-id'
        # inbound websocket frames are decoded and relayed on
        protocol.onMessage('{"foo": "bar"}', False)
        relay.relay.assert_called_with(
            'the-user-id', 'thebotuser', {"foo": "bar"})

    @inlineCallbacks
    def test_protocol_close(self):
        _, r = yield self.mk_relay()
        protocol = RelayProtocol({
            'self': {
                'id': 'the-user-id',
                'name': 'thebotuser',
            },
        })
        protocol.factory = Mock()
        protocol.bot_user_id = 'bot-user-id'
        protocol.relay = r
        r.set_protocol('bot-user-id', protocol)
        # closing the websocket removes the cached protocol from the relay
        protocol.onClose(True, None, None)
        self.assertEqual(r.connections, {})

    def test_ping(self):
        protocol = RelayProtocol({
            'self': {
                'id': 'the-user-id',
                'name': 'thebotuser',
            },
        })
        protocol.clock = Clock()
        protocol.factory = Mock()
        protocol.sendMessage = Mock()
        protocol.onOpen()
        # advancing the fake clock by 3s yields at least two pings --
        # presumably a ~1s ping interval; verify against RelayProtocol
        protocol.clock.advance(3)
        protocol.sendMessage.assert_has_calls([
            call('{"type": "ping"}'),
            call('{"type": "ping"}')])

    def test_factory(self):
        factory = RelayFactory('dummy relay', {
            'url': 'wss://foo/',
            'self': {
                'id': 'bot-user-id',
                'name': 'thebotuser',
            }
        })
        protocol = factory.buildProtocol('addr')
        self.assertEqual(protocol.factory, factory)
        self.assertEqual(protocol.bot_user_id, 'bot-user-id')
| |
#!/usr/bin/env python
# pylint: disable=missing-docstring,no-self-use
import json
import pytest
from six.moves.urllib.parse import quote, urlencode
from oic.oauth2 import Grant
from oic.oauth2.exception import GrantError, MissingEndpoint, ResponseError
from oic.oauth2.message import AccessTokenResponse, AuthorizationResponse, \
AuthorizationErrorResponse, AuthorizationRequest, GrantExpired, \
ErrorResponse, AccessTokenRequest, RefreshAccessTokenRequest, \
MissingRequiredAttribute, FormatError
from oic.utils import time_util
from oic.oauth2 import Client
from oic.oauth2 import Server
from oic.oauth2 import Token
from oic.utils.keyio import KeyBundle
from utils_for_tests import _eq, url_compare # pylint: disable=import-error
__author__ = 'rohe0002'
# Shared fixture: a pre-built access token response reused by several
# parse_response tests below.
ACC_TOK_RESP = AccessTokenResponse(access_token="2YotnFZFEjr1zCsicMWpAA",
                                   token_type="example",
                                   refresh_token="tGzv3JOkF0XG5Qx2TlKWIA",
                                   scope=["inner", "outer"])
class TestClient(object):
    """Tests for the plain OAuth2 ``Client``."""

    @pytest.fixture(autouse=True)
    def create_client(self):
        # fresh client ("client_id" = "1") with one registered redirect URI
        # for every test
        self.redirect_uri = "https://example.com/redirect"
        self.authorization_endpoint = "https://example.com/authz"
        self.client = Client(
            "1")  # pylint: disable=attribute-defined-outside-init
        self.client.redirect_uris = [self.redirect_uri]
        self.client.response_type = "code"
        self.client.authorization_endpoint = self.authorization_endpoint

    def test_construct_authz_req_no_optional_params(self):
        areq = self.client.construct_AuthorizationRequest(
            request_args={"response_type": ["code"]})
        assert areq["redirect_uri"] == self.redirect_uri
        assert areq["response_type"] == ["code"]
        assert areq["client_id"] == "1"
        assert "state" not in areq
        assert "scope" not in areq

    def test_construct_authz_req_no_input(self):
        # without request_args the client falls back to its own attributes
        self.client.response_type = ["code"]
        atr = self.client.construct_AuthorizationRequest()
        assert atr["redirect_uri"] == self.redirect_uri
        assert atr["response_type"] == ["code"]
        assert atr["client_id"] == "1"

    def test_construct_authz_req_optional_params(self):
        req_args = {"response_type": ["code"], "scope": ["foo", "bar"],
                    "state": "abc"}
        areq = self.client.construct_AuthorizationRequest(request_args=req_args)
        assert areq["redirect_uri"] == self.redirect_uri
        assert areq["response_type"] == ["code"]
        assert areq["client_id"] == "1"
        assert areq["state"] == "abc"
        assert areq["scope"] == ["foo", "bar"]

    def test_construct_authz_req_replace_default_state(self):
        req_args = {"response_type": ["code"], "scope": ["foo", "bar"],
                    "state": "efg"}
        areq = self.client.construct_AuthorizationRequest(request_args=req_args)
        assert areq["redirect_uri"] == self.redirect_uri
        assert areq["response_type"] == ["code"]
        assert areq["client_id"] == "1"
        assert areq["state"] == "efg"
        assert areq["scope"] == ["foo", "bar"]

    def test_parse_authz_resp_url(self):
        # parsing a response also stores a Grant keyed by the state value
        code = "SplxlOBeZQQYbYS6WxSbIA"
        state = "ghi"
        url = "{}?code={}&state={}".format(self.redirect_uri, code, state)
        aresp = self.client.parse_response(AuthorizationResponse,
                                           info=url, sformat="urlencoded")
        assert aresp["code"] == code
        assert aresp["state"] == state
        assert self.client.grant[state].code == aresp["code"]
        assert self.client.grant[state].grant_expiration_time

    def test_parse_authz_resp_query(self):
        query = "code=SplxlOBeZQQYbYS6WxSbIA&state=hij"
        aresp = self.client.parse_response(AuthorizationResponse,
                                           info=query, sformat="urlencoded")
        assert aresp["code"] == "SplxlOBeZQQYbYS6WxSbIA"
        assert aresp["state"] == "hij"
        assert self.client.grant["hij"]
        assert self.client.grant["hij"].code == aresp["code"]
        assert self.client.grant["hij"].grant_expiration_time

    def test_parse_authz_resp_query_multi_scope(self):
        # one grant per state value is kept side by side
        code = "SplxlOBeZQQYbYS6WxSbIA"
        states = ["ghi", "hij", "klm"]
        for state in states:
            self.client.parse_response(AuthorizationResponse,
                                       info="code={}&state={}".format(code,
                                                                      state),
                                       sformat="urlencoded")
        for state in states:
            assert self.client.grant[state].code == code
        assert _eq(self.client.grant.keys(), states)

    def test_parse_authz_resp_query_unknown_parameter(self):
        query = "code=SplxlOBeZQQYbYS6WxSbIA&state=xyz&foo=bar"
        aresp = self.client.parse_response(AuthorizationResponse,
                                           info=query, sformat="urlencoded")
        assert aresp["code"] == "SplxlOBeZQQYbYS6WxSbIA"
        assert aresp["state"] == "xyz"
        # assert "foo" not in aresp  # TODO unknown parameter not discarded
        assert self.client.grant["xyz"]
        assert self.client.grant["xyz"].code == aresp["code"]
        assert self.client.grant["xyz"].grant_expiration_time

    def test_construct_access_token_req(self):
        grant = Grant()
        grant.code = "AbCdEf"
        grant.grant_expiration_time = time_util.utc_time_sans_frac() + 30
        self.client.grant = {"stat": grant}
        # scope is default=""
        atr = self.client.construct_AccessTokenRequest(state="stat")
        assert atr["grant_type"] == "authorization_code"
        assert atr["code"] == "AbCdEf"
        assert atr["redirect_uri"] == self.redirect_uri

    def test_construct_access_token_request_fail(self):
        # unknown state -> no grant to build the request from
        with pytest.raises(GrantError):
            self.client.construct_AccessTokenRequest(state="unknown")

    def test_construct_access_token_req_override(self):
        grant = Grant()
        grant.code = "AbCdEf"
        grant.grant_expiration_time = time_util.utc_time_sans_frac() + 30
        self.client.grant = {"xyz": grant}
        atr = self.client.construct_AccessTokenRequest(state="xyz")
        assert atr["grant_type"] == "authorization_code"
        assert atr["code"] == "AbCdEf"
        assert atr["redirect_uri"] == self.redirect_uri

    def test_parse_access_token_resp(self):
        atr = AccessTokenResponse(access_token="2YotnFZFEjr1zCsicMWpAA",
                                  token_type="example", expires_in=3600,
                                  refresh_token="tGzv3JOkF0XG5Qx2TlKWIA",
                                  example_parameter="example_value")
        self.client.parse_response(AccessTokenResponse,
                                   info=json.dumps(atr.to_dict()))
        # without a state the token ends up in the grant keyed by ""
        _grant = self.client.grant[""]
        assert len(_grant.tokens) == 1
        token = _grant.tokens[0]
        assert token.access_token == "2YotnFZFEjr1zCsicMWpAA"
        assert token.token_type == "example"
        assert token.expires_in == 3600
        assert token.refresh_token == "tGzv3JOkF0XG5Qx2TlKWIA"
        # I'm dropping parameters I don't recognize
        # with pytest.raises(AttributeError):  # TODO not satisfied
        #     nothing = token.example_parameter

    def test_get_access_token_refresh_with_refresh_token(self):
        self.client.grant["foo"] = Grant()
        _get = time_util.utc_time_sans_frac() + 60
        self.client.grant["foo"].grant_expiration_time = _get
        self.client.grant["foo"].code = "access_code"
        resp = AccessTokenResponse(refresh_token="refresh_with_me",
                                   access_token="access")
        token = Token(resp)
        self.client.grant["foo"].tokens.append(token)
        # Uses refresh_token from previous response
        atr = self.client.construct_RefreshAccessTokenRequest(token=token)
        assert atr["grant_type"] == "refresh_token"
        assert atr["refresh_token"] == "refresh_with_me"

    def test_get_access_token_refresh_from_state(self):
        self.client.grant["foo"] = Grant()
        _get = time_util.utc_time_sans_frac() + 60
        self.client.grant["foo"].grant_expiration_time = _get
        self.client.grant["foo"].code = "access_code"
        resp = AccessTokenResponse(refresh_token="refresh_with_me",
                                   access_token="access")
        self.client.grant["foo"].tokens.append(Token(resp))
        # Uses refresh_token from previous response
        atr = self.client.construct_RefreshAccessTokenRequest(state="foo")
        assert isinstance(atr, RefreshAccessTokenRequest)
        assert atr["grant_type"] == "refresh_token"
        assert atr["refresh_token"] == "refresh_with_me"

    def test_parse_authz_err_resp(self):
        # an error redirect is parsed into an AuthorizationErrorResponse
        error = "access_denied"
        state = "xyz"
        ruri = "{}?error={}&state={}".format(self.redirect_uri, error, state)
        resp = self.client.parse_response(AuthorizationResponse,
                                          info=ruri, sformat="urlencoded")
        assert isinstance(resp, AuthorizationErrorResponse)
        assert resp["error"] == error
        assert resp["state"] == state

    def test_return_non_existant_grant(self):
        assert self.client.grant_from_state("123456abcdef") is None

    def test_get_grant(self):
        resp = AuthorizationResponse(code="code", state="state")
        grant = Grant()
        grant.add_code(resp)
        self.client.grant["state"] = grant
        assert self.client.grant_from_state("state").code == "code"

    def test_construct_access_token_req_with_extra_args(self):
        query = "code=SplxlOBeZQQYbYS6WxSbIA&state=abc"
        self.client.parse_response(AuthorizationResponse,
                                   info=query, sformat="urlencoded")
        req = self.client.construct_AccessTokenRequest(state="abc",
                                                       extra_args={
                                                           "foo": "bar"})
        assert _eq(req.keys(), ["code", "grant_type", "client_id",
                                "redirect_uri", "foo"])
        assert req["foo"] == "bar"

    def test_construct_TokenRevocationRequest(self):
        self.client.grant["foo"] = Grant()
        _get = time_util.utc_time_sans_frac() + 60
        self.client.grant["foo"].grant_expiration_time = _get
        self.client.grant["foo"].code = "access_code"
        resp = AccessTokenResponse(refresh_token="refresh_with_me",
                                   access_token="access")
        token = Token(resp)
        self.client.grant["foo"].tokens.append(token)
        state = "foo"
        query = "code=SplxlOBeZQQYbYS6WxSbIA&state={}".format(state)
        self.client.parse_response(AuthorizationResponse,
                                   info=query, sformat="urlencoded")
        req = self.client.construct_TokenRevocationRequest(state=state)
        assert _eq(req.keys(), ['token'])
        assert req["token"] == "access"

    def test_request_info_simple(self):
        req_args = {"state": "hmm", "response_type": "code"}
        uri, body, h_args, cis = self.client.request_info(AuthorizationRequest,
                                                          request_args=req_args)
        # default == "POST"
        assert uri == self.authorization_endpoint
        # parameter order in the urlencoded body is not guaranteed,
        # so compare the split-up sets
        body_elts = body.split('&')
        expected_body = "state=hmm&redirect_uri={}&response_type=code&client_id=1".format(
            quote(self.redirect_uri, safe=""))
        expected_body_elts = expected_body.split('&')
        assert set(body_elts) == set(expected_body_elts)
        assert h_args == {
            'headers': {'Content-Type': 'application/x-www-form-urlencoded'}}
        assert isinstance(cis, AuthorizationRequest)

    def test_request_info_simple_get(self):
        uri, body, h_args, cis = self.client.request_info(AuthorizationRequest,
                                                          method="GET")
        assert url_compare(uri,
                           '{}?redirect_uri={}&response_type=code&client_id=1'.format(
                               self.authorization_endpoint,
                               quote(self.redirect_uri, safe="")))
        assert body is None
        assert h_args == {}

    def test_request_info_simple_get_with_req_args(self):
        uri, body, h_args, cis = self.client.request_info(
            AuthorizationRequest, method="GET", request_args={"state": "init"})
        assert url_compare(uri,
                           '{}?state=init&redirect_uri={}&response_type=code&client_id=1'.format(
                               self.authorization_endpoint,
                               quote(self.redirect_uri, safe="")))
        assert body is None
        assert h_args == {}
        assert isinstance(cis, AuthorizationRequest)

    def test_request_info_simple_get_with_extra_args(self):
        uri, body, h_args, cis = self.client.request_info(
            AuthorizationRequest, method="GET", extra_args={"rock": "little"})
        assert url_compare(uri,
                           '{}?redirect_uri={}&response_type=code&client_id=1&rock=little'.format(
                               self.authorization_endpoint,
                               quote(self.redirect_uri, safe="")))
        assert body is None
        assert h_args == {}
        assert isinstance(cis, AuthorizationRequest)

    def test_request_info_with_req_and_extra_args(self):
        uri, body, h_args, cis = self.client.request_info(
            AuthorizationRequest,
            method="GET",
            request_args={"state": "init"},
            extra_args={"rock": "little"})
        expected = '{}?state=init&redirect_uri={}&response_type=code&client_id=1&rock=little'
        assert url_compare(uri, expected.format(self.authorization_endpoint,
                                                quote(self.redirect_uri,
                                                      safe="")))
        assert body is None
        assert h_args == {}
        assert isinstance(cis, AuthorizationRequest)

    def test_construct_access_token_req_expired_grant(self):
        resp = AuthorizationResponse(code="code", state="state")
        grant = Grant(-10)  # expired grant
        grant.add_code(resp)
        client = Client()
        client.grant["openid"] = grant
        with pytest.raises(GrantExpired):
            client.construct_AccessTokenRequest(state="openid")

    def test_parse_access_token_resp_json(self):
        atr = self.client.parse_response(AccessTokenResponse,
                                         info=ACC_TOK_RESP.to_json())
        assert _eq(atr.keys(),
                   ['token_type', 'scope', 'access_token', 'refresh_token'])

    def test_parse_access_token_resp_urlencoded(self):
        uatr = self.client.parse_response(AccessTokenResponse,
                                          info=ACC_TOK_RESP.to_urlencoded(),
                                          sformat="urlencoded")
        assert _eq(uatr.keys(),
                   ['token_type', 'scope', 'access_token', 'refresh_token'])

    def test_parse_access_token_resp_url(self):
        url = "{}?{}".format("https://example.com/token",
                             ACC_TOK_RESP.to_urlencoded())
        uatr = self.client.parse_response(AccessTokenResponse, info=url,
                                          sformat="urlencoded")
        assert _eq(uatr.keys(),
                   ['token_type', 'scope', 'access_token', 'refresh_token'])

    def test_parse_error_resp(self):
        err = ErrorResponse(error="invalid_request",
                            error_description="Something was missing",
                            error_uri="http://example.com/error_message.html")
        jerr = err.to_json()
        uerr = err.to_urlencoded()
        # an error response parses cleanly in its matching format ...
        _ = self.client.parse_response(AccessTokenResponse, info=jerr)
        _ = self.client.parse_response(AccessTokenResponse, info=uerr,
                                       sformat="urlencoded")
        # ... but mismatched or unknown formats raise
        with pytest.raises(ResponseError):
            self.client.parse_response(AccessTokenResponse, info=jerr, sformat="urlencoded")
        with pytest.raises(ValueError):
            self.client.parse_response(AccessTokenResponse, info=uerr)
        with pytest.raises(FormatError):
            self.client.parse_response(AccessTokenResponse, info=jerr, sformat="focus")

    def test_parse_access_token_resp_missing_attribute(self):
        atresp = AccessTokenResponse(access_token="SlAV32hkKG",
                                     token_type="Bearer",
                                     refresh_token="8xLOxBtZp8",
                                     expire_in=3600)
        atdict = atresp.to_dict()
        del atdict["access_token"]  # remove required access_token
        atj = json.dumps(atdict)
        with pytest.raises(MissingRequiredAttribute):
            self.client.parse_response(AccessTokenResponse, info=atj)
        with pytest.raises(MissingRequiredAttribute):
            self.client.parse_response(AccessTokenResponse,
                                       info=urlencode(atdict),
                                       sformat='urlencoded')

    def test_client_parse_args(self):
        args = {
            "response_type": "",
            "client_id": "client_id",
            "redirect_uri": "http://example.com/authz",
            "scope": "scope",
            "state": "state",
        }
        ar_args = self.client._parse_args(AuthorizationRequest, **args)
        assert _eq(ar_args.keys(), ['scope', 'state', 'redirect_uri',
                                    'response_type', 'client_id'])

    def test_client_parse_extra_args(self):
        args = {
            "response_type": "",
            "client_id": "client_id",
            "redirect_uri": "http://example.com/authz",
            "scope": "scope",
            "state": "state",
            "extra_session": "home"
        }
        ar_args = self.client._parse_args(AuthorizationRequest, **args)
        assert _eq(ar_args.keys(), ['state', 'redirect_uri', 'response_type',
                                    'client_id', 'scope', 'extra_session'])

    def test_client_endpoint(self):
        self.client.authorization_endpoint = "https://example.org/oauth2/as"
        self.client.token_endpoint = "https://example.org/oauth2/token"
        self.client.token_revocation_endpoint = "https://example.org/oauth2/token_rev"
        assert self.client._endpoint(
            "authorization_endpoint") == "https://example.org/oauth2/as"
        assert self.client._endpoint(
            "token_endpoint") == "https://example.org/oauth2/token"
        assert self.client._endpoint(
            "token_revocation_endpoint") == "https://example.org/oauth2/token_rev"
        # keyword arguments take precedence over the client attributes
        auth_endpoint = self.client._endpoint("authorization_endpoint", **{
            "authorization_endpoint": "https://example.com/as"})
        assert auth_endpoint == "https://example.com/as"
        self.client.token_endpoint = ""
        # empty and unknown endpoints both raise MissingEndpoint
        # (the second call is unreachable; kept as in the original)
        with pytest.raises(MissingEndpoint):
            self.client._endpoint("token_endpoint")
            self.client._endpoint("foo_endpoint")
class TestServer(object):
    """Tests for the OAuth2 ``Server`` request-parsing helpers."""

    @pytest.fixture(autouse=True)
    def create_server(self):
        # fresh server instance for every test
        self.srv = Server()  # pylint: disable=attribute-defined-outside-init

    def test_parse_authz_req(self):
        ar = AuthorizationRequest(response_type=["code"],
                                  client_id="foobar",
                                  redirect_uri="http://foobar.example.com/oaclient",
                                  state="cold")
        # the same request must parse identically from a bare query string
        # and from a full URL
        uencq = ar.to_urlencoded()
        areq = self.srv.parse_authorization_request(query=uencq)
        assert isinstance(areq, AuthorizationRequest)
        assert areq["response_type"] == ["code"]
        assert areq["client_id"] == "foobar"
        assert areq["redirect_uri"] == "http://foobar.example.com/oaclient"
        assert areq["state"] == "cold"
        urluenc = "%s?%s" % ("https://example.com/authz", uencq)
        areq = self.srv.parse_authorization_request(url=urluenc)
        assert isinstance(areq, AuthorizationRequest)
        assert areq["response_type"] == ["code"]
        assert areq["client_id"] == "foobar"
        assert areq["redirect_uri"] == "http://foobar.example.com/oaclient"
        assert areq["state"] == "cold"

    def test_parse_jwt_request(self):
        ar = AuthorizationRequest(response_type=["code"],
                                  client_id="foobar",
                                  redirect_uri="http://foobar.example.com/oaclient",
                                  state="cold")
        # symmetric keys both for the client ("foobar") and the default owner
        self.srv.keyjar["foobar"] = KeyBundle([
            {"kty": "oct", "key": "A1B2C3D4".encode("utf-8"), "use": "ver"},
            {"kty": "oct", "key": "A1B2C3D4".encode("utf-8"), "use": "sig"}])
        self.srv.keyjar[""] = KeyBundle([
            {"kty": "oct", "key": "A1B2C3D4".encode("utf-8"), "use": "ver"},
            {"kty": "oct", "key": "A1B2C3D4".encode("utf-8"), "use": "sig"}])
        keys = self.srv.keyjar.get_signing_key(owner="foobar")
        _jwt = ar.to_jwt(key=keys, algorithm="HS256")
        req = self.srv.parse_jwt_request(txt=_jwt)
        assert isinstance(req, AuthorizationRequest)
        assert req["response_type"] == ["code"]
        assert req["client_id"] == "foobar"
        assert req["redirect_uri"] == "http://foobar.example.com/oaclient"
        assert req["state"] == "cold"

    def test_server_parse_token_request(self):
        atr = AccessTokenRequest(
            grant_type="authorization_code", code="SplxlOBeZQQYbYS6WxSbIA",
            redirect_uri="https://client.example.com/cb", extra="foo")
        uenc = atr.to_urlencoded()
        tr = self.srv.parse_token_request(body=uenc)
        assert isinstance(tr, AccessTokenRequest)
        assert _eq(tr.keys(), ['code', 'redirect_uri', 'grant_type', 'extra'])
        assert tr["grant_type"] == "authorization_code"
        assert tr["code"] == "SplxlOBeZQQYbYS6WxSbIA"
        # parsing the same body again must be stable
        tr = self.srv.parse_token_request(body=uenc)
        assert isinstance(tr, AccessTokenRequest)
        assert _eq(tr.keys(), ['code', 'grant_type', 'redirect_uri', 'extra'])
        assert tr["extra"] == "foo"

    def test_server_parse_refresh_token_request(self):
        ratr = RefreshAccessTokenRequest(refresh_token="ababababab",
                                         client_id="Client_id")
        uenc = ratr.to_urlencoded()
        tr = self.srv.parse_refresh_token_request(body=uenc)
        assert isinstance(tr, RefreshAccessTokenRequest)
        assert tr["refresh_token"] == "ababababab"
        assert tr["client_id"] == "Client_id"
| |
#! /usr/bin/pythonw
#
# This file is part of DBGraffle released under the MIT license.
# See the LICENSE for more information.
#
# Written by Paul Davis
# Today is August 11, 2005
# DBGraffle for OmniGraffle 4
# An extension of DBGraffle which is outdated
# after a most impressive 2 day release.
# Requires:
#
# OmniGraffle 4
# http://www.omnigroup.com/applications/omnigraffle/
#
# Python
# http://www.python.org
#
# AppScript
# http://freespace.virgin.net/hamish.sanderson/appscript.html
#
# PyGreSQL
# http://www.pygresql.org
#
import sys
import re
from appscript import *
from pg import DB
####################################################
# Some default settings for OmniGraffle Graphics #
####################################################
# NOTE(review): the name `k` comes from appscript's star import
# (AppleScript keyword proxy) -- these dicts map OmniGraffle graphic
# properties to their default values.
# Common to title and all types of columns.
common_props = {}
common_props[ k.shadow_vector ] = [ 7.0, 7.0 ]
common_props[ k.shadow_fuzziness ] = 17.45
common_props[ k.autosizing ] = k.full
common_props[ k.text_placement ] = k.top
common_props[ k.draws_stroke ] = False
common_props[ k.fill ] = k.linear_fill
common_props[ k.fill_color ] = [ 1, 1, 1 ]
common_props[ k.gradient_center ] = [ 0.5, 0 ]
common_props[ k.magnets ] = [ [ 1, 0 ], [ -1, 0 ] ]
#common_props[ k.size ] = [ 90, 14 ]
#Table Name: blue gradient
table_name = common_props.copy()
table_name[ k.gradient_color ] = [ 0, 0, 1 ]
#Primary Keys: red gradient
column_pkey = common_props.copy()
column_pkey[ k.gradient_color ] = [ 1, 0, 0 ]
#Foreign Keys: green gradient
column_fkey = common_props.copy()
column_fkey[ k.gradient_color ] = [ 0, 1, 0 ]
#No Key: white gradient
column_norm = common_props.copy()
column_norm[ k.gradient_color ] = [ 1, 1, 1 ]
#Line Properties (connections between tables)
line_props = {}
line_props[ k.line_type ] = k.orthogonal
line_props[ k.head_type ] = "FilledArrow"
line_props[ k.jump ] = True
###########################################
# The query used to gather schema data. #
###########################################
# information_schema query: one row per column, joined with key usage and
# constraints so primary/foreign keys and their referenced columns come
# back in the same result set. 'SCHEMA_NAME' is substituted later by
# parseArguments().
query = """
select c.table_name,
c.column_name,
c.data_type,
c.is_nullable,
tc.constraint_type,
ccu.table_name as referenced_table_name,
ccu.column_name as referenced_column_name
from information_schema.columns as c
left join
information_schema.key_column_usage as kcu
using (table_catalog, table_schema, table_name, column_name)
left join
information_schema.table_constraints as tc
on (
tc.constraint_name = kcu.constraint_name
and tc.table_schema = kcu.table_schema
and tc.table_name = kcu.table_name
)
left join
information_schema.constraint_column_usage as ccu
on (
ccu.constraint_name = kcu.constraint_name
and ccu.constraint_schema = kcu.constraint_schema
and ccu.constraint_catalog = kcu.constraint_catalog
and exists
(
select 'x'
from information_schema.referential_constraints as rc
where rc.constraint_name = kcu.constraint_name
)
)
where c.table_schema = 'SCHEMA_NAME'
order by
c.table_name,
c.ordinal_position
"""
#########################
# Method definitions. #
#########################
#Get the command line arguments
def parseArguments(argv, options):
    """
    Fill *options* with defaults, then override any entry that appears on
    the command line in ``key=value`` form.  Finally build the schema
    query by substituting the chosen schema into the SQL template.
    """
    defaults = {
        'graffle': 'OmniGraffle Professional',
        'dbhost': 'localhost',
        'dbport': 5432,
        'dbuser': '',
        'dbname': '',
        'schema': 'public',
    }
    options.update(defaults)
    for arg in argv:
        for key in defaults:
            matcher = re.compile(key + '=')
            if matcher.match(arg):
                options[key] = matcher.sub('', arg)
    options['query'] = re.compile('SCHEMA_NAME').sub(options['schema'], query)
#Get the information we need to draw from the database
def getSchemaInfo(options, sql_tables, sql_references):
    """
    Connect to the database and collect column/constraint information.

    Fills ``sql_tables`` with
    ``{table: [[column, data_type, is_nullable, constraint_type], ...]}``
    and appends ``[from_table, from_column, to_table, to_column]`` to
    ``sql_references`` for every FOREIGN KEY column.
    """
    conn = DB(options['dbname'], options['dbhost'], int(options['dbport']),
              user=options['dbuser'])
    for row in conn.query(options['query']).dictresult():
        ftbl = row['table_name']
        fcol = row['column_name']
        col_type = row['data_type']          # was `type`: avoid shadowing the builtin
        nullable = row['is_nullable']
        keytype = row['constraint_type']
        # dict.has_key() is deprecated; setdefault covers the same case.
        sql_tables.setdefault(ftbl, []).append(
            [fcol, col_type, nullable, keytype])
        if keytype == 'FOREIGN KEY':
            sql_references.append([ftbl, fcol,
                                   row['referenced_table_name'],
                                   row['referenced_column_name']])
#Create a table in OmniGraffle from database info
def createOGTableFromSQLTable(graffle, name, sql_table, og_tables):
    """
    Draw one database table in OmniGraffle: a title shape followed by one
    shape per column, then assemble the shapes into an OmniGraffle table
    (stored in ``og_tables[name]``) and nudge it away from the origin.

    Columns are colored by key type via the module-level property dicts.
    """
    shapes = []
    graphics = graffle.windows[1].document.canvases[1].graphics
    # Title shape.
    graphics.end.make(new=k.shape, with_properties=table_name)
    title = graphics.last.get()
    title.text.set(name)
    shapes.append(title)
    # One shape per column, styled by its constraint type
    # (column layout: [name, data_type, is_nullable, constraint_type]).
    for column in sql_table:
        if column[3] == 'PRIMARY KEY':
            use_props = column_pkey
        elif column[3] == 'FOREIGN KEY':
            use_props = column_fkey
        else:
            use_props = column_norm
        graphics.end.make(new=k.shape, with_properties=use_props)
        shape = graphics.last.get()
        shape.text.set(column[0])
        shapes.append(shape)
    og_tables[name] = graffle.assemble(shapes,
                                       table_shape=[len(sql_table) + 1, 1])
    og_tables[name].slide(by={k.x: 25, k.y: 25})
#Get the source and destination graphics for a line to be drawn
def _findColumnGraphic(og_table, column_name):
    """Return the graphic whose text equals *column_name*, or None."""
    for col in og_table.columns[1].graphics.get():
        if col.text.get() == column_name:
            return col.get()
    return None

def getOGGraphicsFromReference(sql_reference, og_tables):
    """
    Resolve a foreign-key reference ``[from_table, from_column, to_table,
    to_column]`` to its ``[source, destination]`` column graphics.

    Raises RuntimeError when either end cannot be found.
    """
    # `raise E(msg)` replaces the Python-2-only `raise E, msg` syntax.
    fg = _findColumnGraphic(og_tables[sql_reference[0]], sql_reference[1])
    if fg is None:
        raise RuntimeError("Failed to find graphic for " + sql_reference[0]
                           + "( " + sql_reference[1] + " )")
    tg = _findColumnGraphic(og_tables[sql_reference[2]], sql_reference[3])
    if tg is None:
        raise RuntimeError("Failed to find graphic for " + sql_reference[2]
                           + "( " + sql_reference[3] + " )")
    return [fg, tg]
#Draw a line representing a reference in the database.
def createOGLineFromReference(graffle, sql_reference, og_tables):
    """Connect the two ends of a foreign-key reference with a line."""
    source, target = getOGGraphicsFromReference(sql_reference, og_tables)
    source.connect(to=target, with_properties=line_props)
#####################
# Run the script. #
#####################
# Gather options and schema data, then drive OmniGraffle.
options = {}
sql_tables = {}
sql_references = []
og_tables = {}

parseArguments(sys.argv, options)
graffle = app(options['graffle'])
getSchemaInfo(options, sql_tables, sql_references)

# Draw every table first, let OmniGraffle lay the canvas out, and only
# then wire up the reference lines between column graphics.
for name in sql_tables:
    createOGTableFromSQLTable(graffle, name, sql_tables[name], og_tables)

canvas = graffle.windows[1].document.canvases[1]
canvas.layout_info.properties.set({k.random_start: False, k.animates: True,
                                   k.type: k.force_directed,
                                   k.edge_force: 20.0})
canvas.layout()

for reference in sql_references:
    createOGLineFromReference(graffle, reference, og_tables)
| |
"""Training script with multi-scale inputs for the DeepLab-ResNet network on the PASCAL VOC dataset
for semantic image segmentation.
This script trains the model using augmented PASCAL VOC,
which contains approximately 10000 images for training and 1500 images for validation.
"""
from __future__ import print_function
import argparse
from datetime import datetime
import os
import sys
import time
import tensorflow as tf
import numpy as np
from deeplab_resnet import DeepLabResNetModel, ImageReader, decode_labels, inv_preprocess, prepare_label
# Per-channel mean pixel value; passed to ImageReader for preprocessing
# and to inv_preprocess when building image summaries.
IMG_MEAN = np.array((104.00698793,116.66876762,122.67891434), dtype=np.float32)

# Default hyper-parameters and paths; every one of these can be
# overridden on the command line (see get_arguments()).
BATCH_SIZE = 1
DATA_DIRECTORY = '/home/VOCdevkit'
DATA_LIST_PATH = './dataset/train.txt'
GRAD_UPDATE_EVERY = 10  # gradients are accumulated over this many runs per step
IGNORE_LABEL = 255      # label value excluded from the loss
INPUT_SIZE = '321,321'  # 'height,width' fed to the network
LEARNING_RATE = 2.5e-4
MOMENTUM = 0.9
NUM_CLASSES = 21
NUM_STEPS = 20001
POWER = 0.9             # exponent of the polynomial learning-rate decay
RANDOM_SEED = 1234
RESTORE_FROM = './deeplab_resnet.ckpt'
SAVE_NUM_IMAGES = 1
SAVE_PRED_EVERY = 1000  # checkpoint/summary interval, in steps
SNAPSHOT_DIR = './snapshots/'
WEIGHT_DECAY = 0.0005
def get_arguments():
    """Parse all the arguments provided from the CLI.

    Returns:
      The parsed arguments as an ``argparse.Namespace``.
    """
    parser = argparse.ArgumentParser(description="DeepLab-ResNet Network")
    parser.add_argument("--batch-size", type=int, default=BATCH_SIZE,
                        help="Number of images sent to the network in one step.")
    parser.add_argument("--data-dir", type=str, default=DATA_DIRECTORY,
                        help="Path to the directory containing the PASCAL VOC dataset.")
    parser.add_argument("--data-list", type=str, default=DATA_LIST_PATH,
                        help="Path to the file listing the images in the dataset.")
    parser.add_argument("--grad-update-every", type=int, default=GRAD_UPDATE_EVERY,
                        help="Number of steps after which gradient update is applied.")
    parser.add_argument("--ignore-label", type=int, default=IGNORE_LABEL,
                        help="The index of the label to ignore during the training.")
    parser.add_argument("--input-size", type=str, default=INPUT_SIZE,
                        help="Comma-separated string with height and width of images.")
    parser.add_argument("--is-training", action="store_true",
                        help="Whether to update the running means and variances during the training.")
    parser.add_argument("--learning-rate", type=float, default=LEARNING_RATE,
                        help="Base learning rate for training with polynomial decay.")
    parser.add_argument("--momentum", type=float, default=MOMENTUM,
                        help="Momentum component of the optimiser.")
    parser.add_argument("--not-restore-last", action="store_true",
                        help="Whether to not restore last (FC) layers.")
    parser.add_argument("--num-classes", type=int, default=NUM_CLASSES,
                        help="Number of classes to predict (including background).")
    parser.add_argument("--num-steps", type=int, default=NUM_STEPS,
                        help="Number of training steps.")
    parser.add_argument("--power", type=float, default=POWER,
                        help="Decay parameter to compute the learning rate.")
    parser.add_argument("--random-mirror", action="store_true",
                        help="Whether to randomly mirror the inputs during the training.")
    parser.add_argument("--random-scale", action="store_true",
                        help="Whether to randomly scale the inputs during the training.")
    parser.add_argument("--random-seed", type=int, default=RANDOM_SEED,
                        help="Random seed to have reproducible results.")
    parser.add_argument("--restore-from", type=str, default=RESTORE_FROM,
                        help="Where restore model parameters from.")
    parser.add_argument("--save-num-images", type=int, default=SAVE_NUM_IMAGES,
                        help="How many images to save.")
    parser.add_argument("--save-pred-every", type=int, default=SAVE_PRED_EVERY,
                        help="Save summaries and checkpoint every often.")
    parser.add_argument("--snapshot-dir", type=str, default=SNAPSHOT_DIR,
                        help="Where to save snapshots of the model.")
    parser.add_argument("--weight-decay", type=float, default=WEIGHT_DECAY,
                        help="Regularisation parameter for L2-loss.")
    return parser.parse_args()
def save(saver, sess, logdir, step):
    '''Write a snapshot of the current weights.

    Args:
      saver: TensorFlow Saver object.
      sess: TensorFlow session.
      logdir: path to the snapshots directory (created if missing).
      step: current training step, appended to the checkpoint name.
    '''
    if not os.path.exists(logdir):
        os.makedirs(logdir)
    checkpoint_path = os.path.join(logdir, 'model.ckpt')
    saver.save(sess, checkpoint_path, global_step=step)
    print('The checkpoint has been created.')
def load(saver, sess, ckpt_path):
    '''Load trained weights.

    Args:
      saver: TensorFlow Saver object.
      sess: TensorFlow session.
      ckpt_path: path to checkpoint file with parameters.
    '''
    saver.restore(sess, ckpt_path)
    print("Restored model parameters from {}".format(ckpt_path))
def main():
    """Create the multi-scale DeepLab-ResNet model and run the training loop."""
    args = get_arguments()

    # Input size is given as 'height,width'.
    h, w = map(int, args.input_size.split(','))
    input_size = (h, w)

    tf.set_random_seed(args.random_seed)

    # Create queue coordinator.
    coord = tf.train.Coordinator()

    # Load reader.
    with tf.name_scope("create_inputs"):
        reader = ImageReader(
            args.data_dir,
            args.data_list,
            input_size,
            args.random_scale,
            args.random_mirror,
            args.ignore_label,
            IMG_MEAN,
            coord)
        image_batch, label_batch = reader.dequeue(args.batch_size)
        # Additional 0.75x and 0.5x scales for multi-scale training.
        image_batch075 = tf.image.resize_images(image_batch, [int(h * 0.75), int(w * 0.75)])
        image_batch05 = tf.image.resize_images(image_batch, [int(h * 0.5), int(w * 0.5)])

    # Create network: the three scales share one set of weights via
    # variable_scope reuse.
    with tf.variable_scope('', reuse=False):
        net = DeepLabResNetModel({'data': image_batch}, is_training=args.is_training, num_classes=args.num_classes)
    with tf.variable_scope('', reuse=True):
        net075 = DeepLabResNetModel({'data': image_batch075}, is_training=args.is_training, num_classes=args.num_classes)
    with tf.variable_scope('', reuse=True):
        net05 = DeepLabResNetModel({'data': image_batch05}, is_training=args.is_training, num_classes=args.num_classes)
    # For a small batch size, it is better to keep
    # the statistics of the BN layers (running means and variances)
    # frozen, and to not update the values provided by the pre-trained model.
    # If is_training=True, the statistics will be updated during the training.
    # Note that is_training=False still updates BN parameters gamma (scale) and beta (offset)
    # if they are presented in var_list of the optimiser definition.

    # Predictions: fuse the three scales by a pixel-wise maximum.
    raw_output100 = net.layers['fc1_voc12']
    raw_output075 = net075.layers['fc1_voc12']
    raw_output05 = net05.layers['fc1_voc12']
    raw_output = tf.reduce_max(tf.stack([raw_output100,
                                         tf.image.resize_images(raw_output075, tf.shape(raw_output100)[1:3,]),
                                         tf.image.resize_images(raw_output05, tf.shape(raw_output100)[1:3,])]), axis=0)
    # Which variables to load. Running means and variances are not trainable,
    # thus all_variables() should be restored.
    restore_var = [v for v in tf.global_variables() if 'fc' not in v.name or not args.not_restore_last]
    all_trainable = [v for v in tf.trainable_variables() if 'beta' not in v.name and 'gamma' not in v.name]
    fc_trainable = [v for v in all_trainable if 'fc' in v.name]
    conv_trainable = [v for v in all_trainable if 'fc' not in v.name]  # lr * 1.0
    fc_w_trainable = [v for v in fc_trainable if 'weights' in v.name]  # lr * 10.0
    fc_b_trainable = [v for v in fc_trainable if 'biases' in v.name]   # lr * 20.0
    assert(len(all_trainable) == len(fc_trainable) + len(conv_trainable))
    assert(len(fc_trainable) == len(fc_w_trainable) + len(fc_b_trainable))

    # Predictions: ignoring all predictions with labels greater or equal than n_classes
    raw_prediction = tf.reshape(raw_output, [-1, args.num_classes])
    raw_prediction100 = tf.reshape(raw_output100, [-1, args.num_classes])
    raw_prediction075 = tf.reshape(raw_output075, [-1, args.num_classes])
    raw_prediction05 = tf.reshape(raw_output05, [-1, args.num_classes])

    # Resize the labels to each output resolution (no one-hot encoding).
    label_proc = prepare_label(label_batch, tf.stack(raw_output.get_shape()[1:3]), num_classes=args.num_classes, one_hot=False)  # [batch_size, h, w]
    label_proc075 = prepare_label(label_batch, tf.stack(raw_output075.get_shape()[1:3]), num_classes=args.num_classes, one_hot=False)
    label_proc05 = prepare_label(label_batch, tf.stack(raw_output05.get_shape()[1:3]), num_classes=args.num_classes, one_hot=False)

    raw_gt = tf.reshape(label_proc, [-1,])
    raw_gt075 = tf.reshape(label_proc075, [-1,])
    raw_gt05 = tf.reshape(label_proc05, [-1,])

    # Keep only the pixels whose label is a valid class index.
    indices = tf.squeeze(tf.where(tf.less_equal(raw_gt, args.num_classes - 1)), 1)
    indices075 = tf.squeeze(tf.where(tf.less_equal(raw_gt075, args.num_classes - 1)), 1)
    indices05 = tf.squeeze(tf.where(tf.less_equal(raw_gt05, args.num_classes - 1)), 1)

    gt = tf.cast(tf.gather(raw_gt, indices), tf.int32)
    gt075 = tf.cast(tf.gather(raw_gt075, indices075), tf.int32)
    gt05 = tf.cast(tf.gather(raw_gt05, indices05), tf.int32)

    prediction = tf.gather(raw_prediction, indices)
    prediction100 = tf.gather(raw_prediction100, indices)
    prediction075 = tf.gather(raw_prediction075, indices075)
    prediction05 = tf.gather(raw_prediction05, indices05)

    # Pixel-wise softmax loss.
    loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=prediction, labels=gt)
    loss100 = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=prediction100, labels=gt)
    loss075 = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=prediction075, labels=gt075)
    loss05 = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=prediction05, labels=gt05)
    # L2 weight decay on every 'weights' variable.
    l2_losses = [args.weight_decay * tf.nn.l2_loss(v) for v in tf.trainable_variables() if 'weights' in v.name]
    reduced_loss = tf.reduce_mean(loss) + tf.reduce_mean(loss100) + tf.reduce_mean(loss075) + tf.reduce_mean(loss05) + tf.add_n(l2_losses)

    # Processed predictions: for visualisation.
    raw_output_up = tf.image.resize_bilinear(raw_output, tf.shape(image_batch)[1:3,])
    raw_output_up = tf.argmax(raw_output_up, dimension=3)
    pred = tf.expand_dims(raw_output_up, dim=3)

    # Image summary.
    images_summary = tf.py_func(inv_preprocess, [image_batch, args.save_num_images, IMG_MEAN], tf.uint8)
    labels_summary = tf.py_func(decode_labels, [label_batch, args.save_num_images, args.num_classes], tf.uint8)
    preds_summary = tf.py_func(decode_labels, [pred, args.save_num_images, args.num_classes], tf.uint8)

    total_summary = tf.summary.image('images',
                                     tf.concat(axis=2, values=[images_summary, labels_summary, preds_summary]),
                                     max_outputs=args.save_num_images)  # Concatenate row-wise.
    summary_writer = tf.summary.FileWriter(args.snapshot_dir,
                                           graph=tf.get_default_graph())

    # Define loss and optimisation parameters: polynomial decay of the
    # base learning rate, with FC weights/biases trained 10x/20x faster.
    base_lr = tf.constant(args.learning_rate)
    step_ph = tf.placeholder(dtype=tf.float32, shape=())
    learning_rate = tf.scalar_mul(base_lr, tf.pow((1 - step_ph / args.num_steps), args.power))

    opt_conv = tf.train.MomentumOptimizer(learning_rate, args.momentum)
    opt_fc_w = tf.train.MomentumOptimizer(learning_rate * 10.0, args.momentum)
    opt_fc_b = tf.train.MomentumOptimizer(learning_rate * 20.0, args.momentum)

    # Define a variable to accumulate gradients.
    accum_grads = [tf.Variable(tf.zeros_like(v.initialized_value()),
                               trainable=False) for v in conv_trainable + fc_w_trainable + fc_b_trainable]

    # Define an operation to clear the accumulated gradients for next batch.
    zero_op = [v.assign(tf.zeros_like(v)) for v in accum_grads]

    # Compute gradients.
    grads = tf.gradients(reduced_loss, conv_trainable + fc_w_trainable + fc_b_trainable)

    # Accumulate and normalise the gradients.
    accum_grads_op = [accum_grads[i].assign_add(grad / args.grad_update_every) for i, grad in
                      enumerate(grads)]

    grads_conv = accum_grads[:len(conv_trainable)]
    grads_fc_w = accum_grads[len(conv_trainable) : (len(conv_trainable) + len(fc_w_trainable))]
    grads_fc_b = accum_grads[(len(conv_trainable) + len(fc_w_trainable)):]

    # Apply the gradients.
    train_op_conv = opt_conv.apply_gradients(zip(grads_conv, conv_trainable))
    train_op_fc_w = opt_fc_w.apply_gradients(zip(grads_fc_w, fc_w_trainable))
    train_op_fc_b = opt_fc_b.apply_gradients(zip(grads_fc_b, fc_b_trainable))

    train_op = tf.group(train_op_conv, train_op_fc_w, train_op_fc_b)

    # Set up tf session and initialize variables.
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    init = tf.global_variables_initializer()

    sess.run(init)

    # Saver for storing checkpoints of the model.
    saver = tf.train.Saver(var_list=tf.global_variables(), max_to_keep=10)

    # Load variables if the checkpoint is provided.
    if args.restore_from is not None:
        loader = tf.train.Saver(var_list=restore_var)
        load(loader, sess, args.restore_from)

    # Start queue threads.
    threads = tf.train.start_queue_runners(coord=coord, sess=sess)

    # Iterate over training steps.
    for step in range(args.num_steps):
        start_time = time.time()
        feed_dict = { step_ph : step }
        loss_value = 0

        # Clear the accumulated gradients.
        sess.run(zero_op, feed_dict=feed_dict)

        # Accumulate gradients over `grad_update_every` mini-batches.
        for i in range(args.grad_update_every):
            _, l_val = sess.run([accum_grads_op, reduced_loss], feed_dict=feed_dict)
            loss_value += l_val

        # Normalise the loss.
        loss_value /= args.grad_update_every

        # Apply gradients (and periodically write a summary + checkpoint).
        if step % args.save_pred_every == 0:
            images, labels, summary, _ = sess.run([image_batch, label_batch, total_summary, train_op], feed_dict=feed_dict)
            summary_writer.add_summary(summary, step)
            save(saver, sess, args.snapshot_dir, step)
        else:
            sess.run(train_op, feed_dict=feed_dict)

        duration = time.time() - start_time
        print('step {:d} \t loss = {:.3f}, ({:.3f} sec/step)'.format(step, loss_value, duration))
    coord.request_stop()
    coord.join(threads)


if __name__ == '__main__':
    main()
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mox
import eventlet
import unittest
from nose.plugins.attrib import attr
from oslo.config import cfg
from heat.tests import fakes
from heat.tests.utils import stack_delete_after
from heat.common import identifier
from heat.common import template_format
from heat.engine import parser
from heat.engine import service
from heat.engine.resources import instance
from heat.common import context
from heat.engine.resources import wait_condition as wc
test_template_metadata = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "",
"Parameters" : {
"KeyName" : {"Type" : "String", "Default": "mine" },
},
"Resources" : {
"S1": {
"Type": "AWS::EC2::Instance",
"Metadata" : {
"AWS::CloudFormation::Init" : {
"config" : {
"files" : {
"/tmp/random_file" : {
"content" : { "Fn::Join" : ["", [
"s2-ip=", {"Fn::GetAtt": ["S2", "PublicIp"]}
]]},
"mode" : "000400",
"owner" : "root",
"group" : "root"
}
}
}
}
},
"Properties": {
"ImageId" : "a",
"InstanceType" : "m1.large",
"KeyName" : { "Ref" : "KeyName" },
"UserData" : "#!/bin/bash -v\n"
}
},
"S2": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId" : "a",
"InstanceType" : "m1.large",
"KeyName" : { "Ref" : "KeyName" },
"UserData" : "#!/bin/bash -v\n"
}
}
}
}
'''
test_template_waitcondition = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "Just a WaitCondition.",
"Parameters" : {
"KeyName" : {"Type" : "String", "Default": "mine" },
},
"Resources" : {
"S1": {
"Type": "AWS::EC2::Instance",
"Metadata" : {
"test" : {"Fn::GetAtt": ["WC", "Data"]}
},
"Properties": {
"ImageId" : "a",
"InstanceType" : "m1.large",
"KeyName" : { "Ref" : "KeyName" },
"UserData" : { "Fn::Join" : [ "", [ "#!/bin/bash -v\n",
"echo ",
{ "Ref" : "WH" },
"\n" ] ] }
}
},
"WH" : {
"Type" : "AWS::CloudFormation::WaitConditionHandle"
},
"WC" : {
"Type" : "AWS::CloudFormation::WaitCondition",
"DependsOn": "S1",
"Properties" : {
"Handle" : {"Ref" : "WH"},
"Timeout" : "5"
}
}
}
}
'''
@attr(tag=['unit', 'resource', 'Metadata'])
@attr(speed='slow')
class MetadataRefreshTest(unittest.TestCase):
    '''
    Confirm that a resource's resolved metadata gets updated when
    FnGetAtt() starts returning something different.
    '''
    def setUp(self):
        self.m = mox.Mox()
        # Stub eventlet.sleep so stack creation does not actually wait.
        self.m.StubOutWithMock(eventlet, 'sleep')
        self.fc = fakes.FakeKeystoneClient()

    def tearDown(self):
        self.m.UnsetStubs()

    # Note tests creating a stack should be decorated with @stack_delete_after
    # to ensure the stack is properly cleaned up
    def create_stack(self, stack_name='test_stack', params={}):
        # NOTE(review): mutable default for `params`; looks safe because it
        # is only passed through, but confirm before reusing this pattern.
        temp = template_format.parse(test_template_metadata)
        template = parser.Template(temp)
        parameters = parser.Parameters(stack_name, template, params)
        ctx = context.get_admin_context()
        ctx.tenant_id = 'test_tenant'
        stack = parser.Stack(ctx, stack_name, template, parameters,
                             disable_rollback=True)
        self.stack_id = stack.store()

        # The template defines two instances (S1, S2), so stub one
        # handle_create/check_active pair per instance.
        self.m.StubOutWithMock(instance.Instance, 'handle_create')
        self.m.StubOutWithMock(instance.Instance, 'check_active')
        instance.Instance.handle_create().AndReturn(None)
        instance.Instance.check_active().AndReturn(True)
        instance.Instance.handle_create().AndReturn(None)
        instance.Instance.check_active().AndReturn(True)

        self.m.StubOutWithMock(instance.Instance, 'FnGetAtt')
        return stack

    @stack_delete_after
    def test_FnGetAtt(self):
        self.stack = self.create_stack()

        # First value is resolved during create(); the second one after
        # metadata_update() below.
        instance.Instance.FnGetAtt('PublicIp').AndReturn('1.2.3.5')

        # called by metadata_update()
        instance.Instance.FnGetAtt('PublicIp').AndReturn('10.0.0.5')

        self.m.ReplayAll()
        self.stack.create()

        self.assertEqual(self.stack.state, self.stack.CREATE_COMPLETE)

        s1 = self.stack.resources['S1']
        s2 = self.stack.resources['S2']
        files = s1.metadata['AWS::CloudFormation::Init']['config']['files']
        cont = files['/tmp/random_file']['content']
        self.assertEqual(s2.CREATE_COMPLETE, s2.state)
        self.assertEqual(cont, 's2-ip=1.2.3.5')

        s1.metadata_update()
        s2.metadata_update()
        files = s1.metadata['AWS::CloudFormation::Init']['config']['files']
        cont = files['/tmp/random_file']['content']
        self.assertEqual(cont, 's2-ip=10.0.0.5')

        self.m.VerifyAll()
@attr(tag=['unit', 'resource', 'Metadata'])
@attr(speed='slow')
class WaitCondMetadataUpdateTest(unittest.TestCase):
    """Exercise metadata updates delivered through a WaitConditionHandle."""

    def setUp(self):
        self.m = mox.Mox()
        self.ctx = context.get_admin_context()
        self.ctx.tenant_id = 'test_tenant'
        self.fc = fakes.FakeKeystoneClient()
        self.man = service.EngineService('a-host', 'a-topic')
        cfg.CONF.set_default('heat_waitcondition_server_url',
                             'http://127.0.0.1:8000/v1/waitcondition')

    def tearDown(self):
        self.m.UnsetStubs()

    # Note tests creating a stack should be decorated with @stack_delete_after
    # to ensure the stack is properly cleaned up
    def create_stack(self, stack_name='test_stack'):
        # Parse and store the wait-condition template as a new stack.
        temp = template_format.parse(test_template_waitcondition)
        template = parser.Template(temp)
        parameters = parser.Parameters(stack_name, template, {})
        stack = parser.Stack(self.ctx, stack_name, template, parameters,
                             disable_rollback=True)
        self.stack_id = stack.store()

        # Stub out the nova-facing calls for S1.
        self.m.StubOutWithMock(instance.Instance, 'handle_create')
        self.m.StubOutWithMock(instance.Instance, 'check_active')
        instance.Instance.handle_create().AndReturn(None)
        instance.Instance.check_active().AndReturn(True)

        # The handle talks to keystone and derives its signed URL from its
        # identifier; stub both to keep the test offline.
        self.m.StubOutWithMock(wc.WaitConditionHandle, 'keystone')
        wc.WaitConditionHandle.keystone().MultipleTimes().AndReturn(self.fc)

        id = identifier.ResourceIdentifier('test_tenant', stack.name,
                                           stack.id, '', 'WH')
        self.m.StubOutWithMock(wc.WaitConditionHandle, 'identifier')
        wc.WaitConditionHandle.identifier().MultipleTimes().AndReturn(id)

        self.m.StubOutWithMock(eventlet, 'sleep')
        return stack

    @stack_delete_after
    def test_wait_meta(self):
        '''
        1 create stack
        2 assert empty instance metadata
        3 service.metadata_update()
        4 assert valid waitcond metadata
        5 assert valid instance metadata
        '''
        self.stack = self.create_stack()
        watch = self.stack['WC']
        inst = self.stack['S1']

        def check_empty(sleep_time):
            self.assertEqual(watch.FnGetAtt('Data'), '{}')
            self.assertEqual(inst.metadata['test'], None)

        def update_metadata(id, data, reason):
            self.man.metadata_update(self.ctx,
                                     dict(self.stack.identifier()),
                                     'WH',
                                     {'Data': data, 'Reason': reason,
                                      'Status': 'SUCCESS', 'UniqueId': id})

        def post_success(sleep_time):
            update_metadata('123', 'foo', 'bar')

        # While the WaitCondition polls: first sleep observes still-empty
        # metadata, second sleep posts the success signal so create() ends.
        eventlet.sleep(mox.IsA(int)).WithSideEffects(check_empty)
        eventlet.sleep(mox.IsA(int)).WithSideEffects(post_success)
        self.m.ReplayAll()

        self.stack.create()

        self.assertEqual(self.stack.state, self.stack.CREATE_COMPLETE)
        self.assertEqual(watch.FnGetAtt('Data'), '{"123": "foo"}')
        self.assertEqual(inst.metadata['test'], '{"123": "foo"}')

        # A second signal merges into the existing Data JSON.
        update_metadata('456', 'blarg', 'wibble')
        self.assertEqual(watch.FnGetAtt('Data'),
                         '{"123": "foo", "456": "blarg"}')
        self.assertEqual(inst.metadata['test'],
                         '{"123": "foo", "456": "blarg"}')

        self.m.VerifyAll()
| |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module contains the implementation of RNN cell wrappers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import hashlib
import numbers
import sys
import types as python_types
import warnings
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.keras.utils import generic_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.util import nest
class DropoutWrapperBase(object):
"""Operator adding dropout to inputs and outputs of the given cell."""
def __init__(self,
             cell,
             input_keep_prob=1.0,
             output_keep_prob=1.0,
             state_keep_prob=1.0,
             variational_recurrent=False,
             input_size=None,
             dtype=None,
             seed=None,
             dropout_state_filter_visitor=None,
             **kwargs):
  """Create a cell with added input, state, and/or output dropout.

  If `variational_recurrent` is set to `True` (**NOT** the default behavior),
  then the same dropout mask is applied at every step, as described in:
  [A Theoretically Grounded Application of Dropout in Recurrent
  Neural Networks. Y. Gal, Z. Ghahramani](https://arxiv.org/abs/1512.05287).

  Otherwise a different dropout mask is applied at every time step.

  Note, by default (unless a custom `dropout_state_filter` is provided),
  the memory state (`c` component of any `LSTMStateTuple`) passing through
  a `DropoutWrapper` is never modified.  This behavior is described in the
  above article.

  Args:
    cell: an RNNCell, a projection to output_size is added to it.
    input_keep_prob: unit Tensor or float between 0 and 1, input keep
      probability; if it is constant and 1, no input dropout will be added.
    output_keep_prob: unit Tensor or float between 0 and 1, output keep
      probability; if it is constant and 1, no output dropout will be added.
    state_keep_prob: unit Tensor or float between 0 and 1, output keep
      probability; if it is constant and 1, no output dropout will be added.
      State dropout is performed on the outgoing states of the cell. **Note**
      the state components to which dropout is applied when `state_keep_prob`
      is in `(0, 1)` are also determined by the argument
      `dropout_state_filter_visitor` (e.g. by default dropout is never applied
      to the `c` component of an `LSTMStateTuple`).
    variational_recurrent: Python bool.  If `True`, then the same dropout
      pattern is applied across all time steps per run call.  If this
      parameter is set, `input_size` **must** be provided.
    input_size: (optional) (possibly nested tuple of) `TensorShape` objects
      containing the depth(s) of the input tensors expected to be passed in
      to the `DropoutWrapper`.  Required and used **iff**
      `variational_recurrent = True` and `input_keep_prob < 1`.
    dtype: (optional) The `dtype` of the input, state, and output tensors.
      Required and used **iff** `variational_recurrent = True`.
    seed: (optional) integer, the randomness seed.
    dropout_state_filter_visitor: (optional), default: (see below).  Function
      that takes any hierarchical level of the state and returns a scalar or
      depth=1 structure of Python booleans describing which terms in the
      state should be dropped out.  In addition, if the function returns
      `True`, dropout is applied across this sublevel.  If the function
      returns `False`, dropout is not applied across this entire sublevel.
      Default behavior: perform dropout on all terms except the memory (`c`)
      state of `LSTMCellState` objects, and don't try to apply dropout to
      `TensorArray` objects:

      ```
      def dropout_state_filter_visitor(s):
        if isinstance(s, LSTMCellState):
          # Never perform dropout on the c state.
          return LSTMCellState(c=False, h=True)
        elif isinstance(s, TensorArray):
          return False
        return True
      ```
    **kwargs: dict of keyword arguments for base layer.

  Raises:
    TypeError: if `cell` is not an `RNNCell`, or `keep_state_fn` is provided
      but not `callable`.
    ValueError: if any of the keep_probs are not between 0 and 1.
  """
  super(DropoutWrapperBase, self).__init__(cell, dtype=dtype, **kwargs)

  if (dropout_state_filter_visitor is not None and
      not callable(dropout_state_filter_visitor)):
    raise TypeError("dropout_state_filter_visitor must be callable")
  self._dropout_state_filter = (
      dropout_state_filter_visitor or _default_dropout_state_filter_visitor)
  with ops.name_scope("DropoutWrapperInit"):

    def tensor_and_const_value(v):
      # Resolve each keep probability to a Python constant when possible,
      # so the range check and the "skip dropout when prob == 1" fast
      # path can run at graph-construction time.
      tensor_value = ops.convert_to_tensor(v)
      const_value = tensor_util.constant_value(tensor_value)
      return (tensor_value, const_value)

    for prob, attr in [(input_keep_prob, "input_keep_prob"),
                       (state_keep_prob, "state_keep_prob"),
                       (output_keep_prob, "output_keep_prob")]:
      tensor_prob, const_prob = tensor_and_const_value(prob)
      if const_prob is not None:
        if const_prob < 0 or const_prob > 1:
          raise ValueError("Parameter %s must be between 0 and 1: %d" %
                           (attr, const_prob))
        # Constant probability: store a plain float.
        setattr(self, "_%s" % attr, float(const_prob))
      else:
        # Dynamic probability: keep the tensor.
        setattr(self, "_%s" % attr, tensor_prob)

  # Set variational_recurrent, seed before running the code below
  self._variational_recurrent = variational_recurrent
  self._input_size = input_size
  self._seed = seed

  self._recurrent_input_noise = None
  self._recurrent_state_noise = None
  self._recurrent_output_noise = None

  if variational_recurrent:
    if dtype is None:
      raise ValueError(
          "When variational_recurrent=True, dtype must be provided")

    def convert_to_batch_shape(s):
      # Prepend a 1 for the batch dimension; for recurrent
      # variational dropout we use the same dropout mask for all
      # batch elements.
      return array_ops.concat(([1], tensor_shape.TensorShape(s).as_list()), 0)

    def batch_noise(s, inner_seed):
      shape = convert_to_batch_shape(s)
      return random_ops.random_uniform(shape, seed=inner_seed, dtype=dtype)

    if (not isinstance(self._input_keep_prob, numbers.Real) or
        self._input_keep_prob < 1.0):
      if input_size is None:
        raise ValueError(
            "When variational_recurrent=True and input_keep_prob < 1.0 or "
            "is unknown, input_size must be provided")
      self._recurrent_input_noise = _enumerated_map_structure_up_to(
          input_size,
          lambda i, s: batch_noise(s, inner_seed=self._gen_seed("input", i)),
          input_size)
    # State and output noise are always pre-generated in variational mode;
    # their shapes come from the wrapped cell itself.
    self._recurrent_state_noise = _enumerated_map_structure_up_to(
        cell.state_size,
        lambda i, s: batch_noise(s, inner_seed=self._gen_seed("state", i)),
        cell.state_size)
    self._recurrent_output_noise = _enumerated_map_structure_up_to(
        cell.output_size,
        lambda i, s: batch_noise(s, inner_seed=self._gen_seed("output", i)),
        cell.output_size)
def _gen_seed(self, salt_prefix, index):
    """Derive a deterministic per-position dropout seed.

    Returns None when the wrapper was built without a seed; otherwise the
    base seed is salted with "<salt_prefix>_<index>" and the MD5 digest is
    folded into a non-negative 31-bit integer.
    """
    if self._seed is None:
        return None
    salted = "{0}{1}_{2}".format(self._seed, salt_prefix, index)
    digest = hashlib.md5(salted.encode("utf-8")).hexdigest()
    return int(digest[:8], 16) & 0x7FFFFFFF
@property
def wrapped_cell(self):
    """The inner `RNNCell` this wrapper delegates to."""
    return self.cell

@property
def state_size(self):
    # Dropout changes neither state nor output structure; defer to the cell.
    return self.cell.state_size

@property
def output_size(self):
    return self.cell.output_size

def build(self, inputs_shape):
    """Build the wrapped cell and mark this wrapper as built."""
    self.cell.build(inputs_shape)
    self.built = True

def zero_state(self, batch_size, dtype):
    """Return the wrapped cell's zero state under a wrapper name scope."""
    with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]):
        return self.cell.zero_state(batch_size, dtype)
def _variational_recurrent_dropout_value(
        self, unused_index, value, noise, keep_prob):
    """Performs dropout given the pre-calculated noise tensor."""
    # uniform [keep_prob, 1.0 + keep_prob)
    random_tensor = keep_prob + noise
    # 0. if [keep_prob, 1.0) and 1. if [1.0, 1.0 + keep_prob)
    binary_tensor = math_ops.floor(random_tensor)
    # "Inverted dropout": scale survivors by 1/keep_prob so the expected
    # value is unchanged, then zero out the dropped entries.
    ret = math_ops.div(value, keep_prob) * binary_tensor
    # Division can lose static shape information; restore it from the input.
    ret.set_shape(value.get_shape())
    return ret
def _dropout(self,
             values,
             salt_prefix,
             recurrent_noise,
             keep_prob,
             shallow_filtered_substructure=None):
    """Decides whether to perform standard dropout or recurrent dropout.

    Args:
        values: structure of tensors to (possibly) apply dropout to.
        salt_prefix: string mixed into per-position seeds ("input", "state"
            or "output").
        recurrent_noise: pre-sampled noise structure; only consulted when
            variational (recurrent) dropout is enabled.
        keep_prob: keep probability for this structure.
        shallow_filtered_substructure: optional shallow structure whose
            boolean leaves select which substructures receive dropout.

    Returns:
        A structure like `values` with dropout applied where requested.
    """
    if shallow_filtered_substructure is None:
        # Put something so we traverse the entire structure; inside the
        # dropout function we check to see if leafs of this are bool or not.
        shallow_filtered_substructure = values
    if not self._variational_recurrent:
        def dropout(i, do_dropout, v):
            # `do_dropout` is either a boolean leaf of the filter structure
            # or (when no filter was supplied) an actual value, i.e. non-bool.
            if not isinstance(do_dropout, bool) or do_dropout:
                return nn_ops.dropout_v2(
                    v, rate=1. - keep_prob, seed=self._gen_seed(salt_prefix, i))
            else:
                return v
        return _enumerated_map_structure_up_to(
            shallow_filtered_substructure, dropout,
            *[shallow_filtered_substructure, values])
    else:
        def dropout(i, do_dropout, v, n):
            if not isinstance(do_dropout, bool) or do_dropout:
                return self._variational_recurrent_dropout_value(i, v, n, keep_prob)
            else:
                return v
        return _enumerated_map_structure_up_to(
            shallow_filtered_substructure, dropout,
            *[shallow_filtered_substructure, values, recurrent_noise])
def _call_wrapped_cell(self, inputs, state, cell_call_fn, **kwargs):
    """Runs the wrapped cell and applies dropout.

    Args:
      inputs: A tensor with wrapped cell's input.
      state: A tensor or tuple of tensors with wrapped cell's state.
      cell_call_fn: Wrapped cell's method to use for step computation (cell's
        `__call__` or 'call' method).
      **kwargs: Additional arguments.

    Returns:
      A pair containing:
      - Output: A tensor with cell's output.
      - New state: A tensor or tuple of tensors with new wrapped cell's state.
    """
    def _should_dropout(p):
        # A non-float keep prob is a tensor and must be applied at runtime;
        # a Python float triggers dropout only when strictly below 1.
        return (not isinstance(p, float)) or p < 1
    if _should_dropout(self._input_keep_prob):
        inputs = self._dropout(inputs, "input", self._recurrent_input_noise,
                               self._input_keep_prob)
    output, new_state = cell_call_fn(inputs, state, **kwargs)
    if _should_dropout(self._state_keep_prob):
        # Identify which subsets of the state to perform dropout on and
        # which ones to keep.
        shallow_filtered_substructure = nest.get_traverse_shallow_structure(
            self._dropout_state_filter, new_state)
        new_state = self._dropout(new_state, "state", self._recurrent_state_noise,
                                  self._state_keep_prob,
                                  shallow_filtered_substructure)
    if _should_dropout(self._output_keep_prob):
        output = self._dropout(output, "output", self._recurrent_output_noise,
                               self._output_keep_prob)
    return output, new_state
def get_config(self):
    """Returns the config of the dropout wrapper."""
    config = {
        "input_keep_prob": self._input_keep_prob,
        "output_keep_prob": self._output_keep_prob,
        "state_keep_prob": self._state_keep_prob,
        "variational_recurrent": self._variational_recurrent,
        "input_size": self._input_size,
        "seed": self._seed,
    }
    # Only serialize the state filter when the user supplied a custom one;
    # the default visitor is restored implicitly on deserialization.
    if self._dropout_state_filter != _default_dropout_state_filter_visitor:
        function, function_type, function_module = _serialize_function_to_config(
            self._dropout_state_filter)
        config.update({"dropout_fn": function,
                       "dropout_fn_type": function_type,
                       "dropout_fn_module": function_module})
    base_config = super(DropoutWrapperBase, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config, custom_objects=None):
    """Recreate a wrapper from a `get_config()` dictionary.

    Rebuilds a custom `dropout_state_filter_visitor` when one was
    serialized; `_parse_config_to_function` pops the "dropout_fn_type" and
    "dropout_fn_module" keys, while "dropout_fn" itself is popped here.
    """
    if "dropout_fn" in config:
        config = config.copy()
        dropout_state_filter = _parse_config_to_function(
            config, custom_objects, "dropout_fn", "dropout_fn_type",
            "dropout_fn_module")
        config.pop("dropout_fn")
        config["dropout_state_filter_visitor"] = dropout_state_filter
    return super(DropoutWrapperBase, cls).from_config(
        config, custom_objects=custom_objects)
class ResidualWrapperBase(object):
    """RNNCell wrapper that ensures cell inputs are added to the outputs."""

    def __init__(self, cell, residual_fn=None, **kwargs):
        """Constructs a `ResidualWrapper` for `cell`.

        Args:
          cell: An instance of `RNNCell`.
          residual_fn: (Optional) The function to map raw cell inputs and raw cell
            outputs to the actual cell outputs of the residual network.
            Defaults to calling nest.map_structure on (lambda i, o: i + o), inputs
            and outputs.
          **kwargs: dict of keyword arguments for base layer.
        """
        super(ResidualWrapperBase, self).__init__(cell, **kwargs)
        self._residual_fn = residual_fn

    @property
    def state_size(self):
        # The residual connection changes neither state nor output structure.
        return self.cell.state_size

    @property
    def output_size(self):
        return self.cell.output_size

    def zero_state(self, batch_size, dtype):
        """Return the wrapped cell's zero state under a wrapper name scope."""
        with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]):
            return self.cell.zero_state(batch_size, dtype)

    def _call_wrapped_cell(self, inputs, state, cell_call_fn, **kwargs):
        """Run the cell and then apply the residual_fn on its inputs to its outputs.

        Args:
          inputs: cell inputs.
          state: cell state.
          cell_call_fn: Wrapped cell's method to use for step computation (cell's
            `__call__` or 'call' method).
          **kwargs: Additional arguments passed to the wrapped cell's `call`.

        Returns:
          Tuple of cell outputs and new state.

        Raises:
          TypeError: If cell inputs and outputs have different structure (type).
          ValueError: If cell inputs and outputs have different structure (value).
        """
        outputs, new_state = cell_call_fn(inputs, state, **kwargs)

        # Ensure shapes match
        def assert_shape_match(inp, out):
            inp.get_shape().assert_is_compatible_with(out.get_shape())

        def default_residual_fn(inputs, outputs):
            nest.assert_same_structure(inputs, outputs)
            nest.map_structure(assert_shape_match, inputs, outputs)
            return nest.map_structure(lambda inp, out: inp + out, inputs, outputs)

        res_outputs = (self._residual_fn or default_residual_fn)(inputs, outputs)
        return (res_outputs, new_state)

    def get_config(self):
        """Returns the config of the residual wrapper."""
        if self._residual_fn is not None:
            function, function_type, function_module = _serialize_function_to_config(
                self._residual_fn)
            # NOTE(review): "residule_fn_module" is a long-standing typo; it is
            # kept deliberately because from_config() below reads the same key,
            # and renaming it would break previously serialized configs.
            config = {"residual_fn": function,
                      "residual_fn_type": function_type,
                      "residule_fn_module": function_module}
        else:
            config = {}
        base_config = super(ResidualWrapperBase, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))

    @classmethod
    def from_config(cls, config, custom_objects=None):
        """Recreate the wrapper, rebuilding a serialized residual_fn if any."""
        if "residual_fn" in config:
            config = config.copy()
            residual_function = _parse_config_to_function(
                config, custom_objects, "residual_fn", "residual_fn_type",
                "residule_fn_module")
            config["residual_fn"] = residual_function
        return super(ResidualWrapperBase, cls).from_config(
            config, custom_objects=custom_objects)
class DeviceWrapperBase(object):
    """Operator that ensures an RNNCell runs on a particular device."""

    def __init__(self, cell, device, **kwargs):
        """Construct a `DeviceWrapper` for `cell` with device `device`.

        Ensures the wrapped `cell` is called with `tf.device(device)`.

        Args:
          cell: An instance of `RNNCell`.
          device: A device string or function, for passing to `tf.device`.
          **kwargs: dict of keyword arguments for base layer.
        """
        super(DeviceWrapperBase, self).__init__(cell, **kwargs)
        self._device = device

    @property
    def state_size(self):
        # Device placement does not alter the cell's interface.
        return self.cell.state_size

    @property
    def output_size(self):
        return self.cell.output_size

    def zero_state(self, batch_size, dtype):
        """Return the wrapped cell's zero state, created on the target device."""
        with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]):
            with ops.device(self._device):
                return self.cell.zero_state(batch_size, dtype)

    def _call_wrapped_cell(self, inputs, state, cell_call_fn, **kwargs):
        """Run the cell on specified device."""
        with ops.device(self._device):
            return cell_call_fn(inputs, state, **kwargs)

    def get_config(self):
        """Returns the config of the device wrapper (just the device spec)."""
        config = {"device": self._device}
        base_config = super(DeviceWrapperBase, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
def _serialize_function_to_config(function):
    """Serialize the function for get_config().

    Returns a ``(payload, type, module)`` triple:
      * real lambdas are serialized by bytecode via `generic_utils.func_dump`
        with type "lambda";
      * named functions and other callables are serialized by name with type
        "function", to be looked up again by `_parse_config_to_function`.

    Raises:
        ValueError: if `function` is not callable.
    """
    # NOTE: `types.LambdaType` is the very same object as `types.FunctionType`,
    # so an isinstance check alone matches *every* Python function and made the
    # `callable` branch unreachable for named functions.  Restrict the fragile
    # bytecode path to actual lambdas so named functions serialize portably by
    # name instead.
    if (isinstance(function, python_types.LambdaType)
            and function.__name__ == "<lambda>"):
        output = generic_utils.func_dump(function)
        output_type = "lambda"
        module = function.__module__
    elif callable(function):
        output = function.__name__
        output_type = "function"
        module = function.__module__
    else:
        raise ValueError("Unrecognized function type for input: {}".format(
            type(function)))
    return output, output_type, module
def _parse_config_to_function(config, custom_objects, func_attr_name,
                              func_type_attr_name, module_attr_name):
    """Reconstruct the function from the config.

    Pops `module_attr_name` and `func_type_attr_name` from `config`; the
    caller is expected to pop `func_attr_name` itself.

    Args:
        config: wrapper config dict (mutated by the pops above).
        custom_objects: optional dict of user-provided objects used both as a
            deserialization registry and as extra globals for lambdas.
        func_attr_name: key holding the serialized function payload.
        func_type_attr_name: key holding "function" or "lambda".
        module_attr_name: key holding the defining module's name.

    Returns:
        The reconstructed callable.

    Raises:
        TypeError: for an unknown serialized function type.
    """
    globs = globals()
    module = config.pop(module_attr_name, None)
    if module in sys.modules:
        globs.update(sys.modules[module].__dict__)
    elif module is not None:
        # Note: we don't know the name of the function if it's a lambda.
        warnings.warn("{} is not loaded, but a layer uses it. "
                      "It may cause errors.".format(module), UserWarning)
    if custom_objects:
        globs.update(custom_objects)
    function_type = config.pop(func_type_attr_name)
    if function_type == "function":
        # Simple lookup in custom objects
        function = generic_utils.deserialize_keras_object(
            config[func_attr_name],
            custom_objects=custom_objects,
            printable_module_name="function in wrapper")
    elif function_type == "lambda":
        # Unsafe deserialization from bytecode -- only load configs that come
        # from a trusted source.
        function = generic_utils.func_load(
            config[func_attr_name], globs=globs)
    else:
        raise TypeError("Unknown function type:", function_type)
    return function
def _default_dropout_state_filter_visitor(substate):
    """Default policy for which state substructures receive dropout."""
    from tensorflow.python.ops.rnn_cell_impl import LSTMStateTuple  # pylint: disable=g-import-not-at-top
    if isinstance(substate, tensor_array_ops.TensorArray):
        # Accumulating TensorArrays never receive dropout.
        return False
    if isinstance(substate, LSTMStateTuple):
        # Drop only the hidden state h; the memory cell c is preserved.
        return LSTMStateTuple(c=False, h=True)
    return True
def _enumerated_map_structure_up_to(shallow_structure, map_fn, *args, **kwargs):
    """Like nest.map_structure_up_to, but prepends a running leaf index to map_fn."""
    counter = {"next_index": 0}

    def indexed_map_fn(*inner_args, **inner_kwargs):
        result = map_fn(counter["next_index"], *inner_args, **inner_kwargs)
        counter["next_index"] += 1
        return result

    return nest.map_structure_up_to(shallow_structure, indexed_map_fn, *args,
                                    **kwargs)
| |
# -*- coding: utf-8 -*-
"""
Entity properties
=================
Entities are a collection of Properties of different types. These classes
implement the default set of types used in endpoints.
At entity class level, these properties are used in querying entities:
.. code-block:: python
>>> import datetime
>>> Order.ShippedDate > datetime.datetime.now()
'ShippedDate gt 2016-02-19T12:02:04.956226'
>>> Service.query(Order).filter(Order.OrderID == 1234)
Once the entity is instanced, the properties act as data getters and setters:
.. code-block:: python
>>> order = Order()
>>> order.ShippedDate = datetime.datetime.now()
>>> Service.save(order)
Setting a new value to a property marks the property as `dirty`, and will be
sent to the endpoint when :py:func:`~odata.service.ODataService.save` is
called.
This behavior is similar to SQLAlchemy's ORM.
.. automodule:: odata.navproperty
Creating new property types
---------------------------
All properties must subclass :py:class:`PropertyBase`, and implement the
serialization methods. You can then use the created Property class in your
custom defined entities. Replacing the default types is not supported.
.. autoclass:: odata.property.PropertyBase
:members:
----
Types
-----
"""
from decimal import Decimal
import datetime
import dateutil.parser
from .navproperty import NavigationProperty
class PropertyBase(object):
    """
    A base class for all properties.

    At class level a property acts as a query-expression builder: the
    comparison operators return OData filter strings instead of booleans.
    On an entity instance it behaves as a data descriptor that reads and
    writes the entity's backing data dictionary.

    :param name: Name of the property in the endpoint
    :param primary_key: This property is a primary key
    :param is_collection: This property contains multiple values
    :param is_computed_value: Value is computed by the endpoint
    """
    def __init__(self, name, primary_key=False, is_collection=False, is_computed_value=False):
        """
        :type name: str
        :type primary_key: bool
        """
        self.name = name
        self.primary_key = primary_key
        self.is_collection = is_collection
        self.is_computed_value = is_computed_value

    def __repr__(self):
        return '<Property({0})>'.format(self.name)

    def __get__(self, instance, owner):
        """
        Read and deserialize this property's value from the entity data.

        :type instance: odata.entity.EntityBase
        :type owner: odata.entity.EntityBase
        """
        if instance is None:
            # Class-level access returns the descriptor itself so that query
            # expressions (Entity.Prop == x) keep working.
            return self
        es = instance.__odata__
        if self.name not in es:
            # Include the property name so the failure is diagnosable.
            raise AttributeError(self.name)
        raw_data = es[self.name]
        if self.is_collection:
            if raw_data is None:
                return
            return [self.deserialize(i) for i in raw_data]
        return self.deserialize(raw_data)

    def __set__(self, instance, value):
        """
        Serialize and store a new value; the property is marked dirty only
        when the stored value actually changes.

        :type instance: odata.entity.EntityBase
        """
        es = instance.__odata__
        if self.name in es:
            if self.is_collection:
                new_value = [self.serialize(i) for i in (value or [])]
            else:
                new_value = self.serialize(value)
            old_value = es[self.name]
            if new_value != old_value:
                es[self.name] = new_value
                es.set_property_dirty(self)

    def serialize(self, value):
        """
        Called when serializing the value to JSON. Implement this method when
        creating a new Property class

        :param value: Value given in Python code
        :returns: Value that will be used in JSON
        """
        raise NotImplementedError()

    def deserialize(self, value):
        """
        Called when deserializing the value from JSON to Python. Implement this
        method when creating a new Property class

        :param value: Value received in JSON
        :returns: Value that will be passed to Python
        """
        raise NotImplementedError()

    def escape_value(self, value):
        """
        Called when escaping the property value for usage in Query string.
        Implement this method when creating a new Property class

        :param value: Value of this property
        :return: Escaped value that can be used in Query string
        """
        if value is None:
            return 'null'
        return value

    def asc(self):
        """Ascending order-by expression for this property."""
        return '{0} asc'.format(self.name)

    def desc(self):
        """Descending order-by expression for this property."""
        return '{0} desc'.format(self.name)

    def __eq__(self, other):
        return u'{0} eq {1}'.format(self.name, self.escape_value(other))

    def __ne__(self, other):
        return u'{0} ne {1}'.format(self.name, self.escape_value(other))

    def __ge__(self, other):
        return u'{0} ge {1}'.format(self.name, self.escape_value(other))

    def __gt__(self, other):
        return u'{0} gt {1}'.format(self.name, self.escape_value(other))

    def __le__(self, other):
        return u'{0} le {1}'.format(self.name, self.escape_value(other))

    def __lt__(self, other):
        return u'{0} lt {1}'.format(self.name, self.escape_value(other))

    # Overriding __eq__ would normally make instances unhashable in Python 3
    # (__hash__ is implicitly set to None). Restore identity hashing so that
    # property descriptors can be used in sets and as dict keys (e.g. for
    # dirty-property tracking).
    __hash__ = object.__hash__

    def startswith(self, value):
        """Filter expression: property value starts with `value`."""
        return u'startswith({0}, {1})'.format(self.name, self.escape_value(value))

    def endswith(self, value):
        """Filter expression: property value ends with `value`."""
        return u'endswith({0}, {1})'.format(self.name, self.escape_value(value))
class IntegerProperty(PropertyBase):
    """
    Property holding a plain integer. JSON supports integers natively, so
    values pass through serialization unchanged.
    """
    def serialize(self, value):
        return value

    def deserialize(self, value):
        return value
class StringProperty(PropertyBase):
    """
    Property holding a unicode string. Values travel unchanged; only the
    query-string form needs quoting.
    """
    def serialize(self, value):
        return value

    def deserialize(self, value):
        return value

    def escape_value(self, value):
        # OData query syntax single-quotes strings and doubles embedded quotes.
        if value is None:
            return 'null'
        escaped = value.replace("'", "''")
        return u"'{0}'".format(escaped)
class BooleanProperty(PropertyBase):
    """
    Property holding a boolean value. Query strings use the lowercase
    literals `true`/`false`; any truthy value counts as true.
    """
    def escape_value(self, value):
        return 'true' if value else 'false'

    def serialize(self, value):
        return bool(value)

    def deserialize(self, value):
        return bool(value)
class FloatProperty(PropertyBase):
    """
    Property holding a float. JSON supports floats natively, so values pass
    through serialization unchanged.
    """
    def serialize(self, value):
        return value

    def deserialize(self, value):
        return value
class DecimalProperty(PropertyBase):
    """
    Property holding a decimal value. JSON has no decimal type, so values
    travel over the wire as floats and are converted back on read.
    """
    def escape_value(self, value):
        return 'null' if value is None else str(value)

    def serialize(self, value):
        # None maps to JSON null.
        if value is None:
            return None
        return float(value)

    def deserialize(self, value):
        if value is None:
            return None
        # Round-trip through str() so the Decimal reflects the transmitted
        # representation rather than binary float artifacts.
        return Decimal(str(value))
class DatetimeProperty(PropertyBase):
    """
    Property holding a datetime object. JSON has no native date type, so
    values are transmitted as ISO-8601 formatted strings.
    """
    def escape_value(self, value):
        if value is None:
            return 'null'
        return value.isoformat()

    def serialize(self, value):
        # Anything that is not a datetime serializes as None.
        if not isinstance(value, datetime.datetime):
            return None
        text = value.isoformat()
        if value.tzinfo is None:
            # NOTE(review): naive datetimes get a 'Z' suffix, i.e. they are
            # presented to the endpoint as UTC -- confirm this is intended.
            text += 'Z'
        return text

    def deserialize(self, value):
        if value:
            return dateutil.parser.parse(value)
class UUIDProperty(StringProperty):
    """
    Property holding a UUID (also known as GUID). JSON has no UUID type, so
    the value is transmitted as a string. Unlike :py:class:`StringProperty`
    the query-string form is unquoted and unescaped, as OData filters do not
    quote UUID literals.
    """
    def serialize(self, value):
        return str(value)

    def deserialize(self, value):
        return str(value)

    def escape_value(self, value):
        return 'null' if value is None else str(value)
| |
"""
Unit tests for the sumatra.dependency_finder module
"""
from __future__ import print_function
from __future__ import unicode_literals
from builtins import str
from builtins import object
try:
import unittest2 as unittest
except ImportError:
import unittest
import distutils.spawn
import sumatra.dependency_finder as df
import sys
import os
try:
import numpy
have_numpy = True
except ImportError:
have_numpy = False
import tempfile
import shutil
import warnings
# Allow a CI (Jenkins) job to opt out of environment-dependent tests by
# exporting JENKINS_SKIP_TESTS=1.
skip_ci = False
if "JENKINS_SKIP_TESTS" in os.environ:
    skip_ci = os.environ["JENKINS_SKIP_TESTS"] == "1"
class MockExecutable(object):
    """Minimal stand-in for a sumatra Executable: just a name and a path."""

    def __init__(self, name):
        # For the mock, the "path" is simply the given name.
        self.name = name
        self.path = name
class MockRExecutable(MockExecutable):
    """Mock R executable that resolves the real Rscript binary.

    The constructor ignores `name` (the mock always reports 'R') and looks
    up Rscript on PATH; when Rscript is unavailable the enclosing test is
    skipped via unittest.SkipTest.
    """

    def __init__(self, name):
        self.name = 'R'
        # shutil.which replaces distutils.spawn.find_executable:
        # distutils is deprecated and was removed in Python 3.12.
        rpath = shutil.which('Rscript')
        if rpath is None:
            raise unittest.SkipTest("Can't find Rscript")
        self.path = rpath
@unittest.skipIf(skip_ci, "Skipping test on CI server")
class TestRModuleFunctions(unittest.TestCase):
    """Tests for sumatra.dependency_finder.r against the R example project.

    Relies on the module-global `tmpdir`, populated by setup() with a copy
    of the example_projects tree.
    """

    def setUp(self):
        # Make the example project importable and run from inside it;
        # both changes are restored in tearDown.
        self.saved_path = sys.path[:]
        self.cwd = os.getcwd()
        self.example_project = os.path.join(tmpdir, "R")
        assert os.path.exists(self.example_project)
        sys.path.append(os.path.abspath(self.example_project))
        os.chdir(self.example_project)

    def tearDown(self):
        sys.path = self.saved_path
        os.chdir(self.cwd)

    def test__r_extern_script(self):
        # The helper R script used to enumerate dependencies must ship with
        # the package.
        self.assertEqual(os.path.exists(df.r.r_script_to_find_deps), True)

    def test__r_get_r_dependencies(self):
        rex = MockRExecutable('R')
        # NOTE(review): the expected library paths and versions below are
        # machine-specific -- this presumably only passes on the original
        # author's setup; confirm.
        myscript_deps = 'pkg::\nname : dplyr \npath : /Library/Frameworks/R.framework/Versions/3.1/Resources/library/dplyr \nversion : 0.4.1 \nsource : CRAN \npkg::\nname : MASS \npath : /Library/Frameworks/R.framework/Versions/3.1/Resources/library/MASS \nversion : 7.3-35 \nsource : CRAN \n'
        status, deps = df.r._get_r_dependencies(rex.path, 'myscript.R', depfinder='myscript.R')
        self.assertEqual(deps, myscript_deps)
        self.assertEqual(status, 0)

    def test__r_parse_deps(self):
        rex = MockRExecutable('R')
        status, deps = df.r._get_r_dependencies(rex.path, 'myscript.R', depfinder='myscript.R')
        # The raw script output parses into two Dependency records.
        list_deps = df.r._parse_deps(deps)
        d1, d2 = list_deps
        self.assertEqual(d1.name, 'dplyr')
        self.assertEqual(d1.source, 'CRAN')
        self.assertEqual(d1.version, '0.4.1')
        self.assertEqual(d2.name, 'MASS')
        self.assertEqual(d2.source, 'CRAN')
        self.assertEqual(d2.version, '7.3-35')
class TestPythonModuleFunctions(unittest.TestCase):
    """Tests for sumatra.dependency_finder.python against the example project."""

    def setUp(self):
        # Make the example project importable and run from inside it;
        # both changes are restored in tearDown.
        self.saved_path = sys.path[:]
        self.cwd = os.getcwd()
        self.example_project = os.path.join(tmpdir, "python")
        assert os.path.exists(self.example_project)
        sys.path.append(os.path.abspath(self.example_project))
        os.chdir(self.example_project)

    def tearDown(self):
        sys.path = self.saved_path
        os.chdir(self.cwd)

    @unittest.skipUnless(have_numpy, "test requires NumPy")
    def test__find_versions_by_attribute(self):
        import main
        self.assertEqual(df.python.find_version_by_attribute(main), "1.2.3a")
        # After removing __version__ the finder falls back to the fixture's
        # next version attribute.
        del main.__version__
        self.assertEqual(df.python.find_version_by_attribute(main), "1.2.3b")

    def test__find_versions_from_egg(self):
        dep = df.python.Dependency("main", os.path.join(self.example_project, "main.py"))
        self.assertEqual(dep.version, 'unknown')
        # Version is filled in, in place, from the egg-info next to main.py.
        df.python.find_versions_from_egg([dep])
        self.assertEqual(dep.version, "1.2.3egg")

    @unittest.skipUnless(have_numpy, "test requires NumPy")
    def test__find_imported_packages(self):
        # the example project has numpy as its only import
        example_project_imports = df.python.find_imported_packages(os.path.join(tmpdir, "python", "main.py"), sys.executable)
        assert "numpy" in list(example_project_imports.keys())
class TestCoreModuleFunctions(unittest.TestCase):
    """Tests for sumatra.dependency_finder.core (version and file lookup)."""

    def setUp(self):
        self.example_project = os.path.join(tmpdir, "python")
        assert os.path.exists(self.example_project)
        self.somemodule_path = os.path.abspath(os.path.join(self.example_project, "subpackage", "somemodule.py"))

    def test__find_versions(self):
        #better to test this using mocks
        dep = df.python.Dependency("main", os.path.join(self.example_project, "main.py"))
        df.core.find_versions([dep], [df.python.find_versions_from_egg])
        self.assertEqual(dep.version, "1.2.3egg")

    def test__find_file_full_path(self):
        # A full path is returned as-is (no search through candidate dirs).
        self.assertEqual(df.core.find_file(os.path.join(self.example_project, "subpackage", "somemodule.py"),
                                           None,
                                           None),
                         self.somemodule_path)

    def test__find_file_current_directory(self):
        # A bare filename is resolved against the given current directory.
        self.assertEqual(df.core.find_file("somemodule.py",
                                           os.path.join(self.example_project, "subpackage"),
                                           []),
                         self.somemodule_path)

    def test__find_file_nonexistentfile(self):
        self.assertRaises(IOError,
                          df.core.find_file,
                          "adifferentmodule.py",
                          os.path.join(self.example_project, "subpackage"),
                          [])

    def test__find_versions_from_versioncontrol(self):
        # TODO: not implemented -- would require a version-control fixture.
        pass
class TestMainModuleFunctions(unittest.TestCase):
    """Tests for the top-level df.find_dependencies dispatch across languages."""

    def setUp(self):
        # Make every example project importable; restored in tearDown.
        self.saved_path = sys.path[:]
        example_projects = {
            'python': os.path.join(tmpdir, "python"),
            'neuron': os.path.join(tmpdir, "neuron"),
            'r': os.path.join(tmpdir, 'R'),
        }
        for example_project in example_projects.values():
            assert os.path.exists(example_project)
            sys.path.append(os.path.abspath(example_project))

    def tearDown(self):
        sys.path = self.saved_path

    def test__find_dependencies_for_a_NEURON_project(self):
        deps = df.find_dependencies(os.path.join(tmpdir, "neuron", "main.hoc"),
                                    MockExecutable("NEURON"))
        self.assertEqual(os.path.basename(deps[0].path), "dependency.hoc")

    @unittest.skipIf(skip_ci, "Skipping test on CI server")
    def test__find_dependencies_for_R_project(self):
        deps = df.find_dependencies(os.path.join(tmpdir, "R", "myscript.R"),
                                    MockRExecutable("R"))
        self.assertEqual(len(deps), 2)

    def test__find_dependencies_with_unsupported_executable__should_raise_warning(self):
        # Temporarily turn UserWarning into an error so assertRaises sees it.
        warnings.filters.append(('error', None, UserWarning, None, 0))  # ought to remove this again afterwards
        self.assertRaises(UserWarning,
                          df.find_dependencies,
                          os.path.join(tmpdir, "python", "main.py"),
                          MockExecutable("Perl"))  # I'm not saying Perl shouldn't be supported, it just isn't at present
class TestPythonDependency(unittest.TestCase):
    """Tests for df.python.Dependency construction and equality semantics."""

    def setUp(self):
        self.saved_path = sys.path[:]
        self.example_project = os.path.join(tmpdir, "python")
        assert os.path.exists(self.example_project)
        sys.path.append(os.path.abspath(self.example_project))

    def tearDown(self):
        sys.path = self.saved_path

    def test__init(self):
        # An explicitly supplied version wins over any discovered one.
        dep = df.python.Dependency("main", os.path.join(self.example_project, "main.py"), version="1.2.3b")
        self.assertEqual(dep.version, "1.2.3b")

    @unittest.skipUnless(have_numpy, "test requires NumPy")
    def test__from_module(self):
        dep = df.python.Dependency.from_module(sys.modules['numpy'], None)
        self.assertEqual(dep.name, "numpy")

    def test__str(self):
        # Smoke test: str() must not raise.
        dep = df.python.Dependency("main", "/some/path")
        str(dep)

    def test_eq(self):
        path = os.path.join(self.example_project, "main.py")
        dep1 = df.python.Dependency("main", path)
        dep2 = df.python.Dependency("main", path)
        self.assertEqual(dep1, dep2)

    def test_ne(self):
        # Same path, different module name: not equal.
        path = "/some/path"
        dep1 = df.python.Dependency("main", path)
        dep2 = df.python.Dependency("unittest", path)
        self.assertNotEqual(dep1, dep2)
class TestNEURONDependency(unittest.TestCase):
    """Tests for df.neuron.Dependency (hoc files carry no version info)."""

    def setUp(self):
        self.saved_path = sys.path[:]
        self.example_project = os.path.join(tmpdir, "neuron")
        assert os.path.exists(self.example_project)
        sys.path.append(os.path.abspath(self.example_project))

    def tearDown(self):
        sys.path = self.saved_path

    def test__init(self):
        # NEURON dependencies always report an "unknown" version.
        dep = df.neuron.Dependency(os.path.join(self.example_project, "main.hoc"))
        self.assertEqual(dep.version, "unknown")

    def test__str(self):
        # Smoke test: str() must not raise.
        dep = df.neuron.Dependency(os.path.join(self.example_project, "main.hoc"))
        str(dep)

    def test_eq(self):
        dep1 = df.neuron.Dependency(os.path.join(self.example_project, "main.hoc"))
        dep2 = df.neuron.Dependency(os.path.join(self.example_project, "main.hoc"))
        self.assertEqual(dep1, dep2)

    def test_ne(self):
        dep1 = df.neuron.Dependency(os.path.join(self.example_project, "main.hoc"))
        dep2 = df.neuron.Dependency(os.path.join(self.example_project, "dependency.hoc"))
        self.assertNotEqual(dep1, dep2)
class TestRDependency(unittest.TestCase):
    """Tests for df.r.Dependency construction and equality semantics."""

    def setUp(self):
        self.saved_path = sys.path[:]
        self.example_project = os.path.join(tmpdir, "R")
        assert os.path.exists(self.example_project)
        sys.path.append(os.path.abspath(self.example_project))

    def tearDown(self):
        sys.path = self.saved_path

    def test__init(self):
        # Without querying R, the version defaults to "unknown".
        dep = df.r.Dependency(os.path.join(self.example_project, "myscript.R"))
        self.assertEqual(dep.version, "unknown")

    def test__str(self):
        # Smoke test: str() must not raise.
        dep = df.r.Dependency(os.path.join(self.example_project, "myscript.R"))
        str(dep)

    def test_eq(self):
        dep1 = df.r.Dependency(os.path.join(self.example_project, "myscript.R"))
        dep2 = df.r.Dependency(os.path.join(self.example_project, "myscript.R"))
        self.assertEqual(dep1, dep2)

    def test_ne(self):
        dep1 = df.r.Dependency(os.path.join(self.example_project, "myscript.R"))
        dep2 = df.r.Dependency(os.path.join(self.example_project, "myscript2.R"))
        self.assertNotEqual(dep1, dep2)
def setup():
    """Copy the example_projects tree into a fresh temporary directory."""
    global tmpdir
    # copytree() insists that the destination not exist yet, so reserve a
    # unique name with mkdtemp() and immediately remove the directory.
    tmpdir = tempfile.mkdtemp()
    shutil.rmtree(tmpdir)
    this_directory = os.path.dirname(__file__)
    source_tree = os.path.join(this_directory, os.path.pardir, "example_projects")
    shutil.copytree(source_tree, tmpdir)
    print(os.listdir(tmpdir))
def teardown():
    """Delete the temporary example-project tree created by setup()."""
    global tmpdir
    print("removing tmpdir")
    # This only gets called automatically when running under nose; when run
    # directly, __main__ invokes it explicitly. Perhaps use atexit, or do
    # this on a class-by-class basis and use __del__.
    shutil.rmtree(tmpdir)
if __name__ == '__main__':
    # Build the fixtures, run the suite, then clean up.
    # NOTE(review): unittest.main() calls sys.exit() by default, so
    # teardown() below is probably never reached when run directly --
    # confirm, or pass exit=False.
    setup()
    unittest.main()
    teardown()
| |
"""
The :mod:`sklearn.utils` module includes various utilities.
"""
from collections import Sequence
import numpy as np
from scipy.sparse import issparse
import warnings
from .murmurhash import murmurhash3_32
from .validation import (as_float_array,
assert_all_finite, warn_if_not_float,
check_random_state, column_or_1d, check_array,
check_consistent_length, check_X_y, indexable)
from .class_weight import compute_class_weight
from sklearn.utils.sparsetools import minimum_spanning_tree
__all__ = ["murmurhash3_32", "as_float_array",
"assert_all_finite", "check_array",
"warn_if_not_float",
"check_random_state",
"compute_class_weight",
"minimum_spanning_tree",
"column_or_1d", "safe_indexing",
"check_consistent_length", "check_X_y", 'indexable']
class deprecated(object):
    """Decorator to mark a function or class as deprecated.

    Issue a warning when the function is called/the class is instantiated and
    adds a warning to the docstring.

    The optional extra argument will be appended to the deprecation message
    and the docstring. Note: to use this with the default value for extra, put
    in an empty pair of parentheses:

    >>> from sklearn.utils import deprecated
    >>> deprecated() # doctest: +ELLIPSIS
    <sklearn.utils.deprecated object at ...>

    >>> @deprecated()
    ... def some_function(): pass
    """

    # Adapted from http://wiki.python.org/moin/PythonDecoratorLibrary,
    # but with many changes.

    def __init__(self, extra=''):
        """
        Parameters
        ----------
        extra: string
          to be added to the deprecation messages
        """
        self.extra = extra

    def __call__(self, obj):
        """Decorate `obj`, dispatching on whether it is a class or a function."""
        if isinstance(obj, type):
            return self._decorate_class(obj)
        else:
            return self._decorate_fun(obj)

    def _decorate_class(self, cls):
        """Wrap cls.__init__ so instantiation emits a DeprecationWarning."""
        msg = "Class %s is deprecated" % cls.__name__
        if self.extra:
            msg += "; %s" % self.extra

        # FIXME: we should probably reset __new__ for full generality
        init = cls.__init__

        def wrapped(*args, **kwargs):
            warnings.warn(msg, category=DeprecationWarning)
            return init(*args, **kwargs)
        cls.__init__ = wrapped

        wrapped.__name__ = '__init__'
        wrapped.__doc__ = self._update_doc(init.__doc__)
        # Keep a handle on the original __init__ so introspection and tests
        # can detect the wrapping.
        wrapped.deprecated_original = init

        return cls

    def _decorate_fun(self, fun):
        """Decorate function fun"""
        msg = "Function %s is deprecated" % fun.__name__
        if self.extra:
            msg += "; %s" % self.extra

        def wrapped(*args, **kwargs):
            warnings.warn(msg, category=DeprecationWarning)
            return fun(*args, **kwargs)

        # Metadata is propagated by hand (this code pre-dates the use of
        # functools.wraps here).
        wrapped.__name__ = fun.__name__
        wrapped.__dict__ = fun.__dict__
        wrapped.__doc__ = self._update_doc(fun.__doc__)

        return wrapped

    def _update_doc(self, olddoc):
        """Prefix the docstring with a DEPRECATED banner (plus `extra`)."""
        newdoc = "DEPRECATED"
        if self.extra:
            newdoc = "%s: %s" % (newdoc, self.extra)
        if olddoc:
            newdoc = "%s\n\n%s" % (newdoc, olddoc)
        return newdoc
def safe_mask(X, mask):
    """Return a mask which is safe to use on X.

    Parameters
    ----------
    X : {array-like, sparse matrix}
        Data on which to apply mask.

    mask: array
        Mask to be used on X. Either a boolean mask of length
        ``X.shape[0]`` or an array of integer indices.

    Returns
    -------
    mask
        Integer indices when X is sparse, otherwise the input mask
        (integer masks are always returned unchanged).
    """
    mask = np.asarray(mask)
    # `np.integer` matches every integer dtype; the former `np.int` alias
    # was deprecated and removed in NumPy >= 1.24. Integer index arrays are
    # safe for both dense and sparse X.
    if np.issubdtype(mask.dtype, np.integer):
        return mask

    if hasattr(X, "toarray"):
        # Sparse matrices do not support boolean masks; convert the boolean
        # mask into the corresponding integer indices.
        ind = np.arange(mask.shape[0])
        mask = ind[mask]
    return mask
def safe_indexing(X, indices):
    """Return items or rows from X using indices.

    Allows simple indexing of lists or arrays.

    Parameters
    ----------
    X : array-like, sparse-matrix, list.
        Data from which to sample rows or items.

    indices : array-like, list
        Indices according to which X will be subsampled.
    """
    if hasattr(X, "iloc"):
        # pandas DataFrame / Series: positional indexing through .iloc
        return X.iloc[indices]
    if not hasattr(X, "shape"):
        # Plain Python sequence: index element by element.
        return [X[idx] for idx in indices]
    has_int_dtype = hasattr(indices, "dtype") and indices.dtype.kind == "i"
    if hasattr(X, "take") and has_int_dtype:
        # ndarray fast path: take() is often substantially faster than
        # fancy indexing with X[indices].
        return X.take(indices, axis=0)
    return X[indices]
def resample(*arrays, **options):
    """Resample arrays or sparse matrices in a consistent way.

    The default strategy implements one step of the bootstrapping
    procedure (sampling with replacement). The same row indices are used
    for every input, so parallel collections stay aligned.

    Parameters
    ----------
    `*arrays` : sequence of arrays or scipy.sparse matrices with same shape[0]

    replace : boolean, True by default
        Implements resampling with replacement. If False, this will implement
        (sliced) random permutations.

    n_samples : int, None by default
        Number of samples to generate. If left to None this is
        automatically set to the first dimension of the arrays.

    random_state : int or RandomState instance
        Control the shuffling for reproducible behavior.

    Returns
    -------
    Sequence of resampled views of the collections. The original arrays are
    not impacted. A single input yields a single output (not a list).

    See also
    --------
    :func:`sklearn.utils.shuffle`
    """
    random_state = check_random_state(options.pop('random_state', None))
    replace = options.pop('replace', True)
    max_n_samples = options.pop('n_samples', None)
    if options:
        raise ValueError("Unexpected kw arguments: %r" % options.keys())
    if not arrays:
        return None
    first = arrays[0]
    n_samples = first.shape[0] if hasattr(first, 'shape') else len(first)
    if max_n_samples is None:
        max_n_samples = n_samples
    if max_n_samples > n_samples:
        raise ValueError("Cannot sample %d out of arrays with dim %d" % (
            max_n_samples, n_samples))
    check_consistent_length(*arrays)
    arrays = [check_array(a, accept_sparse='csr', ensure_2d=False)
              for a in arrays]
    if replace:
        # Bootstrap: draw indices independently, with replacement.
        indices = random_state.randint(0, n_samples, size=(max_n_samples,))
    else:
        # Permutation: shuffle all indices, then keep a prefix.
        indices = np.arange(n_samples)
        random_state.shuffle(indices)
        indices = indices[:max_n_samples]
    resampled_arrays = [a[indices] for a in arrays]
    if len(resampled_arrays) == 1:
        # syntactic sugar for the unit argument case
        return resampled_arrays[0]
    return resampled_arrays
def shuffle(*arrays, **options):
    """Shuffle arrays or sparse matrices in a consistent way.

    This is a convenience alias to ``resample(*arrays, replace=False)`` to do
    random permutations of the collections.

    Parameters
    ----------
    `*arrays` : sequence of arrays or scipy.sparse matrices with same shape[0]

    random_state : int or RandomState instance
        Control the shuffling for reproducible behavior.

    n_samples : int, None by default
        Number of samples to generate. If left to None this is
        automatically set to the first dimension of the arrays.

    Returns
    -------
    Sequence of shuffled views of the collections. The original arrays are
    not impacted.

    See also
    --------
    :func:`sklearn.utils.resample`
    """
    # Shuffling is resampling without replacement; force replace=False
    # regardless of what the caller passed.
    return resample(*arrays, **dict(options, replace=False))
def safe_sqr(X, copy=True):
    """Element wise squaring of array-likes and sparse matrices.

    Parameters
    ----------
    X : array like, matrix, sparse matrix

    copy : bool, optional (default True)
        Whether to square a copy of ``X`` or square in place.

    Returns
    -------
    X ** 2 : element wise square
    """
    X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
    if issparse(X):
        # Square only the stored (nonzero) entries; zeros stay zero.
        if copy:
            X = X.copy()
        X.data **= 2
        return X
    if copy:
        return X ** 2
    X **= 2
    return X
def gen_batches(n, batch_size):
    """Generator to create slices containing batch_size elements, from 0 to n.

    The last slice may contain less than batch_size elements, when batch_size
    does not divide n.

    Examples
    --------
    >>> from sklearn.utils import gen_batches
    >>> list(gen_batches(7, 3))
    [slice(0, 3, None), slice(3, 6, None), slice(6, 7, None)]
    >>> list(gen_batches(6, 3))
    [slice(0, 3, None), slice(3, 6, None)]
    >>> list(gen_batches(2, 3))
    [slice(0, 2, None)]
    """
    end = 0
    # Emit full batches while another complete batch still fits.
    while end + batch_size <= n:
        yield slice(end, end + batch_size)
        end += batch_size
    # Trailing partial batch, when batch_size does not divide n.
    if end < n:
        yield slice(end, n)
def gen_even_slices(n, n_packs, n_samples=None):
    """Generator to create n_packs slices going up to n.

    Pass n_samples when the slices are to be used for sparse matrix indexing;
    slicing off-the-end raises an exception, while it works for NumPy arrays.

    Parameters
    ----------
    n : int
        Total number of items to slice over.
    n_packs : int
        Number of slices to generate; must be >= 1.
    n_samples : int or None, optional
        When given, every slice end is clipped to this value.

    Raises
    ------
    ValueError
        If ``n_packs`` is less than 1. (Previously ``n_packs=0`` failed
        with an uninformative ZeroDivisionError and negative values
        silently yielded nothing.)

    Examples
    --------
    >>> from sklearn.utils import gen_even_slices
    >>> list(gen_even_slices(10, 1))
    [slice(0, 10, None)]
    >>> list(gen_even_slices(10, 10)) #doctest: +ELLIPSIS
    [slice(0, 1, None), slice(1, 2, None), ..., slice(9, 10, None)]
    >>> list(gen_even_slices(10, 5)) #doctest: +ELLIPSIS
    [slice(0, 2, None), slice(2, 4, None), ..., slice(8, 10, None)]
    >>> list(gen_even_slices(10, 3))
    [slice(0, 4, None), slice(4, 7, None), slice(7, 10, None)]
    """
    if n_packs < 1:
        raise ValueError("gen_even_slices got n_packs=%s, must be >=1"
                         % n_packs)
    start = 0
    for pack_num in range(n_packs):
        # Distribute the remainder over the first (n % n_packs) packs so
        # pack sizes differ by at most one.
        this_n = n // n_packs
        if pack_num < n % n_packs:
            this_n += 1
        if this_n > 0:
            end = start + this_n
            if n_samples is not None:
                end = min(n_samples, end)
            yield slice(start, end, None)
            start = end
def tosequence(x):
    """Cast iterable x to a Sequence, avoiding a copy if possible."""
    # numpy arrays pass through np.asarray (a no-op for ndarray input).
    if isinstance(x, np.ndarray):
        return np.asarray(x)
    # Anything already satisfying the Sequence ABC is returned untouched;
    # other iterables (generators, sets, ...) are materialized as a list.
    return x if isinstance(x, Sequence) else list(x)
class ConvergenceWarning(Warning):
    """Custom warning to capture convergence problems.

    Raised (via ``warnings.warn``) by iterative estimators that stop
    before reaching their convergence criterion.
    """
| |
# Authors: Mark Wronkiewicz <wronk@uw.edu>
# Yousra Bekhti <yousra.bekhti@gmail.com>
# Eric Larson <larson.eric.d@gmail.com>
#
# License: BSD (3-clause)
import os.path as op
import warnings
from copy import deepcopy
import numpy as np
from numpy.testing import assert_allclose, assert_array_equal
from nose.tools import assert_true, assert_raises
from mne import (read_source_spaces, pick_types, read_trans, read_cov,
make_sphere_model, create_info, setup_volume_source_space)
from mne.datasets import testing
from mne.simulation import simulate_sparse_stc, simulate_raw
from mne.io import Raw, RawArray
from mne.time_frequency import compute_raw_psd
from mne.utils import _TempDir, run_tests_if_main, requires_scipy_version
# Surface every warning so tests can capture/inspect them.
warnings.simplefilter('always')

# Paths into the MNE testing dataset (not downloaded here; tests that need
# the data are gated by @testing.requires_testing_data).
data_path = testing.data_path(download=False)
raw_fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc_raw.fif')
cov_fname = op.join(data_path, 'MEG', 'sample',
                    'sample_audvis_trunc-cov.fif')
trans_fname = op.join(data_path, 'MEG', 'sample',
                      'sample_audvis_trunc-trans.fif')
bem_path = op.join(data_path, 'subjects', 'sample', 'bem')
src_fname = op.join(bem_path, 'sample-oct-2-src.fif')
bem_fname = op.join(bem_path, 'sample-320-320-320-bem-sol.fif')
# Recording with continuous head-position (cHPI) information.
raw_chpi_fname = op.join(data_path, 'SSS', 'test_move_anon_raw.fif')
# Head-position text file shipped with the package itself.
base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
hp_fname = op.join(base_dir, 'test_chpi_raw_hp.txt')  # for a different raw
def _make_stc(raw, src):
    """Helper to make a sparse STC spanning the first tenth of raw's samples."""
    sample_rate = raw.info['sfreq']  # Hz
    tstep = 1. / sample_rate
    n_samples = len(raw.times) // 10
    # Time axis starting at 0 s, one point per raw sample.
    times = np.arange(0, n_samples) * tstep
    # Fixed random_state keeps the simulated sources reproducible.
    return simulate_sparse_stc(src, 10, times, random_state=42)
def _get_data():
    """Helper to get some starting data.

    Returns a 5-tuple ``(raw, src, stc, trans, sphere)``: a small cropped
    Raw with a synthetic ECG channel appended, a source space, a sparse
    STC, a head transform, and a sphere conductor model.
    """
    # raw with ECG channel
    raw = Raw(raw_fname).crop(0., 5.0).preload_data()
    # Keep every 16th data channel plus stim/EOG to make the test fast.
    data_picks = pick_types(raw.info, meg=True, eeg=True)
    other_picks = pick_types(raw.info, meg=False, stim=True, eog=True)
    picks = np.sort(np.concatenate((data_picks[::16], other_picks)))
    raw = raw.pick_channels([raw.ch_names[p] for p in picks])
    # Fabricate an all-zero ECG channel matching raw's length and sfreq.
    ecg = RawArray(np.zeros((1, len(raw.times))),
                   create_info(['ECG 063'], raw.info['sfreq'], 'ecg'))
    # Copy measurement-info fields so the two infos can be merged.
    for key in ('dev_head_t', 'buffer_size_sec', 'highpass', 'lowpass',
                'filename', 'dig'):
        ecg.info[key] = raw.info[key]
    raw.add_channels([ecg])
    src = read_source_spaces(src_fname)
    trans = read_trans(trans_fname)
    sphere = make_sphere_model('auto', 'auto', raw.info)
    stc = _make_stc(raw, src)
    return raw, src, stc, trans, sphere
@testing.requires_testing_data
def test_simulate_raw_sphere():
    """Test simulation of raw data with sphere model.

    Covers: equivalence of object vs. filename arguments, save/load
    round-trip, artifact (ECG/EOG) channel behavior without noise,
    MEG/EEG channel-subset consistency, interpolation modes, and a set
    of degenerate inputs that must raise.
    """
    seed = 42
    raw, src, stc, trans, sphere = _get_data()
    assert_true(len(pick_types(raw.info, meg=False, ecg=True)) == 1)
    # head pos
    head_pos_sim = dict()
    # these will be at 1., 2., ... sec
    shifts = [[0.001, 0., -0.001], [-0.001, 0.001, 0.]]
    for time_key, shift in enumerate(shifts):
        # Create 4x4 matrix transform and normalize
        temp_trans = deepcopy(raw.info['dev_head_t'])
        temp_trans['trans'][:3, 3] += shift
        head_pos_sim[time_key + 1.] = temp_trans['trans']
    #
    # Test raw simulation with basic parameters
    #
    raw_sim = simulate_raw(raw, stc, trans, src, sphere, read_cov(cov_fname),
                           head_pos=head_pos_sim,
                           blink=True, ecg=True, random_state=seed)
    # Same simulation, but passing filenames instead of loaded objects:
    # results must be bit-identical.
    raw_sim_2 = simulate_raw(raw, stc, trans_fname, src_fname, sphere,
                             cov_fname, head_pos=head_pos_sim,
                             blink=True, ecg=True, random_state=seed)
    assert_array_equal(raw_sim_2[:][0], raw_sim[:][0])
    # Test IO on processed data
    tempdir = _TempDir()
    test_outname = op.join(tempdir, 'sim_test_raw.fif')
    raw_sim.save(test_outname)
    raw_sim_loaded = Raw(test_outname, preload=True, proj=False,
                         allow_maxshield=True)
    assert_allclose(raw_sim_loaded[:][0], raw_sim[:][0], rtol=1e-6, atol=1e-20)
    del raw_sim, raw_sim_2
    # with no cov (no noise) but with artifacts, most time periods should match
    # but the EOG/ECG channels should not
    for ecg, eog in ((True, False), (False, True), (True, True)):
        raw_sim_3 = simulate_raw(raw, stc, trans, src, sphere,
                                 cov=None, head_pos=head_pos_sim,
                                 blink=eog, ecg=ecg, random_state=seed)
        raw_sim_4 = simulate_raw(raw, stc, trans, src, sphere,
                                 cov=None, head_pos=head_pos_sim,
                                 blink=False, ecg=False, random_state=seed)
        picks = np.arange(len(raw.ch_names))
        diff_picks = pick_types(raw.info, meg=False, ecg=ecg, eog=eog)
        these_picks = np.setdiff1d(picks, diff_picks)
        # Channels without simulated artifacts: mostly equal.
        close = np.isclose(raw_sim_3[these_picks][0],
                           raw_sim_4[these_picks][0], atol=1e-20)
        assert_true(np.mean(close) > 0.7)
        # Artifact channels: almost entirely different.
        far = ~np.isclose(raw_sim_3[diff_picks][0],
                          raw_sim_4[diff_picks][0], atol=1e-20)
        assert_true(np.mean(far) > 0.99)
    del raw_sim_3, raw_sim_4
    # make sure it works with EEG-only and MEG-only
    raw_sim_meg = simulate_raw(raw.pick_types(meg=True, eeg=False, copy=True),
                               stc, trans, src, sphere, cov=None,
                               ecg=True, blink=True, random_state=seed)
    raw_sim_eeg = simulate_raw(raw.pick_types(meg=False, eeg=True, copy=True),
                               stc, trans, src, sphere, cov=None,
                               ecg=True, blink=True, random_state=seed)
    raw_sim_meeg = simulate_raw(raw.pick_types(meg=True, eeg=True, copy=True),
                                stc, trans, src, sphere, cov=None,
                                ecg=True, blink=True, random_state=seed)
    # MEG-only + EEG-only stacked must match the combined MEG+EEG run.
    assert_allclose(np.concatenate((raw_sim_meg[:][0], raw_sim_eeg[:][0])),
                    raw_sim_meeg[:][0], rtol=1e-7, atol=1e-20)
    del raw_sim_meg, raw_sim_eeg, raw_sim_meeg
    # check that different interpolations are similar given small movements
    raw_sim_cos = simulate_raw(raw, stc, trans, src, sphere,
                               head_pos=head_pos_sim,
                               random_state=seed)
    raw_sim_lin = simulate_raw(raw, stc, trans, src, sphere,
                               head_pos=head_pos_sim, interp='linear',
                               random_state=seed)
    assert_allclose(raw_sim_cos[:][0], raw_sim_lin[:][0],
                    rtol=1e-5, atol=1e-20)
    del raw_sim_cos, raw_sim_lin
    # Make impossible transform (translate up into helmet) and ensure failure
    head_pos_sim_err = deepcopy(head_pos_sim)
    head_pos_sim_err[1.][2, 3] -= 0.1  # z trans upward 10cm
    assert_raises(RuntimeError, simulate_raw, raw, stc, trans, src, sphere,
                  ecg=False, blink=False, head_pos=head_pos_sim_err)
    assert_raises(RuntimeError, simulate_raw, raw, stc, trans, src,
                  bem_fname, ecg=False, blink=False,
                  head_pos=head_pos_sim_err)
    # other degenerate conditions
    assert_raises(TypeError, simulate_raw, 'foo', stc, trans, src, sphere)
    assert_raises(TypeError, simulate_raw, raw, 'foo', trans, src, sphere)
    assert_raises(ValueError, simulate_raw, raw, stc.copy().crop(0, 0),
                  trans, src, sphere)
    # STC whose sample period disagrees with raw's must be rejected.
    stc_bad = stc.copy()
    stc_bad.tstep += 0.1
    assert_raises(ValueError, simulate_raw, raw, stc_bad, trans, src, sphere)
    assert_raises(RuntimeError, simulate_raw, raw, stc, trans, src, sphere,
                  chpi=True)  # no cHPI info
    assert_raises(ValueError, simulate_raw, raw, stc, trans, src, sphere,
                  interp='foo')
    assert_raises(TypeError, simulate_raw, raw, stc, trans, src, sphere,
                  head_pos=1.)
    assert_raises(RuntimeError, simulate_raw, raw, stc, trans, src, sphere,
                  head_pos=hp_fname)  # ends up with t>t_end
    head_pos_sim_err = deepcopy(head_pos_sim)
    head_pos_sim_err[-1.] = head_pos_sim_err[1.]  # negative time
    assert_raises(RuntimeError, simulate_raw, raw, stc, trans, src, sphere,
                  head_pos=head_pos_sim_err)
@testing.requires_testing_data
def test_simulate_raw_bem():
    """Test simulation of raw data with BEM.

    Simulates the same sources with a sphere model and with a BEM
    solution and checks that corresponding channels are highly
    correlated (exact equality is not expected between forward models).
    """
    seed = 42
    raw, src, stc, trans, sphere = _get_data()
    raw_sim_sph = simulate_raw(raw, stc, trans, src, sphere, cov=None,
                               ecg=True, blink=True, random_state=seed)
    raw_sim_bem = simulate_raw(raw, stc, trans, src, bem_fname, cov=None,
                               ecg=True, blink=True, random_state=seed,
                               n_jobs=2)
    # some components (especially radial) might not match that well,
    # so just make sure that most components have high correlation
    assert_array_equal(raw_sim_sph.ch_names, raw_sim_bem.ch_names)
    picks = pick_types(raw.info, meg=True, eeg=True)
    n_ch = len(picks)
    # corr is 2n x 2n; the upper-right n x n block holds the
    # sphere-vs-BEM cross-correlations, channel by channel on its diagonal.
    corr = np.corrcoef(raw_sim_sph[picks][0], raw_sim_bem[picks][0])
    assert_array_equal(corr.shape, (2 * n_ch, 2 * n_ch))
    assert_true(np.median(np.diag(corr[:n_ch, -n_ch:])) > 0.9)
@requires_scipy_version('0.12')
@testing.requires_testing_data
def test_simulate_raw_chpi():
    """Test simulation of raw data with cHPI.

    Simulates the same sources with cHPI off and on, and verifies that
    the cHPI run shows strong extra power at the HPI coil frequencies on
    MEG channels while EEG channels are unaffected.
    """
    with warnings.catch_warnings(record=True):  # MaxShield
        raw = Raw(raw_chpi_fname, allow_maxshield=True)
    sphere = make_sphere_model('auto', 'auto', raw.info)
    # make sparse spherical source space
    sphere_vol = tuple(sphere['r0'] * 1000.) + (sphere.radius * 1000.,)
    src = setup_volume_source_space('sample', sphere=sphere_vol, pos=70.)
    stc = _make_stc(raw, src)
    # simulate data with cHPI on
    raw_sim = simulate_raw(raw, stc, None, src, sphere, cov=None, chpi=False)
    raw_chpi = simulate_raw(raw, stc, None, src, sphere, cov=None, chpi=True)
    # XXX we need to test that the cHPI signals are actually in the correct
    # place, but that should be a subsequent enhancement (not trivial to do so)
    psd_sim, freqs_sim = compute_raw_psd(raw_sim)
    psd_chpi, freqs_chpi = compute_raw_psd(raw_chpi)
    assert_array_equal(freqs_sim, freqs_chpi)
    # NOTE(review): HPI coil frequencies read from the 'custom_ref' entry of
    # each coil dict — presumably the first element holds the coil frequency
    # in this file format; confirm against the MNE hpi_meas structure.
    hpi_freqs = np.array([x['custom_ref'][0]
                          for x in raw.info['hpi_meas'][0]['hpi_coils']])
    # Nearest PSD frequency bin for each coil frequency.
    freq_idx = np.sort([np.argmin(np.abs(freqs_sim - f)) for f in hpi_freqs])
    picks_meg = pick_types(raw.info, meg=True, eeg=False)
    picks_eeg = pick_types(raw.info, meg=False, eeg=True)
    # EEG is untouched by cHPI; MEG gains >100x power at coil frequencies.
    assert_allclose(psd_sim[picks_eeg], psd_chpi[picks_eeg])
    assert_true((psd_chpi[picks_meg][:, freq_idx] >
                 100 * psd_sim[picks_meg][:, freq_idx]).all())
# Run the module's tests when executed as a script.
run_tests_if_main()
| |
''' Google API-based feature extraction classes. '''
import logging
import time
import warnings
import os
from collections import defaultdict
import numpy as np
import pandas as pd
from pliers.extractors.image import ImageExtractor
from pliers.extractors.text import TextExtractor
from pliers.extractors.video import VideoExtractor
from pliers.transformers import (GoogleAPITransformer,
GoogleVisionAPITransformer,
GoogleAPITransformer)
from pliers.extractors.base import ExtractorResult
from pliers.utils import flatten_dict
class GoogleVisionAPIExtractor(GoogleVisionAPITransformer, ImageExtractor):

    ''' Base class for all Extractors that use the Google Vision API. '''

    VERSION = '1.0'

    def _extract(self, stims):
        ''' Query the Vision API once for all stims and wrap each per-stim
        response in an ExtractorResult, preserving input order. '''
        responses = self._query_api(self._build_request(stims))
        results = []
        for i, response in enumerate(responses):
            if response and self.response_object in response:
                annotations = response[self.response_object]
                results.append(ExtractorResult(annotations, stims[i], self))
            elif 'error' in response:
                raise Exception(response['error']['message'])
            else:
                # No annotations for this stim; emit an empty placeholder
                # so results stay aligned with stims.
                results.append(ExtractorResult([{}], stims[i], self))
        return results
class GoogleVisionAPIFaceExtractor(GoogleVisionAPIExtractor):

    ''' Identifies faces in images using the Google Cloud Vision API. '''

    request_type = 'FACE_DETECTION'
    response_object = 'faceAnnotations'

    def _to_df(self, result, handle_annotations=None):
        '''
        Converts a Google API Face JSON response into a Pandas Dataframe.

        Args:
            result (ExtractorResult): Result object from which to parse out a
                Dataframe.
            handle_annotations (str): How returned face annotations should be
                handled in cases where there are multiple faces.
                'first' indicates to only use the first face JSON object, all
                other values will default to including every face.
        '''
        annotations = result._data
        if handle_annotations == 'first':
            annotations = [annotations[0]]

        face_results = []
        for annotation in annotations:
            data_dict = {}
            for field, val in annotation.items():
                if 'Confidence' in field:
                    data_dict['face_' + field] = val
                elif 'oundingPoly' in field:
                    # Substring match covers both boundingPoly and
                    # fdBoundingPoly; flatten each vertex to scalar columns.
                    for j, vertex in enumerate(val['vertices']):
                        for dim in ('x', 'y'):
                            name = '%s_vertex%d_%s' % (field, j + 1, dim)
                            data_dict[name] = vertex.get(dim, np.nan)
                elif field == 'landmarks':
                    for lm in val:
                        if 'type' in lm:
                            name = 'landmark_' + lm['type'] + '_%s'
                            data_dict.update({name % k: v
                                              for (k, v) in
                                              lm['position'].items()})
                else:
                    data_dict[field] = val
            face_results.append(data_dict)

        return pd.DataFrame(face_results)
class GoogleVisionAPILabelExtractor(GoogleVisionAPIExtractor):

    ''' Labels objects in images using the Google Cloud Vision API. '''

    request_type = 'LABEL_DETECTION'
    response_object = 'labelAnnotations'

    def _to_df(self, result):
        ''' One-row DataFrame mapping each label description to its score. '''
        scores = {}
        for label in result._data:
            if label:
                scores[label['description']] = label['score']
        return pd.DataFrame([scores])
class GoogleVisionAPIPropertyExtractor(GoogleVisionAPIExtractor):

    ''' Extracts image properties using the Google Cloud Vision API. '''

    request_type = 'IMAGE_PROPERTIES'
    response_object = 'imagePropertiesAnnotation'

    def _to_df(self, result):
        ''' One-row DataFrame keyed by "R, G, B" strings of the dominant
        colors, valued by their scores. Missing channels default to 0. '''
        scores = {}
        for color in result._data['dominantColors']['colors']:
            rgb = color['color']
            channels = (rgb.get('red', 0), rgb.get('green', 0),
                        rgb.get('blue', 0))
            key = ', '.join(str(c) for c in channels)
            scores[key] = color['score']
        return pd.DataFrame([scores])
class GoogleVisionAPISafeSearchExtractor(GoogleVisionAPIExtractor):

    ''' Extracts safe search detection using the Google Cloud Vision API. '''

    request_type = 'SAFE_SEARCH_DETECTION'
    response_object = 'safeSearchAnnotation'

    def _to_df(self, result):
        ''' Wrap the raw safe-search likelihood dict in a one-row DataFrame. '''
        annotation = result._data
        return pd.DataFrame([annotation])
class GoogleVisionAPIWebEntitiesExtractor(GoogleVisionAPIExtractor):

    ''' Extracts web entities using the Google Cloud Vision API. '''

    request_type = 'WEB_DETECTION'
    response_object = 'webDetection'

    def _to_df(self, result):
        ''' One-row DataFrame mapping web entity descriptions to scores;
        entities lacking either field are skipped. '''
        entities = result._data.get('webEntities', [])
        scores = {e['description']: e['score'] for e in entities
                  if 'description' in e and 'score' in e}
        return pd.DataFrame([scores])
class GoogleVideoIntelligenceAPIExtractor(GoogleAPITransformer, VideoExtractor):

    ''' Extracts object features from videos using the Google Vision Video
    Intelligence API.

    Args:
        features (list): List of features to extract. LABEL_DETECTION extracts
            tags present throughout the provided segments (full video if none
            provided) as well as throughout the shots (depending on config).
            SHOT_CHANGE_DETECTION extracts a shot feature with onsets and
            durations corresponding to shot changes in the video.
            EXPLICIT_CONTENT_DETECTION extracts any frame onsets of explicit
            material.
        segments (list): List of JSON objects or dictionaries. Each dictionary
            should contain a startTimeOffset and an endTimeOffset field with
            timestamps of the format XX.XXs marking the desired segments upon
            which to extract features.
        config (dict): JSON object representing the desired configuration for
            extraction. See the Google Cloud Video Intelligence documentation
            for more details.
        timeout (int): Number of seconds to wait for video intelligence
            operation to finish. Defaults to 90 seconds.
        request_rate (int): Number of seconds to wait between polling the
            extraction operation for completion.
        discovery_file (str): path to discovery file containing Google
            application credentials.
        api_version (str): API version to use.
        max_results (int): Max number of results per page.
        num_retries (int): Number of times to retry query on failure.
        rate_limit (int): The minimum number of seconds required between
            transform calls on this Transformer.
    '''

    api_name = 'videointelligence'
    _log_attributes = ('discovery_file', 'api_version', 'features', 'segments',
                       'config', 'timeout', 'request_rate')

    # NOTE(review): the mutable list default for `features` is shared across
    # calls; it is only read here, never mutated, so this is benign as-is.
    def __init__(self, features=['LABEL_DETECTION', 'SHOT_CHANGE_DETECTION',
                                 'EXPLICIT_CONTENT_DETECTION'],
                 segments=None, config=None, timeout=90, request_rate=5,
                 discovery_file=None, api_version='v1', max_results=100,
                 num_retries=3, rate_limit=None):
        self.features = features
        self.segments = segments
        self.config = config
        self.timeout = timeout
        self.request_rate = request_rate
        super().__init__(discovery_file=discovery_file,
                         api_version=api_version,
                         max_results=max_results,
                         num_retries=num_retries,
                         rate_limit=rate_limit)

    def _query_api(self, request):
        # Kick off an asynchronous annotate operation; returns the operation
        # handle (with a 'name'), not the final results.
        request_obj = self.service.videos().annotate(body=request)
        return request_obj.execute(num_retries=self.num_retries)

    def _query_operations(self, name):
        # Poll the long-running operation. Older API versions expose
        # operations().get directly; newer ones nest it under
        # projects().locations().
        if hasattr(self.service.operations(), 'get'):
            request_obj = self.service.operations().get(name=name)
        else:
            request_obj = self.service.projects().locations().\
                operations().get(name=name)
        return request_obj.execute(num_retries=self.num_retries)

    def _build_request(self, stim):
        # Build the annotate request body: features + videoContext (config
        # merged with any caller-provided segments) + inline video content.
        context = self.config if self.config else {}
        if self.segments:
            context['segments'] = self.segments

        with stim.get_filename() as filename:
            size = os.path.getsize(filename)

        # 500 MB — inline-content payload cap; warn, don't fail, since the
        # API reports its own error if the limit is actually exceeded.
        LIMIT = 524288000
        if size > LIMIT:
            warnings.warn("Video file is very large ({} bytes) and may "
                          "exceed the Google Video Intelligence payload "
                          "limit ({} bytes).".format(size, LIMIT))

        request = {
            'inputContent': stim.get_bytestring(),
            'features': self.features,
            'videoContext': context
        }
        return request

    def _extract(self, stim):
        # Submit the operation, then poll until it reports 'done' or the
        # timeout elapses; the (possibly partial) last response is returned
        # either way.
        op_request = self._build_request(stim)
        operation = self._query_api(op_request)
        msg = "Beginning video extraction with a timeout of %fs. Even for "\
              "small videos, full extraction may take awhile." % self.timeout
        logging.warning(msg)
        operation_start = time.time()
        response = self._query_operations(operation['name'])
        while 'done' not in response and \
              (time.time() - operation_start) < self.timeout:
            response = self._query_operations(operation['name'])
            time.sleep(self.request_rate)
        if (time.time() - operation_start) >= self.timeout:
            msg = "The extraction reached the timeout limit of %fs, which "\
                  "means the API may not have finished analyzing the video "\
                  "and the results may be empty or incomplete." % self.timeout
            logging.warning(msg)
        return ExtractorResult(response, stim, self)

    def _get_onset_duration(self, timing_json):
        # Offsets arrive as strings like "12.34s"; strip the trailing 's'.
        onset = float(timing_json['startTimeOffset'][:-1])
        end = float(timing_json['endTimeOffset'][:-1])
        return onset, (end - onset)

    def _parse_label(self, data, features, label):
        # Segment/shot labels: one confidence per (onset, duration) span.
        for segment in label.get('segments', []):
            onset, duration = self._get_onset_duration(segment['segment'])
            score = segment['confidence']
            data[(onset, duration)].update({f: score for f in features})

    def _parse_frame(self, data, features, annotation, score_key, max_time):
        # Frame-level annotations carry only onsets; each frame's duration
        # runs to the next frame's onset (or max_time for the last frame).
        frames = annotation.get('frames', [])
        for i, frame in enumerate(frames):
            onset = float(frame['timeOffset'][:-1])
            if (i + 1) == len(frames):
                end = max_time
            else:
                end = float(frames[i+1]['timeOffset'][:-1])
            duration = end - onset
            score = frame[score_key]
            data[(onset, duration)].update({f: score for f in features})

    def _to_df(self, result):
        # Flatten the annotationResults into one row per (onset, duration)
        # span, with one column per detected feature.
        response = result._data.get('response', {})
        data = defaultdict(dict)
        for r in response.get('annotationResults', []):
            for key, res in r.items():
                if 'Label' in key:
                    for annot in res:
                        feats = [annot['entity']['description']]
                        for category in annot.get('categoryEntities', []):
                            feats.append('category_' + category['description'])
                        if key == 'frameLabelAnnotations':
                            self._parse_frame(data, feats, annot, 'confidence',
                                              result.stim.duration)
                        else:
                            # Good for shot or segment labels
                            self._parse_label(data, feats, annot)
                elif key == 'shotAnnotations':
                    for i, shot in enumerate(res):
                        onset, duration = self._get_onset_duration(shot)
                        data[(onset, duration)].update({
                            'shot_id': i
                        })
                elif key == 'explicitAnnotation':
                    feature = 'pornographyLikelihood'
                    self._parse_frame(data, [feature], res, feature,
                                      result.stim.duration)

        df = pd.DataFrame(list(data.values()))
        # If multiple confidences were parsed, uses the last one
        if len(data) > 0:
            onsets, durations = zip(*list(data.keys()))
            result._onsets = onsets
            result._durations = durations
        result.features = list(df.columns)
        return df
class GoogleVideoAPILabelDetectionExtractor(GoogleVideoIntelligenceAPIExtractor):

    ''' Extracts image labels using the Google Video Intelligence API.

    Builds a labelDetectionConfig from the mode / camera / threshold
    arguments, validates that each confidence threshold is compatible
    with the requested mode, and delegates to the generic video
    extractor with features pinned to LABEL_DETECTION.
    '''

    def __init__(self, mode='SHOT_MODE', stationary_camera=False,
                 segments=None, timeout=90, request_rate=5, num_retries=3,
                 discovery_file=None, api_version='v1', max_results=100,
                 rate_limit=None, frame_confidence_threshold=None,
                 video_confidence_threshold=None):
        config = {
            'labelDetectionConfig': {
                'labelDetectionMode': mode,
                'stationaryCamera': stationary_camera
            }
        }
        if frame_confidence_threshold is not None:
            if mode not in ['FRAME_MODE', 'SHOT_AND_FRAME_MODE']:
                # BUG FIX: the adjacent string literals previously
                # concatenated without a space ("...specified inFRAME...").
                raise ValueError(
                    "frame_confidence_threshold can only be specified in "
                    "FRAME or SHOT_AND_FRAME modes.")
            else:
                config['labelDetectionConfig']['frameConfidenceThreshold'] = \
                    frame_confidence_threshold
        if video_confidence_threshold is not None:
            if mode not in ['SHOT_MODE', 'SHOT_AND_FRAME_MODE']:
                # BUG FIX: same missing space as above.
                raise ValueError(
                    "video_confidence_threshold can only be specified in "
                    "SHOT or SHOT_AND_FRAME modes.")
            else:
                config['labelDetectionConfig']['videoConfidenceThreshold'] = \
                    video_confidence_threshold
        super().__init__(features=['LABEL_DETECTION'],
                         segments=segments,
                         config=config,
                         timeout=timeout,
                         request_rate=request_rate,
                         discovery_file=discovery_file,
                         api_version=api_version,
                         max_results=max_results,
                         num_retries=num_retries,
                         rate_limit=rate_limit)
class GoogleVideoAPIShotDetectionExtractor(GoogleVideoIntelligenceAPIExtractor):

    ''' Extracts shot changes using the Google Video Intelligence API '''

    def __init__(self, segments=None, config=None, timeout=90, request_rate=5,
                 discovery_file=None, api_version='v1', max_results=100,
                 num_retries=3, rate_limit=None):
        # Pin the feature set to shot-change detection and forward every
        # remaining option to the generic video extractor unchanged.
        super().__init__(features=['SHOT_CHANGE_DETECTION'],
                         segments=segments, config=config,
                         timeout=timeout, request_rate=request_rate,
                         discovery_file=discovery_file,
                         api_version=api_version, max_results=max_results,
                         num_retries=num_retries, rate_limit=rate_limit)
class GoogleVideoAPIExplicitDetectionExtractor(GoogleVideoIntelligenceAPIExtractor):

    ''' Extracts explicit content using the Google Video Intelligence API '''

    def __init__(self, segments=None, config=None, timeout=90, request_rate=5,
                 discovery_file=None, api_version='v1', max_results=100,
                 num_retries=3, rate_limit=None):
        # Pin the feature set to explicit-content detection and forward
        # every remaining option to the generic video extractor unchanged.
        super().__init__(features=['EXPLICIT_CONTENT_DETECTION'],
                         segments=segments, config=config,
                         timeout=timeout, request_rate=request_rate,
                         discovery_file=discovery_file,
                         api_version=api_version, max_results=max_results,
                         num_retries=num_retries, rate_limit=rate_limit)
class GoogleLanguageAPIExtractor(GoogleAPITransformer, TextExtractor):

    ''' Extracts natural language features from text documents using the
    Google Natural Language API.

    Args:
        features (list): List of features (str) to extract. Available
            features: extractSyntax, extractEntities, extractDocumentSentiment,
            extractEntitySentiment, and classifyText. See Google Natural
            Language API documentation for more details.
        language (str): The ISO-639-1 or BCP-47 identifier for the document
            language. If None is provided, API auto-detects the language.
        is_html (bool): When True, the document's text is expected to be
            HTML. Otherwise, plain text is assumed.
        discovery_file (str): path to discovery file containing Google
            application credentials.
        api_version (str): API version to use.
        max_results (int): Max number of results per page.
        num_retries (int): Number of times to retry query on failure.
        rate_limit (int): The minimum number of seconds required between
            transform calls on this Transformer.
    '''

    api_name = 'language'
    _log_attributes = ('discovery_file', 'api_version', 'features',
                       'language', 'is_html')

    def __init__(self, features=['extractSyntax',
                                 'extractEntities',
                                 'extractDocumentSentiment',
                                 'extractEntitySentiment',
                                 'classifyText'],
                 language=None, is_html=False, discovery_file=None,
                 api_version='v1', max_results=100,
                 num_retries=3, rate_limit=None):
        self.features = features
        self.language = language
        self.is_html = is_html
        super().__init__(discovery_file=discovery_file,
                         api_version=api_version,
                         max_results=max_results,
                         num_retries=num_retries,
                         rate_limit=rate_limit)

    def _query_api(self, request):
        ''' Send an annotateText request and return the raw JSON response. '''
        request_obj = self.service.documents().annotateText(body=request)
        return request_obj.execute(num_retries=self.num_retries)

    def _build_request(self, stim):
        ''' Build the annotateText request body for a text stim. '''
        document = {
            'type': 'HTML' if self.is_html else 'PLAIN_TEXT',
            'content': stim.text
        }
        if self.language:
            document['language'] = self.language

        request = {
            'document': document,
            'features': {f: True for f in self.features},
            # UTF32 makes beginOffset a Python-friendly character index.
            'encodingType': 'UTF32'
        }
        return request

    def _extract(self, stim):
        request = self._build_request(stim)
        response = self._query_api(request)
        return ExtractorResult(response, stim, self)

    def _get_span(self, text_json):
        ''' Return begin/end character indices and text for a span object. '''
        offset = text_json['text']['beginOffset']
        content = text_json['text']['content']
        return {'begin_char_index': offset,
                'end_char_index': offset + len(content),
                'text': content}

    def _to_df(self, result):
        ''' Flatten the API response into one row per document, sentence,
        entity mention, and token. '''
        response = result._data
        data = []

        # One row/object for all document-level features
        document_data = {}

        if 'extractDocumentSentiment' in self.features:
            sentiment = response['documentSentiment']
            document_data.update(flatten_dict(sentiment, 'sentiment'))

            # Sentence level sentiment
            for sentence in response.get('sentences', []):
                sentence_data = self._get_span(sentence)
                sentiment = sentence['sentiment']
                sentence_data.update(flatten_dict(sentiment, 'sentiment'))
                data.append(sentence_data)

        # BUG FIX: .get('categories') returned None (TypeError on iteration)
        # whenever the response had no categories; default to [] like the
        # 'sentences' lookup above.
        for category in response.get('categories', []):
            key = 'category_%s' % category['name']
            document_data[key] = category['confidence']

        # Include only if there are document-level features
        if document_data:
            data.append(document_data)

        # Entity-level features
        for entity in response.get('entities', []):
            entity_copy = entity.copy()
            mentions = entity_copy.pop('mentions', [])
            entity_copy.pop('name', None)
            entity_copy = flatten_dict(entity_copy)

            for m in mentions:
                entity_data = self._get_span(m)
                entity_data.update(entity_copy)
                # Overwrite top-level sentiment with mention-level
                sentiment = m.get('sentiment', {})
                entity_data.update(flatten_dict(sentiment, 'sentiment'))
                data.append(entity_data)

        # Token-level syntax features
        for token in response.get('tokens', []):
            token_data = self._get_span(token)
            token_data['lemma'] = token['lemma']
            token_data.update(token['partOfSpeech'])
            dependency = flatten_dict(token['dependencyEdge'], 'dependency')
            token_data.update(dependency)
            data.append(token_data)

        df = pd.DataFrame(data)
        df['language'] = response['language']
        return df
class GoogleLanguageAPIEntityExtractor(GoogleLanguageAPIExtractor):

    ''' Extracts entity labels in text using the Google Language API '''

    def __init__(self, language=None, is_html=False, discovery_file=None,
                 api_version='v1', max_results=100, num_retries=3,
                 rate_limit=None):
        # Request only entity extraction; everything else passes through.
        super().__init__(features=['extractEntities'],
                         language=language, is_html=is_html,
                         discovery_file=discovery_file,
                         api_version=api_version, max_results=max_results,
                         num_retries=num_retries, rate_limit=rate_limit)
class GoogleLanguageAPISentimentExtractor(GoogleLanguageAPIExtractor):
    ''' Extracts sentiment of text using the Google Language API '''
    def __init__(self, language=None, is_html=False, discovery_file=None,
                 api_version='v1', max_results=100, num_retries=3,
                 rate_limit=None):
        # Pin the feature set to document sentiment; everything else is
        # forwarded to the base extractor unchanged.
        common = dict(language=language,
                      is_html=is_html,
                      discovery_file=discovery_file,
                      api_version=api_version,
                      max_results=max_results,
                      num_retries=num_retries,
                      rate_limit=rate_limit)
        super().__init__(features=['extractDocumentSentiment'], **common)
class GoogleLanguageAPISyntaxExtractor(GoogleLanguageAPIExtractor):
    ''' Extracts syntax properties of text using the Google Language API '''
    def __init__(self, language=None, is_html=False, discovery_file=None,
                 api_version='v1', max_results=100, num_retries=3,
                 rate_limit=None):
        # Pin the feature set to syntax analysis; everything else is
        # forwarded to the base extractor unchanged.
        common = dict(language=language,
                      is_html=is_html,
                      discovery_file=discovery_file,
                      api_version=api_version,
                      max_results=max_results,
                      num_retries=num_retries,
                      rate_limit=rate_limit)
        super().__init__(features=['extractSyntax'], **common)
class GoogleLanguageAPITextCategoryExtractor(GoogleLanguageAPIExtractor):
    ''' Extracts document category using the Google Language API.
    See the API documentation for the taxonomy of categories:
    https://cloud.google.com/natural-language/docs/categories '''
    def __init__(self, language=None, is_html=False, discovery_file=None,
                 api_version='v1', max_results=100, num_retries=3,
                 rate_limit=None):
        # Pin the feature set to text classification; everything else is
        # forwarded to the base extractor unchanged.
        common = dict(language=language,
                      is_html=is_html,
                      discovery_file=discovery_file,
                      api_version=api_version,
                      max_results=max_results,
                      num_retries=num_retries,
                      rate_limit=rate_limit)
        super().__init__(features=['classifyText'], **common)
class GoogleLanguageAPIEntitySentimentExtractor(GoogleLanguageAPIExtractor):
    ''' Extracts sentiment of entities found in text using the Google Language
    API. Produces identical results to the entity extractor but with additional
    sentiment analysis. '''
    def __init__(self, language=None, is_html=False, discovery_file=None,
                 api_version='v1', max_results=100, num_retries=3,
                 rate_limit=None):
        # Pin the feature set to entity sentiment; everything else is
        # forwarded to the base extractor unchanged.
        common = dict(language=language,
                      is_html=is_html,
                      discovery_file=discovery_file,
                      api_version=api_version,
                      max_results=max_results,
                      num_retries=num_retries,
                      rate_limit=rate_limit)
        super().__init__(features=['extractEntitySentiment'], **common)
| |
"""Node visitor"""
import ast
from .binopdesc import BinaryOperationDesc
from .boolopdesc import BooleanOperationDesc
from .cmpopdesc import CompareOperationDesc
from .nameconstdesc import NameConstantDesc
from .unaryopdesc import UnaryOperationDesc
from .context import Context
from .loopcounter import LoopCounter
from .tokenendmode import TokenEndMode
class NodeVisitor(ast.NodeVisitor):
    """Translate a Python AST into Lua source lines.

    Rendered lines accumulate in ``self.output``; nested scopes are produced
    by delegating to a fresh ``NodeVisitor`` that shares this visitor's
    ``Context`` (see ``visit_all``), so ``output`` ends up as a nested list
    of strings mirroring the block structure.
    """
    # Prefix marking a Python string literal as raw Lua code to emit verbatim.
    LUACODE = "[[luacode]]"
    def __init__(self, context=None, config=None):
        self.context = context if context is not None else Context()
        self.config = config
        self.last_end_mode = TokenEndMode.LINE_FEED
        self.output = []
    def visit_Assign(self, node):
        """Visit assign"""
        target = self.visit_all(node.targets[0], inline=True)
        value = self.visit_all(node.value, inline=True)
        local_keyword = ""
        last_ctx = self.context.last()
        # Inside a class body, plain names become fields of the class table.
        if last_ctx["class_name"]:
            target = ".".join([last_ctx["class_name"], target])
        # First assignment to a simple name introduces a Lua local.
        if "." not in target and not last_ctx["locals"].exists(target):
            local_keyword = "local "
            last_ctx["locals"].add_symbol(target)
        self.emit("{local}{target} = {value}".format(local=local_keyword,
                                                     target=target,
                                                     value=value))
    def visit_AugAssign(self, node):
        """Visit augassign"""
        operation = BinaryOperationDesc.OPERATION[node.op.__class__]
        target = self.visit_all(node.target, inline=True)
        values = {
            "left": target,
            "right": self.visit_all(node.value, inline=True),
            "operation": operation["value"],
        }
        # Lua has no augmented assignment: expand to "target = (target op value)".
        line = "({})".format(operation["format"])
        line = line.format(**values)
        self.emit("{target} = {line}".format(target=target, line=line))
    def visit_Attribute(self, node):
        """Visit attribute"""
        line = "{object}.{attr}"
        values = {
            "object": self.visit_all(node.value, True),
            "attr": node.attr,
        }
        self.emit(line.format(**values))
    def visit_BinOp(self, node):
        """Visit binary operation"""
        operation = BinaryOperationDesc.OPERATION[node.op.__class__]
        line = "({})".format(operation["format"])
        values = {
            "left": self.visit_all(node.left, True),
            "right": self.visit_all(node.right, True),
            "operation": operation["value"],
        }
        self.emit(line.format(**values))
    def visit_BoolOp(self, node):
        """Visit boolean operation"""
        # NOTE(review): only values[0] and values[1] are rendered; Python
        # parses "a or b or c" as ONE BoolOp with three values, so a third
        # operand would be silently dropped here — confirm inputs are binary.
        operation = BooleanOperationDesc.OPERATION[node.op.__class__]
        line = "({})".format(operation["format"])
        values = {
            "left": self.visit_all(node.values[0], True),
            "right": self.visit_all(node.values[1], True),
            "operation": operation["value"],
        }
        self.emit(line.format(**values))
    def visit_Break(self, node):
        """Visit break"""
        self.emit("break")
    def visit_Call(self, node):
        """Visit function call"""
        # NOTE(review): keyword arguments (node.keywords) are not rendered.
        line = "{name}({arguments})"
        name = self.visit_all(node.func, inline=True)
        arguments = [self.visit_all(arg, inline=True) for arg in node.args]
        self.emit(line.format(name=name, arguments=", ".join(arguments)))
    def visit_ClassDef(self, node):
        """Visit class definition"""
        bases = [self.visit_all(base, inline=True) for base in node.bases]
        local_keyword = ""
        last_ctx = self.context.last()
        if not last_ctx["class_name"] and not last_ctx["locals"].exists(node.name):
            local_keyword = "local "
            last_ctx["locals"].add_symbol(node.name)
        name = node.name
        if last_ctx["class_name"]:
            name = ".".join([last_ctx["class_name"], name])
        values = {
            "local": local_keyword,
            "name": name,
            "node_name": node.name,
        }
        self.emit("{local}{name} = class(function({node_name})".format(**values))
        self.context.push({"class_name": node.name})
        self.visit_all(node.body)
        self.context.pop()
        # The class body was emitted as a nested list; close the factory
        # function by returning the class table from it.
        self.output[-1].append("return {node_name}".format(**values))
        self.emit("end, {{{}}})".format(", ".join(bases)))
        # Return class object only in the top-level classes.
        # Not in the nested classes.
        if self.config["class"]["return_at_the_end"] and not last_ctx["class_name"]:
            self.emit("return {}".format(name))
    def visit_Compare(self, node):
        """Visit compare"""
        line = ""
        left = self.visit_all(node.left, inline=True)
        for i in range(len(node.ops)):
            operation = node.ops[i]
            operation = CompareOperationDesc.OPERATION[operation.__class__]
            right = self.visit_all(node.comparators[i], inline=True)
            values = {
                "left": left,
                "right": right,
            }
            if isinstance(operation, str):
                values["op"] = operation
                line += "{left} {op} {right}".format(**values)
            elif isinstance(operation, dict):
                line += operation["format"].format(**values)
            # Chained comparison "a < b < c" becomes "a < b and b < c".
            if i < len(node.ops) - 1:
                left = right
                line += " and "
        self.emit("({})".format(line))
    def visit_Continue(self, node):
        """Visit continue"""
        # Lua has no 'continue'; jump to the label emitted at the loop end.
        last_ctx = self.context.last()
        line = "goto {}".format(last_ctx["loop_label_name"])
        self.emit(line)
    def visit_Delete(self, node):
        """Visit delete"""
        targets = [self.visit_all(target, inline=True) for target in node.targets]
        nils = ["nil" for _ in targets]
        line = "{targets} = {nils}".format(targets=", ".join(targets),
                                           nils=", ".join(nils))
        self.emit(line)
    def visit_Dict(self, node):
        """Visit dictionary"""
        keys = []
        for key in node.keys:
            value = self.visit_all(key, inline=True)
            # String keys need Lua bracket syntax: ["key"] = value.
            if isinstance(key, ast.Str):
                value = "[{}]".format(value)
            keys.append(value)
        values = [self.visit_all(item, inline=True) for item in node.values]
        elements = ["{} = {}".format(keys[i], values[i]) for i in range(len(keys))]
        elements = ", ".join(elements)
        self.emit("dict {{{}}}".format(elements))
    def visit_DictComp(self, node):
        """Visit dictionary comprehension"""
        # Rendered as an immediately-invoked function that builds 'result'.
        self.emit("(function()")
        self.emit("local result = dict {}")
        ends_count = 0
        for comp in node.generators:
            line = "for {target} in {iterator} do"
            values = {
                "target": self.visit_all(comp.target, inline=True),
                "iterator": self.visit_all(comp.iter, inline=True),
            }
            line = line.format(**values)
            self.emit(line)
            ends_count += 1
            for if_ in comp.ifs:
                line = "if {} then".format(self.visit_all(if_, inline=True))
                self.emit(line)
                ends_count += 1
        line = "result[{key}] = {value}"
        values = {
            "key": self.visit_all(node.key, inline=True),
            "value": self.visit_all(node.value, inline=True),
        }
        self.emit(line.format(**values))
        # Close every 'for'/'if' opened above in one line.
        self.emit(" ".join(["end"] * ends_count))
        self.emit("return result")
        self.emit("end)()")
    def visit_Ellipsis(self, node):
        """Visit ellipsis"""
        self.emit("...")
    def visit_Expr(self, node):
        """Visit expr"""
        expr_is_docstring = False
        if isinstance(node.value, ast.Str):
            expr_is_docstring = True
        self.context.push({"docstring": expr_is_docstring})
        output = self.visit_all(node.value)
        self.context.pop()
        # NOTE(review): visit_all(..., inline=False) returns None and has
        # already extended self.output, so this appends None — verify that
        # downstream output handling filters it out.
        self.output.append(output)
    def visit_FunctionDef(self, node):
        """Visit function definition"""
        line = "{local}function {name}({arguments})"
        last_ctx = self.context.last()
        name = node.name
        if last_ctx["class_name"]:
            name = ".".join([last_ctx["class_name"], name])
        arguments = [arg.arg for arg in node.args.args]
        if node.args.vararg is not None:
            arguments.append("...")
        local_keyword = ""
        if "." not in name and not last_ctx["locals"].exists(name):
            local_keyword = "local "
            last_ctx["locals"].add_symbol(name)
        function_def = line.format(local=local_keyword,
                                   name=name,
                                   arguments=", ".join(arguments))
        self.emit(function_def)
        self.context.push({"class_name": ""})
        self.visit_all(node.body)
        self.context.pop()
        body = self.output[-1]
        if node.args.vararg is not None:
            line = "local {name} = list {{...}}".format(name=node.args.vararg.arg)
            body.insert(0, line)
        # Emulate default arguments: pair defaults right-to-left with the
        # trailing parameters and prepend "x = x or default" lines.
        arg_index = -1
        for i in reversed(node.args.defaults):
            line = "{name} = {name} or {value}"
            arg = node.args.args[arg_index]
            values = {
                "name": arg.arg,
                "value": self.visit_all(i, inline=True),
            }
            body.insert(0, line.format(**values))
            arg_index -= 1
        self.emit("end")
        # Apply decorators innermost-first: name = decorator(name).
        for decorator in reversed(node.decorator_list):
            decorator_name = self.visit_all(decorator, inline=True)
            values = {
                "name": name,
                "decorator": decorator_name,
            }
            line = "{name} = {decorator}({name})".format(**values)
            self.emit(line)
    def visit_For(self, node):
        """Visit for loop"""
        line = "for {target} in {iter} do"
        values = {
            "target": self.visit_all(node.target, inline=True),
            "iter": self.visit_all(node.iter, inline=True),
        }
        self.emit(line.format(**values))
        # Label placed at the loop end is the jump target for 'continue'.
        continue_label = LoopCounter.get_next()
        self.context.push({
            "loop_label_name": continue_label,
        })
        self.visit_all(node.body)
        self.context.pop()
        self.output[-1].append("::{}::".format(continue_label))
        self.emit("end")
    def visit_Global(self, node):
        """Visit globals"""
        last_ctx = self.context.last()
        for name in node.names:
            last_ctx["globals"].add_symbol(name)
    def visit_If(self, node):
        """Visit if"""
        test = self.visit_all(node.test, inline=True)
        line = "if {} then".format(test)
        self.emit(line)
        self.visit_all(node.body)
        if node.orelse:
            if isinstance(node.orelse[0], ast.If):
                elseif = node.orelse[0]
                elseif_test = self.visit_all(elseif.test, inline=True)
                line = "elseif {} then".format(elseif_test)
                self.emit(line)
                output_length = len(self.output)
                self.visit_If(node.orelse[0])
                # The recursive call emitted its own "if ... then" header and
                # trailing "end"; drop both so only the branch bodies remain
                # under the "elseif" emitted above.
                del self.output[output_length]
                del self.output[-1]
            else:
                self.emit("else")
                self.visit_all(node.orelse)
        self.emit("end")
    def visit_IfExp(self, node):
        """Visit if expression"""
        # NOTE(review): the Lua and/or idiom yields the wrong branch when
        # the "true" value evaluates to false/nil — confirm acceptable.
        line = "{cond} and {true_cond} or {false_cond}"
        values = {
            "cond": self.visit_all(node.test, inline=True),
            "true_cond": self.visit_all(node.body, inline=True),
            "false_cond": self.visit_all(node.orelse, inline=True),
        }
        self.emit(line.format(**values))
    def visit_Import(self, node):
        """Visit import"""
        # NOTE(review): only the first alias is emitted; "import a, b"
        # would silently drop 'b'.
        line = 'local {asname} = require "{name}"'
        values = {"asname": "", "name": ""}
        if node.names[0].asname is None:
            values["name"] = node.names[0].name
            values["asname"] = values["name"]
            # "a.b.c" binds the local name "c", mirroring Python semantics
            # only approximately.
            values["asname"] = values["asname"].split(".")[-1]
        else:
            values["asname"] = node.names[0].asname
            values["name"] = node.names[0].name
        self.emit(line.format(**values))
    def visit_Index(self, node):
        """Visit index"""
        self.emit(self.visit_all(node.value, inline=True))
    def visit_Lambda(self, node):
        """Visit lambda"""
        line = "function({arguments}) return"
        arguments = [arg.arg for arg in node.args.args]
        function_def = line.format(arguments=", ".join(arguments))
        output = []
        output.append(function_def)
        output.append(self.visit_all(node.body, inline=True))
        output.append("end")
        self.emit(" ".join(output))
    def visit_List(self, node):
        """Visit list"""
        elements = [self.visit_all(item, inline=True) for item in node.elts]
        line = "list {{{}}}".format(", ".join(elements))
        self.emit(line)
    def visit_ListComp(self, node):
        """Visit list comprehension"""
        # Rendered as an immediately-invoked function that builds 'result'.
        self.emit("(function()")
        self.emit("local result = list {}")
        ends_count = 0
        for comp in node.generators:
            line = "for {target} in {iterator} do"
            values = {
                "target": self.visit_all(comp.target, inline=True),
                "iterator": self.visit_all(comp.iter, inline=True),
            }
            line = line.format(**values)
            self.emit(line)
            ends_count += 1
            for if_ in comp.ifs:
                line = "if {} then".format(self.visit_all(if_, inline=True))
                self.emit(line)
                ends_count += 1
        line = "result.append({})"
        line = line.format(self.visit_all(node.elt, inline=True))
        self.emit(line)
        # Close every 'for'/'if' opened above in one line.
        self.emit(" ".join(["end"] * ends_count))
        self.emit("return result")
        self.emit("end)()")
    def visit_Module(self, node):
        """Visit module"""
        self.visit_all(node.body)
        # visit_all wrapped the module body in one nested list; unwrap it.
        self.output = self.output[0]
    def visit_Name(self, node):
        """Visit name"""
        self.emit(node.id)
    def visit_NameConstant(self, node):
        """Visit name constant"""
        self.emit(NameConstantDesc.NAME[node.value])
    def visit_Num(self, node):
        """Visit number"""
        self.emit(str(node.n))
    def visit_Pass(self, node):
        """Visit pass"""
        pass
    def visit_Return(self, node):
        """Visit return"""
        line = "return "
        line += self.visit_all(node.value, inline=True)
        self.emit(line)
    def visit_Starred(self, node):
        """Visit starred object"""
        value = self.visit_all(node.value, inline=True)
        line = "unpack({})".format(value)
        self.emit(line)
    def visit_Str(self, node):
        """Visit str"""
        value = node.s
        # Strings prefixed with LUACODE are emitted verbatim as Lua source.
        if value.startswith(NodeVisitor.LUACODE):
            value = value[len(NodeVisitor.LUACODE):]
            self.emit(value)
        elif self.context.last()["docstring"]:
            self.emit('--[[ {} ]]'.format(node.s))
        else:
            self.emit('"{}"'.format(node.s))
    def visit_Subscript(self, node):
        """Visit subscript"""
        line = "{name}[{index}]"
        values = {
            "name": self.visit_all(node.value, inline=True),
            "index": self.visit_all(node.slice, inline=True),
        }
        self.emit(line.format(**values))
    def visit_Tuple(self, node):
        """Visit tuple"""
        elements = [self.visit_all(item, inline=True) for item in node.elts]
        self.emit(", ".join(elements))
    def visit_UnaryOp(self, node):
        """Visit unary operator"""
        operation = UnaryOperationDesc.OPERATION[node.op.__class__]
        value = self.visit_all(node.operand, inline=True)
        line = operation["format"]
        values = {
            "value": value,
            "operation": operation["value"],
        }
        self.emit(line.format(**values))
    def visit_While(self, node):
        """Visit while"""
        test = self.visit_all(node.test, inline=True)
        self.emit("while {} do".format(test))
        # Label placed at the loop end is the jump target for 'continue'.
        continue_label = LoopCounter.get_next()
        self.context.push({
            "loop_label_name": continue_label,
        })
        self.visit_all(node.body)
        self.context.pop()
        self.output[-1].append("::{}::".format(continue_label))
        self.emit("end")
    def visit_With(self, node):
        """Visit with"""
        self.emit("do")
        self.visit_all(node.body)
        body = self.output[-1]
        lines = []
        for i in node.items:
            line = ""
            if i.optional_vars is not None:
                line = "local {} = "
                line = line.format(self.visit_all(i.optional_vars,
                                                  inline=True))
            line += self.visit_all(i.context_expr, inline=True)
            lines.append(line)
        # NOTE(review): inserting each line at index 0 in order reverses
        # multiple context managers relative to source order — confirm
        # this is intended.
        for line in lines:
            body.insert(0, line)
        self.emit("end")
    def generic_visit(self, node):
        """Unknown nodes handler"""
        raise RuntimeError("Unknown node: {}".format(node))
    def visit_all(self, nodes, inline=False):
        """Visit all nodes in the given list.

        With inline=False the child output is appended to self.output as a
        nested list (a fresh local-symbol scope is pushed for the duration);
        with inline=True the rendered fragments are joined with spaces and
        returned instead of being appended.
        """
        if not inline:
            last_ctx = self.context.last()
            last_ctx["locals"].push()
        # A fresh visitor shares the Context but gets its own output list.
        visitor = NodeVisitor(context=self.context, config=self.config)
        if isinstance(nodes, list):
            for node in nodes:
                visitor.visit(node)
            if not inline:
                self.output.append(visitor.output)
        else:
            visitor.visit(nodes)
            if not inline:
                self.output.extend(visitor.output)
        if not inline:
            last_ctx = self.context.last()
            last_ctx["locals"].pop()
        if inline:
            return " ".join(visitor.output)
    def emit(self, value):
        """Add translated value to the output"""
        self.output.append(value)
| |
# -*- coding: utf-8 -*-
# Sphinx configuration for the SciPy reference documentation.
import sys, os, re
# Check Sphinx version
import sphinx
# NOTE(review): lexicographic string comparison of version numbers; this
# misorders once a component reaches two digits (e.g. "10.0") — confirm
# acceptable for the Sphinx versions in use.
if sphinx.__version__ < "1.1":
    raise RuntimeError("Sphinx 1.1 or newer required")
needs_sphinx = '1.1'
# -----------------------------------------------------------------------------
# General configuration
# -----------------------------------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# Make the local sphinxext/ helpers and this directory importable first so the
# custom extensions below ('numpydoc', 'scipyoptdoc', 'doi_role') resolve.
sys.path.insert(0, os.path.abspath('../sphinxext'))
sys.path.insert(0, os.path.abspath(os.path.dirname(__file__)))
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.mathjax', 'numpydoc',
              'sphinx.ext.intersphinx', 'sphinx.ext.coverage',
              'sphinx.ext.autosummary', 'scipyoptdoc', 'doi_role']
# Determine if the matplotlib has a recent enough version of the
# plot_directive.
try:
    from matplotlib.sphinxext import plot_directive
except ImportError:
    # matplotlib (or its sphinxext package) is not installed at all.
    use_matplotlib_plot_directive = False
else:
    try:
        # plot_directive exposes an integer API version; 2+ is required.
        use_matplotlib_plot_directive = (plot_directive.__version__ >= 2)
    except AttributeError:
        # Very old plot_directive without a __version__ attribute.
        use_matplotlib_plot_directive = False
if use_matplotlib_plot_directive:
    extensions.append('matplotlib.sphinxext.plot_directive')
else:
    raise RuntimeError("You need a recent enough version of matplotlib")
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General substitutions.
project = 'SciPy'
# Note: 'copyright' is a Sphinx config name; it intentionally shadows the
# builtin here.
copyright = '2008-2016, The Scipy community'
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
import scipy
# |version| collapses any ".dev-<hash>" suffix down to ".dev"; |release| is
# the exact installed version string.
version = re.sub(r'\.dev-.*$', r'.dev', scipy.__version__)
release = scipy.__version__
# Use function-call syntax so this file also parses under Python 3
# (identical output to the old print statement under Python 2).
print("Scipy (VERSION %s)" % (version,))
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# The reST default role (used for this markup: `text`) to use for all documents.
default_role = "autolink"
# List of directories, relative to source directories, that shouldn't be searched
# for source files.
exclude_dirs = []
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -----------------------------------------------------------------------------
# HTML output
# -----------------------------------------------------------------------------
# Prefer the scipy-sphinx-theme checkout (a git submodule) when present.
themedir = os.path.join(os.pardir, 'scipy-sphinx-theme', '_theme')
if os.path.isdir(themedir):
    html_theme = 'scipy'
    html_theme_path = [themedir]
    # NOTE(review): 'tags' is injected into conf.py's namespace by Sphinx at
    # build time (set via "sphinx-build -t scipyorg"); it is not defined in
    # this file.
    if 'scipyorg' in tags:
        # Build for the scipy.org website
        html_theme_options = {
            "edit_link": True,
            "sidebar": "right",
            "scipy_org_logo": True,
            "rootlinks": [("https://scipy.org/", "Scipy.org"),
                          ("https://docs.scipy.org/", "Docs")]
        }
    else:
        # Default build
        html_theme_options = {
            "edit_link": False,
            "sidebar": "left",
            "scipy_org_logo": False,
            "rootlinks": []
        }
    html_logo = '_static/scipyshiny_small.png'
    html_sidebars = {'index': 'indexsidebar.html'}
else:
    # Build without scipy.org sphinx theme present
    if 'scipyorg' in tags:
        raise RuntimeError("Get the scipy-sphinx-theme first, "
                           "via git submodule init & update")
    else:
        # Fall back to a plain stylesheet shipped with the docs.
        html_style = 'scipy_fallback.css'
        html_logo = '_static/scipyshiny_small.png'
        html_sidebars = {'index': 'indexsidebar.html'}
html_title = "%s v%s Reference Guide" % (project, version)
html_static_path = ['_static']
html_last_updated_fmt = '%b %d, %Y'
html_additional_pages = {}
html_use_modindex = True
html_copy_source = False
html_file_suffix = '.html'
htmlhelp_basename = 'scipy'
# Serve MathJax from the local scipy-mathjax checkout with SciPy's config.
mathjax_path = "scipy-mathjax/MathJax.js?config=scipy-mathjax"
# -----------------------------------------------------------------------------
# LaTeX output
# -----------------------------------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
_stdauthor = 'Written by the SciPy community'
latex_documents = [
('index', 'scipy-ref.tex', 'SciPy Reference Guide', _stdauthor, 'manual'),
# ('user/index', 'scipy-user.tex', 'SciPy User Guide',
# _stdauthor, 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
latex_preamble = r'''
\usepackage{amsmath}
\DeclareUnicodeCharacter{00A0}{\nobreakspace}
% In the parameters etc. sections, align uniformly, and adjust label emphasis
\usepackage{expdlist}
\let\latexdescription=\description
\let\endlatexdescription=\enddescription
\renewenvironment{description}%
{\begin{latexdescription}[\setleftmargin{60pt}\breaklabel\setlabelstyle{\bfseries\itshape}]}%
{\end{latexdescription}}
% Make Examples/etc section headers smaller and more compact
\makeatletter
\titleformat{\paragraph}{\normalsize\normalfont\bfseries\itshape}%
{\py@NormalColor}{0em}{\py@NormalColor}{\py@NormalColor}
\titlespacing*{\paragraph}{0pt}{1ex}{0pt}
\makeatother
% Save vertical space in parameter lists and elsewhere
\makeatletter
\renewenvironment{quote}%
{\list{}{\topsep=0pt%
\parsep \z@ \@plus\p@}%
\item\relax}%
{\endlist}
\makeatother
% Fix footer/header
\renewcommand{\chaptermark}[1]{\markboth{\MakeUppercase{\thechapter.\ #1}}{}}
\renewcommand{\sectionmark}[1]{\markright{\MakeUppercase{\thesection.\ #1}}}
'''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
latex_use_modindex = False
# -----------------------------------------------------------------------------
# Intersphinx configuration
# -----------------------------------------------------------------------------
# NOTE(review): the Python entry uses http while the numpy entry uses https —
# probably worth unifying to https.
intersphinx_mapping = {
    'http://docs.python.org/dev': None,
    'https://docs.scipy.org/doc/numpy': None,
}
# -----------------------------------------------------------------------------
# Numpy extensions
# -----------------------------------------------------------------------------
# If we want to do a phantom import from an XML file for all autodocs
phantom_import_file = 'dump.xml'
# Generate plots for example sections
numpydoc_use_plots = True
# -----------------------------------------------------------------------------
# Autosummary
# -----------------------------------------------------------------------------
# NOTE(review): lexicographic version comparison again; works for "0.7" vs
# modern "1.x"+ strings but is not robust in general.
if sphinx.__version__ >= "0.7":
    import glob
    autosummary_generate = glob.glob("*.rst")
# -----------------------------------------------------------------------------
# Coverage checker
# -----------------------------------------------------------------------------
# Whitespace-separated regex fragments of names the coverage builder ignores.
coverage_ignore_modules = r"""
""".split()
coverage_ignore_functions = r"""
test($|_) (some|all)true bitwise_not cumproduct pkgload
generic\.
""".split()
coverage_ignore_classes = r"""
""".split()
coverage_c_path = []
coverage_c_regexes = {}
coverage_ignore_c_items = {}
#------------------------------------------------------------------------------
# Plot
#------------------------------------------------------------------------------
# Code silently prepended to every plot-directive example for reproducibility.
plot_pre_code = """
import numpy as np
np.random.seed(123)
"""
plot_include_source = True
plot_formats = [('png', 96), 'pdf']
plot_html_show_formats = False
import math
# Golden ratio: gives example figures a pleasant default aspect ratio.
phi = (math.sqrt(5) + 1)/2
font_size = 13*72/96.0  # 13 px
plot_rcparams = {
    'font.size': font_size,
    'axes.titlesize': font_size,
    'axes.labelsize': font_size,
    'xtick.labelsize': font_size,
    'ytick.labelsize': font_size,
    'legend.fontsize': font_size,
    'figure.figsize': (3*phi, 3),
    'figure.subplot.bottom': 0.2,
    'figure.subplot.left': 0.2,
    'figure.subplot.right': 0.9,
    'figure.subplot.top': 0.85,
    'figure.subplot.wspace': 0.4,
    'text.usetex': False,
}
# The plot_directive applies rcparams itself when available; otherwise set
# them globally on matplotlib.
if not use_matplotlib_plot_directive:
    import matplotlib
    matplotlib.rcParams.update(plot_rcparams)
# -----------------------------------------------------------------------------
# Source code links
# -----------------------------------------------------------------------------
import inspect
from os.path import relpath, dirname
# Prefer the Sphinx-bundled linkcode extension, then a local copy, then the
# numpydoc one; the first that imports wins.
for name in ['sphinx.ext.linkcode', 'linkcode', 'numpydoc.linkcode']:
    try:
        __import__(name)
        extensions.append(name)
        break
    except ImportError:
        pass
else:
    # Use function-call syntax so this file also parses under Python 3
    # (identical output to the old print statement under Python 2).
    print("NOTE: linkcode extension not found -- no links to source generated")
def linkcode_resolve(domain, info):
    """
    Determine the URL corresponding to Python object

    Returns a link into the scipy GitHub repository for the source of the
    object described by ``info`` ({'module': ..., 'fullname': ...}), or
    None when the object cannot be located.
    """
    if domain != 'py':
        return None
    modname = info['module']
    fullname = info['fullname']
    submod = sys.modules.get(modname)
    if submod is None:
        return None
    # Walk the attribute path (e.g. "Class.method") down from the module.
    obj = submod
    for part in fullname.split('.'):
        try:
            obj = getattr(obj, part)
        except Exception:
            # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
            # are no longer swallowed (same for the clauses below).
            return None
    try:
        fn = inspect.getsourcefile(obj)
    except Exception:
        fn = None
    if not fn:
        # Fall back to the defining module's file (e.g. for ufuncs, which
        # have no retrievable source of their own).
        try:
            fn = inspect.getsourcefile(sys.modules[obj.__module__])
        except Exception:
            fn = None
    if not fn:
        return None
    try:
        source, lineno = inspect.getsourcelines(obj)
    except Exception:
        lineno = None
    if lineno:
        linespec = "#L%d-L%d" % (lineno, lineno + len(source) - 1)
    else:
        linespec = ""
    fn = relpath(fn, start=dirname(scipy.__file__))
    # Dev builds link to master; releases link to the version tag.
    if 'dev' in scipy.__version__:
        return "http://github.com/scipy/scipy/blob/master/scipy/%s%s" % (
            fn, linespec)
    else:
        return "http://github.com/scipy/scipy/blob/v%s/scipy/%s%s" % (
            scipy.__version__, fn, linespec)
| |
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
from code import Code
from model import PropertyType
import cpp_util
import schema_util
class HGenerator(object):
  """Entry point for .h generation: runs a fresh _Generator per namespace."""
  def __init__(self, type_generator):
    self._type_generator = type_generator
  def Generate(self, namespace):
    generator = _Generator(namespace, self._type_generator)
    return generator.Generate()
class _Generator(object):
"""A .h generator for a namespace.
"""
def __init__(self, namespace, cpp_type_generator):
self._namespace = namespace
self._type_helper = cpp_type_generator
self._generate_error_messages = namespace.compiler_options.get(
'generate_error_messages', False)
  def Generate(self):
    """Generates a Code object with the .h for a single namespace.

    The header is emitted in a fixed order: license banner, include guard,
    includes, optional forward declarations, then Properties, Types,
    Functions and Events sections (each only when the namespace has any).
    """
    c = Code()
    (c.Append(cpp_util.CHROMIUM_LICENSE)
      .Append()
      .Append(cpp_util.GENERATED_FILE_MESSAGE % self._namespace.source_file)
      .Append()
    )
    # Hack: for the purpose of gyp the header file will always be the source
    # file with its file extension replaced by '.h'. Assume so.
    output_file = os.path.splitext(self._namespace.source_file)[0] + '.h'
    ifndef_name = cpp_util.GenerateIfndefName(output_file)
    # Hack: tabs and windows have circular references, so only generate hard
    # references for them (i.e. anything that can't be forward declared). In
    # other cases, generate soft dependencies so that they can include
    # non-optional types from other namespaces.
    include_soft = self._namespace.name not in ('tabs', 'windows')
    (c.Append('#ifndef %s' % ifndef_name)
      .Append('#define %s' % ifndef_name)
      .Append()
      .Append('#include <stdint.h>')
      .Append()
      .Append('#include <map>')
      .Append('#include <memory>')
      .Append('#include <string>')
      .Append('#include <vector>')
      .Append()
      .Append('#include "base/logging.h"')
      .Append('#include "base/values.h"')
      .Cblock(self._type_helper.GenerateIncludes(include_soft=include_soft))
      .Append()
    )
    # Hack: we're not generating soft includes for tabs and windows, so we need
    # to generate forward declarations for them.
    if not include_soft:
      c.Cblock(self._type_helper.GenerateForwardDeclarations())
    cpp_namespace = cpp_util.GetCppNamespace(
        self._namespace.environment.namespace_pattern,
        self._namespace.unix_name)
    c.Concat(cpp_util.OpenNamespace(cpp_namespace))
    c.Append()
    if self._namespace.properties:
      (c.Append('//')
        .Append('// Properties')
        .Append('//')
        .Append()
      )
      for prop in self._namespace.properties.values():
        property_code = self._type_helper.GeneratePropertyValues(
            prop,
            'extern const %(type)s %(name)s;')
        if property_code:
          c.Cblock(property_code)
    if self._namespace.types:
      # Types are emitted in field-dependency order so that any type a field
      # refers to is already declared.
      (c.Append('//')
        .Append('// Types')
        .Append('//')
        .Append()
        .Cblock(self._GenerateTypes(self._FieldDependencyOrder(),
                                    is_toplevel=True,
                                    generate_typedefs=True))
      )
    if self._namespace.functions:
      (c.Append('//')
        .Append('// Functions')
        .Append('//')
        .Append()
      )
      for function in self._namespace.functions.values():
        c.Cblock(self._GenerateFunction(function))
    if self._namespace.events:
      (c.Append('//')
        .Append('// Events')
        .Append('//')
        .Append()
      )
      for event in self._namespace.events.values():
        c.Cblock(self._GenerateEvent(event))
    (c.Concat(cpp_util.CloseNamespace(cpp_namespace))
      .Append('#endif // %s' % ifndef_name)
      .Append()
    )
    return c
def _FieldDependencyOrder(self):
"""Generates the list of types in the current namespace in an order in which
depended-upon types appear before types which depend on them.
"""
dependency_order = []
def ExpandType(path, type_):
if type_ in path:
raise ValueError("Illegal circular dependency via cycle " +
", ".join(map(lambda x: x.name, path + [type_])))
for prop in type_.properties.values():
if (prop.type_ == PropertyType.REF and
schema_util.GetNamespace(prop.ref_type) == self._namespace.name):
ExpandType(path + [type_], self._namespace.types[prop.ref_type])
if not type_ in dependency_order:
dependency_order.append(type_)
for type_ in self._namespace.types.values():
ExpandType([], type_)
return dependency_order
def _GenerateEnumDeclaration(self, enum_name, type_):
"""Generate a code object with the declaration of a C++ enum.
"""
c = Code()
c.Sblock('enum %s {' % enum_name)
c.Append(self._type_helper.GetEnumNoneValue(type_) + ',')
for value in type_.enum_values:
current_enum_string = self._type_helper.GetEnumValue(type_, value)
c.Append(current_enum_string + ',')
c.Append('%s = %s,' % (
self._type_helper.GetEnumLastValue(type_), current_enum_string))
c.Eblock('};')
return c
def _GenerateFields(self, props):
"""Generates the field declarations when declaring a type.
"""
c = Code()
needs_blank_line = False
for prop in props:
if needs_blank_line:
c.Append()
needs_blank_line = True
if prop.description:
c.Comment(prop.description)
# ANY is a base::Value which is abstract and cannot be a direct member, so
# we always need to wrap it in a scoped_ptr.
is_ptr = prop.optional or prop.type_.property_type == PropertyType.ANY
(c.Append('%s %s;' % (
self._type_helper.GetCppType(prop.type_, is_ptr=is_ptr),
prop.unix_name))
)
return c
def _GenerateType(self, type_, is_toplevel=False, generate_typedefs=False):
    """Generates a struct for |type_|.

    |is_toplevel| implies that the type was declared in the "types" field
    of an API schema. This determines the correct function
    modifier(s).
    |generate_typedefs| controls whether primitive types should be generated as
    a typedef. This may not always be desired. If false,
    primitive types are ignored.

    Returns a Code object; '%(classname)s' placeholders in the emitted text
    are substituted with the type's C++ class name just before returning.
    """
    classname = cpp_util.Classname(schema_util.StripNamespace(type_.name))
    c = Code()
    if type_.functions:
        # Wrap functions within types in the type's namespace.
        (c.Append('namespace %s {' % classname)
          .Append()
        )
        for function in type_.functions.values():
            c.Cblock(self._GenerateFunction(function))
        c.Append('} // namespace %s' % classname)
    elif type_.property_type == PropertyType.ARRAY:
        # Arrays are emitted as a typedef to std::vector of the (recursively
        # generated) item type.
        if generate_typedefs and type_.description:
            c.Comment(type_.description)
        c.Cblock(self._GenerateType(type_.item_type, is_toplevel=is_toplevel))
        if generate_typedefs:
            (c.Append('typedef std::vector<%s > %s;' % (
                 self._type_helper.GetCppType(type_.item_type),
                 classname))
            )
    elif type_.property_type == PropertyType.STRING:
        if generate_typedefs:
            if type_.description:
                c.Comment(type_.description)
            c.Append('typedef std::string %(classname)s;')
    elif type_.property_type == PropertyType.ENUM:
        if type_.description:
            c.Comment(type_.description)
        c.Cblock(self._GenerateEnumDeclaration(classname, type_));
        # Top level enums are in a namespace scope so the methods shouldn't be
        # static. On the other hand, those declared inline (e.g. in an object) do.
        maybe_static = '' if is_toplevel else 'static '
        (c.Append()
          .Append('%sconst char* ToString(%s as_enum);' %
                  (maybe_static, classname))
          .Append('%s%s Parse%s(const std::string& as_string);' %
                  (maybe_static, classname, classname))
        )
    elif type_.property_type in (PropertyType.CHOICES,
                                 PropertyType.OBJECT):
        if type_.description:
            c.Comment(type_.description)
        # Struct skeleton: default ctor/dtor plus move construction/assignment.
        (c.Sblock('struct %(classname)s {')
          .Append('%(classname)s();')
          .Append('~%(classname)s();')
        )
        (c.Append('%(classname)s(%(classname)s&& rhs);')
          .Append('%(classname)s& operator=(%(classname)s&& rhs);')
        )
        if type_.origin.from_json:
            (c.Append()
              .Comment('Populates a %s object from a base::Value. Returns'
                       ' whether |out| was successfully populated.' % classname)
              .Append('static bool Populate(%s);' % self._GenerateParams(
                  ('const base::Value& value', '%s* out' % classname)))
            )
            if is_toplevel:
                (c.Append()
                  .Comment('Creates a %s object from a base::Value, or NULL on '
                           'failure.' % classname)
                  .Append('static std::unique_ptr<%s> FromValue(%s);' % (
                      classname, self._GenerateParams(('const base::Value& value',))))
                )
        if type_.origin.from_client:
            value_type = ('base::Value'
                          if type_.property_type is PropertyType.CHOICES else
                          'base::DictionaryValue')
            (c.Append()
              .Comment('Returns a new %s representing the serialized form of this '
                       '%s object.' % (value_type, classname))
              .Append('std::unique_ptr<%s> ToValue() const;' % value_type)
            )
        if type_.property_type == PropertyType.CHOICES:
            # Choices are modelled with optional fields for each choice. Exactly one
            # field of the choice is guaranteed to be set by the compiler.
            c.Cblock(self._GenerateTypes(type_.choices))
            c.Append('// Choices:')
            for choice_type in type_.choices:
                c.Append('%s as_%s;' % (
                    self._type_helper.GetCppType(choice_type, is_ptr=True),
                    choice_type.unix_name))
        else:
            properties = type_.properties.values()
            (c.Append()
              .Cblock(self._GenerateTypes(p.type_ for p in properties))
              .Cblock(self._GenerateFields(properties)))
            if type_.additional_properties is not None:
                # Most additionalProperties actually have type "any", which is better
                # modelled as a DictionaryValue rather than a map of string -> Value.
                if type_.additional_properties.property_type == PropertyType.ANY:
                    c.Append('base::DictionaryValue additional_properties;')
                else:
                    (c.Cblock(self._GenerateType(type_.additional_properties))
                      .Append('std::map<std::string, %s> additional_properties;' %
                              self._type_helper.GetCppType(type_.additional_properties,
                                                           is_in_container=True))
                    )
        (c.Eblock()
          .Append()
          .Sblock(' private:')
          .Append('DISALLOW_COPY_AND_ASSIGN(%(classname)s);')
          .Eblock('};')
        )
    return c.Substitute({'classname': classname})
def _GenerateEvent(self, event):
    """Generates the namespaces for an event.
    """
    code = Code()
    # TODO(kalman): use event.unix_name not Classname.
    event_namespace = cpp_util.Classname(event.name)
    code.Append('namespace %s {' % event_namespace)
    code.Append()
    code.Concat(self._GenerateEventNameConstant(event))
    code.Concat(self._GenerateCreateCallbackArguments(event))
    code.Append('} // namespace %s' % event_namespace)
    return code
def _GenerateFunction(self, function):
    """Generates the namespaces and structs for a function.
    """
    code = Code()
    # TODO(kalman): Use function.unix_name not Classname here.
    function_namespace = cpp_util.Classname(function.name)
    # Windows has a #define for SendMessage, so to avoid any issues, we need
    # to not use the name.
    if function_namespace == 'SendMessage':
        function_namespace = 'PassMessage'
    code.Append('namespace %s {' % function_namespace)
    code.Append()
    code.Cblock(self._GenerateFunctionParams(function))
    if function.callback:
        code.Cblock(self._GenerateFunctionResults(function.callback))
    code.Append('} // namespace %s' % function_namespace)
    return code
def _GenerateFunctionParams(self, function):
    """Generates the struct for passing parameters from JSON to a function.

    Returns an empty Code object when the function declares no parameters.
    """
    code = Code()
    if not function.params:
        return code
    code.Sblock('struct Params {')
    code.Append('static std::unique_ptr<Params> Create(%s);' %
                self._GenerateParams(('const base::ListValue& args',)))
    code.Append('~Params();')
    code.Append()
    code.Cblock(self._GenerateTypes(p.type_ for p in function.params))
    code.Cblock(self._GenerateFields(function.params))
    code.Eblock()
    code.Append()
    # Default construction and copying are disallowed; Params is only ever
    # built through the static Create() factory above.
    code.Sblock(' private:')
    code.Append('Params();')
    code.Append()
    code.Append('DISALLOW_COPY_AND_ASSIGN(Params);')
    code.Eblock('};')
    return code
def _GenerateTypes(self, types, is_toplevel=False, generate_typedefs=False):
    """Generate the structures required by a property such as OBJECT classes
    and enums, concatenated into a single Code object.
    """
    code = Code()
    for current_type in types:
        code.Cblock(self._GenerateType(current_type,
                                       is_toplevel=is_toplevel,
                                       generate_typedefs=generate_typedefs))
    return code
def _GenerateCreateCallbackArguments(self, function):
    """Generates functions for passing parameters to a callback.
    """
    code = Code()
    code.Cblock(self._GenerateTypes((p.type_ for p in function.params),
                                    is_toplevel=True))
    declarations = []
    for param in function.params:
        # Comments are interleaved per-parameter while the declaration list
        # is accumulated for the single Create() signature below.
        if param.description:
            code.Comment(param.description)
        declarations.append(cpp_util.GetParameterDeclaration(
            param, self._type_helper.GetCppType(param.type_)))
    code.Append('std::unique_ptr<base::ListValue> Create(%s);' %
                ', '.join(declarations))
    return code
def _GenerateEventNameConstant(self, event):
    """Generates a constant string array for the event name.
    """
    code = Code()
    code.Append('extern const char kEventName[]; // "%s.%s"' % (
        self._namespace.name, event.name))
    code.Append()
    return code
def _GenerateFunctionResults(self, callback):
    """Generates namespace for passing a function's result back.
    """
    code = Code()
    code.Append('namespace Results {')
    code.Append()
    code.Concat(self._GenerateCreateCallbackArguments(callback))
    code.Append('} // namespace Results')
    return code
def _GenerateParams(self, params):
    """Builds the parameter list for a function, given an array of parameters.

    When error-message generation is enabled, an extra |error| out-parameter
    is appended to every generated signature.
    """
    # |error| is populated with warnings and/or errors found during parsing.
    # |error| being set does not necessarily imply failure and may be
    # recoverable.
    # For example, optional properties may have failed to parse, but the
    # parser was able to continue.
    if self._generate_error_messages:
        params = params + ('base::string16* error',)
    return ', '.join(str(p) for p in params)
| |
from django.test import TestCase
from django.db.models import ObjectDoesNotExist
from cntapp.models import Directory, Document, SubDirRelation
from .helpers import DocumentFactory, PdfDocumentFactory, DirectoryFactory, init_test_dirs
from .helpers import DOCUMENT_BASE_NAME, DIRECTORY_BASE_NAME
class DocumentTest(TestCase):
    """Tests for the Document model and its many-to-many link to Directory."""

    def setUp(self):
        super(DocumentTest, self).setUp()
        # Reset the factory counter so generated names are deterministic
        # (document_0.pdf, document_1.pdf, ...) in every test.
        DocumentFactory.reset_sequence(force=True)

    def test_create_and_delete_document(self):
        d = PdfDocumentFactory()
        self.assertEqual(DOCUMENT_BASE_NAME + '0.pdf', d.name)
        self.assertEqual(1, len(Document.objects.all()))
        d = Document.objects.get(id=1)
        self.assertEqual(DOCUMENT_BASE_NAME + '0.pdf', d.name)
        d.delete()
        self.assertEqual(0, len(Document.objects.all()))

    def test_add_and_delete_document_in_dir(self):
        r"""
        d1     d0
         |   __|_________
          \  |     |     |
           ->f_1  f_2   f_3
        """
        d0 = DirectoryFactory()
        d1 = DirectoryFactory()
        f_1 = PdfDocumentFactory()
        f_2 = PdfDocumentFactory()
        f_3 = PdfDocumentFactory()
        d0.documents.add(f_1)
        d0.documents.add(f_2)
        d0.documents.add(f_3)
        d1.documents.add(f_1)
        self.assertEqual(3, len(d0.documents.all()))
        self.assertEqual(1, len(d1.documents.all()))
        # assertEquals is a deprecated alias; use assertEqual throughout.
        self.assertEqual(3, len(Document.objects.all()))
        self.assertEqual(2, len(f_1.directory_set.all()))

        d0.documents.remove(f_1)
        self.assertEqual(2, len(d0.documents.all()))
        self.assertEqual(1, len(d1.documents.all()))
        self.assertEqual(1, len(f_1.directory_set.all()))

        d1.documents.remove(f_1)
        self.assertEqual(0, len(d1.documents.all()))
        self.assertEqual(0, len(f_1.directory_set.all()))
        # Removing a document from a directory only unlinks it; the document
        # objects themselves remain.
        self.assertEqual(3, len(Document.objects.all()))

    def test_get_parents(self):
        d0 = DirectoryFactory()
        d1 = DirectoryFactory()
        f_1 = PdfDocumentFactory()
        d0.documents.add(f_1)
        d1.documents.add(f_1)
        self.assertEqual(2, f_1.directory_set.count())
class DirectoryTestCase(TestCase):
    """Tests for the Directory model: the directed graph built from
    SubDirRelation links and its traversal/cleanup behaviour."""

    def setUp(self):
        pass

    def create_dir(self, dir_name):
        """Creates, saves and returns a Directory named |dir_name|."""
        d = Directory(name=dir_name)
        d.save()
        return d

    def test_create_dir(self):
        dr = self.create_dir('root')
        self.assertIsNotNone(dr)
        self.assertEqual(dr.name, 'root')

    def test_add_dir(self):
        root = self.create_dir('root')
        dir_a = self.create_dir('dir_a')
        root.add_sub_dir(dir_a)
        self.assertIsNotNone(root.sub_dirs.get(name=dir_a.name))
        # test avoid duplicate
        root.add_sub_dir(dir_a)
        self.assertEqual(len(root.sub_dirs.filter(name=dir_a.name)), 1)
        # test add multiple sub dirs
        dir_b = self.create_dir('dir_b')
        root.add_sub_dir(dir_b)
        self.assertEqual(len(root.sub_dirs.all()), 2)

    def test_get_parents(self):
        dir_a = self.create_dir('dir_a')
        dir_b = self.create_dir('dir_b')
        final_dir = self.create_dir('final_dir')
        self.assertEqual(len(final_dir.get_parents()), 0)
        dir_a.add_sub_dir(final_dir)
        self.assertEqual(len(final_dir.get_parents()), 1)
        dir_b.add_sub_dir(final_dir)
        self.assertEqual(len(final_dir.get_parents()), 2)

    def test_remove_sub_dir(self):
        root = self.create_dir('root')
        dir_a = self.create_dir('dir_a')
        root.add_sub_dir(dir_a)
        self.assertEqual(len(root.get_sub_dirs()), 1)
        self.assertEqual(len(Directory.objects.all()), 2)
        root.remove_sub_dir(dir_a)
        self.assertEqual(len(root.get_sub_dirs()), 0)
        self.assertEqual(len(Directory.objects.all()), 1)

    def test_remove_not_sub_dir(self):
        root = self.create_dir('root')
        dir_a = self.create_dir('dir_a')
        with self.assertRaises(SubDirRelation.DoesNotExist):
            root.remove_sub_dir(dir_a)

    def test_remove_sub_dir_two_parents(self):
        p_a = self.create_dir('parent_a')
        p_b = self.create_dir('parent_b')
        sub_dir = self.create_dir('dir_a')
        p_a.add_sub_dir(sub_dir)
        p_b.add_sub_dir(sub_dir)
        self.assertEqual(len(sub_dir.get_parents()), 2)
        self.assertEqual(len(Directory.objects.all()), 3)
        p_a.remove_sub_dir(sub_dir)
        # sub_dir should not be removed since it still has a parent !
        self.assertEqual(len(Directory.objects.all()), 3)
        p_b.remove_sub_dir(sub_dir)
        # sub_dir should be removed since it is isolated !
        self.assertEqual(len(Directory.objects.all()), 2)

    def test_remove_sub_dir_recursively(self):
        r"""
        create the dir graph:

                 root
               /  |  \
              a   b   c
               \  |   |
                ab_a  |
               /    \ |
          ab_a_a   ab_a_b
        """
        root = self.create_dir('root')
        a = self.create_dir('a')
        b = self.create_dir('b')
        c = self.create_dir('c')
        ab_a = self.create_dir('ab_a')
        # Fixed: this directory was mistakenly created with the name 'ab_a',
        # duplicating ab_a's name and making the DoesNotExist checks below
        # test the same name twice.
        ab_a_a = self.create_dir('ab_a_a')
        ab_a_b = self.create_dir('ab_a_b')
        root.add_sub_dir(a).add_sub_dir(b).add_sub_dir(c)
        a.add_sub_dir(ab_a)
        b.add_sub_dir(ab_a)
        ab_a.add_sub_dir(ab_a_a).add_sub_dir(ab_a_b)
        c.add_sub_dir(ab_a_b)
        self.assertEqual(len(Directory.objects.all()), 7)
        self.assertEqual(len(SubDirRelation.objects.all()), 8)

        a.remove_sub_dir(ab_a)
        # object 'ab_a' not removed because there is a link,
        # but there is one link less
        self.assertEqual(len(Directory.objects.all()), 7)
        self.assertEqual(len(SubDirRelation.objects.all()), 7)

        # 'ab_a' and 'ab_a_a' are deleted,
        # 'ab_a_b' is not because it's linked to c
        b.remove_sub_dir(ab_a)
        self.assertEqual(len(Directory.objects.all()), 5)
        self.assertEqual(len(SubDirRelation.objects.all()), 4)
        with self.assertRaises(ObjectDoesNotExist):
            Directory.objects.get(name=ab_a.name)
        with self.assertRaises(ObjectDoesNotExist):
            Directory.objects.get(name=ab_a_a.name)
        self.assertIsNotNone(Directory.objects.get(name=ab_a_b.name))

        c.remove_sub_dir(ab_a_b)
        with self.assertRaises(ObjectDoesNotExist):
            Directory.objects.get(name=ab_a_b.name)

    def test_unlink_directory(self):
        init_test_dirs()
        ab_a = Directory.objects.get(name='ab_a')
        ab_a_a = Directory.objects.get(name='ab_a_a')
        self.assertEqual(6, Directory.objects.count())
        self.assertIn(ab_a_a, ab_a.get_sub_dirs())
        self.assertIn(ab_a, ab_a_a.get_parents())
        # correct unlink
        self.assertTrue(ab_a.unlink_sub_dir(ab_a_a))
        self.assertNotIn(ab_a_a, ab_a.get_sub_dirs())
        self.assertNotIn(ab_a, ab_a_a.get_parents())
        # incorrect unlink
        self.assertFalse(ab_a.unlink_sub_dir(ab_a_a))
        self.assertNotIn(ab_a_a, ab_a.get_sub_dirs())
        self.assertEqual(6, Directory.objects.count())

    def test_get_paths(self):
        init_test_dirs()
        a = Directory.objects.get(name='a')
        b = Directory.objects.get(name='b')
        c = Directory.objects.get(name='c')
        ab_a = Directory.objects.get(name='ab_a')
        ab_a_a = Directory.objects.get(name='ab_a_a')
        ab_a_b = Directory.objects.get(name='ab_a_b')
        path_1 = [a, ab_a, ab_a_a]
        path_2 = [b, ab_a, ab_a_a]
        self.assertEqual([[a]], a.get_paths())
        self.assertEqual([path_1, path_2], ab_a_a.get_paths())
        self.assertEqual(
            [
                [a, ab_a, ab_a_b],
                [b, ab_a, ab_a_b],
                [c, ab_a_b],
            ],
            ab_a_b.get_paths()
        )
| |
"""SCons.Tool.rpmutils.py
RPM specific helper routines for general usage in the test framework
and SCons core modules.
Since we check for the RPM package target name in several places,
we have to know which machine/system name RPM will use for the current
hardware setup. The following dictionaries and functions try to
mimic the exact naming rules of the RPM source code.
They were directly derived from the file "rpmrc.in" of the version
rpm-4.9.1.3. For updating to a more recent version of RPM, this Python
script can be used standalone. The usage() function below shows the
exact syntax.
"""
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import platform
# Start of rpmrc dictionaries (Marker, don't change or remove!)
os_canon = {
'AIX' : ['AIX','5'],
'AmigaOS' : ['AmigaOS','5'],
'BSD_OS' : ['bsdi','12'],
'CYGWIN32_95' : ['cygwin32','15'],
'CYGWIN32_NT' : ['cygwin32','14'],
'Darwin' : ['darwin','21'],
'FreeBSD' : ['FreeBSD','8'],
'HP-UX' : ['hpux10','6'],
'IRIX' : ['Irix','2'],
'IRIX64' : ['Irix64','10'],
'Linux' : ['Linux','1'],
'Linux/390' : ['OS/390','20'],
'Linux/ESA' : ['VM/ESA','20'],
'MacOSX' : ['macosx','21'],
'MiNT' : ['FreeMiNT','17'],
'NEXTSTEP' : ['NextStep','11'],
'OS/390' : ['OS/390','18'],
'OSF1' : ['osf1','7'],
'SCO_SV' : ['SCO_SV3.2v5.0.2','9'],
'SunOS4' : ['SunOS','4'],
'SunOS5' : ['solaris','3'],
'UNIX_SV' : ['MP_RAS','16'],
'VM/ESA' : ['VM/ESA','19'],
'machten' : ['machten','13'],
'osf3.2' : ['osf1','7'],
'osf4.0' : ['osf1','7'],
}
buildarch_compat = {
'alpha' : ['noarch'],
'alphaev5' : ['alpha'],
'alphaev56' : ['alphaev5'],
'alphaev6' : ['alphapca56'],
'alphaev67' : ['alphaev6'],
'alphapca56' : ['alphaev56'],
'amd64' : ['x86_64'],
'armv3l' : ['noarch'],
'armv4b' : ['noarch'],
'armv4l' : ['armv3l'],
'armv4tl' : ['armv4l'],
'armv5tejl' : ['armv5tel'],
'armv5tel' : ['armv4tl'],
'armv6l' : ['armv5tejl'],
'armv7l' : ['armv6l'],
'atariclone' : ['m68kmint','noarch'],
'atarist' : ['m68kmint','noarch'],
'atariste' : ['m68kmint','noarch'],
'ataritt' : ['m68kmint','noarch'],
'athlon' : ['i686'],
'falcon' : ['m68kmint','noarch'],
'geode' : ['i586'],
'hades' : ['m68kmint','noarch'],
'hppa1.0' : ['parisc'],
'hppa1.1' : ['hppa1.0'],
'hppa1.2' : ['hppa1.1'],
'hppa2.0' : ['hppa1.2'],
'i386' : ['noarch','fat'],
'i486' : ['i386'],
'i586' : ['i486'],
'i686' : ['i586'],
'ia32e' : ['x86_64'],
'ia64' : ['noarch'],
'm68k' : ['noarch'],
'milan' : ['m68kmint','noarch'],
'mips' : ['noarch'],
'mipsel' : ['noarch'],
'parisc' : ['noarch'],
'pentium3' : ['i686'],
'pentium4' : ['pentium3'],
'ppc' : ['noarch','fat'],
'ppc32dy4' : ['noarch'],
'ppc64' : ['noarch','fat'],
'ppc64iseries' : ['ppc64'],
'ppc64pseries' : ['ppc64'],
'ppc8260' : ['noarch'],
'ppc8560' : ['noarch'],
'ppciseries' : ['noarch'],
'ppcpseries' : ['noarch'],
's390' : ['noarch'],
's390x' : ['noarch'],
'sh3' : ['noarch'],
'sh4' : ['noarch'],
'sh4a' : ['sh4'],
'sparc' : ['noarch'],
'sparc64' : ['sparcv9v'],
'sparc64v' : ['sparc64'],
'sparcv8' : ['sparc'],
'sparcv9' : ['sparcv8'],
'sparcv9v' : ['sparcv9'],
'sun4c' : ['noarch'],
'sun4d' : ['noarch'],
'sun4m' : ['noarch'],
'sun4u' : ['noarch'],
'x86_64' : ['noarch'],
}
os_compat = {
'BSD_OS' : ['bsdi'],
'Darwin' : ['MacOSX'],
'FreeMiNT' : ['mint','MiNT','TOS'],
'IRIX64' : ['IRIX'],
'MiNT' : ['FreeMiNT','mint','TOS'],
'TOS' : ['FreeMiNT','MiNT','mint'],
'bsdi4.0' : ['bsdi'],
'hpux10.00' : ['hpux9.07'],
'hpux10.01' : ['hpux10.00'],
'hpux10.10' : ['hpux10.01'],
'hpux10.20' : ['hpux10.10'],
'hpux10.30' : ['hpux10.20'],
'hpux11.00' : ['hpux10.30'],
'hpux9.05' : ['hpux9.04'],
'hpux9.07' : ['hpux9.05'],
'mint' : ['FreeMiNT','MiNT','TOS'],
'ncr-sysv4.3' : ['ncr-sysv4.2'],
'osf4.0' : ['osf3.2','osf1'],
'solaris2.4' : ['solaris2.3'],
'solaris2.5' : ['solaris2.3','solaris2.4'],
'solaris2.6' : ['solaris2.3','solaris2.4','solaris2.5'],
'solaris2.7' : ['solaris2.3','solaris2.4','solaris2.5','solaris2.6'],
}
arch_compat = {
'alpha' : ['axp','noarch'],
'alphaev5' : ['alpha'],
'alphaev56' : ['alphaev5'],
'alphaev6' : ['alphapca56'],
'alphaev67' : ['alphaev6'],
'alphapca56' : ['alphaev56'],
'amd64' : ['x86_64','athlon','noarch'],
'armv3l' : ['noarch'],
'armv4b' : ['noarch'],
'armv4l' : ['armv3l'],
'armv4tl' : ['armv4l'],
'armv5tejl' : ['armv5tel'],
'armv5tel' : ['armv4tl'],
'armv6l' : ['armv5tejl'],
'armv7l' : ['armv6l'],
'atariclone' : ['m68kmint','noarch'],
'atarist' : ['m68kmint','noarch'],
'atariste' : ['m68kmint','noarch'],
'ataritt' : ['m68kmint','noarch'],
'athlon' : ['i686'],
'falcon' : ['m68kmint','noarch'],
'geode' : ['i586'],
'hades' : ['m68kmint','noarch'],
'hppa1.0' : ['parisc'],
'hppa1.1' : ['hppa1.0'],
'hppa1.2' : ['hppa1.1'],
'hppa2.0' : ['hppa1.2'],
'i370' : ['noarch'],
'i386' : ['noarch','fat'],
'i486' : ['i386'],
'i586' : ['i486'],
'i686' : ['i586'],
'ia32e' : ['x86_64','athlon','noarch'],
'ia64' : ['noarch'],
'milan' : ['m68kmint','noarch'],
'mips' : ['noarch'],
'mipsel' : ['noarch'],
'osfmach3_i386' : ['i486'],
'osfmach3_i486' : ['i486','osfmach3_i386'],
'osfmach3_i586' : ['i586','osfmach3_i486'],
'osfmach3_i686' : ['i686','osfmach3_i586'],
'osfmach3_ppc' : ['ppc'],
'parisc' : ['noarch'],
'pentium3' : ['i686'],
'pentium4' : ['pentium3'],
'powerpc' : ['ppc'],
'powerppc' : ['ppc'],
'ppc' : ['rs6000'],
'ppc32dy4' : ['ppc'],
'ppc64' : ['ppc'],
'ppc64iseries' : ['ppc64'],
'ppc64pseries' : ['ppc64'],
'ppc8260' : ['ppc'],
'ppc8560' : ['ppc'],
'ppciseries' : ['ppc'],
'ppcpseries' : ['ppc'],
'rs6000' : ['noarch','fat'],
's390' : ['noarch'],
's390x' : ['s390','noarch'],
'sh3' : ['noarch'],
'sh4' : ['noarch'],
'sh4a' : ['sh4'],
'sparc' : ['noarch'],
'sparc64' : ['sparcv9'],
'sparc64v' : ['sparc64'],
'sparcv8' : ['sparc'],
'sparcv9' : ['sparcv8'],
'sparcv9v' : ['sparcv9'],
'sun4c' : ['sparc'],
'sun4d' : ['sparc'],
'sun4m' : ['sparc'],
'sun4u' : ['sparc64'],
'x86_64' : ['amd64','athlon','noarch'],
}
buildarchtranslate = {
'alphaev5' : ['alpha'],
'alphaev56' : ['alpha'],
'alphaev6' : ['alpha'],
'alphaev67' : ['alpha'],
'alphapca56' : ['alpha'],
'amd64' : ['x86_64'],
'armv3l' : ['armv3l'],
'armv4b' : ['armv4b'],
'armv4l' : ['armv4l'],
'armv4tl' : ['armv4tl'],
'armv5tejl' : ['armv5tejl'],
'armv5tel' : ['armv5tel'],
'armv6l' : ['armv6l'],
'armv7l' : ['armv7l'],
'atariclone' : ['m68kmint'],
'atarist' : ['m68kmint'],
'atariste' : ['m68kmint'],
'ataritt' : ['m68kmint'],
'athlon' : ['i386'],
'falcon' : ['m68kmint'],
'geode' : ['i386'],
'hades' : ['m68kmint'],
'i386' : ['i386'],
'i486' : ['i386'],
'i586' : ['i386'],
'i686' : ['i386'],
'ia32e' : ['x86_64'],
'ia64' : ['ia64'],
'milan' : ['m68kmint'],
'osfmach3_i386' : ['i386'],
'osfmach3_i486' : ['i386'],
'osfmach3_i586' : ['i386'],
'osfmach3_i686' : ['i386'],
'osfmach3_ppc' : ['ppc'],
'pentium3' : ['i386'],
'pentium4' : ['i386'],
'powerpc' : ['ppc'],
'powerppc' : ['ppc'],
'ppc32dy4' : ['ppc'],
'ppc64iseries' : ['ppc64'],
'ppc64pseries' : ['ppc64'],
'ppc8260' : ['ppc'],
'ppc8560' : ['ppc'],
'ppciseries' : ['ppc'],
'ppcpseries' : ['ppc'],
's390' : ['s390'],
's390x' : ['s390x'],
'sh3' : ['sh3'],
'sh4' : ['sh4'],
'sh4a' : ['sh4'],
'sparc64v' : ['sparc64'],
'sparcv8' : ['sparc'],
'sparcv9' : ['sparc'],
'sparcv9v' : ['sparc'],
'sun4c' : ['sparc'],
'sun4d' : ['sparc'],
'sun4m' : ['sparc'],
'sun4u' : ['sparc64'],
'x86_64' : ['x86_64'],
}
optflags = {
'alpha' : ['-O2','-g','-mieee'],
'alphaev5' : ['-O2','-g','-mieee','-mtune=ev5'],
'alphaev56' : ['-O2','-g','-mieee','-mtune=ev56'],
'alphaev6' : ['-O2','-g','-mieee','-mtune=ev6'],
'alphaev67' : ['-O2','-g','-mieee','-mtune=ev67'],
'alphapca56' : ['-O2','-g','-mieee','-mtune=pca56'],
'amd64' : ['-O2','-g'],
'armv3l' : ['-O2','-g','-march=armv3'],
'armv4b' : ['-O2','-g','-march=armv4'],
'armv4l' : ['-O2','-g','-march=armv4'],
'armv4tl' : ['-O2','-g','-march=armv4t'],
'armv5tejl' : ['-O2','-g','-march=armv5te'],
'armv5tel' : ['-O2','-g','-march=armv5te'],
'armv6l' : ['-O2','-g','-march=armv6'],
'armv7l' : ['-O2','-g','-march=armv7'],
'atariclone' : ['-O2','-g','-fomit-frame-pointer'],
'atarist' : ['-O2','-g','-fomit-frame-pointer'],
'atariste' : ['-O2','-g','-fomit-frame-pointer'],
'ataritt' : ['-O2','-g','-fomit-frame-pointer'],
'athlon' : ['-O2','-g','-march=athlon'],
'falcon' : ['-O2','-g','-fomit-frame-pointer'],
'fat' : ['-O2','-g','-arch','i386','-arch','ppc'],
'geode' : ['-Os','-g','-m32','-march=geode'],
'hades' : ['-O2','-g','-fomit-frame-pointer'],
'hppa1.0' : ['-O2','-g','-mpa-risc-1-0'],
'hppa1.1' : ['-O2','-g','-mpa-risc-1-0'],
'hppa1.2' : ['-O2','-g','-mpa-risc-1-0'],
'hppa2.0' : ['-O2','-g','-mpa-risc-1-0'],
'i386' : ['-O2','-g','-march=i386','-mtune=i686'],
'i486' : ['-O2','-g','-march=i486'],
'i586' : ['-O2','-g','-march=i586'],
'i686' : ['-O2','-g','-march=i686'],
'ia32e' : ['-O2','-g'],
'ia64' : ['-O2','-g'],
'm68k' : ['-O2','-g','-fomit-frame-pointer'],
'milan' : ['-O2','-g','-fomit-frame-pointer'],
'mips' : ['-O2','-g'],
'mipsel' : ['-O2','-g'],
'parisc' : ['-O2','-g','-mpa-risc-1-0'],
'pentium3' : ['-O2','-g','-march=pentium3'],
'pentium4' : ['-O2','-g','-march=pentium4'],
'ppc' : ['-O2','-g','-fsigned-char'],
'ppc32dy4' : ['-O2','-g','-fsigned-char'],
'ppc64' : ['-O2','-g','-fsigned-char'],
'ppc8260' : ['-O2','-g','-fsigned-char'],
'ppc8560' : ['-O2','-g','-fsigned-char'],
'ppciseries' : ['-O2','-g','-fsigned-char'],
'ppcpseries' : ['-O2','-g','-fsigned-char'],
's390' : ['-O2','-g'],
's390x' : ['-O2','-g'],
'sh3' : ['-O2','-g'],
'sh4' : ['-O2','-g','-mieee'],
'sh4a' : ['-O2','-g','-mieee'],
'sparc' : ['-O2','-g','-m32','-mtune=ultrasparc'],
'sparc64' : ['-O2','-g','-m64','-mtune=ultrasparc'],
'sparc64v' : ['-O2','-g','-m64','-mtune=niagara'],
'sparcv8' : ['-O2','-g','-m32','-mtune=ultrasparc','-mv8'],
'sparcv9' : ['-O2','-g','-m32','-mtune=ultrasparc'],
'sparcv9v' : ['-O2','-g','-m32','-mtune=niagara'],
'x86_64' : ['-O2','-g'],
}
arch_canon = {
'IP' : ['sgi','7'],
'alpha' : ['alpha','2'],
'alphaev5' : ['alphaev5','2'],
'alphaev56' : ['alphaev56','2'],
'alphaev6' : ['alphaev6','2'],
'alphaev67' : ['alphaev67','2'],
'alphapca56' : ['alphapca56','2'],
'amd64' : ['amd64','1'],
'armv3l' : ['armv3l','12'],
'armv4b' : ['armv4b','12'],
'armv4l' : ['armv4l','12'],
'armv5tejl' : ['armv5tejl','12'],
'armv5tel' : ['armv5tel','12'],
'armv6l' : ['armv6l','12'],
'armv7l' : ['armv7l','12'],
'atariclone' : ['m68kmint','13'],
'atarist' : ['m68kmint','13'],
'atariste' : ['m68kmint','13'],
'ataritt' : ['m68kmint','13'],
'athlon' : ['athlon','1'],
'falcon' : ['m68kmint','13'],
'geode' : ['geode','1'],
'hades' : ['m68kmint','13'],
'i370' : ['i370','14'],
'i386' : ['i386','1'],
'i486' : ['i486','1'],
'i586' : ['i586','1'],
'i686' : ['i686','1'],
'ia32e' : ['ia32e','1'],
'ia64' : ['ia64','9'],
'm68k' : ['m68k','6'],
'm68kmint' : ['m68kmint','13'],
'milan' : ['m68kmint','13'],
'mips' : ['mips','4'],
'mipsel' : ['mipsel','11'],
'pentium3' : ['pentium3','1'],
'pentium4' : ['pentium4','1'],
'ppc' : ['ppc','5'],
'ppc32dy4' : ['ppc32dy4','5'],
'ppc64' : ['ppc64','16'],
'ppc64iseries' : ['ppc64iseries','16'],
'ppc64pseries' : ['ppc64pseries','16'],
'ppc8260' : ['ppc8260','5'],
'ppc8560' : ['ppc8560','5'],
'ppciseries' : ['ppciseries','5'],
'ppcpseries' : ['ppcpseries','5'],
'rs6000' : ['rs6000','8'],
's390' : ['s390','14'],
's390x' : ['s390x','15'],
'sh' : ['sh','17'],
'sh3' : ['sh3','17'],
'sh4' : ['sh4','17'],
'sh4a' : ['sh4a','17'],
'sparc' : ['sparc','3'],
'sparc64' : ['sparc64','2'],
'sparc64v' : ['sparc64v','2'],
'sparcv8' : ['sparcv8','3'],
'sparcv9' : ['sparcv9','3'],
'sparcv9v' : ['sparcv9v','3'],
'sun4' : ['sparc','3'],
'sun4c' : ['sparc','3'],
'sun4d' : ['sparc','3'],
'sun4m' : ['sparc','3'],
'sun4u' : ['sparc64','2'],
'x86_64' : ['x86_64','1'],
'xtensa' : ['xtensa','18'],
}
# End of rpmrc dictionaries (Marker, don't change or remove!)
def defaultMachine():
    """ Return the canonicalized machine name. """
    machine = platform.machine()
    # Canonicalize via the arch table; unknown machines pass through as-is.
    canon = arch_canon.get(machine)
    return canon[0] if canon else machine
def defaultSystem():
    """ Return the canonicalized system name. """
    system = platform.system()
    # Canonicalize via the OS table; unknown systems pass through as-is.
    canon = os_canon.get(system)
    return canon[0] if canon else system
def defaultNames():
    """ Return the canonicalized machine and system name, as a pair. """
    return (defaultMachine(), defaultSystem())
def updateRpmDicts(rpmrc, pyfile):
    """ Read the given rpmrc file with RPM definitions and update the
    info dictionaries in the file pyfile with it.

    The arguments will usually be 'rpmrc.in' from a recent RPM source
    tree, and 'rpmutils.py' referring to this script itself.

    See also usage() below.

    Fixes over the original: files are closed via 'with', the bare
    'except: pass' that swallowed every error is narrowed to I/O errors,
    and the Python-2-only dict.has_key()/iteritems() calls are replaced.
    """
    # Allowed section names that get parsed
    sections = ['optflags',
                'arch_canon',
                'os_canon',
                'buildarchtranslate',
                'arch_compat',
                'os_compat',
                'buildarch_compat']
    try:
        # Read old rpmutils.py file
        with open(pyfile, "r") as f:
            oldpy = f.readlines()
        # Read current rpmrc.in file
        with open(rpmrc, "r") as f:
            rpm = f.readlines()
    except (IOError, OSError):
        # Mirror the original best-effort behaviour for missing/unreadable
        # inputs: leave the target file untouched.
        return

    # Parse the rpmrc contents into data[section][arch] = [values...]
    data = {}
    for l in rpm:
        l = l.rstrip('\n').replace(':', ' ')
        # Skip comments
        if l.lstrip().startswith('#'):
            continue
        tokens = l.strip().split()
        if tokens:
            key = tokens[0]
            if key in sections:
                # Have we met this section before?
                if key not in data:
                    # No, so insert it
                    data[key] = {}
                # Insert data
                data[key][tokens[1]] = tokens[2:]

    # Write new rpmutils.py file: copy everything outside the marker pair
    # verbatim, and regenerate the dictionaries right after the start marker.
    with open(pyfile, "w") as out:
        skipping = False
        for l in oldpy:
            if skipping:
                if l.startswith('# End of rpmrc dictionaries'):
                    skipping = False
                    out.write(l)
            else:
                out.write(l)
                if l.startswith('# Start of rpmrc dictionaries'):
                    skipping = True
                    # Write data sections to single dictionaries, in a
                    # deterministic (sorted) order.
                    for key in sorted(data.keys()):
                        entries = data[key]
                        out.write("%s = {\n" % key)
                        for arch in sorted(entries.keys()):
                            out.write(" '%s' : ['%s'],\n" %
                                      (arch, "','".join(entries[arch])))
                        out.write("}\n\n")
def usage():
    """ Print a short synopsis of the command-line usage.

    Fixed: the Python 2 'print' statement is a SyntaxError under Python 3;
    the function-call form works on both interpreters.
    """
    print("rpmutils.py rpmrc.in rpmutils.py")
def main():
    """ Entry point: expects <rpmrc.in> <target.py> command-line arguments. """
    import sys
    argv = sys.argv
    if len(argv) >= 3:
        updateRpmDicts(argv[1], argv[2])
    else:
        # Too few arguments: show the synopsis and exit successfully.
        usage()
        sys.exit(0)

if __name__ == "__main__":
    main()
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Set of classes and helper functions for building unit tests for the Mesos CLI.
"""
import io
import os
import pty
import shutil
import subprocess
import sys
import tempfile
import unittest
import parse
from tenacity import retry
from tenacity import stop_after_delay
from tenacity import wait_fixed
from cli import http, config
from cli.tests.constants import TEST_AGENT_IP
from cli.tests.constants import TEST_AGENT_PORT
from cli.tests.constants import TEST_MASTER_IP
from cli.tests.constants import TEST_MASTER_PORT
from cli.exceptions import CLIException
# Timeout used when creating, killing, and getting data from objects that are
# part of our test infrastructure.
TIMEOUT = 5
class CLITestCase(unittest.TestCase):
    """
    Base class for CLI TestCases.
    """

    @classmethod
    def setUpClass(cls):
        # Announce the concrete test-case class as its tests start running.
        print("\n{class_name}".format(class_name=cls.__name__))

    @staticmethod
    def default_mesos_build_dir():
        """
        Returns the default path of the Mesos build directory. Useful when
        we wish to use some binaries such as mesos-execute.
        """
        # Walk six levels up from this tests directory
        # (src/python/cli_new/lib/cli/tests) to the repository root,
        # then descend into "build".
        path = os.path.dirname(__file__)
        for _ in range(6):
            path = os.path.dirname(path)
        build_dir = os.path.join(path, "build")

        if os.path.isdir(build_dir):
            return build_dir
        raise CLIException("The Mesos build directory"
                           " does not exist: {path}"
                           .format(path=build_dir))

# This value is set to the correct path when running tests/main.py. We
# set it here to make sure that CLITestCase has a MESOS_BUILD_DIR member.
CLITestCase.MESOS_BUILD_DIR = ""
class Executable():
    """
    This class defines the base class for launching an executable for
    the CLI unit tests. It will be subclassed by (at least) a
    'Master', 'Agent', and 'Task' subclass.
    """
    def __init__(self):
        self.name = ""
        self.executable = ""
        self.shell = False
        self.flags = {}
        self.proc = None

    def __del__(self):
        # Guard with hasattr: __del__ can run even when __init__ never
        # completed, in which case 'proc' does not exist.
        if hasattr(self, "proc") and self.proc is not None:
            self.kill()

    def launch(self):
        """
        Launch 'self.executable'. We assume it is in the system PATH.
        """
        if self.proc is not None:
            raise CLIException("{name} already launched"
                               .format(name=self.name.capitalize()))

        if not os.path.exists(self.executable):
            raise CLIException("{name} executable not found"
                               .format(name=self.name.capitalize()))

        try:
            flag_args = ["--{key}={value}".format(key=key, value=value)
                         for key, value in dict(self.flags).items()]
            # Shell scripts are run through /bin/sh explicitly.
            base_cmd = (["/bin/sh", self.executable]
                        if self.shell else [self.executable])
            proc = subprocess.Popen(
                base_cmd + flag_args,
                stdin=subprocess.PIPE,
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT)
        except Exception as err:
            raise CLIException("Failed to launch '{executable}': {error}"
                               .format(executable=self.executable,
                                       error=err))

        # A non-None poll() result means the process already exited.
        if proc.poll():
            raise CLIException("Failed to launch '{executable}': {error}"
                               .format(executable=self.executable,
                                       error=proc.stdout.read()))

        self.proc = proc

    def kill(self):
        """
        Kills a previously launched executable.
        """
        if self.proc is None:
            return

        try:
            self.proc.stdin.close()
            self.proc.stdout.close()
            self.proc.kill()
            self.proc.wait()
            self.proc = None
        except Exception as err:
            raise CLIException("Could not kill {name}: {error}"
                               .format(name=self.name, error=err))
class Master(Executable):
    """
    This class defines the functions necessary to launch a master in
    the CLI unit tests.
    """
    # Number of live masters; used to enforce the single-master limit.
    count = 0

    def __init__(self, flags=None):
        """
        Fills in default master flags (ip, port, work_dir) and points
        `self.executable` at the `mesos-master.sh` helper script in the
        Mesos build directory.

        :param flags: optional dict of command-line flags for the master
        :raises CLIException: if a master already exists
        """
        super(Master, self).__init__()

        if Master.count > 0:
            raise CLIException("Creating more than one master"
                               " is currently not possible")

        if flags is None:
            flags = {}

        if "ip" not in flags:
            flags["ip"] = TEST_MASTER_IP
        if "port" not in flags:
            flags["port"] = TEST_MASTER_PORT
        if "work_dir" not in flags:
            flags["work_dir"] = tempfile.mkdtemp()

        self.flags = flags
        self.name = "master"
        self.addr = "{ip}:{port}".format(ip=flags["ip"], port=flags["port"])
        self.config = config.Config(None)
        self.executable = os.path.join(
            CLITestCase.MESOS_BUILD_DIR,
            "bin",
            "mesos-{name}.sh".format(name=self.name))
        self.shell = True

    def __del__(self):
        super(Master, self).__del__()
        # `self.flags` is a dict, so membership must be tested with `in`.
        # The previous `hasattr(self.flags, "work_dir")` was always False,
        # which leaked the temporary work directory on every run.
        if hasattr(self, "flags") and "work_dir" in self.flags:
            shutil.rmtree(self.flags["work_dir"])

    # pylint: disable=arguments-differ
    def launch(self):
        """
        After starting the master, we need to make sure its
        reference count is increased.
        """
        super(Master, self).launch()
        Master.count += 1

    def kill(self):
        """
        After killing the master, we need to make sure its
        reference count is decreased.
        """
        super(Master, self).kill()
        Master.count -= 1
class Agent(Executable):
    """
    This class defines the functions necessary to launch an agent in
    the CLI unit tests.
    """
    # Number of live agents; used to enforce the single-agent limit.
    count = 0

    def __init__(self, flags=None):
        """
        Fills in default agent flags (ip, port, master, work_dir,
        runtime_dir) and points `self.executable` at the `mesos-agent.sh`
        helper script in the Mesos build directory.

        :param flags: optional dict of command-line flags for the agent
        :raises CLIException: if an agent already exists
        """
        super(Agent, self).__init__()

        if Agent.count > 0:
            raise CLIException("Creating more than one agent"
                               " is currently not possible")

        if flags is None:
            flags = {}

        if "ip" not in flags:
            flags["ip"] = TEST_AGENT_IP
        if "port" not in flags:
            flags["port"] = TEST_AGENT_PORT
        if "master" not in flags:
            flags["master"] = "{ip}:{port}".format(
                ip=TEST_MASTER_IP,
                port=TEST_MASTER_PORT)
        if "work_dir" not in flags:
            flags["work_dir"] = tempfile.mkdtemp()
        if "runtime_dir" not in flags:
            flags["runtime_dir"] = tempfile.mkdtemp()

        # Disabling systemd support on Linux to run without sudo.
        if "linux" in sys.platform and "systemd_enable_support" not in flags:
            flags["systemd_enable_support"] = "false"

        self.flags = flags
        self.name = "agent"
        self.addr = "{ip}:{port}".format(ip=flags["ip"], port=flags["port"])
        self.config = config.Config(None)
        self.executable = os.path.join(
            CLITestCase.MESOS_BUILD_DIR,
            "bin",
            "mesos-{name}.sh".format(name=self.name))
        self.shell = True

    def __del__(self):
        super(Agent, self).__del__()
        # `self.flags` is a dict, so membership must be tested with `in`.
        # The previous `hasattr(self.flags, ...)` checks were always False,
        # which leaked both temporary directories on every run.
        if hasattr(self, "flags") and "work_dir" in self.flags:
            shutil.rmtree(self.flags["work_dir"])
        if hasattr(self, "flags") and "runtime_dir" in self.flags:
            shutil.rmtree(self.flags["runtime_dir"])

    # pylint: disable=arguments-differ
    def launch(self):
        """
        After starting the agent, we first need to make sure its
        reference count is increased and then check that it has
        successfully registered with the master before proceeding.
        """
        super(Agent, self).launch()
        Agent.count += 1

        data = http.get_json(self.flags["master"], "slaves", self.config)
        # Registration succeeded only if the master reports exactly one
        # agent. (The previous `== 1` comparison was inverted and raised
        # precisely when registration had succeeded.)
        if len(data["slaves"]) != 1:
            stdout = ""
            if self.proc.poll():
                stdout = "\n{output}".format(output=self.proc.stdout.read())
            raise CLIException("Could not get '/slaves' endpoint as JSON with"
                               " only 1 agent in it: {stdout}"
                               .format(stdout=stdout))

    # pylint: disable=arguments-differ
    def kill(self):
        """
        After killing the agent, we need to make sure it has
        successfully unregistered from the master before proceeding.
        """
        super(Agent, self).kill()

        data = http.get_json(self.flags["master"], "slaves", self.config)
        # After shutdown the master should still list the agent but mark it
        # inactive; raise if that is not what we observe. (The previous
        # condition was inverted and raised on a successful shutdown.)
        if len(data["slaves"]) != 1 or data["slaves"][0]["active"]:
            stdout = ""
            if self.proc.poll():
                stdout = "\n{output}".format(output=self.proc.stdout.read())
            raise CLIException("Could not get '/slaves' endpoint as"
                               " JSON with 0 agents in it: {stdout}"
                               .format(stdout=stdout))

        Agent.count -= 1
class Task(Executable):
    """
    This class defines the functions necessary to launch a task in
    the CLI unit tests.
    """
    # Number of tasks created so far; used to derive default task names.
    count = 0

    def __init__(self, flags=None):
        # Fill in defaults for the master address and the task name, then
        # point `self.executable` at the `mesos-execute` binary in the
        # Mesos build tree. A "command" flag is mandatory.
        super(Task, self).__init__()

        if flags is None:
            flags = {}

        if "master" not in flags:
            flags["master"] = "{ip}:{port}".format(
                ip=TEST_MASTER_IP,
                port=TEST_MASTER_PORT)
        if "name" not in flags:
            flags["name"] = "task-{id}".format(id=Task.count)
        if "command" not in flags:
            raise CLIException("No command supplied when creating task")

        self.flags = flags
        self.name = flags["name"]
        self.config = config.Config(None)
        self.executable = os.path.join(
            CLITestCase.MESOS_BUILD_DIR,
            "src",
            "mesos-execute")

    def __wait_for_containers(self, condition):
        """
        Wait for the agent's '/containers' endpoint
        to return data subject to 'condition'.

        'condition' is a callable taking the decoded '/containers' JSON;
        it is expected to raise (or be retried by http.get_json) until it
        is satisfied. Any failure is re-raised as CLIException.
        """
        try:
            data = http.get_json(self.flags["master"], "slaves", self.config)
        except Exception as exception:
            raise CLIException("Could not get '/slaves' endpoint"
                               " as JSON: {error}"
                               .format(error=exception))

        if len(data["slaves"]) != 1:
            raise CLIException("More than one agent detected when"
                               " reading from '/slaves' endpoint")

        # The agent "pid" field looks like "slave(1)@127.0.0.1:5051";
        # extract the host:port part to query the agent directly.
        try:
            agent = parse.parse(
                "slave({id})@{addr}",
                data["slaves"][0]["pid"])
        except Exception as exception:
            raise CLIException("Unable to parse agent info: {error}"
                               .format(error=exception))

        try:
            data = http.get_json(
                agent["addr"],
                "containers",
                self.config)
            condition(data)
        except Exception as exception:
            raise CLIException("Could not get '/containers' endpoint as"
                               " JSON subject to condition: {error}"
                               .format(error=exception))

    # pylint: disable=arguments-differ
    def launch(self):
        """
        After starting the task, we need to make sure its container
        has actually been added to the agent before proceeding.
        """
        super(Task, self).launch()
        Task.count += 1

        try:
            # pylint: disable=missing-docstring
            def container_exists(data):
                return any(container["executor_id"] == self.flags["name"]
                           for container in data)

            self.__wait_for_containers(container_exists)
        except Exception as exception:
            # If the process already exited, include its output in the
            # error message and drop our handle on it.
            stdout = ""
            if self.proc.poll():
                stdout = "\n{output}".format(output=self.proc.stdout.read())
                self.proc = None
            raise CLIException("Waiting for container '{name}'"
                               " failed: {error}{stdout}"
                               .format(name=self.flags["name"],
                                       error=exception,
                                       stdout=stdout))

    # pylint: disable=arguments-differ
    def kill(self):
        """
        After killing the task, we need to make sure its container has
        actually been removed from the agent before proceeding.
        """
        super(Task, self).kill()

        try:
            # pylint: disable=missing-docstring
            def container_does_not_exist(data):
                return not any(container["executor_id"] == self.flags["name"]
                               for container in data)

            self.__wait_for_containers(container_does_not_exist)
        except Exception as exception:
            raise CLIException("Container with name '{name}' still"
                               " exists after timeout: {error}"
                               .format(name=self.flags["name"],
                                       error=exception))

        Task.count -= 1
def capture_output(command, argv, extra_args=None):
    """
    Redirect the output of a command to a string and return it.

    :param command: callable invoked as ``command(argv, **extra_args)``
    :param argv: argument list forwarded to ``command``
    :param extra_args: optional dict of keyword arguments for ``command``
    :returns: everything ``command`` printed to stdout, stripped
    :raises CLIException: if ``command`` raises
    """
    if not extra_args:
        extra_args = {}

    real_stdout = sys.stdout
    sys.stdout = io.StringIO()
    try:
        command(argv, **extra_args)
        output = sys.stdout.getvalue().strip()
    except Exception as exception:
        raise CLIException("Could not get command output: {error}"
                           .format(error=exception))
    finally:
        # Restore stdout unconditionally. The original restored it only on
        # the success and `Exception` paths, so a SystemExit/KeyboardInterrupt
        # left sys.stdout pointing at the discarded StringIO buffer.
        sys.stdout = real_stdout
    return output
def exec_command(command, env=None, stdin=None, timeout=None):
    """
    Run `command` as a child process and return its exit status and output.

    :returns: tuple of (returncode, stdout, stderr) as text
    :raises CLIException: if the child does not finish within `timeout`
    """
    child = subprocess.Popen(
        command,
        stdin=stdin,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        env=env,
        universal_newlines=True)

    try:
        out, err = child.communicate(timeout=timeout)
    except subprocess.TimeoutExpired as exception:
        # The child process is not killed if the timeout expires, so in order
        # to cleanup properly a well-behaved application should kill the child
        # process and finish communication.
        # https://docs.python.org/3.5/library/subprocess.html
        child.kill()
        out, err = child.communicate()
        raise CLIException("Timeout expired: {error}".format(error=exception))

    return (child.returncode, out, err)
def popen_tty(cmd, shell=True):
    """
    Open a process with stdin connected to a pseudo-tty.

    :param cmd: command to run
    :type cmd: str
    :returns: (Popen, master) tuple, where master is the master side
    of the of the tty-pair. It is the responsibility of the caller
    to close the master fd, and to perform any cleanup (including
    waiting for completion) of the Popen object.
    :rtype: (Popen, int)
    """
    master_fd, slave_fd = pty.openpty()

    # The child gets the slave end as stdin and its own session (setsid),
    # so it believes it is attached to a real terminal.
    # pylint: disable=subprocess-popen-preexec-fn
    child = subprocess.Popen(cmd,
                             stdin=slave_fd,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE,
                             preexec_fn=os.setsid,
                             close_fds=True,
                             shell=shell)

    # The parent no longer needs the slave end once the child holds it.
    os.close(slave_fd)

    return (child, master_fd)
def wait_for_task(master, name, state, delay=1):
    """
    Wait for a task with a certain name to be in a given state.

    Polls the master's '/tasks' endpoint every 0.2s for up to `delay`
    seconds; raises CLIException if no matching task appears in time.
    """
    @retry(wait=wait_fixed(0.2), stop=stop_after_delay(delay))
    def _poll():
        listing = http.get_json(master.addr, "tasks",
                                config.Config(None))["tasks"]
        for task in listing:
            if task["name"] == name and task["state"] == state:
                return task
        # Trigger another retry round.
        raise Exception()

    try:
        return _poll()
    except Exception:
        raise CLIException("Timeout waiting for task expired")
| |
import csv
import gzip
import logging
import mmap
import os
import sys
import textwrap
from collections import Counter, defaultdict
from itertools import groupby
from operator import itemgetter
import pandas as pd
import pyfaidx
from Bio import SeqIO
from Bio.SeqRecord import SeqRecord
from Bio.Seq import Seq
import clashchimeras
# Module-wide logger. Handlers/levels are presumably configured by the
# application entry point — confirm before relying on output here.
logger = logging.getLogger('root')
class GFF:
  """GFF file parser for mirbase gff3 file

  This class uses memory-mapped file object to read a mirbase gff3 file. It
  contains methods to read, process a gff3 file and return genomic coordinates

  Attributes:
    fileName: A mirbase gff3 file path
  """

  def __init__(self, fileName=None):
    # Maps miRNA name -> byte offset of its record line in the gff3 file;
    # filled in by read() and used by process() for random access via mmap.
    self.features = {}
    self.fileName = fileName

  def read(self, featureType='miRNA_primary_transcript'):
    """Reads gff3 file provided during class initialization

    Stores the byte positions of every feature in a dict object named
    self.features

    Keyword Args:
      featureType: Feature type of a gff3 record, the third element of every
        record in the file. Please change this if you want to store mature
        form of microRNA, by default it uses primary transcript
        (default 'miRNA_primary_transcript')
    """
    logger.info('Reading %s' % self.fileName)
    self.fileHandle = open(self.fileName, 'r+b')
    # tell() before reading each line records where that line starts.
    bytePosition = self.fileHandle.tell()
    for line in self.fileHandle:
      row = line.decode('utf-8').rstrip().split("\t")
      if not row[0].startswith("#") and row[2] == featureType:
        attributes = row[-1].split(";")
        for attribute in attributes:
          if attribute.startswith('Name'):
            # e.g. "Name=hsa-mir-100" -> key "hsa-mir-100"
            mirbase_name = attribute.split("=")[-1]
            self.features[mirbase_name] = bytePosition
      bytePosition = self.fileHandle.tell()
    self.fileHandle.close()
    logger.debug('Reading %s finished' % self.fileName)

  def process(self, name):
    """A method to return a Record object providing genomic information

    Args:
      name: A valid miRNA_primary_transcript name

    Returns:
      An object Record containing scaffold, start, end, strand, mirbase_id and
      mirbase_name as its variables for access
    """
    self.fileHandle = open(self.fileName, 'r+b')
    self.mm = mmap.mmap(self.fileHandle.fileno(), 0)
    # Jump straight to the byte offset recorded by read() and re-parse
    # just that one line.
    self.mm.seek(self.features[name])
    row = self.mm.readline().decode('utf-8').rstrip().split("\t")
    attributes = row[-1].split(";")
    for attribute in attributes:
      if attribute.startswith("ID"):
        _id = attribute.split("=")[-1]
      elif attribute.startswith("Name"):
        _name = attribute.split("=")[-1]
    record = Record(scaffold=row[0], start=int(row[3]), end=int(row[4]),
                    strand=row[6], mirbase_id=_id, mirbase_name=_name)
    self.fileHandle.close()
    return record

  def coordinates(self, name, start=None, end=None):
    """A method to return a bed record containing genomic coordinates for the
    aligned segment

    Keyword Args:
      start: The alignment start position of the cDNA molecule or the relative
        start of the particular molecule
      end: The alignment end position in the cDNA molecule or the relative end
        of the particular molecule

    Args:
      name: A valid miRNA_primary_transcript name

    Returns:
      A tuple of strings containing elements for a bed record
    """
    record = self.process(name)
    # No sub-range requested: cover the whole transcript (1-based,
    # inclusive relative coordinates).
    if not start and not end:
      start = 1
      end = record.end - record.start + 1
    # positions maps relative (1-based) transcript position -> genomic
    # position; for '-' strand the genomic walk is reversed.
    positions = {}
    match_positions = []

    if record.strand == '+':
      _start = 1
      for relative, actual in enumerate(range(record.start - 1, record.end),
                                        start=_start):
        positions[relative] = actual
      for pos in range(start, end + 1):
        match_positions.append(positions[pos])
      # BED half-open interval: end is max matched position + 1.
      return [(record.scaffold, min(match_positions), max(match_positions) + 1,
               record.mirbase_name, 0, record.strand)]

    elif record.strand == '-':
      _start = 1
      for relative, actual in enumerate(reversed(range(record.start - 1,
                                                       record.end)),
                                        start=_start):
        positions[relative] = actual
      for pos in range(start, end + 1):
        match_positions.append(positions[pos])
      return [(record.scaffold, min(match_positions), max(match_positions) + 1,
               record.mirbase_name, 0, record.strand)]
class GTF:
  """GTF file parser for gencode gtf file

  This class uses memory-mapped file object to read a gencode gtf file. It
  contains methods to read, process a gtf file and return genomic coordinates

  Attributes:
    fileName: A gencode gtf file path
  """

  def __init__(self, fileName=None):
    # transcript_id -> list of byte offsets of its exon records (read())
    self.features = defaultdict(list)
    # transcript_id -> list of exon tuples (readBiotype())
    self.biotypeFeatures = defaultdict(list)
    # gene_id -> list of byte offsets of its exon records (read())
    self.geneFeatures = defaultdict(list)
    self.fileName = fileName
    # transcript_id -> gene_id mapping (read())
    self.geneIds = {}

  def readBiotype(self, featureType='exon', biotype=None):
    """Collects per-transcript exon tuples for records of a given biotype.

    Fills self.biotypeFeatures with tuples of (exon_number, scaffold,
    start, end, strand, gene_id, havana_gene, havana_transcript,
    transcript_name, gene_name).

    Keyword Args:
      featureType: third GTF column to match (default 'exon')
      biotype: transcript_type to keep; 'tRNA' is special-cased to match
        gencode's "tRNAscan" transcript_type
    """
    logger.info('Reading %s' % self.fileName)
    self.fileHandle = open(self.fileName, 'r+b')
    for line in self.fileHandle:
      row = line.decode('utf-8').rstrip().split("\t")
      if not row[0].startswith("#") and row[2] == featureType:
        attributes = row[-1].split("; ")
        # Defaults for optional attributes that may be absent on a record.
        havana_transcript = '-'
        havana_gene = '-'
        exon_number = '0'
        for attribute in attributes:
          # Attribute values look like: key "value"; the slicing strips
          # the surrounding quotes.
          if attribute.startswith("transcript_id"):
            transcript_id = attribute.split(" ")[-1][1:-1]
          elif attribute.startswith("transcript_type"):
            transcript_type = attribute.split(" ")[-1][1:-1]
          elif attribute.startswith("exon_number"):
            exon_number = int(attribute.split(" ")[-1])
          elif attribute.startswith("havana_gene"):
            havana_gene = attribute.split(" ")[-1][1:-1]
          elif attribute.startswith("havana_transcript"):
            # NOTE(review): [1:-2] drops one character more than the other
            # attributes' [1:-1] — possibly intentional (trailing ';'?)
            # but looks like a typo; confirm against a real gencode line.
            havana_transcript = attribute.split(" ")[-1][1:-2]
          elif attribute.startswith("gene_id"):
            gene_id = attribute.split(" ")[-1][1:-1]
          elif attribute.startswith("gene_name"):
            gene_name = attribute.split(" ")[-1][1:-1]
          elif attribute.startswith("transcript_name"):
            transcript_name = attribute.split(" ")[-1][1:-1]
        if biotype == 'tRNA':
          if transcript_type == "tRNAscan":
            self.biotypeFeatures[transcript_id].append((exon_number, row[0],
                                                        int(row[3]), int(row[4]),
                                                        row[6], gene_id,
                                                        havana_gene,
                                                        havana_transcript,
                                                        transcript_name,
                                                        gene_name))
        else:
          if transcript_type == biotype:
            self.biotypeFeatures[transcript_id].append((exon_number, row[0],
                                                        int(row[3]), int(row[4]),
                                                        row[6], gene_id,
                                                        havana_gene,
                                                        havana_transcript,
                                                        transcript_name,
                                                        gene_name))
    self.fileHandle.close()

  def read(self, featureType='exon'):
    """Reads gtf file provided during class initialization

    Stores the byte positions of every feature in a defaultdict(list) object
    named self.features

    Keyword Args:
      featureType: Feature type of a gtf record, the third element of every
        record in the file. Please change this if you want to get specific
        records (e.g. 'UTR') (default 'exon')
    """
    logger.info('Reading %s' % self.fileName)
    # First pass: index byte offsets per transcript_id and remember each
    # transcript's gene_id.
    self.fileHandle = open(self.fileName, 'r+b')
    bytePosition = self.fileHandle.tell()
    for line in self.fileHandle:
      row = line.decode('utf-8').rstrip().split("\t")
      if not row[0].startswith("#") and row[2] == featureType:
        attributes = row[-1].split("; ")
        for attribute in attributes:
          if attribute.startswith("transcript_id"):
            transcript_id = attribute.split(" ")[-1][1:-1]
            self.features[transcript_id].append(bytePosition)
            # NOTE(review): relies on the gene_id attribute appearing
            # BEFORE transcript_id on the same line (true for gencode);
            # otherwise `gene_id` is stale from a previous record or
            # unbound on the first one — confirm input ordering.
            self.geneIds[transcript_id] = gene_id
          if attribute.startswith("gene_id"):
            gene_id = attribute.split(" ")[-1][1:-1]
      bytePosition = self.fileHandle.tell()
    self.fileHandle.close()
    # Second pass: index the same byte offsets per gene_id.
    self.fileHandle = open(self.fileName, 'r+b')
    bytePosition = self.fileHandle.tell()
    for line in self.fileHandle:
      row = line.decode('utf-8').rstrip().split("\t")
      if not row[0].startswith("#") and row[2] == featureType:
        attributes = row[-1].split("; ")
        for attribute in attributes:
          if attribute.startswith("gene_id"):
            gene_id = attribute.split(" ")[-1][1:-1]
            self.geneFeatures[gene_id].append(bytePosition)
      bytePosition = self.fileHandle.tell()
    self.fileHandle.close()
    logger.debug('Reading %s finished' % self.fileName)

  def process(self, name):
    """A method to return a Record object providing genomic information

    Args:
      name: A valid gencode transcript_id

    Returns:
      An object Record containing scaffold, start, end, strand, mirbase_id and
      mirbase_name as its variables for access
    """
    self.fileHandle = open(self.fileName, 'r+b')
    self.mm = mmap.mmap(self.fileHandle.fileno(), 0)
    # Re-parse each indexed exon line for this transcript and yield one
    # Record per exon.
    positions = self.features[name]
    for position in positions:
      self.mm.seek(position)
      row = self.mm.readline().decode('utf-8').rstrip().split("\t")
      attributes = row[-1].split("; ")
      # Defaults for attributes that may be absent on a record.
      _eid = '-'
      _enb = '0'
      for attribute in attributes:
        if attribute.startswith("transcript_type"):
          _tt = attribute.split(" ")[-1][1:-1]
        elif attribute.startswith("transcript_id"):
          _tid = attribute.split(" ")[-1][1:-1]
        elif attribute.startswith("exon_id"):
          _eid = attribute.split(" ")[-1][1:-1]
        elif attribute.startswith("exon_number"):
          _enb = int(attribute.split(" ")[-1])
        elif attribute.startswith("gene_name"):
          _gn = attribute.split(" ")[-1][1:-1]
      record = Record(scaffold=row[0], start=int(row[3]), end=int(row[4]),
                      strand=row[6], transcript_type=_tt, transcript_id=_tid,
                      exon_id=_eid,
                      exon_number=_enb, gene_name=_gn)
      yield record
    self.fileHandle.close()

  def geneExonicRegions(self, df):
    """Given a DataFrame with the exon coordinates from Gencode for a single
    gene, return the total number of coding regions in that gene.
    """
    scaffold = df.iloc[0].scaffold
    strand = df.iloc[0].strand
    gene_type = df.iloc[0].gene_type
    gene_id = df.iloc[0].gene_id
    gene_name = df.iloc[0].gene_name
    start = df.start.min()
    end = df.end.max()
    # Boolean coverage mask over the gene span: True where any exon covers
    # the base. Overlapping exons are thereby merged.
    bp = [False] * (end - start + 1)
    for i in range(df.shape[0]):
      s = df.iloc[i]['start'] - start
      e = df.iloc[i]['end'] - start + 1
      bp[s:e] = [True] * (e - s)
    regions = list(range(start, end + 1))
    # groupby collapses the mask into alternating runs of covered /
    # uncovered bases; each covered run becomes one merged exonic region.
    groups = []
    for i, j in groupby(bp):
      groups.append((i, len(list(j))))
    e_start = 0
    for i in groups:
      e_end = e_start + i[1]
      if i[0]:
        record = Record(scaffold=scaffold, start=regions[e_start],
                        end=regions[e_end - 1], gene_type=gene_type,
                        gene_id=gene_id,
                        gene_name=gene_name, strand=strand)
        yield record
      e_start += i[1]

  def geneProcess(self, name):
    """A method to return a Record object providing genomic information

    Args:
      name: A valid gencode gene_id

    Returns:
      An object Record containing scaffold, start, end, strand, mirbase_id and
      mirbase_name as its variables for access
    """
    self.fileHandle = open(self.fileName, 'r+b')
    self.mm = mmap.mmap(self.fileHandle.fileno(), 0)
    positions = self.geneFeatures[name]
    exons = []
    for position in positions:
      self.mm.seek(position)
      row = self.mm.readline().decode('utf-8').rstrip().split("\t")
      attributes = row[-1].split("; ")
      for attribute in attributes:
        if attribute.startswith("gene_type"):
          _gt = attribute.split(" ")[-1][1:-1]
        elif attribute.startswith("gene_id"):
          _gid = attribute.split(" ")[-1][1:-1]
        elif attribute.startswith("gene_name"):
          _gn = attribute.split(" ")[-1][1:-1]
      exons.append((row[0], int(row[3]), int(row[4]), row[6], _gt, _gid, _gn))
    self.fileHandle.close()
    # Merge the collected exons into non-overlapping exonic regions.
    exons_df = pd.DataFrame(exons, columns=['scaffold', 'start', 'end',
                                            'strand', 'gene_type', 'gene_id',
                                            'gene_name'])
    for record in self.geneExonicRegions(exons_df):
      yield record

  def coordinates(self, name, start=None, end=None):
    """A generator to return a bed record containing genomic coordinates for the
    aligned segment

    Keyword Args:
      start: The alignment start position of the cDNA molecule or the relative
        start of the particular molecule
      end: The alignment end position in the cDNA molecule or the relative end
        of the particular molecule

    Args:
      name: A valid miRNA_primary_transcript name

    Returns:
      A list of tuple(s) of strings containing elements for a bed record. There
      may be more than one because of alternate splicing.
    """
    # Identifiers may carry extra metadata after '|'; only the leading
    # transcript_id is used for the lookup.
    if "|" in name:
      self.name = name.split("|")[0]
    else:
      self.name = name
    positions = {}
    match_positions = []
    records = []
    segments = []
    result_segments = []
    for record in self.process(self.name):
      records.append(record)
    # Walk exons in transcript order so relative cDNA positions accumulate
    # across exon boundaries.
    records.sort(key=lambda x: int(x.exon_number))
    if records[0].strand == '+':
      _start = 1
      for record in records:
        for relative, actual in enumerate(range(record.start, record.end + 1),
                                          start=_start):
          positions[relative] = actual
        _start = relative + 1
      # NOTE(review): range(start, end) here is end-exclusive while
      # GFF.coordinates uses range(start, end + 1) — confirm which
      # convention the callers expect.
      for pos in range(start, end):
        match_positions.append(positions[pos])
      # Classic consecutive-run grouping: index - value is constant within
      # a run of consecutive genomic positions.
      for key, group in groupby(enumerate(match_positions),
                                lambda x: x[0] - x[-1]):
        segment = list(map(itemgetter(1), group))
        segments.append([segment[0], segment[-1]])
      # Attribute each contiguous genomic segment back to its exon.
      for segment in segments:
        for record in records:
          if segment[0] >= record.start and segment[1] <= record.end:
            result_segments.append((record.scaffold, segment[0], segment[1],
                                    record.transcript_id + '|' + record.gene_name,
                                    0, record.strand))
    elif records[0].strand == '-':
      _start = 1
      for record in records:
        # For '-' strand the genomic walk is reversed so that relative
        # position 1 maps to the transcript's 5' end.
        for relative, actual in enumerate(reversed(range(record.start,
                                                         record.end + 1)),
                                          start=_start):
          positions[relative] = actual
        _start = relative + 1
      for pos in range(start, end):
        match_positions.append(positions[pos])
      for key, group in groupby(enumerate(reversed(match_positions)),
                                lambda x: x[0] - x[-1]):
        segment = list(map(itemgetter(1), group))
        segments.append([segment[0], segment[-1]])
      for segment in segments:
        for record in records:
          if segment[0] >= record.start and segment[1] <= record.end:
            result_segments.append((record.scaffold, segment[0], segment[1],
                                    record.transcript_id + '|' + record.gene_name,
                                    0, record.strand))
    if len(result_segments) == 0:
      # Nothing mapped — dump diagnostics for post-mortem debugging.
      logger.debug('%s, %s, %s' % (name, start, end))
      logger.debug('%s' % str(segments))
      for r in records:
        logger.debug('%s %s %s %s' % (r.scaffold, r.strand,
                                      r.start, r.end))
    return result_segments
class SAM:
  """SAM file parser for parsing bowtie2 generated files

  This class uses memory-mapped file object to read a sam file

  Attributes:
    fileName: A sam file path
  """

  def __init__(self, fileName=None):
    self.fileName = fileName
    # queryName -> byte offset of the record line; filled in by read().
    self.records = {}

  def read(self, flag=0):
    """Reads sam file provided during class initialization

    Stores the byte position of every record based on the keyword arg flag
    provided, to a dict object named self.records

    Keyword Args:
      flag: The SAM alignment flag for a record. For default, it uses the
        primary alignment for every record and ignores secondary alignments
        (default 0)
    """
    logger.info('Reading %s' % self.fileName)
    self.fileHandle = open(self.fileName, 'r+b')
    bytePosition = self.fileHandle.tell()
    for line in self.fileHandle:
      read = line.decode('utf-8').split("\t")
      # Skip header lines (@...) and keep only records whose FLAG column
      # matches exactly.
      if not read[0].startswith("@") and read[1] == str(flag):
        self.records[read[0]] = bytePosition
      bytePosition = self.fileHandle.tell()
    self.fileHandle.close()
    logger.debug('Reading %s finished' % self.fileName)

  def access(self, queryName):
    """Provides random access of a record from the sam file

    Args:
      queryName: The query name of the read from the sam file

    Returns:
      A list generated after splitting the record line from sam file
    """
    self.fileHandle = open(self.fileName, 'r+b')
    self.mm = mmap.mmap(self.fileHandle.fileno(), 0)
    self.mm.seek(self.records[queryName])
    row = self.mm.readline().decode('utf-8').rstrip().split("\t")
    self.fileHandle.close()
    return self.pretty(row)

  def filterPotentialChimeras(self, min_length=30, flag=0, target=None):
    """Generated a filtered fasta file from a sam file

    This filtered fasta file contains reads that can be potentially chimeras.
    The criteria for filtering is based on the minimum length

    Keyword Args:
      min_length: To be selected as a potential chimera, this is the minimum
        read length (default 30)
      flag: The SAM alignment flag describing the type of alignment (default 0)
      target: The prefix for output file

    Returns:
      The path of the filtered fasta file.
    """
    logger.debug('Filtering {} for potential chimeras'.format(target))
    target = '{}.filter.fasta'.format(target.rpartition(".")[0])
    if os.path.exists(target):
      logger.info('Skipping filtering for {}'.format(target))
    else:
      with open(target, 'w') as oH:
        with open(self.fileName) as iH:
          for row in csv.reader(iH, delimiter="\t"):
            if not row[0].startswith('@') and row[1] == str(flag):
              # Honour the caller-supplied threshold; this was previously a
              # hard-coded 30, silently ignoring the `min_length` parameter.
              if len(row[9]) >= min_length:
                print(textwrap.fill('>%s' % row[0], width=80), file=oH)
                print(textwrap.fill('%s' % row[9], width=80), file=oH)
      logger.debug('Filtering finished')
    return target

  def pretty(self, row):
    """Builds a Record object out of a raw, split sam record line.

    Args:
      row: a sam record already split on tabs

    Returns:
      A Record with reference id, start/end, MD tag, sequence and cigar info.
    """
    refId = row[2]
    start = int(row[3])
    # Scan the optional fields for the MD (mismatch) tag.
    for i in row[10:]:
      if i.startswith('MD'):
        mismatchInfo = i
    sequence = row[9]
    cigar = row[5]
    cigarString = clashchimeras.methods.convertCigar(row[5])
    # Matches plus deletions give the reference footprint of the alignment.
    matchLength = cigarString.count("M") + cigarString.count("D")
    end = start + matchLength - 1
    record = Record(refId=refId, start=start, mismatchInfo=mismatchInfo,
                    sequence=sequence, cigarString=cigarString,
                    matchLength=matchLength,
                    cigar=cigar, end=end)
    return record
class Output:
"""Contains methods for writing output files
This class is used to generate every kind of output generated by this
package which includes plain text, ansi colored text and bed file
Attributes:
target: A prefix for output file which will be automatically followed by
extension (default 'wip')
overlap: Minimum overlap to be set between two molecules when determining
chimera (default 4)
gap: Maximum gap (number of unknown nucleotides) to be allowed between
two molecules within a chimera (default 9)
"""
def __init__(self,
target=None,
smallRNABed=False,
targetRNABed=False,
overlap=4,
gap=9):
self.target = target
self.overlap = overlap
self.gap = gap
if smallRNABed:
self.smallRNABedHandle = open('{}.smallRNA.bed'.format(self.target), 'w')
print('# BED locations of smallRNA part of the identified chimera',
file=self.smallRNABedHandle)
self.smallRNABedCSV = csv.writer(self.smallRNABedHandle, delimiter="\t")
self.smallRNABedCSV.writerow(
['# The name field represents the following:'])
self.smallRNABedCSV.writerow(
['# E.g. 201980-1-48|hsa-mir-100==PAPSS1'])
self.smallRNABedCSV.writerow(
['# 201980-1-48 is the fasta identifier'])
self.smallRNABedCSV.writerow(
["# 201980 is the unique identifier"])
self.smallRNABedCSV.writerow(
["# 1 is the number of times that sequence was observed in raw "
"fastq "])
self.smallRNABedCSV.writerow(
["# 48 is the length of the sequence"])
self.smallRNABedCSV.writerow(
['# hsa-mir-100 represents the smallRNA transcript'])
self.smallRNABedCSV.writerow(
['# PAPSS1 represents the gene symbol for targetRNA transcript '
'transcript '])
if targetRNABed:
self.targetRNABedHandle = open('{}.targetRNA.bed'.format(self.target),
'w')
self.targetRNABedCSV = csv.writer(self.targetRNABedHandle, delimiter="\t")
self.targetRNABedCSV.writerow(
['# The name field represents the following:'])
self.targetRNABedCSV.writerow(
['# E.g. 136019-1-48|ENST00000375759.6|SPEN==hsa-mir-103a-2'])
self.targetRNABedCSV.writerow(
['# 136019-1-48 is the fasta identifier'])
self.targetRNABedCSV.writerow(
["# 136019 is the unique identifier"])
self.targetRNABedCSV.writerow(
["# 1 is the number of times that sequence was observed in raw "
"fastq "])
self.targetRNABedCSV.writerow(
["# 48 is the length of the sequence"])
self.targetRNABedCSV.writerow(
["# ENST00000375759.6 is the targetRNA transcript identifier"])
self.targetRNABedCSV.writerow(
['# SPEN is the gene symbol for for targetRNA transcript '
'ENST00000375759.6'])
self.targetRNABedCSV.writerow(
['# hsa-mir-103a-2 represents the smallRNA transcript '])
self.hybWriter = open('%s.chimeras.tsv' % self.target, 'w')
self.hybComments()
def hybComments(self):
print("# fasta Identifier: The identifier in <sample>.unique.fasta. ",
"#\tE.g. 123456-3-68 ",
"#\t123456 is the unique identifier",
"#\t3 is the number of times that sequence was observed in raw "
"fastq ",
"#\t68 is the length of the sequence", sep="\n", file=self.hybWriter)
print("# smallRNA: The cDNA ID of the type of RNA labelled as smallRNA in "
"the analysis",
"#\tE.g. hsa-let-7b (miRBase identifier)",
"#\tE.g. ENST00000619178.1|SNORD3D| (Gencode snoRNA identifier)",
sep="\n", file=self.hybWriter)
print("# smallRNA_start: cDNA alignment start position of the smallRNA "
"part of the chimera", file=self.hybWriter)
print("# smallRNA_MDtag: Showing the MD tag from the smallRNA SAM "
"alignment for the chimera",
"#\tSAM file format specification",
"#\thttp://samtools.github.io/hts-specs/SAMv1.pdf",
"#\tMD Z String for mismatching positions.Regex:[0-9]+((["
"A-Z]|\^[A-Z]+)[0-9]+)*9", sep="\n", file=self.hybWriter)
print('# smallRNA_cigar: Cigar string from the smallRNA SAM alignment for '
'the chimera',
"#\tSAM file format specification",
"#\thttp://samtools.github.io/hts-specs/SAMv1.pdf",
'#\tSee CIGAR in the file', sep="\n", file=self.hybWriter)
print('# arbitrary_chimera: The chimera representation indicating what '
'part of the sequence represents smallRNA and targetRNA',
'#\t{ is representing a match with smallRNA',
'#\t} is representing a match with targetRNA',
'#\t# is representing unaligned sequences (identified as --gap -ga)',
'#\t- is representing a deletion (D in cigar string)',
'#\t+ is representing a deletion (I in cigar string)',
'#\tE.g {{{{{{{{-{{{{{{{{{{{{{##}}}}}}}}}}+}}}}}}}}}}}}}}}}}}}}}}'
'#\tE.g The first 22 nucleotides are aligning to smallRNA cDNA',
'#\tE.g The last 33 nucleotides are aligning to targetRNA cDNA',
sep="\n", file=self.hybWriter)
print('# read_sequence: The actual sequence that is appeared in raw '
'reads', file=self.hybWriter)
print("# targetRNA: The cDNA ID of the type of RNA labelled as targetRNA "
"in "
"the analysis",
"#\tE.g. hsa-let-7b (miRBase identifier)",
"#\tE.g. ENST00000619178.1|SNORD3D| (Gencode snoRNA identifier)",
sep="\n", file=self.hybWriter)
print("# targetRNA_start: cDNA alignment start position of the targetRNA "
"part of the chimera", file=self.hybWriter)
print("# targetRNA_MDtag: Showing the MD tag from the targetRNA SAM "
"alignment for the chimera",
"#\tSAM file format specification",
"#\thttp://samtools.github.io/hts-specs/SAMv1.pdf",
"#\tMD Z String for mismatching positions.Regex:[0-9]+((["
"A-Z]|\^[A-Z]+)[0-9]+)*9", sep="\n", file=self.hybWriter)
print('# targetRNA_cigar: Cigar string from the targetRNA SAM alignment '
'for '
'the chimera',
"#\tSAM file format specification",
"#\thttp://samtools.github.io/hts-specs/SAMv1.pdf",
'#\tSee CIGAR in the file', sep="\n", file=self.hybWriter)
print("# fasta_Identifier", "smallRNA", "smallRNA_start", "smallRNA_MDtag",
"smallRNA_cigar", "arbitrary_chimera", "read_sequence", "targetRNA",
"targetRNA_start", "targetRNA_MDtag", "targetRNA_cigar", sep="\t",
file=self.hybWriter)
def writeTargetRNABed(self, query, targetRNASegments, smallRNA):
if "ENS" in smallRNA and "|" in smallRNA:
_smallRNA = smallRNA.split("|")[5]
else:
_smallRNA = smallRNA
for segment in targetRNASegments:
_segment = list(segment)
_segment[3] = query + "|" + _segment[3] + "==" + _smallRNA
self.targetRNABedCSV.writerow(_segment)
def writeSmallRNABed(self, query, smallRNASegments, targetRNA):
    """Write one BED row per smallRNA segment to the smallRNA BED CSV.

    Mirrors writeTargetRNABed: the name field (column 4) becomes
    ``<query>|<original name>==<targetRNA label>``.
    """
    # Gencode-style ids carry the short display label in field 5.
    if "ENS" in targetRNA and "|" in targetRNA:
        partnerLabel = targetRNA.split("|")[5]
    else:
        partnerLabel = targetRNA
    for segment in smallRNASegments:
        row = list(segment)
        row[3] = query + "|" + row[3] + "==" + partnerLabel
        self.smallRNABedCSV.writerow(row)
def write(self, queryName, smallRNA, targetRNA):
    """Emit one tab-separated hyb record for a smallRNA/targetRNA pair.

    The chimera string and the region labels are computed by
    clashchimeras.methods helpers; the row layout matches the header
    printed by printHeader (11 tab-separated columns).
    """
    chimera = clashchimeras.methods.chimeraOrNot(
        smallRNA.cigarString, targetRNA.cigarString,
        overlap=self.overlap, gap=self.gap)
    smallRegion = clashchimeras.methods.findRegion(smallRNA)
    targetRegion = clashchimeras.methods.findRegion(targetRNA)
    columns = (queryName, smallRegion, smallRNA.start, smallRNA.mismatchInfo,
               smallRNA.cigar, chimera, smallRNA.sequence, targetRegion,
               targetRNA.start, targetRNA.mismatchInfo, targetRNA.cigar)
    print(*columns, sep="\t", file=self.hybWriter)
def __del__(self):
    """Close the hyb output handle when the object is collected.

    Guarded with getattr because __del__ also runs on partially
    initialised objects (e.g. if __init__ raised before ``hybWriter``
    was assigned), where the bare attribute access would raise
    AttributeError during interpreter teardown.
    """
    writer = getattr(self, 'hybWriter', None)
    if writer is not None:
        writer.close()
class Fasta:
    """Extracts spliced transcript sequences of a chosen biotype from a
    genome FASTA, using exon coordinates read from a GTF annotation.
    """

    def __init__(self, genome=None, gtf=None):
        self.genome = genome
        self.gtf = gtf
        # Indexed random access into the genome FASTA.
        self.faidx = pyfaidx.Fasta(self.genome)

    def getBiotype(self, output=None, biotype=None):
        """Write a FASTA file of spliced cDNA sequences for *biotype*.

        Exits the process with an error log message when *output* is not
        provided (matching the original command-line oriented behaviour).
        """
        self.sequences = []
        annotation = GTF(fileName=self.gtf)
        # tRNAscan-derived entries are stored under a distinct feature type.
        if biotype == 'tRNA':
            annotation.readBiotype(biotype=biotype, featureType='tRNAscan')
        else:
            annotation.readBiotype(biotype=biotype)
        for transcript_id, exons in annotation.biotypeFeatures.items():
            # Sort on exon[0] — presumably the exon index, which defines the
            # splicing order (TODO confirm against GTF.readBiotype).
            exons.sort(key=itemgetter(0))
            spliced = ''
            for exon in exons:
                if exon[4] == '-':
                    # Negating a pyfaidx slice yields its reverse complement.
                    spliced += (-self.faidx[exon[1]][exon[2] - 1:exon[3]]).seq
                elif exon[4] == '+':
                    spliced += self.faidx[exon[1]][exon[2] - 1:exon[3]].seq
            header = '{}|{}|{}|{}|{}|{}|{}'.format(transcript_id,
                                                   exons[0][5], exons[0][6],
                                                   exons[0][7], exons[0][8],
                                                   exons[0][9], len(spliced))
            self.sequences.append(SeqRecord(seq=Seq(spliced), id=header,
                                            description=''))
        if not output:
            logger.error('Please provide output file..')
            sys.exit()
        else:
            logger.info('Writing {}'.format(output))
            SeqIO.write(self.sequences, output, 'fasta')
class Fastq:
    """Collapses a (possibly gzipped) FASTQ file into unique sequences.

    Duplicate reads are counted and written out as a FASTA file ranked by
    abundance, with headers of the form ``>rank-count-length``.
    """

    def __init__(self, fileName=None, compressed=False):
        self.fileName = fileName
        self.compressed = compressed
        # A FASTQ record spans four lines.
        self.n = 4
        self.sequences = Counter()
        self.uniqueOutput = fileName.rpartition(".")[0] + '.unique.fasta'

    def recordIterator(self):
        """Yield successive FASTQ records as lists of decoded lines."""
        record, filled = [], 0
        for raw in self.fileHandle:
            if filled == self.n:
                yield record
                record, filled = [], 0
            record.append(raw.decode().rstrip())
            filled += 1
        # The file never ends mid-record, so flush the final one.
        yield record

    def createUnique(self):
        """Count duplicate reads and write them as a ranked unique FASTA."""
        opener = gzip.open if self.compressed else open
        self.fileHandle = opener(self.fileName, 'rb')
        logger.info('Reading {}'.format(self.fileName))
        for record in self.recordIterator():
            # record[1] is the sequence line of the FASTQ record.
            self.sequences[record[1]] += 1
        logger.info('Writing {}'.format(self.uniqueOutput))
        with open(self.uniqueOutput, 'w') as handle:
            ranked = sorted(self.sequences.items(), key=itemgetter(1),
                            reverse=True)
            for rank, (sequence, count) in enumerate(ranked, start=1):
                print('>{}-{}-{}'.format(rank, count, len(sequence)),
                      file=handle)
                print(textwrap.fill(sequence, width=80), file=handle)
        logger.debug('Finished writing {}'.format(self.uniqueOutput))
        self.fileHandle.close()
class Record:
    """Lightweight attribute bag: keyword arguments become attributes.

    Preferred over a plain dict for readable attribute access; shared
    dependency of the GTF and GFF parser classes.
    """

    def __init__(self, **attributes):
        self.__dict__.update(attributes)
| |
# Copyright 2016 EMC Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinder.tests.functional import functional_helpers
class GroupSnapshotsTest(functional_helpers._FunctionalTestBase):
    """Functional tests for the generic group snapshot API.

    Exercises creating/deleting group snapshots and creating groups from a
    group snapshot or from a source group, polling between steps because
    the operations complete asynchronously via the message queue.
    """

    _vol_type_name = 'functional_test_type'
    _grp_type_name = 'functional_grp_test_type'
    # Generic groups / group snapshots require API microversion 3.19+.
    osapi_version_major = '3'
    osapi_version_minor = '19'

    def setUp(self):
        super(GroupSnapshotsTest, self).setUp()
        self.volume_type = self.api.create_type(self._vol_type_name)
        self.group_type = self.api.create_group_type(self._grp_type_name)

    def _get_flags(self):
        f = super(GroupSnapshotsTest, self)._get_flags()
        f['volume_driver'] = (
            'cinder.tests.fake_driver.FakeLoggingVolumeDriver')
        f['default_volume_type'] = self._vol_type_name
        f['default_group_type'] = self._grp_type_name
        return f

    def test_get_group_snapshots_summary(self):
        """Simple check that listing group snapshots works."""
        grp_snaps = self.api.get_group_snapshots(False)
        self.assertIsNotNone(grp_snaps)

    def test_get_group_snapshots(self):
        """Simple check that listing group snapshots works."""
        grp_snaps = self.api.get_group_snapshots()
        self.assertIsNotNone(grp_snaps)

    def test_create_and_delete_group_snapshot(self):
        """Creates and deletes a group snapshot."""

        # Create group
        created_group = self.api.post_group(
            {'group': {'group_type': self.group_type['id'],
                       'volume_types': [self.volume_type['id']]}})
        self.assertTrue(created_group['id'])
        created_group_id = created_group['id']

        # Check it's there
        found_group = self._poll_group_while(created_group_id,
                                             ['creating'])
        self.assertEqual(created_group_id, found_group['id'])
        self.assertEqual(self.group_type['id'], found_group['group_type'])
        self.assertEqual('available', found_group['status'])

        # Create volume
        created_volume = self.api.post_volume(
            {'volume': {'size': 1,
                        'group_id': created_group_id,
                        'volume_type': self.volume_type['id']}})
        self.assertTrue(created_volume['id'])
        created_volume_id = created_volume['id']

        # Check it's there
        found_volume = self.api.get_volume(created_volume_id)
        self.assertEqual(created_volume_id, found_volume['id'])
        self.assertEqual(self._vol_type_name, found_volume['volume_type'])
        self.assertEqual(created_group_id, found_volume['group_id'])

        # Wait (briefly) for creation. Delay is due to the 'message queue'
        found_volume = self._poll_volume_while(created_volume_id, ['creating'])

        # It should be available...
        self.assertEqual('available', found_volume['status'])

        # Create group snapshot
        created_group_snapshot = self.api.post_group_snapshot(
            {'group_snapshot': {'group_id': created_group_id}})
        self.assertTrue(created_group_snapshot['id'])
        created_group_snapshot_id = created_group_snapshot['id']

        # Check it's there
        found_group_snapshot = self._poll_group_snapshot_while(
            created_group_snapshot_id, ['creating'])
        self.assertEqual(created_group_snapshot_id, found_group_snapshot['id'])
        self.assertEqual(created_group_id,
                         found_group_snapshot['group_id'])
        self.assertEqual('available', found_group_snapshot['status'])

        # Delete the group snapshot
        self.api.delete_group_snapshot(created_group_snapshot_id)

        # Wait (briefly) for deletion. Delay is due to the 'message queue'
        found_group_snapshot = self._poll_group_snapshot_while(
            created_group_snapshot_id, ['deleting'])

        # Delete the original group
        self.api.delete_group(created_group_id,
                              {'delete': {'delete-volumes': True}})

        # Wait (briefly) for deletion. Delay is due to the 'message queue'
        found_volume = self._poll_volume_while(created_volume_id, ['deleting'])
        found_group = self._poll_group_while(created_group_id, ['deleting'])

        # Should be gone
        self.assertFalse(found_group_snapshot)
        self.assertFalse(found_volume)
        self.assertFalse(found_group)

    def test_create_group_from_group_snapshot(self):
        """Creates a group from a group snapshot."""

        # Create group
        created_group = self.api.post_group(
            {'group': {'group_type': self.group_type['id'],
                       'volume_types': [self.volume_type['id']]}})
        self.assertTrue(created_group['id'])
        created_group_id = created_group['id']

        # Check it's there
        found_group = self._poll_group_while(created_group_id,
                                             ['creating'])
        self.assertEqual(created_group_id, found_group['id'])
        self.assertEqual(self.group_type['id'], found_group['group_type'])
        self.assertEqual('available', found_group['status'])

        # Create volume
        created_volume = self.api.post_volume(
            {'volume': {'size': 1,
                        'group_id': created_group_id,
                        'volume_type': self.volume_type['id']}})
        self.assertTrue(created_volume['id'])
        created_volume_id = created_volume['id']

        # Check it's there
        found_volume = self.api.get_volume(created_volume_id)
        self.assertEqual(created_volume_id, found_volume['id'])
        self.assertEqual(self._vol_type_name, found_volume['volume_type'])
        self.assertEqual(created_group_id, found_volume['group_id'])

        # Wait (briefly) for creation. Delay is due to the 'message queue'
        found_volume = self._poll_volume_while(created_volume_id, ['creating'])

        # It should be available...
        self.assertEqual('available', found_volume['status'])

        # Create group snapshot
        created_group_snapshot = self.api.post_group_snapshot(
            {'group_snapshot': {'group_id': created_group_id}})
        self.assertTrue(created_group_snapshot['id'])
        created_group_snapshot_id = created_group_snapshot['id']

        # Check it's there
        found_group_snapshot = self._poll_group_snapshot_while(
            created_group_snapshot_id, ['creating'])
        self.assertEqual(created_group_snapshot_id, found_group_snapshot['id'])
        self.assertEqual(created_group_id,
                         found_group_snapshot['group_id'])
        self.assertEqual('available', found_group_snapshot['status'])

        # Create group from group snapshot
        created_group_from_snap = self.api.post_group_from_src(
            {'create-from-src': {
                'group_snapshot_id': created_group_snapshot_id}})
        self.assertTrue(created_group_from_snap['id'])
        created_group_from_snap_id = created_group_from_snap['id']

        # Check it's there
        # NOTE(review): other call sites pass volume *ids* to
        # _poll_volume_while; here whole volume dicts are passed — confirm
        # the helper accepts both forms.
        found_volumes = self.api.get_volumes()
        self._poll_volume_while(found_volumes[0], ['creating'])
        self._poll_volume_while(found_volumes[1], ['creating'])
        found_group_from_snap = self._poll_group_while(
            created_group_from_snap_id, ['creating'])
        self.assertEqual(created_group_from_snap_id,
                         found_group_from_snap['id'])
        self.assertEqual(created_group_snapshot_id,
                         found_group_from_snap['group_snapshot_id'])
        self.assertEqual(self.group_type['id'],
                         found_group_from_snap['group_type'])
        self.assertEqual('available', found_group_from_snap['status'])

        # Delete the group from snap
        self.api.delete_group(created_group_from_snap_id,
                              {'delete': {'delete-volumes': True}})

        # Wait (briefly) for deletion. Delay is due to the 'message queue'
        found_group_from_snap = self._poll_group_while(
            created_group_from_snap_id, ['deleting'])

        # Delete the group snapshot
        self.api.delete_group_snapshot(created_group_snapshot_id)

        # Wait (briefly) for deletion. Delay is due to the 'message queue'
        found_group_snapshot = self._poll_group_snapshot_while(
            created_group_snapshot_id, ['deleting'])

        # Delete the original group
        self.api.delete_group(created_group_id,
                              {'delete': {'delete-volumes': True}})

        # Wait (briefly) for deletion. Delay is due to the 'message queue'
        found_volume = self._poll_volume_while(created_volume_id, ['deleting'])
        found_group = self._poll_group_while(created_group_id, ['deleting'])

        # Should be gone
        self.assertFalse(found_group_from_snap)
        self.assertFalse(found_group_snapshot)
        self.assertFalse(found_volume)
        self.assertFalse(found_group)

    def test_create_group_from_source_group(self):
        """Creates a group from a source group."""

        # Create group
        created_group = self.api.post_group(
            {'group': {'group_type': self.group_type['id'],
                       'volume_types': [self.volume_type['id']]}})
        self.assertTrue(created_group['id'])
        created_group_id = created_group['id']

        # Check it's there
        found_group = self._poll_group_while(created_group_id,
                                             ['creating'])
        self.assertEqual(created_group_id, found_group['id'])
        self.assertEqual(self.group_type['id'], found_group['group_type'])
        self.assertEqual('available', found_group['status'])

        # Create volume
        created_volume = self.api.post_volume(
            {'volume': {'size': 1,
                        'group_id': created_group_id,
                        'volume_type': self.volume_type['id']}})
        self.assertTrue(created_volume['id'])
        created_volume_id = created_volume['id']

        # Check it's there
        found_volume = self.api.get_volume(created_volume_id)
        self.assertEqual(created_volume_id, found_volume['id'])
        self.assertEqual(self._vol_type_name, found_volume['volume_type'])
        self.assertEqual(created_group_id, found_volume['group_id'])

        # Wait (briefly) for creation. Delay is due to the 'message queue'
        found_volume = self._poll_volume_while(created_volume_id, ['creating'])

        # It should be available...
        self.assertEqual('available', found_volume['status'])

        # Test create group from source group
        created_group_from_group = self.api.post_group_from_src(
            {'create-from-src': {
                'source_group_id': created_group_id}})
        self.assertTrue(created_group_from_group['id'])
        created_group_from_group_id = created_group_from_group['id']

        # Check it's there
        # NOTE(review): see test_create_group_from_group_snapshot — volume
        # dicts rather than ids are passed to _poll_volume_while here.
        found_volumes = self.api.get_volumes()
        self._poll_volume_while(found_volumes[0], ['creating'])
        self._poll_volume_while(found_volumes[1], ['creating'])
        found_group_from_group = self._poll_group_while(
            created_group_from_group_id, ['creating'])
        self.assertEqual(created_group_from_group_id,
                         found_group_from_group['id'])
        self.assertEqual(created_group_id,
                         found_group_from_group['source_group_id'])
        self.assertEqual(self.group_type['id'],
                         found_group_from_group['group_type'])
        self.assertEqual('available', found_group_from_group['status'])

        # Delete the group from group
        self.api.delete_group(created_group_from_group_id,
                              {'delete': {'delete-volumes': True}})

        # Wait (briefly) for deletion. Delay is due to the 'message queue'
        found_group_from_group = self._poll_group_while(
            created_group_from_group_id, ['deleting'])

        # Delete the original group
        self.api.delete_group(created_group_id,
                              {'delete': {'delete-volumes': True}})

        # Wait (briefly) for deletion. Delay is due to the 'message queue'
        found_volume = self._poll_volume_while(created_volume_id, ['deleting'])
        found_group = self._poll_group_while(created_group_id, ['deleting'])

        # Should be gone
        self.assertFalse(found_group_from_group)
        self.assertFalse(found_volume)
        self.assertFalse(found_group)

    def test_reset_group_snapshot(self):
        """Resets a group snapshot's status, then cleans everything up."""
        # Create group
        group1 = self.api.post_group(
            {'group': {'group_type': self.group_type['id'],
                       'volume_types': [self.volume_type['id']]}})
        self.assertTrue(group1['id'])
        group_id = group1['id']
        self._poll_group_while(group_id, ['creating'])

        # Create volume
        created_volume = self.api.post_volume(
            {'volume': {'size': 1,
                        'group_id': group_id,
                        'volume_type': self.volume_type['id']}})
        self.assertTrue(created_volume['id'])
        created_volume_id = created_volume['id']
        self._poll_volume_while(created_volume_id, ['creating'])

        # Create group snapshot
        group_snapshot1 = self.api.post_group_snapshot(
            {'group_snapshot': {'group_id': group_id}})
        self.assertTrue(group_snapshot1['id'])
        group_snapshot_id = group_snapshot1['id']

        # Fixed: previously passed the bare string 'creating', which only
        # worked because substring containment mimicked list membership;
        # pass a list like every other poll call.
        self._poll_group_snapshot_while(group_snapshot_id, ['creating'])

        group_snapshot1 = self.api.get_group_snapshot(group_snapshot_id)
        self.assertEqual("available", group_snapshot1['status'])

        # reset group snapshot status
        self.api.reset_group_snapshot(group_snapshot_id,
                                      {"reset_status": {"status": "error"}})

        group_snapshot1 = self.api.get_group_snapshot(group_snapshot_id)
        self.assertEqual("error", group_snapshot1['status'])

        # Delete group, volume and group snapshot
        self.api.delete_group_snapshot(group_snapshot_id)
        found_group_snapshot = self._poll_group_snapshot_while(
            group_snapshot_id, ['deleting'])
        self.api.delete_group(group_id,
                              {'delete': {'delete-volumes': True}})

        found_volume = self._poll_volume_while(created_volume_id, ['deleting'])
        found_group = self._poll_group_while(group_id, ['deleting'])

        # Created resources should be gone
        self.assertFalse(found_group_snapshot)
        self.assertFalse(found_volume)
        self.assertFalse(found_group)
| |
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility functions to perform Xcode-style build steps.
These functions are executed via gyp-mac-tool when using the Makefile generator.
"""
import fcntl
import fnmatch
import glob
import json
import os
import plistlib
import re
import shutil
import string
import subprocess
import sys
import tempfile
def main(args):
  """Entry point: dispatch |args| to a MacTool and exit with its code.

  A None return from Dispatch means success with no explicit exit code,
  in which case the process is left to exit normally.
  """
  tool = MacTool()
  return_code = tool.Dispatch(args)
  if return_code is not None:
    sys.exit(return_code)
class MacTool(object):
"""This class performs all the Mac tooling steps. The methods can either be
executed directly, or dispatched from an argument list."""
def Dispatch(self, args):
  """Dispatches a string command to a method.

  args[0] is a dashed command name (e.g. 'copy-info-plist') which is mapped
  to an Exec* method; the remaining arguments are forwarded to it.
  """
  if not args:
    raise Exception("Not enough arguments")
  handler = getattr(self, "Exec%s" % self._CommandifyName(args[0]))
  return handler(*args[1:])
def _CommandifyName(self, name_string):
  """Transforms a tool name like copy-info-plist to CopyInfoPlist."""
  # title() capitalizes each dash-separated word, then the dashes go away.
  titled = name_string.title()
  return titled.replace('-', '')
def ExecCopyBundleResource(self, source, dest, convert_to_binary):
  """Copies a resource file to the bundle/Resources directory, performing any
  necessary compilation on each resource."""
  extension = os.path.splitext(source)[1].lower()
  if os.path.isdir(source):
    # Copy tree.
    # TODO(thakis): This copies file attributes like mtime, while the
    # single-file branch below doesn't. This should probably be changed to
    # be consistent with the single-file branch.
    if os.path.exists(dest):
      shutil.rmtree(dest)
    shutil.copytree(source, dest)
  elif extension in ('.xib', '.storyboard'):
    # Both formats are compiled by ibtool.
    return self._CopyXIBFile(source, dest)
  elif extension == '.strings':
    self._CopyStringsFile(source, dest)
  else:
    shutil.copy(source, dest)
  # Optionally re-encode plist-like outputs as binary plists.
  if extension in ('.plist', '.strings') and convert_to_binary == 'True':
    self._ConvertToBinary(dest)
def _CopyXIBFile(self, source, dest):
  """Compiles a XIB file with ibtool into a binary plist in the bundle."""

  # ibtool sometimes crashes with relative paths. See crbug.com/314728.
  base = os.path.dirname(os.path.realpath(__file__))
  # NOTE(review): os.path.relpath() returns a non-empty (truthy) string even
  # for absolute inputs, so both paths are always joined onto |base|. This is
  # harmless on POSIX because join() discards |base| when the second argument
  # is absolute — confirm before relying on this elsewhere.
  if os.path.relpath(source):
    source = os.path.join(base, source)
  if os.path.relpath(dest):
    dest = os.path.join(base, dest)
  args = ['xcrun', 'ibtool', '--errors', '--warnings', '--notices']
  # Lexicographic compare of Xcode's version string; '0700' ~ Xcode 7.0.
  if os.environ['XCODE_VERSION_ACTUAL'] > '0700':
    args.extend(['--auto-activate-custom-fonts'])
  if 'IPHONEOS_DEPLOYMENT_TARGET' in os.environ:
    args.extend([
      '--target-device', 'iphone', '--target-device', 'ipad',
      '--minimum-deployment-target',
      os.environ['IPHONEOS_DEPLOYMENT_TARGET'],
    ])
  else:
    args.extend([
      '--target-device', 'mac',
      '--minimum-deployment-target',
      os.environ['MACOSX_DEPLOYMENT_TARGET'],
    ])
  args.extend(['--output-format', 'human-readable-text', '--compile', dest,
               source])

  # Filter ibtool's output: drop "is clipping its content" notes, and only
  # emit a /* section */ header when at least one line under it survives.
  ibtool_section_re = re.compile(r'/\*.*\*/')
  ibtool_re = re.compile(r'.*note:.*is clipping its content')
  ibtoolout = subprocess.Popen(args, stdout=subprocess.PIPE)
  current_section_header = None
  for line in ibtoolout.stdout:
    if ibtool_section_re.match(line):
      current_section_header = line
    elif not ibtool_re.match(line):
      if current_section_header:
        sys.stdout.write(current_section_header)
        current_section_header = None
      sys.stdout.write(line)
  # NOTE(review): Popen.returncode stays None until poll()/wait() is called;
  # reading stdout to EOF does not set it — confirm callers tolerate None.
  return ibtoolout.returncode
def _ConvertToBinary(self, dest):
  """Rewrites the plist at |dest| in place into binary1 format via plutil."""
  command = ['xcrun', 'plutil', '-convert', 'binary1', '-o', dest, dest]
  subprocess.check_call(command)
def _CopyStringsFile(self, source, dest):
  """Copies a .strings file using iconv to reconvert the input into UTF-16."""
  # Fall back to UTF-8 when no BOM identifies the encoding.
  input_code = self._DetectInputEncoding(source) or "UTF-8"

  # Xcode's CpyCopyStringsFile / builtin-copyStrings seems to call
  # CFPropertyListCreateFromXMLData() behind the scenes; at least it prints
  #     CFPropertyListCreateFromXMLData(): Old-style plist parser: missing
  #     semicolon in dictionary.
  # on invalid files. Do the same kind of validation.
  import CoreFoundation
  s = open(source, 'rb').read()
  d = CoreFoundation.CFDataCreate(None, s, len(s))
  _, error = CoreFoundation.CFPropertyListCreateFromXMLData(None, d, 0, None)
  if error:
    # Invalid .strings input: silently skip, matching Xcode's behavior.
    return

  # Re-encode the raw bytes to UTF-16 (decode uses the detected encoding).
  fp = open(dest, 'wb')
  fp.write(s.decode(input_code).encode('UTF-16'))
  fp.close()
def _DetectInputEncoding(self, file_name):
  """Reads the first few bytes from file_name and tries to guess the text
  encoding by its BOM. Returns None as a guess if it can't detect it."""
  fp = open(file_name, 'rb')
  try:
    header = fp.read(3)
  except Exception:
    # Fixed: the original read `except e:` — `e` is an undefined name, so
    # any read error raised NameError instead of being handled. Catch the
    # error and fall back to "unknown encoding".
    fp.close()
    return None
  fp.close()
  # Match the leading bytes against known BOM signatures.
  if header.startswith("\xFE\xFF"):
    return "UTF-16"
  elif header.startswith("\xFF\xFE"):
    return "UTF-16"
  elif header.startswith("\xEF\xBB\xBF"):
    return "UTF-8"
  else:
    return None
def ExecCopyInfoPlist(self, source, dest, convert_to_binary, *keys):
  """Copies the |source| Info.plist to the destination directory |dest|,
  expanding ${VAR}-style environment references and synthesizing extra keys.

  keys: optional; keys[0] is a JSON object of key/value pairs merged into
  the plist before variable expansion.
  """
  # Read the source Info.plist into memory.
  fd = open(source, 'r')
  lines = fd.read()
  fd.close()

  # Insert synthesized key/value pairs (e.g. BuildMachineOSBuild).
  # NOTE: readPlistFromString / dict(a.items() + b.items()) are
  # Python-2-only APIs.
  plist = plistlib.readPlistFromString(lines)
  if keys:
    plist = dict(plist.items() + json.loads(keys[0]).items())
  lines = plistlib.writePlistToString(plist)

  # Go through all the environment variables and replace them as variables in
  # the file.
  IDENT_RE = re.compile(r'[/\s]')
  for key in os.environ:
    if key.startswith('_'):
      continue
    evar = '${%s}' % key
    evalue = os.environ[key]
    lines = string.replace(lines, evar, evalue)

    # Xcode supports various suffices on environment variables, which are
    # all undocumented. :rfc1034identifier is used in the standard project
    # template these days, and :identifier was used earlier. They are used to
    # convert non-url characters into things that look like valid urls --
    # except that the replacement character for :identifier, '_' isn't valid
    # in a URL either -- oops, hence :rfc1034identifier was born.
    evar = '${%s:identifier}' % key
    evalue = IDENT_RE.sub('_', os.environ[key])
    lines = string.replace(lines, evar, evalue)

    evar = '${%s:rfc1034identifier}' % key
    evalue = IDENT_RE.sub('-', os.environ[key])
    lines = string.replace(lines, evar, evalue)

  # Remove any keys with values that haven't been replaced.
  # Dropping the <string>${...}</string> line also drops the preceding line,
  # which holds the corresponding <key> element.
  lines = lines.split('\n')
  for i in range(len(lines)):
    if lines[i].strip().startswith("<string>${"):
      lines[i] = None
      lines[i - 1] = None
  lines = '\n'.join(filter(lambda x: x is not None, lines))

  # Write out the file with variables replaced.
  fd = open(dest, 'w')
  fd.write(lines)
  fd.close()

  # Now write out PkgInfo file now that the Info.plist file has been
  # "compiled".
  self._WritePkgInfo(dest)

  if convert_to_binary == 'True':
    self._ConvertToBinary(dest)
def _WritePkgInfo(self, info_plist):
  """This writes the PkgInfo file from the data stored in Info.plist."""
  plist = plistlib.readPlist(info_plist)
  if not plist:
    return

  # PkgInfo is only created for application bundles.
  package_type = plist['CFBundlePackageType']
  if package_type != 'APPL':
    return

  # The format of PkgInfo is eight characters: the bundle type followed by
  # the bundle signature, four characters each. A missing or malformed
  # signature becomes '????'.
  signature_code = plist.get('CFBundleSignature', '????')
  if len(signature_code) != 4:  # Wrong length resets everything, too.
    signature_code = '?' * 4

  pkginfo_path = os.path.join(os.path.dirname(info_plist), 'PkgInfo')
  fp = open(pkginfo_path, 'w')
  fp.write('%s%s' % (package_type, signature_code))
  fp.close()
def ExecFlock(self, lockfile, *cmd_list):
  """Emulates the most basic behavior of Linux's flock(1).

  Opens (creating if needed) |lockfile|, takes an exclusive lock on it,
  then runs |cmd_list| and returns its exit code.
  """
  # Rely on exception handling to report errors.
  # The descriptor is deliberately left open: the lock must be held while
  # the subprocess runs, and is released when this process exits.
  fd = os.open(lockfile, os.O_RDONLY|os.O_NOCTTY|os.O_CREAT, 0o666)
  fcntl.flock(fd, fcntl.LOCK_EX)
  return subprocess.call(cmd_list)
def ExecFilterLibtool(self, *cmd_list):
  """Calls libtool and filters out '/path/to/libtool: file: foo.o has no
  symbols'."""
  libtool_re = re.compile(r'^.*libtool: (?:for architecture: \S* )?'
                          r'file: .* has no symbols$')
  # Xcode 5's libtool emits a differently-worded empty-TOC warning.
  libtool_re5 = re.compile(
      r'^.*libtool: warning for library: ' +
      r'.* the table of contents is empty ' +
      r'\(no object file members in the library define global symbols\)$')
  env = os.environ.copy()
  # Ref:
  # http://www.opensource.apple.com/source/cctools/cctools-809/misc/libtool.c
  # The problem with this flag is that it resets the file mtime on the file to
  # epoch=0, e.g. 1970-1-1 or 1969-12-31 depending on timezone.
  env['ZERO_AR_DATE'] = '1'
  libtoolout = subprocess.Popen(cmd_list, stderr=subprocess.PIPE, env=env)
  _, err = libtoolout.communicate()
  # Forward only the stderr lines that match neither noise pattern.
  # NOTE: the `print >>` statement makes this file Python-2-only.
  for line in err.splitlines():
    if not libtool_re.match(line) and not libtool_re5.match(line):
      print >>sys.stderr, line
  # Unconditionally touch the output .a file on the command line if present
  # and the command succeeded. A bit hacky.
  # (Gives the archive a real mtime again after ZERO_AR_DATE zeroed it.)
  if not libtoolout.returncode:
    for i in range(len(cmd_list) - 1):
      if cmd_list[i] == "-o" and cmd_list[i+1].endswith('.a'):
        os.utime(cmd_list[i+1], None)
        break
  return libtoolout.returncode
def ExecPackageFramework(self, framework, version):
  """Takes a path to Something.framework and the Current version of that and
  sets up all the symlinks."""
  # The binary is named after the part before ".framework".
  binary = os.path.basename(framework).split('.')[0]

  CURRENT = 'Current'
  RESOURCES = 'Resources'
  VERSIONS = 'Versions'

  if not os.path.exists(os.path.join(framework, VERSIONS, version, binary)):
    # Binary-less frameworks don't seem to contain symlinks (see e.g.
    # chromium's out/Debug/org.chromium.Chromium.manifest/ bundle).
    return

  # Symlink targets are relative to the framework root, so work from there.
  original_cwd = os.getcwd()
  os.chdir(framework)

  # Versions/Current -> |version|
  self._Relink(version, os.path.join(VERSIONS, CURRENT))

  # Root-level binary and Resources point through Versions/Current.
  self._Relink(os.path.join(VERSIONS, CURRENT, binary), binary)
  self._Relink(os.path.join(VERSIONS, CURRENT, RESOURCES), RESOURCES)

  # Back to where we were before!
  os.chdir(original_cwd)
def _Relink(self, dest, link):
  """Creates a symlink to |dest| named |link|, replacing any existing
  entry at |link| (lexists also catches dangling symlinks)."""
  if os.path.lexists(link):
    os.remove(link)
  os.symlink(dest, link)
def ExecCompileXcassets(self, keys, *inputs):
  """Compiles multiple .xcassets files into a single .car file.

  This invokes 'actool' to compile all the inputs .xcassets files. The
  |keys| arguments is a json-encoded dictionary of extra arguments to
  pass to 'actool' when the asset catalogs contains an application icon
  or a launch image.

  Note that 'actool' does not create the Assets.car file if the asset
  catalogs does not contains imageset.
  """
  command_line = [
    'xcrun', 'actool', '--output-format', 'human-readable-text',
    '--compress-pngs', '--notices', '--warnings', '--errors',
  ]
  is_iphone_target = 'IPHONEOS_DEPLOYMENT_TARGET' in os.environ
  if is_iphone_target:
    # CONFIGURATION is assumed to end in '-iphoneos'/'-iphonesimulator';
    # anything unrecognized falls back to the simulator.
    platform = os.environ['CONFIGURATION'].split('-')[-1]
    if platform not in ('iphoneos', 'iphonesimulator'):
      platform = 'iphonesimulator'
    command_line.extend([
      '--platform', platform, '--target-device', 'iphone',
      '--target-device', 'ipad', '--minimum-deployment-target',
      os.environ['IPHONEOS_DEPLOYMENT_TARGET'], '--compile',
      os.path.abspath(os.environ['CONTENTS_FOLDER_PATH']),
    ])
  else:
    command_line.extend([
      '--platform', 'macosx', '--target-device', 'mac',
      '--minimum-deployment-target', os.environ['MACOSX_DEPLOYMENT_TARGET'],
      '--compile',
      os.path.abspath(os.environ['UNLOCALIZED_RESOURCES_FOLDER_PATH']),
    ])
  if keys:
    # |keys| maps option names to values: bools become bare flags, lists
    # repeat the flag once per element, anything else is '--key value'.
    # NOTE: iteritems() makes this file Python-2-only.
    keys = json.loads(keys)
    for key, value in keys.iteritems():
      arg_name = '--' + key
      if isinstance(value, bool):
        if value:
          command_line.append(arg_name)
      elif isinstance(value, list):
        for v in value:
          command_line.append(arg_name)
          command_line.append(str(v))
      else:
        command_line.append(arg_name)
        command_line.append(str(value))
  # Note: actool crashes if inputs path are relative, so use os.path.abspath
  # to get absolute path name for inputs.
  command_line.extend(map(os.path.abspath, inputs))
  subprocess.check_call(command_line)
def ExecMergeInfoPlist(self, output, *inputs):
  """Merge multiple .plist files into a single .plist file.

  Later inputs win on conflicting keys; nested dicts merge recursively
  (see _MergePlist).
  """
  merged = {}
  for plist_path in inputs:
    self._MergePlist(merged, self._LoadPlistMaybeBinary(plist_path))
  plistlib.writePlist(merged, output)
def ExecCodeSignBundle(self, key, entitlements, provisioning):
  """Code sign a bundle.

  This function tries to code sign an iOS bundle, following the same
  algorithm as Xcode:
    1. pick the provisioning profile that best match the bundle identifier,
       and copy it into the bundle as embedded.mobileprovision,
    2. copy Entitlements.plist from user or SDK next to the bundle,
    3. code sign the bundle.
  """
  substitutions, overrides = self._InstallProvisioningProfile(
      provisioning, self._GetCFBundleIdentifier())
  entitlements_path = self._InstallEntitlements(
      entitlements, substitutions, overrides)
  bundle_path = os.path.join(
      os.environ['TARGET_BUILD_DIR'], os.environ['FULL_PRODUCT_NAME'])
  subprocess.check_call([
      'codesign', '--force', '--sign', key, '--entitlements',
      entitlements_path, '--timestamp=none', bundle_path])
def _InstallProvisioningProfile(self, profile, bundle_identifier):
  """Installs embedded.mobileprovision into the bundle.

  Args:
    profile: string, optional, short name of the .mobileprovision file
      to use, if empty or the file is missing, the best file installed
      will be used
    bundle_identifier: string, value of CFBundleIdentifier from Info.plist

  Returns:
    A tuple containing two dictionary: variables substitutions and values
    to overrides when generating the entitlements file.
  """
  source_path, provisioning_data, team_id = self._FindProvisioningProfile(
      profile, bundle_identifier)
  embedded_path = os.path.join(
      os.environ['BUILT_PRODUCTS_DIR'],
      os.environ['CONTENTS_FOLDER_PATH'],
      'embedded.mobileprovision')
  # copy2 preserves the profile's file metadata (e.g. mtime).
  shutil.copy2(source_path, embedded_path)
  substitutions = self._GetSubstitutions(bundle_identifier, team_id + '.')
  return substitutions, provisioning_data['Entitlements']
def _FindProvisioningProfile(self, profile, bundle_identifier):
  """Finds the .mobileprovision file to use for signing the bundle.

  Checks all the installed provisioning profiles (or if the user specified
  the PROVISIONING_PROFILE variable, only consult it) and select the most
  specific that correspond to the bundle identifier.

  Args:
    profile: string, optional, short name of the .mobileprovision file
      to use, if empty or the file is missing, the best file installed
      will be used
    bundle_identifier: string, value of CFBundleIdentifier from Info.plist

  Returns:
    A tuple of the path to the selected provisioning profile, the data of
    the embedded plist in the provisioning profile and the team identifier
    to use for code signing.

  Raises:
    SystemExit: if no .mobileprovision can be used to sign the bundle.
  """
  profiles_dir = os.path.join(
      os.environ['HOME'], 'Library', 'MobileDevice', 'Provisioning Profiles')
  if not os.path.isdir(profiles_dir):
    # NOTE: the `print >>` statement makes this file Python-2-only.
    print >>sys.stderr, (
        'cannot find mobile provisioning for %s' % bundle_identifier)
    sys.exit(1)
  provisioning_profiles = None
  if profile:
    profile_path = os.path.join(profiles_dir, profile + '.mobileprovision')
    if os.path.exists(profile_path):
      provisioning_profiles = [profile_path]
  if not provisioning_profiles:
    # Named profile missing or not given: consider everything installed.
    provisioning_profiles = glob.glob(
        os.path.join(profiles_dir, '*.mobileprovision'))
  valid_provisioning_profiles = {}
  for profile_path in provisioning_profiles:
    profile_data = self._LoadProvisioningProfile(profile_path)
    app_id_pattern = profile_data.get(
        'Entitlements', {}).get('application-identifier', '')
    # A profile is valid when its application-identifier pattern (which may
    # contain fnmatch wildcards) matches <team>.<bundle_identifier>.
    for team_identifier in profile_data.get('TeamIdentifier', []):
      app_id = '%s.%s' % (team_identifier, bundle_identifier)
      if fnmatch.fnmatch(app_id, app_id_pattern):
        valid_provisioning_profiles[app_id_pattern] = (
            profile_path, profile_data, team_identifier)
  if not valid_provisioning_profiles:
    print >>sys.stderr, (
        'cannot find mobile provisioning for %s' % bundle_identifier)
    sys.exit(1)
  # If the user has multiple provisioning profiles installed that can be
  # used for ${bundle_identifier}, pick the most specific one (ie. the
  # provisioning profile whose pattern is the longest).
  selected_key = max(valid_provisioning_profiles, key=lambda v: len(v))
  return valid_provisioning_profiles[selected_key]
def _LoadProvisioningProfile(self, profile_path):
  """Extracts the plist embedded in a provisioning profile.

  Args:
    profile_path: string, path to the .mobileprovision file

  Returns:
    Content of the plist embedded in the provisioning profile as a dictionary.
  """
  with tempfile.NamedTemporaryFile() as temp:
    # `security cms -D` strips the CMS envelope, leaving the bare plist.
    command = [
        'security', 'cms', '-D', '-i', profile_path, '-o', temp.name]
    subprocess.check_call(command)
    return self._LoadPlistMaybeBinary(temp.name)
def _MergePlist(self, merged_plist, plist):
"""Merge |plist| into |merged_plist|."""
for key, value in plist.iteritems():
if isinstance(value, dict):
merged_value = merged_plist.get(key, {})
if isinstance(merged_value, dict):
self._MergePlist(merged_value, value)
merged_plist[key] = merged_value
else:
merged_plist[key] = value
else:
merged_plist[key] = value
def _LoadPlistMaybeBinary(self, plist_path):
    """Loads into memory a plist possibly encoded in binary format.

    This is a wrapper around plistlib.readPlist that tries to convert
    the plist to the XML format if it can't be parsed (assuming that it
    is in the binary format).

    Args:
        plist_path: string, path to a plist file, in XML or binary format

    Returns:
        Content of the plist as a dictionary.
    """
    try:
        # First, try to read the file using plistlib that only supports XML,
        # and if an exception is raised, convert a temporary copy to XML and
        # load that copy.
        return plistlib.readPlist(plist_path)
    except Exception:
        # Was a bare "except:", which also swallowed KeyboardInterrupt
        # and SystemExit; only parse failures should trigger the
        # binary-format fallback below.
        pass
    with tempfile.NamedTemporaryFile() as temp:
        shutil.copy2(plist_path, temp.name)
        # 'plutil' (macOS) converts a binary plist to XML in place.
        subprocess.check_call(['plutil', '-convert', 'xml1', temp.name])
        return plistlib.readPlist(temp.name)
def _GetSubstitutions(self, bundle_identifier, app_identifier_prefix):
"""Constructs a dictionary of variable substitutions for Entitlements.plist.
Args:
bundle_identifier: string, value of CFBundleIdentifier from Info.plist
app_identifier_prefix: string, value for AppIdentifierPrefix
Returns:
Dictionary of substitutions to apply when generating Entitlements.plist.
"""
return {
'CFBundleIdentifier': bundle_identifier,
'AppIdentifierPrefix': app_identifier_prefix,
}
def _GetCFBundleIdentifier(self):
    """Extracts CFBundleIdentifier value from Info.plist in the bundle.

    Reads TARGET_BUILD_DIR and INFOPLIST_PATH from the environment
    (presumably set by the build system — confirm against callers).

    Returns:
        Value of CFBundleIdentifier in the Info.plist located in the bundle.
    """
    info_plist_path = os.path.join(
        os.environ['TARGET_BUILD_DIR'],
        os.environ['INFOPLIST_PATH'])
    # The Info.plist may already be binary, so go through the
    # format-agnostic loader.
    info_plist_data = self._LoadPlistMaybeBinary(info_plist_path)
    return info_plist_data['CFBundleIdentifier']
def _InstallEntitlements(self, entitlements, substitutions, overrides):
    """Generates and installs the ${BundleName}.xcent entitlements file.

    Expands "$(variable)" references in the source entitlements
    template, adds any extra entitlements from the .mobileprovision
    file, and writes the result next to the built product.

    Args:
        entitlements: string, optional, path to the Entitlements.plist
            template to use, defaults to "${SDKROOT}/Entitlements.plist"
        substitutions: dictionary, variable substitutions
        overrides: dictionary, values to add to the entitlements

    Returns:
        Path to the generated entitlements file.
    """
    target_path = os.path.join(
        os.environ['BUILT_PRODUCTS_DIR'],
        os.environ['PRODUCT_NAME'] + '.xcent')
    # Fall back to the SDK's default template when none was supplied.
    source_path = entitlements or os.path.join(
        os.environ['SDKROOT'],
        'Entitlements.plist')
    shutil.copy2(source_path, target_path)
    data = self._ExpandVariables(
        self._LoadPlistMaybeBinary(target_path), substitutions)
    if overrides:
        # Overrides never clobber keys already present in the template.
        for key, value in overrides.items():
            data.setdefault(key, value)
    plistlib.writePlist(data, target_path)
    return target_path
def _ExpandVariables(self, data, substitutions):
"""Expands variables "$(variable)" in data.
Args:
data: object, can be either string, list or dictionary
substitutions: dictionary, variable substitutions to perform
Returns:
Copy of data where each references to "$(variable)" has been replaced
by the corresponding value found in substitutions, or left intact if
the key was not found.
"""
if isinstance(data, str):
for key, value in substitutions.iteritems():
data = data.replace('$(%s)' % key, value)
return data
if isinstance(data, list):
return [self._ExpandVariables(v, substitutions) for v in data]
if isinstance(data, dict):
return {k: self._ExpandVariables(data[k], substitutions) for k in data}
return data
if __name__ == '__main__':
    # Script entry point: dispatch to main() (defined elsewhere in this
    # module) and propagate its result as the process exit status.
    sys.exit(main(sys.argv[1:]))
| |
# coding: utf-8
"""
Generated by: https://openapi-generator.tech
"""
from dataclasses import dataclass
import re # noqa: F401
import sys # noqa: F401
import typing
import urllib3
from urllib3._collections import HTTPHeaderDict
from openapi_client import api_client, exceptions
import decimal # noqa: F401
from datetime import date, datetime # noqa: F401
from frozendict import frozendict # noqa: F401
from openapi_client.schemas import ( # noqa: F401
AnyTypeSchema,
ComposedSchema,
DictSchema,
ListSchema,
StrSchema,
IntSchema,
Int32Schema,
Int64Schema,
Float32Schema,
Float64Schema,
NumberSchema,
DateSchema,
DateTimeSchema,
DecimalSchema,
BoolSchema,
BinarySchema,
NoneSchema,
none_type,
InstantiationMetadata,
Unset,
unset,
ComposedBase,
ListBase,
DictBase,
NoneBase,
StrBase,
IntBase,
NumberBase,
DateBase,
DateTimeBase,
BoolBase,
BinaryBase,
Schema,
_SchemaValidator,
_SchemaTypeChecker,
_SchemaEnumMaker
)
from openapi_client.model.pipelines import Pipelines
# path params
# Schema type for the {organization} path segment (plain string).
OrganizationSchema = StrSchema

# TypedDicts describing the required/optional path parameters accepted
# by this endpoint (generated code; regenerate rather than hand-edit).
RequestRequiredPathParams = typing.TypedDict(
    'RequestRequiredPathParams',
    {
        'organization': OrganizationSchema,
    }
)
RequestOptionalPathParams = typing.TypedDict(
    'RequestOptionalPathParams',
    {
    },
    total=False
)


class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams):
    pass


# Serialization descriptor for the "organization" path parameter
# (simple style, required).
request_path_organization = api_client.PathParameter(
    name="organization",
    style=api_client.ParameterStyle.SIMPLE,
    schema=OrganizationSchema,
    required=True,
)
# Endpoint constants: path template, HTTP method and auth schemes.
_path = '/blue/rest/organizations/{organization}/pipelines/'
_method = 'GET'
_auth = [
    'jenkins_auth',
]
# A 200 response body deserializes into the generated Pipelines model.
SchemaFor200ResponseBodyApplicationJson = Pipelines


@dataclass
class ApiResponseFor200(api_client.ApiResponse):
    response: urllib3.HTTPResponse
    body: typing.Union[
        SchemaFor200ResponseBodyApplicationJson,
    ]
    headers: Unset = unset


_response_for_200 = api_client.OpenApiResponse(
    response_cls=ApiResponseFor200,
    content={
        'application/json': api_client.MediaType(
            schema=SchemaFor200ResponseBodyApplicationJson),
    },
)


@dataclass
class ApiResponseFor401(api_client.ApiResponse):
    # 401 has no declared body schema; only the raw response is kept.
    response: urllib3.HTTPResponse
    body: Unset = unset
    headers: Unset = unset


_response_for_401 = api_client.OpenApiResponse(
    response_cls=ApiResponseFor401,
)


@dataclass
class ApiResponseFor403(api_client.ApiResponse):
    # 403 has no declared body schema; only the raw response is kept.
    response: urllib3.HTTPResponse
    body: Unset = unset
    headers: Unset = unset


_response_for_403 = api_client.OpenApiResponse(
    response_cls=ApiResponseFor403,
)


# Maps HTTP status codes (as strings) to their response descriptors.
_status_code_to_response = {
    '200': _response_for_200,
    '401': _response_for_401,
    '403': _response_for_403,
}
# Every content type this endpoint can produce.
_all_accept_content_types = (
    'application/json',
)
class GetPipelines(api_client.Api):
    """Generated client for GET /blue/rest/organizations/{organization}/pipelines/."""

    def get_pipelines(
        self: api_client.Api,
        path_params: RequestPathParams = frozendict(),
        # Tuple[str, ...] (variable length) — the original Tuple[str]
        # annotation meant a 1-tuple, which the default already violates.
        accept_content_types: typing.Tuple[str, ...] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: bool = False,
    ) -> typing.Union[
        ApiResponseFor200,
        api_client.ApiResponseWithoutDeserialization
    ]:
        """Fetch the pipelines of an organization.

        :param path_params: typed dict holding the 'organization' path value
        :param accept_content_types: content types sent in the Accept header
        :param stream: if true, stream the response body
        :param timeout: request timeout (seconds, or (connect, read) tuple)
        :param skip_deserialization: If true then api_response.response will be set but
            api_response.body and api_response.headers will not be deserialized into schema
            class instances
        """
        # Fail fast on unknown or badly-typed path parameters.
        self._verify_typed_dict_inputs(RequestPathParams, path_params)
        _path_params = {}
        for parameter in (
            request_path_organization,
        ):
            parameter_data = path_params.get(parameter.name, unset)
            if parameter_data is unset:
                continue
            serialized_data = parameter.serialize(parameter_data)
            _path_params.update(serialized_data)
        _headers = HTTPHeaderDict()
        # TODO add cookie handling
        if accept_content_types:
            for accept_content_type in accept_content_types:
                _headers.add('Accept', accept_content_type)
        response = self.api_client.call_api(
            resource_path=_path,
            method=_method,
            path_params=_path_params,
            headers=_headers,
            auth_settings=_auth,
            stream=stream,
            timeout=timeout,
        )
        if skip_deserialization:
            api_response = api_client.ApiResponseWithoutDeserialization(response=response)
        else:
            # Pick the deserializer matching the HTTP status; unknown
            # statuses fall back to the raw (undeserialized) response.
            response_for_status = _status_code_to_response.get(str(response.status))
            if response_for_status:
                api_response = response_for_status.deserialize(response, self.api_client.configuration)
            else:
                api_response = api_client.ApiResponseWithoutDeserialization(response=response)
        # Non-2xx statuses surface as exceptions carrying the response.
        if not 200 <= response.status <= 299:
            raise exceptions.ApiException(api_response=api_response)
        return api_response
| |
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2013 NTT MCL Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from django.conf import settings
from django.core.urlresolvers import reverse
from django.core.urlresolvers import reverse_lazy
from django.http import HttpResponse
from django.utils.translation import ugettext_lazy as _
from django.views.generic import View
from horizon import exceptions
from horizon import tabs
from horizon.utils.lazy_encoder import LazyTranslationEncoder
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.network_topology.instances \
import tables as instances_tables
from openstack_dashboard.dashboards.project.network_topology.networks \
import tables as networks_tables
from openstack_dashboard.dashboards.project.network_topology.ports \
import tables as ports_tables
from openstack_dashboard.dashboards.project.network_topology.routers \
import tables as routers_tables
from openstack_dashboard.dashboards.project.network_topology.subnets \
import tables as subnets_tables
from openstack_dashboard.dashboards.project.network_topology \
import tabs as topology_tabs
from openstack_dashboard.dashboards.project.network_topology import utils
from openstack_dashboard.dashboards.project.instances import\
console as i_console
from openstack_dashboard.dashboards.project.instances.tables import \
STATUS_DISPLAY_CHOICES as instance_choices
from openstack_dashboard.dashboards.project.instances import\
views as i_views
from openstack_dashboard.dashboards.project.instances.workflows import\
create_instance as i_workflows
from openstack_dashboard.dashboards.project.networks.subnets import\
views as s_views
from openstack_dashboard.dashboards.project.networks.subnets import\
workflows as s_workflows
from openstack_dashboard.dashboards.project.networks.tables import \
DISPLAY_CHOICES as network_display_choices
from openstack_dashboard.dashboards.project.networks.tables import \
STATUS_DISPLAY_CHOICES as network_choices
from openstack_dashboard.dashboards.project.networks import\
views as n_views
from openstack_dashboard.dashboards.project.networks import\
workflows as n_workflows
from openstack_dashboard.dashboards.project.routers.ports.tables import \
DISPLAY_CHOICES as ports_choices
from openstack_dashboard.dashboards.project.routers.ports.tables import \
STATUS_DISPLAY_CHOICES as ports_status_choices
from openstack_dashboard.dashboards.project.routers.ports import\
views as p_views
from openstack_dashboard.dashboards.project.routers.tables import \
ADMIN_STATE_DISPLAY_CHOICES as routers_admin_choices
from openstack_dashboard.dashboards.project.routers.tables import \
STATUS_DISPLAY_CHOICES as routers_status_choices
from openstack_dashboard.dashboards.project.routers import\
views as r_views
# List of known server statuses that won't connect to the console
console_invalid_status = {
    'shutoff', 'suspended', 'resize', 'verify_resize',
    'revert_resize', 'migrating', 'build', 'shelved',
    'shelved_offloaded'}
class TranslationHelper(object):
    """Expose the status translations of instances, networks, routers
    and ports (defined elsewhere in Horizon) to the network topology.

    Each attribute is a dict keyed by the upper-cased raw status string.
    """

    def __init__(self):
        # Collapse the translation choice tuples into plain dicts;
        # later sources win when the same key appears twice.
        instance = dict(instance_choices)
        network = dict(network_choices)
        network.update(dict(network_display_choices))
        router = dict(routers_admin_choices)
        router.update(dict(routers_status_choices))
        port = dict(ports_choices)
        port.update(dict(ports_status_choices))
        # Upper-case every key for simple, case-insensitive access.
        self.instance = self._upper_keys(instance)
        self.network = self._upper_keys(network)
        self.router = self._upper_keys(router)
        self.port = self._upper_keys(port)

    @staticmethod
    def _upper_keys(mapping):
        # Re-key a dict with upper-cased keys.
        return {key.upper(): value for key, value in mapping.items()}
class NTAddInterfaceView(p_views.AddInterfaceView):
    """Add-interface form that returns to the topology page."""

    success_url = "horizon:project:network_topology:index"
    failure_url = "horizon:project:network_topology:index"

    def get_success_url(self):
        return reverse("horizon:project:network_topology:index")

    def get_context_data(self, **kwargs):
        context = super(NTAddInterfaceView, self).get_context_data(**kwargs)
        # Point the form's submit target at the topology interface view.
        context['form_url'] = 'horizon:project:network_topology:interface'
        return context
class NTCreateRouterView(r_views.CreateView):
    """Create-router view embedded in the network topology page."""

    template_name = 'project/network_topology/create_router.html'
    success_url = reverse_lazy("horizon:project:network_topology:index")
    page_title = _("Create a Router")
class NTCreateNetwork(n_workflows.CreateNetwork):
    """Create-network workflow that redirects back to the topology."""

    def get_success_url(self):
        return reverse("horizon:project:network_topology:index")

    def get_failure_url(self):
        return reverse("horizon:project:network_topology:index")
class NTCreateNetworkView(n_views.CreateView):
    """Create-network view using the topology-aware workflow."""

    workflow_class = NTCreateNetwork
class NTLaunchInstance(i_workflows.LaunchInstance):
    """Launch-instance workflow that returns to the topology page."""

    success_url = "horizon:project:network_topology:index"
class NTLaunchInstanceView(i_views.LaunchInstanceView):
    """Launch-instance view using the topology-aware workflow."""

    workflow_class = NTLaunchInstance
class NTCreateSubnet(s_workflows.CreateSubnet):
    """Create-subnet workflow that redirects back to the topology."""

    def get_success_url(self):
        return reverse("horizon:project:network_topology:index")

    def get_failure_url(self):
        return reverse("horizon:project:network_topology:index")
class NTCreateSubnetView(s_views.CreateView):
    """Create-subnet view using the topology-aware workflow."""

    workflow_class = NTCreateSubnet
class InstanceView(i_views.IndexView):
    """Single-instance table rendered inside the topology iframe."""

    table_class = instances_tables.InstancesTable
    template_name = 'project/network_topology/iframe.html'

    def get_data(self):
        # No pagination: at most one instance is ever shown.
        self._more = False
        # Look the instance up by the ?id= query parameter; on any
        # failure report the error and render an empty table.
        try:
            requested_id = self.request.GET.get("id", "")
            return [api.nova.server_get(self.request, requested_id)]
        except Exception:
            exceptions.handle(self.request,
                              _('Unable to retrieve the instance.'))
            return []
class RouterView(r_views.IndexView):
    """Router table rendered inside the topology iframe."""

    table_class = routers_tables.RoutersTable
    template_name = 'project/network_topology/iframe.html'
class NetworkView(n_views.IndexView):
    """Network table rendered inside the topology iframe."""

    table_class = networks_tables.NetworksTable
    template_name = 'project/network_topology/iframe.html'
class RouterDetailView(r_views.DetailView):
    """Router detail (ports table) rendered inside the topology iframe."""

    table_classes = (ports_tables.PortsTable, )
    template_name = 'project/network_topology/iframe.html'

    def get_interfaces_data(self):
        # Deliberate no-op override (returns None) — presumably the
        # iframe view does not need the interfaces data the parent
        # class would fetch; confirm before changing.
        pass
class NetworkDetailView(n_views.DetailView):
    """Network detail (subnets table) rendered inside the topology iframe."""

    table_classes = (subnets_tables.SubnetsTable, )
    template_name = 'project/network_topology/iframe.html'
class NetworkTopologyView(tabs.TabView):
    """Top-level page hosting the network topology tab group."""

    tab_group_class = topology_tabs.TopologyTabs
    template_name = 'project/network_topology/index.html'
    page_title = _("Network Topology")

    def get_context_data(self, **kwargs):
        context = super(NetworkTopologyView, self).get_context_data(**kwargs)
        # Augment the context with topology-specific data
        # (see utils.get_context).
        return utils.get_context(self.request, context)
class JSONView(View):
    """Serve the network topology diagram's data as a JSON document.

    Aggregates the servers, networks, ports and routers visible to the
    current tenant into a single payload for the client-side renderer.
    """

    # Shared status-translation tables.
    trans = TranslationHelper()

    @property
    def is_router_enabled(self):
        """True unless routers are disabled via OPENSTACK_NEUTRON_NETWORK."""
        network_config = getattr(settings, 'OPENSTACK_NEUTRON_NETWORK', {})
        return network_config.get('enable_router', True)

    def add_resource_url(self, view, resources):
        """Add a detail-page 'url' key to each resource dict in
        ``resources`` that belongs to the current tenant.
        """
        tenant_id = self.request.user.tenant_id
        for resource in resources:
            if (resource.get('tenant_id')
                    and tenant_id != resource.get('tenant_id')):
                # Resources owned by another tenant get no URL.
                continue
            resource['url'] = reverse(view, None, [str(resource['id'])])

    def _check_router_external_port(self, ports, router_id, network_id):
        """Return True if ``ports`` already contains the port connecting
        ``router_id`` to the external network ``network_id``.
        """
        for port in ports:
            if (port['network_id'] == network_id
                    and port['device_id'] == router_id):
                return True
        return False

    def _get_servers(self, request):
        """Return the tenant's instances with status/task/console data."""
        # Get nova data
        try:
            servers, more = api.nova.server_list(request)
        except Exception:
            servers = []
        data = []
        console_type = getattr(settings, 'CONSOLE_TYPE', 'AUTO')
        # lowercase of the keys will be used at the end of the console URL.
        for server in servers:
            server_data = {'name': server.name,
                           'status': self.trans.instance[server.status],
                           'original_status': server.status,
                           'task': getattr(server, 'OS-EXT-STS:task_state'),
                           'id': server.id}
            # Avoid doing extra calls for console if the server is in
            # an invalid status for console connection
            if server.status.lower() not in console_invalid_status:
                try:
                    console = i_console.get_console(
                        request, console_type, server)[0].lower()
                    server_data['console'] = console
                except exceptions.NotAvailable:
                    pass
            data.append(server_data)
        self.add_resource_url('horizon:project:instances:detail', data)
        return data

    def _get_networks(self, request):
        """Return tenant networks plus external networks, with subnets,
        sorted so external networks come first.
        """
        # Get neutron data
        # if we didn't specify tenant_id, all networks shown as admin user.
        # so it is need to specify the networks. However there is no need to
        # specify tenant_id for subnet. The subnet which belongs to the public
        # network is needed to draw subnet information on public network.
        try:
            neutron_networks = api.neutron.network_list_for_tenant(
                request,
                request.user.tenant_id)
        except Exception:
            neutron_networks = []
        networks = []
        for network in neutron_networks:
            obj = {'name': network.name_or_id,
                   'id': network.id,
                   'subnets': [{'id': subnet.id,
                                'cidr': subnet.cidr}
                               for subnet in network.subnets],
                   'status': self.trans.network[network.status],
                   'original_status': network.status,
                   'router:external': network['router:external']}
            self.add_resource_url('horizon:project:networks:subnets:detail',
                                  obj['subnets'])
            networks.append(obj)
        # Add public networks to the networks list
        if self.is_router_enabled:
            try:
                neutron_public_networks = api.neutron.network_list(
                    request,
                    **{'router:external': True})
            except Exception:
                neutron_public_networks = []
            my_network_ids = [net['id'] for net in networks]
            for publicnet in neutron_public_networks:
                if publicnet.id in my_network_ids:
                    continue
                try:
                    subnets = []
                    for subnet in publicnet.subnets:
                        snet = {'id': subnet.id,
                                'cidr': subnet.cidr}
                        # Bug fix: add_resource_url() iterates its second
                        # argument, so the single subnet dict must be
                        # wrapped in a list.  Passing the bare dict made
                        # the loop iterate over the dict's string keys,
                        # raise AttributeError, and silently drop every
                        # public subnet via the except clause below.
                        self.add_resource_url(
                            'horizon:project:networks:subnets:detail',
                            [snet])
                        subnets.append(snet)
                except Exception:
                    subnets = []
                networks.append({
                    'name': publicnet.name_or_id,
                    'id': publicnet.id,
                    'subnets': subnets,
                    'status': self.trans.network[publicnet.status],
                    'original_status': publicnet.status,
                    'router:external': publicnet['router:external']})
        self.add_resource_url('horizon:project:networks:detail',
                              networks)
        return sorted(networks,
                      key=lambda x: x.get('router:external'),
                      reverse=True)

    def _get_routers(self, request):
        """Return the tenant's routers; empty if routers are disabled."""
        if not self.is_router_enabled:
            return []
        try:
            neutron_routers = api.neutron.router_list(
                request,
                tenant_id=request.user.tenant_id)
        except Exception:
            neutron_routers = []
        routers = [{'id': router.id,
                    'name': router.name_or_id,
                    'status': self.trans.router[router.status],
                    'original_status': router.status,
                    'external_gateway_info': router.external_gateway_info}
                   for router in neutron_routers]
        self.add_resource_url('horizon:project:routers:detail', routers)
        return routers

    def _get_ports(self, request, networks):
        """Return the ports attached to the given (visible) networks."""
        try:
            neutron_ports = api.neutron.port_list(request)
        except Exception:
            neutron_ports = []
        # we should filter out ports connected to non tenant networks
        # which they have no visibility to
        tenant_network_ids = [network['id'] for network in networks]
        ports = [{'id': port.id,
                  'network_id': port.network_id,
                  'device_id': port.device_id,
                  'fixed_ips': port.fixed_ips,
                  'device_owner': port.device_owner,
                  'status': self.trans.port[port.status],
                  'original_status': port.status}
                 for port in neutron_ports
                 if port.device_owner != 'network:router_ha_interface'
                 and port.network_id in tenant_network_ids]
        self.add_resource_url('horizon:project:networks:ports:detail',
                              ports)
        return ports

    def _prepare_gateway_ports(self, routers, ports):
        """Append a synthetic gateway port for every router whose real
        gateway port is not visible to the tenant.
        """
        # user can't see port on external network. so we are
        # adding fake port based on router information
        for router in routers:
            external_gateway_info = router.get('external_gateway_info')
            if not external_gateway_info:
                continue
            external_network = external_gateway_info.get(
                'network_id')
            if not external_network:
                continue
            if self._check_router_external_port(ports,
                                                router['id'],
                                                external_network):
                continue
            fake_port = {'id': 'gateway%s' % external_network,
                         'network_id': external_network,
                         'device_id': router['id'],
                         'fixed_ips': []}
            ports.append(fake_port)

    def get(self, request, *args, **kwargs):
        """Assemble the full topology payload and return it as JSON."""
        networks = self._get_networks(request)
        data = {'servers': self._get_servers(request),
                'networks': networks,
                'ports': self._get_ports(request, networks),
                'routers': self._get_routers(request)}
        self._prepare_gateway_ports(data['routers'], data['ports'])
        json_string = json.dumps(data, cls=LazyTranslationEncoder,
                                 ensure_ascii=False)
        return HttpResponse(json_string, content_type='text/json')
| |
from __future__ import absolute_import
"""Routine to monitor the modal gain in each pixel as a
function of time. Uses COS Cumulative Image (CCI) files
to produce a modal gain map for each time period. Modal gain
maps for each period are collated to monitor the progress of
each pixel(superpixel) with time. Pixels that drop below
a threshold value are flagged and collected into a
gain sag table reference file (gsagtab).
The PHA modal gain threshold is set by global variable MODAL_GAIN_LIMIT.
Allowing the modal gain of a distribution to come within 1 gain bin
of the threshold results in ~8% loss of flux. Within
2 gain bins, ~4%
3 gain bins, ~2%
4 gain bins, ~1%
However, due to the column summing, a 4% loss in a region does not appear to be so in the extracted spectrum.
"""
__author__ = 'Justin Ely'
__maintainer__ = 'Justin Ely'
__email__ = 'ely@stsci.edu'
__status__ = 'Active'
import os
import sys
from astropy.io import fits
import matplotlib as mpl
mpl.use("Agg")
import matplotlib.pyplot as plt
import glob
import numpy as np
import multiprocessing as mp
import shutil
import logging
logger = logging.getLogger(__name__)
#from bokeh import charts
#from bokeh.plotting import figure
from .gainmap import make_all_hv_maps, make_all_gainmaps, make_total_gain
from ..utils import enlarge, send_email
from .findbad import time_trends
from .gsag import main as gsag_main
from .phaimage import make_phaimages
from .constants import *
from ..database.db_tables import open_settings, load_connection
MONITOR_DIR = '/grp/hst/cos/Monitors/CCI/'
WEB_DIR = '/grp/webpages/COS/cci/'
#------------------------------------------------------------
def make_quicklooks(gainmap, clobber=True):
    """Render a quick-look PNG summarizing a single gainmap.

    The figure shows the enlarged modal-gain image with flagged
    gain-sag regions overlaid, plus a cut of the gain and PHA limits
    along the row with the most measured-gain pixels.

    Args:
        gainmap: path to a *gainmap.fits file
        clobber: if False, keep an existing quicklook PNG untouched
    """
    out_image_file = gainmap.replace('gainmap.fits', 'quicklook.png')
    if os.path.exists(out_image_file):
        if not clobber:
            return
    # NOTE(review): neither this HDUList nor the phaimage one opened
    # below is ever closed; closing early could invalidate lazily-loaded
    # data arrays, so the leak is documented rather than "fixed" here.
    hdu = fits.open(gainmap)
    # Expand the binned gainmap back to full detector resolution.
    image = enlarge(hdu['MOD_GAIN'].data, y=Y_BINNING, x=X_BINNING)
    DETHV = hdu[0].header['DETHV']
    EXPSTART = hdu[0].header['EXPSTART']
    SEGMENT = hdu[0].header['SEGMENT']
    # PHA-image extension numbers differ per segment.
    if SEGMENT == 'FUVA':
        lower_ext = 1
        upper_ext = 2
        head = FUVA_string
    elif SEGMENT == 'FUVB':
        lower_ext = 3
        upper_ext = 4
        # NOTE(review): 'head' is assigned but never used below.
        head = FUVB_string
    path, name = os.path.split(gainmap)
    pha_name = os.path.join(path, 'l_' + name.split('_')[1] + '_phaimage_cci_phf.fits')
    print(pha_name)
    # Pick the row with the most pixels that have any measured gain,
    # restricted to rows 100-599.
    has_gain = np.zeros(image.shape)
    index = np.where(image > 0)
    has_gain[index] += 1
    collapsed = np.sum(has_gain, axis=1)
    if collapsed.sum() == 0:
        peak = 400
    else:
        peak = 100 + collapsed[100:600].argmax()
    row_gain = image[peak]
    # Use PHA limits from the phaimage if present, else defaults 3/23.
    if os.path.exists(pha_name):
        phaimage = fits.open(pha_name)
        row_pha_lower = phaimage[lower_ext].data[peak]
        row_pha_upper = phaimage[upper_ext].data[peak]
    else:
        row_pha_lower = np.ones(image.shape[1]) * 3
        row_pha_upper = np.ones(image.shape[1]) * 23
    #------Plotting-----------#
    fig = plt.figure(figsize=(22, 10))
    rectangle = np.array([.1, .1, .8, .8])
    ax = fig.add_axes(rectangle)
    cax = ax.imshow(image, aspect='auto', cmap=mpl.cm.get_cmap('hot_r'))
    plot_flagged(ax, SEGMENT, DETHV, mjd=EXPSTART, color='blue')
    ax.set_xlim(0, 16384)
    ax.set_ylim(0, 1024)
    ax.set_title('MJD: %5.5f' % (EXPSTART))
    cax.set_clim(0, 20)
    ax.grid(False)
    ax.xaxis.set_major_locator(mpl.ticker.MaxNLocator(10))
    ax.set_xlabel('XCORR Pixel')
    ax.set_ylabel('YCORR Pixel')
    plt.text(100, 1000, s='DETHV: %d'% (DETHV), bbox=dict(boxstyle="round", fc="0.9"))
    # Hidden axes used only to anchor the colorbar on the right edge.
    new_rect = rectangle.copy()
    new_rect[1] = 7 * new_rect[3] / 8.
    new_rect[3] /= 4.
    new_rect = [.82, .18, .1, .8]
    cax_holder = fig.add_axes(new_rect, frameon=False, visible=False)
    cax_holder.set_xticklabels(['' for item in cax_holder.get_xticklabels()])
    cax_holder.set_yticklabels(['' for item in cax_holder.get_yticklabels()])
    fig.colorbar(cax, ax=cax_holder, ticks=range(0, 21), shrink=.7)
    # Bottom quarter: gain cut along the peak row plus the PHA limits.
    new_rect = rectangle.copy()
    new_rect[3] /= 4.0
    ax2 = fig.add_axes(new_rect, frameon=False)
    ax2.plot(row_gain, color='b', lw=3)
    ax2.plot(row_pha_lower, color='y', lw=2, label='PHF Limits')
    ax2.plot(row_pha_upper, color='y', lw=2)
    ax2.axhline(y=2, color='r', label='PHA 2')
    ax2.axhline(y=3, color='r', ls='--', label='PHA 3')
    ax2.set_title('Gain and Limits at Y = %d'% peak)
    ax2.set_xticklabels(['' for item in ax2.get_xticklabels()])
    ax2.yaxis.tick_right()
    ax2.yaxis.set_label_position('right')
    ax2.set_ylabel('PHA Gain')
    ax2.set_xlim(0, 16384)
    ax2.set_ylim(0, 24)
    ax2.legend(numpoints=1, shadow=True)
    fig.savefig(out_image_file)
    plt.close(fig)
    print('WROTE: %s'% (out_image_file))
#------------------------------------------------------------
def make_cumulative_plots():
    """Render one cumulative (projected) gainmap PNG per HV setting."""
    print('Making cumulative gainmaps')
    pattern = os.path.join(MONITOR_DIR, '*proj_bad*.fits')
    for proj_file in glob.glob(pattern):
        source = fits.open(proj_file)
        dethv = source[0].header['DETHV']
        segment = source[0].header['SEGMENT']
        projection = source['PROJGAIN']
        # Blow the binned projection back up to full detector size.
        gain_image = enlarge(projection.data,
                             y=1024 // projection.header['NAXIS2'],
                             x=16384 // projection.header['NAXIS1'])
        fig = plt.figure(figsize=(25, 14))
        ax = fig.add_subplot(1, 1, 1)
        cax = ax.imshow(gain_image, aspect='auto')
        plot_flagged(ax, segment, dethv, color='white')
        ax.set_xlim(0, 16384)
        ax.set_ylim(0, 1024)
        ax.grid(False)
        cax.set_clim(0, 16)
        fig.colorbar(cax)
        print(segment, dethv)
        out_name = 'cumulative_gainmap_%s_%s.png' % (segment, dethv)
        fig.savefig(os.path.join(MONITOR_DIR, out_name))
        plt.close(fig)
        source.close()
#------------------------------------------------------------
def plot_flagged(ax, segment, hv, mjd=50000, color='r'):
    """Overlay a box on ``ax`` for every gain-sagged region flagged in
    the most recent gsagtab for this segment/HV combination.

    Args:
        ax: matplotlib axes to draw on
        segment: 'FUVA' or 'FUVB'
        hv: int, high-voltage level; -1 means "no HV" and draws nothing
        mjd: float, only regions flagged on or before this date are drawn
        color: matplotlib color spec for the box outlines

    Raises:
        ValueError: if ``segment`` is not 'FUVA' or 'FUVB'
        IndexError: if no gsagtab extension matches ``segment``/``hv``
    """
    if hv == -1:
        return
    # Consistency fix: build the path from MONITOR_DIR like the rest of
    # this module instead of hard-coding the directory (same value).
    gsagtab_filename = os.path.join(MONITOR_DIR, 'gsag_%s.fits' % (TIMESTAMP))
    if os.path.exists(gsagtab_filename):
        gsagtab = fits.open(gsagtab_filename)
        print("Using {}".format(gsagtab_filename))
    else:
        # Fall back to the newest dated gsagtab available.
        all_gsagtables = glob.glob(os.path.join(MONITOR_DIR, 'gsag_????-??-*.fits'))
        all_gsagtables.sort()
        print(all_gsagtables[-1])
        gsagtab = fits.open(all_gsagtables[-1])
        print("Using {}".format(all_gsagtables[-1]))
    if segment == 'FUVA':
        hv_keyword = 'HVLEVELA'
    elif segment == 'FUVB':
        hv_keyword = 'HVLEVELB'
    else:
        # Fix: an unknown segment previously left hv_keyword unbound and
        # crashed below with a confusing NameError.
        raise ValueError("Unknown segment: {}".format(segment))
    regions = []
    found = False
    # Locate the table extension for this segment/HV combination.
    for ext in gsagtab[1:]:
        if ext.header['SEGMENT'] == segment:
            if ext.header[hv_keyword] == hv:
                regions = ext.data
                found = True
                break
    if not found:
        raise IndexError("Proper GSAG extension not found for {},{}".format(hv, segment))
    for line in regions:
        # Ignore regions flagged after the date of interest.
        if line['Date'] > mjd:
            continue
        lx = line['lx']
        dx = line['dx']
        ly = line['ly']
        dy = line['dy']
        # Trace the closed rectangle outline.
        x_values = [lx, lx + dx, lx + dx, lx, lx]
        y_values = [ly, ly, ly + dy, ly + dy, ly]
        ax.plot(x_values, y_values, color)
#-------------------------------------------------------------------------------
def plotting():
    """Plot counts vs. gain from the monitoring database.

    NOTE(review): this function references ``figure`` and ``charts``,
    but the bokeh imports at the top of this module are commented out,
    so calling it as-is raises NameError.  Re-enable the bokeh imports
    (or port the plotting) before use.
    """
    SETTINGS = open_settings()
    Session, engine = load_connection(SETTINGS['connection_string'])
    connection = engine.connect()
    # Pull every (counts, gain) pair from the gain table.
    results = connection.execute("""SELECT counts,gain FROM gain;""")
    counts = []
    gain = []
    for item in results:
        counts.append(item.counts)
        gain.append(item.gain)
    #-- counts vs gain
    TOOLS = "pan,wheel_zoom,box_zoom,box_select,lasso_select,reset,resize,save"
    p = figure(tools=TOOLS, toolbar_location="above", logo="grey", plot_width=700)
    p.background_fill = "#cccccc"
    p.circle(gain,
             counts,
             size=12,
             line_color="black",
             fill_alpha=0.8)
    p.xaxis.axis_label = "Gain"
    p.yaxis.axis_label = "Counts"
    p.grid.grid_line_color = "white"
    charts.save(obj=p, filename='cci_gain_vs_counts.html')
#-------------------------------------------------------------------------------
def monitor():
    """Main driver for the CCI monitoring program.

    Creates the output directory if needed, regenerates PHA images,
    trend data and gain-sag tables, then renders quicklook plots for
    every gainmap in parallel.
    """
    logger.info("start monitor")
    settings = open_settings()
    out_dir = os.path.join(settings['monitor_location'], 'CCI')
    if not os.path.exists(out_dir):
        logger.warning("Creating output directory: {}".format(out_dir))
        os.makedirs(out_dir)
    #print('Making ALL Gain Maps')
    #make_all_gainmaps()
    make_phaimages(out_dir)
    time_trends(out_dir)
    gsag_main(out_dir)
    #-- quicklooks
    all_gainmaps = glob.glob(os.path.join(out_dir, '*gainmap*.fits'))
    all_gainmaps.sort()
    # Fix: close and join the worker pool so child processes are reaped
    # (previously the pool was left open, leaking processes).
    pool = mp.Pool(processes=10)
    try:
        pool.map(make_quicklooks, all_gainmaps)
    finally:
        pool.close()
        pool.join()
    #--
    ###make_cumulative_plots()
    #message = 'CCI Monitor run for %s complete. \n'% (TIMESTAMP)
    #message += '\n'
    #message += 'Calibration with CalCOS has finished \n '
    #message += 'Check over the gsagtab comparison log and see if we need to deliver this file.\n\n\n'
    #message += 'Sincerely,\n %s'% (__file__)
    #move_to_web()
    #send_email(subject='CCI Monitor complete', message=message)
    logger.info("finish monitor")
#-------------------------------------------------------------------------------
def move_to_web():
    """Copy cumulative gainmap plots to the web-facing directory.

    Copies every ``cumulative_gainmap_*_*.png`` from MONITOR_DIR to
    WEB_DIR, replacing any existing copy, then opens the permissions so
    the web server can read the files.  (Fix: the old docstring claimed
    this moved ``STIM*.p*`` files and chmodded to 777 — it never did.)
    """
    print('Moving plots to web')
    pattern = os.path.join(MONITOR_DIR, 'cumulative_gainmap_*_*.png')
    for item in glob.glob(pattern):
        print('COPYING {} TO {}'.format(item, WEB_DIR))
        destination = os.path.join(WEB_DIR, os.path.basename(item))
        # Remove a stale copy first so the copy never trips over an
        # existing read-only file.
        if os.path.isfile(destination):
            os.remove(destination)
        shutil.copy(item, WEB_DIR)
        os.chmod(destination, 0o766)
#-------------------------------------------------------------------------------
| |
# -*- coding: utf-8 -*-
# Copyright 2017, Digital Reasoning
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import, unicode_literals
import logging
import os
from collections import OrderedDict
import envoy
from guardian.shortcuts import assign_perm
from rest_framework import generics, status
from rest_framework.filters import DjangoFilterBackend, DjangoObjectPermissionsFilter
from rest_framework.response import Response
from rest_framework.reverse import reverse
from rest_framework.serializers import ValidationError
from stackdio.api.environments import filters, mixins, models, serializers, utils
from stackdio.api.formulas.serializers import FormulaVersionSerializer
from stackdio.core.constants import Activity
from stackdio.core.permissions import StackdioModelPermissions, StackdioObjectPermissions
from stackdio.core.renderers import PlainTextRenderer
from stackdio.core.serializers import ObjectPropertiesSerializer
from stackdio.core.viewsets import (
StackdioModelUserPermissionsViewSet,
StackdioModelGroupPermissionsViewSet,
StackdioObjectUserPermissionsViewSet,
StackdioObjectGroupPermissionsViewSet,
)
logger = logging.getLogger(__name__)
class EnvironmentListAPIView(generics.ListCreateAPIView):
    """
    Displays a list of all environments visible to you.
    """
    queryset = models.Environment.objects.all()
    permission_classes = (StackdioModelPermissions,)
    filter_backends = (DjangoObjectPermissionsFilter, DjangoFilterBackend)
    filter_class = filters.EnvironmentFilter
    lookup_field = 'name'

    def get_serializer_class(self):
        # Creation uses the full serializer; everything else uses the
        # lighter read serializer.
        if self.request.method != 'POST':
            return serializers.EnvironmentSerializer
        return serializers.FullEnvironmentSerializer

    def perform_create(self, serializer):
        environment = serializer.save()
        # Grant the creating user every object-level permission on the
        # newly created environment.
        for permission in models.Environment.object_permissions:
            assign_perm('environments.%s_environment' % permission,
                        self.request.user, environment)
class EnvironmentDetailAPIView(generics.RetrieveUpdateDestroyAPIView):
    """Retrieve, update, or delete a single environment."""
    queryset = models.Environment.objects.all()
    serializer_class = serializers.EnvironmentSerializer
    permission_classes = (StackdioObjectPermissions,)
    # Environments are addressed by name in the URL, not by pk.
    lookup_field = 'name'
class EnvironmentPropertiesAPIView(generics.RetrieveUpdateAPIView):
    """Retrieve or update the free-form properties blob of an environment."""
    queryset = models.Environment.objects.all()
    serializer_class = ObjectPropertiesSerializer
    permission_classes = (StackdioObjectPermissions,)
    # Environments are addressed by name in the URL, not by pk.
    lookup_field = 'name'
class EnvironmentHostListAPIView(mixins.EnvironmentRelatedMixin, generics.ListAPIView):
    """List the hosts currently associated with an environment, ordered by id."""
    serializer_class = serializers.EnvironmentHostSerializer

    def get_queryset(self):
        hosts = self.get_environment().get_current_hosts()
        return sorted(hosts, key=lambda host: host['id'])
class EnvironmentLabelListAPIView(mixins.EnvironmentRelatedMixin, generics.ListCreateAPIView):
    """List and create labels attached to an environment."""
    serializer_class = serializers.EnvironmentLabelSerializer

    def get_queryset(self):
        return self.get_environment().labels.all()

    def get_serializer_context(self):
        # The serializer needs the owning environment to build label URLs
        # and to validate uniqueness against the right object.
        base_context = super(EnvironmentLabelListAPIView, self).get_serializer_context()
        base_context['content_object'] = self.get_environment()
        return base_context

    def perform_create(self, serializer):
        serializer.save(content_object=self.get_environment())
class EnvironmentLabelDetailAPIView(mixins.EnvironmentRelatedMixin,
                                    generics.RetrieveUpdateDestroyAPIView):
    """Retrieve, update, or delete one label on an environment, by key."""
    serializer_class = serializers.EnvironmentLabelSerializer
    # Labels are looked up by their key, captured as `label_name` in the URL.
    lookup_field = 'key'
    lookup_url_kwarg = 'label_name'

    def get_queryset(self):
        return self.get_environment().labels.all()

    def get_serializer_context(self):
        # Provide the owning environment so the serializer can resolve URLs
        # and validate against the correct object.
        base_context = super(EnvironmentLabelDetailAPIView, self).get_serializer_context()
        base_context['content_object'] = self.get_environment()
        return base_context
class EnvironmentComponentListAPIView(mixins.EnvironmentRelatedMixin, generics.ListAPIView):
    """List the orchestration components belonging to an environment."""
    serializer_class = serializers.EnvironmentComponentSerializer

    def get_queryset(self):
        return self.get_environment().get_components()
class EnvironmentActionAPIView(mixins.EnvironmentRelatedMixin, generics.GenericAPIView):
    """Expose RPC-style actions that can be run against an environment."""
    serializer_class = serializers.EnvironmentActionSerializer

    def get(self, request, *args, **kwargs):
        """Return the actions currently available, filtered by permission."""
        environment = self.get_environment()
        # Actions allowed in the environment's current activity state...
        actions = Activity.env_action_map.get(environment.activity, [])
        # ...narrowed down to those the requesting user may perform.
        permitted = utils.filter_actions(request.user, environment, actions)
        return Response({
            'available_actions': sorted(permitted),
        })

    def post(self, request, *args, **kwargs):
        """
        POST request allows RPC-like actions to be called to interact
        with the environment. Request contains JSON with an `action` parameter
        and optional `args` depending on the action being executed.
        """
        serializer = self.get_serializer(self.get_environment(), data=request.data)
        serializer.is_valid(raise_exception=True)
        serializer.save()
        return Response(serializer.data)
class EnvironmentFormulaVersionsAPIView(mixins.EnvironmentRelatedMixin, generics.ListCreateAPIView):
    """List and pin formula versions used by an environment."""
    serializer_class = FormulaVersionSerializer

    def get_queryset(self):
        return self.get_environment().formula_versions.all()

    def perform_create(self, serializer):
        serializer.save(content_object=self.get_environment())
class EnvironmentLogsAPIView(mixins.EnvironmentRelatedMixin, generics.GenericAPIView):
    """Return URLs for an environment's latest and historical log files."""

    # Known log categories; the '-error' variants map onto '.err' files.
    log_types = (
        'provisioning',
        'provisioning-error',
        'orchestration',
        'orchestration-error',
    )

    def get(self, request, *args, **kwargs):
        environment = self.get_environment()
        root_dir = environment.get_root_directory()
        log_dir = environment.get_log_directory()

        # '.latest' pointers live in the environment's root directory.
        latest = OrderedDict()
        for log_type in self.log_types:
            base, _, remainder = log_type.partition('-')
            if remainder == 'error':
                filename = '%s.err.latest' % base
            else:
                filename = '%s.log.latest' % log_type
            if os.path.isfile(os.path.join(root_dir, filename)):
                latest[log_type] = reverse(
                    'api:environments:environment-logs-detail',
                    kwargs={'parent_name': environment.name, 'log': filename},
                    request=request,
                )

        # Archived logs live in the dedicated log directory.
        historical = []
        for log in sorted(os.listdir(log_dir)):
            historical.append(
                reverse('api:environments:environment-logs-detail',
                        kwargs={'parent_name': environment.name, 'log': log},
                        request=request)
            )

        return Response(OrderedDict((
            ('latest', latest),
            ('historical', historical),
        )))
class EnvironmentLogsDetailAPIView(mixins.EnvironmentRelatedMixin, generics.GenericAPIView):
    """Return the raw text content of a single environment log file.

    Supports optional `?tail=N` or `?head=N` query parameters (mutually
    exclusive) to limit the returned lines.
    """
    renderer_classes = (PlainTextRenderer,)

    # TODO: Code complexity ignored for now
    def get(self, request, *args, **kwargs):  # NOQA
        environment = self.get_environment()
        log_file = self.kwargs.get('log', '')

        # Non-numeric tail/head values are treated like absent ones:
        # None and 0 are both falsy, so the full file is returned.
        try:
            tail = int(request.query_params.get('tail', 0))
        except ValueError:
            tail = None

        try:
            head = int(request.query_params.get('head', 0))
        except ValueError:
            head = None

        if head and tail:
            return Response('Both head and tail may not be used.',
                            status=status.HTTP_400_BAD_REQUEST)

        # '.latest' pointers live in the root directory; archived
        # '.log'/'.err' files live in the log directory. Any other
        # extension is rejected below.
        if log_file.endswith('.latest'):
            log = os.path.join(environment.get_root_directory(), log_file)
        elif log_file.endswith('.log') or log_file.endswith('.err'):
            log = os.path.join(environment.get_log_directory(), log_file)
        else:
            log = None

        if not log or not os.path.isfile(log):
            raise ValidationError({
                'log_file': ['Log file does not exist: {0}.'.format(log_file)]
            })

        # NOTE(review): the path is interpolated into a shell command run
        # via envoy. The filename comes from the URL kwarg; confirm the
        # route's regex forbids whitespace/shell metacharacters before
        # trusting this.
        if tail:
            ret = envoy.run('tail -{0} {1}'.format(tail, log)).std_out
        elif head:
            ret = envoy.run('head -{0} {1}'.format(head, log)).std_out
        else:
            with open(log, 'r') as f:
                ret = f.read()

        return Response(ret)
class EnvironmentModelUserPermissionsViewSet(StackdioModelUserPermissionsViewSet):
    """Manage per-user model-level permissions for environments."""
    model_cls = models.Environment
class EnvironmentModelGroupPermissionsViewSet(StackdioModelGroupPermissionsViewSet):
    """Manage per-group model-level permissions for environments."""
    model_cls = models.Environment
class EnvironmentObjectUserPermissionsViewSet(mixins.EnvironmentPermissionsMixin,
                                              StackdioObjectUserPermissionsViewSet):
    """Manage per-user object-level permissions on a single environment."""
    pass
class EnvironmentObjectGroupPermissionsViewSet(mixins.EnvironmentPermissionsMixin,
                                               StackdioObjectGroupPermissionsViewSet):
    """Manage per-group object-level permissions on a single environment."""
    pass
| |
# encoding: utf-8
import os
from copy import copy
import httplib2
import oauth2client
import inspector
from . import keyring
from .. import utils
def from_params(**params):
    """Pick out only credential-related keys from arbitrary keyword args."""
    allowed = ('client_id', 'client_secret', 'client_email', 'private_key',
               'access_token', 'refresh_token', 'identity')
    return {key: value for key, value in params.items() if key in allowed}
def from_keyring(identity=None, **params):
    """Load stored credentials for `identity` from the keyring, if given."""
    return keyring.get(identity) if identity else None
def from_environment(prefix=None, suffix=None, **params):
    """Read credentials from GOOGLE_ANALYTICS_* environment variables.

    `prefix`/`suffix` let several credential sets coexist in the
    environment (e.g. GOOGLE_ANALYTICS_CLIENT_ID_STAGING).
    """
    variable_names = {
        'client_id': utils.affix(prefix, 'GOOGLE_ANALYTICS_CLIENT_ID', suffix),
        'client_secret': utils.affix(prefix, 'GOOGLE_ANALYTICS_CLIENT_SECRET', suffix),
        'refresh_token': utils.affix(prefix, 'GOOGLE_ANALYTICS_REFRESH_TOKEN', suffix),
    }
    # Only include variables that are actually set (and non-empty).
    return {credential: os.environ[name]
            for credential, name in variable_names.items()
            if os.environ.get(name)}
def from_prompt(**params):
    """Interactively ask for any credential fields not already supplied."""
    questions = (
        ('identity', 'Human-readable account name: '),
        ('client_id', 'Client ID: '),
        ('client_secret', 'Client secret: '),
    )
    prompted = {}
    for field, prompt in questions:
        if not params.get(field):
            prompted[field] = utils.input(prompt)
    return prompted
class Credentials(object):
    """Holds Google Analytics OAuth credentials and knows how to find them
    via a sequence of strategies (explicit params, keyring, environment
    variables, interactive prompt)."""

    # Strategy name -> callable returning a (possibly partial) dict of
    # credential fields.
    STRATEGIES = {
        'params': from_params,
        'keyring': from_keyring,
        'environment': from_environment,
        'prompt': from_prompt,
    }
    # 'prompt' is only attempted when a human is available to answer.
    INTERACTIVE_STRATEGIES = ['params', 'keyring', 'environment', 'prompt']
    UNSUPERVISED_STRATEGIES = ['params', 'keyring', 'environment']

    @classmethod
    def find(cls, interactive=False, valid=False, complete=False, **params):
        """Try each strategy in order until the credentials are complete.

        Raises KeyError when `complete` (or `valid`) is requested but the
        strategies could not satisfy it; otherwise returns a Credentials
        instance, possibly partial.
        """
        if interactive:
            strategies = copy(cls.INTERACTIVE_STRATEGIES)
        else:
            strategies = copy(cls.UNSUPERVISED_STRATEGIES)

        attempted = ", ".join(strategies)
        credentials = cls()
        while credentials.incomplete and len(strategies):
            strategy = strategies.pop(0)
            properties = cls.STRATEGIES[strategy](**params) or {}
            for key, value in properties.items():
                # First strategy to supply a field wins.
                if not getattr(credentials, key):
                    setattr(credentials, key, value)
                # Feed discovered values forward into later strategies.
                if not params.get(key):
                    params[key] = value

        # the environment variable suffix is often a good
        # descriptor of the nature of these credentials,
        # when lacking anything better
        if params.get('identity'):
            credentials.identity = params['identity']
        elif params.get('suffix') and credentials.identity is credentials.client_id:
            # `is` (not ==) detects that identity is still the client_id
            # fallback object from the `identity` property.
            credentials.identity = params.get('suffix')

        if complete and credentials.incomplete:
            raise KeyError("Could not find client credentials and token. Tried {attempted}.".format(
                attempted=attempted))
        elif valid and credentials.invalid:
            raise KeyError("Could not find client id and client secret. Tried {attempted}.".format(
                attempted=attempted))
        else:
            return credentials

    def __init__(self, client_id=None, client_secret=None,
                 client_email=None, private_key=None,
                 access_token=None, refresh_token=None,
                 identity=None):
        self.client_id = client_id
        self.client_secret = client_secret
        # client_email + private_key: two-legged (service-account) flow.
        self.client_email = client_email
        self.private_key = private_key
        # access_token / refresh_token: three-legged (user-consent) flow.
        self.access_token = access_token
        self.refresh_token = refresh_token
        self._identity = identity

    @property
    def token(self):
        # Prefer the refresh token: it outlives any single access token.
        return self.refresh_token or self.access_token

    @property
    def identity(self):
        # Fall back to the client id when no explicit identity was given.
        return self._identity or self.client_id

    @identity.setter
    def identity(self, value):
        self._identity = value

    @property
    def type(self):
        """2 for two-legged (service account), 3 for three-legged OAuth,
        None when neither credential pair is present."""
        if self.client_email and self.private_key:
            return 2
        elif self.client_id and self.client_secret:
            return 3
        else:
            return None

    @property
    def valid(self):
        """ Valid credentials are not necessarily correct, but
        they contain all necessary information for an
        authentication attempt. """
        two_legged = self.client_email and self.private_key
        three_legged = self.client_id and self.client_secret
        return two_legged or three_legged or False

    @property
    def invalid(self):
        return not self.valid

    @property
    def complete(self):
        """ Complete credentials are valid and are either two-legged or include a token. """
        return self.valid and (self.access_token or self.refresh_token or self.type == 2)

    @property
    def incomplete(self):
        return not self.complete

    @property
    def oauth(self):
        """Build the matching oauth2client credentials object, or None.

        NOTE(review): SignedJwtAssertionCredentials was removed in
        oauth2client >= 2.0 — confirm the pinned oauth2client version.
        """
        if self.incomplete:
            return None
        else:
            if self.type == 2:
                return oauth2client.client.SignedJwtAssertionCredentials(
                    service_account_name=self.client_email,
                    private_key=self.private_key.encode('utf-8'),
                    scope='https://www.googleapis.com/auth/analytics.readonly',
                )
            else:
                return oauth2client.client.OAuth2Credentials(
                    access_token=self.access_token,
                    client_id=self.client_id,
                    client_secret=self.client_secret,
                    refresh_token=self.refresh_token,
                    token_expiry=None,
                    token_uri=oauth2client.GOOGLE_TOKEN_URI,
                    user_agent=None,
                    revoke_uri=oauth2client.GOOGLE_REVOKE_URI,
                    id_token=None,
                    token_response=None
                )

    def serialize(self):
        """Return a plain dict of all credential fields (e.g. for storage)."""
        return {
            'identity': self.identity,
            'client_id': self.client_id,
            'client_secret': self.client_secret,
            'client_email': self.client_email,
            'private_key': self.private_key,
            'access_token': self.access_token,
            'refresh_token': self.refresh_token,
        }

    def authorize(self):
        """Return an httplib2.Http instance authorized with these credentials."""
        return self.oauth.authorize(httplib2.Http())

    def revoke(self):
        """Revoke the stored token. Raises KeyError when no token exists."""
        if not self.token:
            raise KeyError("Cannot revoke a token when no token was provided.")

        # `credentials.revoke` will try to revoke the refresh token even
        # if it's None, which will fail, so we have to miss with the innards
        # of oauth2client here a little bit
        return self.oauth._do_revoke(httplib2.Http().request, self.token)
def normalize(fn):
    """Decorator: let `fn` be called either with a ready-made Credentials
    object or with raw credential fields; `fn` always receives Credentials.

    Bug fix: the original forwarded access_token/refresh_token/identity
    POSITIONALLY into Credentials(client_id, client_secret, client_email,
    private_key, access_token, refresh_token, identity), which silently put
    access_token into client_email, refresh_token into private_key, and
    identity into access_token. Passing by keyword restores the intended
    mapping without changing the wrapper's signature.
    """
    @inspector.changes(fn)
    def normalized_fn(client_id=None, client_secret=None,
                      access_token=None, refresh_token=None, identity=None):
        if isinstance(client_id, Credentials):
            # Already normalized — pass straight through.
            credentials = client_id
        else:
            credentials = Credentials(
                client_id=client_id,
                client_secret=client_secret,
                access_token=access_token,
                refresh_token=refresh_token,
                identity=identity,
            )
        return fn(credentials)
    return normalized_fn
| |
#!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implementations of various third-party authentication schemes.
All the classes in this file are class Mixins designed to be used with
web.py RequestHandler classes. The primary methods for each service are
authenticate_redirect(), authorize_redirect(), and get_authenticated_user().
The former should be called to redirect the user to, e.g., the OpenID
authentication page on the third party service, and the latter should
be called upon return to get the user data from the data returned by
the third party service.
They all take slightly different arguments due to the fact all these
services implement authentication and authorization slightly differently.
See the individual service classes below for complete documentation.
Example usage for Google OpenID:
class GoogleHandler(tornado.web.RequestHandler, tornado.auth.GoogleMixin):
@tornado.web.asynchronous
def get(self):
if self.get_argument("openid.mode", None):
self.get_authenticated_user(self.async_callback(self._on_auth))
return
self.authenticate_redirect()
def _on_auth(self, user):
if not user:
raise tornado.web.HTTPError(500, "Google auth failed")
# Save the user with, e.g., set_secure_cookie()
"""
import base64
import binascii
import cgi
import hashlib
import hmac
import httpclient
import escape
import logging
import time
import urllib
import urlparse
import uuid
_log = logging.getLogger("tornado.auth")
class OpenIdMixin(object):
    """Abstract implementation of OpenID and Attribute Exchange.

    See GoogleMixin below for example implementations.
    """
    def authenticate_redirect(self, callback_uri=None,
                              ax_attrs=["name","email","language","username"]):
        """Returns the authentication URL for this service.

        After authentication, the service will redirect back to the given
        callback URI.

        We request the given attributes for the authenticated user by
        default (name, email, language, and username). If you don't need
        all those attributes for your app, you can request fewer with
        the ax_attrs keyword argument.
        """
        # NOTE(review): the mutable default list is read-only here, so it
        # is harmless, but a tuple would be more defensive.
        callback_uri = callback_uri or self.request.path
        args = self._openid_args(callback_uri, ax_attrs=ax_attrs)
        self.redirect(self._OPENID_ENDPOINT + "?" + urllib.urlencode(args))

    def get_authenticated_user(self, callback):
        """Fetches the authenticated user data upon redirect.

        This method should be called by the handler that receives the
        redirect from the authenticate_redirect() or authorize_redirect()
        methods.
        """
        # Verify the OpenID response via direct request to the OP
        args = dict((k, v[-1]) for k, v in self.request.arguments.iteritems())
        args["openid.mode"] = u"check_authentication"
        url = self._OPENID_ENDPOINT + "?" + urllib.urlencode(args)
        http = httpclient.AsyncHTTPClient()
        http.fetch(url, self.async_callback(
            self._on_authentication_verified, callback))

    def _openid_args(self, callback_uri, ax_attrs=[], oauth_scope=None):
        # Build the OpenID 2.0 checkid_setup request arguments, optionally
        # including Attribute Exchange (AX) and OAuth-hybrid extensions.
        url = urlparse.urljoin(self.request.full_url(), callback_uri)
        args = {
            "openid.ns": "http://specs.openid.net/auth/2.0",
            "openid.claimed_id":
                "http://specs.openid.net/auth/2.0/identifier_select",
            "openid.identity":
                "http://specs.openid.net/auth/2.0/identifier_select",
            "openid.return_to": url,
            "openid.realm": "http://" + self.request.host + "/",
            "openid.mode": "checkid_setup",
        }
        if ax_attrs:
            args.update({
                "openid.ns.ax": "http://openid.net/srv/ax/1.0",
                "openid.ax.mode": "fetch_request",
            })
            ax_attrs = set(ax_attrs)
            required = []
            # "name" expands into the three standard AX name components.
            if "name" in ax_attrs:
                ax_attrs -= set(["name", "firstname", "fullname", "lastname"])
                required += ["firstname", "fullname", "lastname"]
                args.update({
                    "openid.ax.type.firstname":
                        "http://axschema.org/namePerson/first",
                    "openid.ax.type.fullname":
                        "http://axschema.org/namePerson",
                    "openid.ax.type.lastname":
                        "http://axschema.org/namePerson/last",
                })
            known_attrs = {
                "email": "http://axschema.org/contact/email",
                "language": "http://axschema.org/pref/language",
                "username": "http://axschema.org/namePerson/friendly",
            }
            for name in ax_attrs:
                args["openid.ax.type." + name] = known_attrs[name]
                required.append(name)
            args["openid.ax.required"] = ",".join(required)
        if oauth_scope:
            args.update({
                "openid.ns.oauth":
                    "http://specs.openid.net/extensions/oauth/1.0",
                "openid.oauth.consumer": self.request.host.split(":")[0],
                "openid.oauth.scope": oauth_scope,
            })
        return args

    def _on_authentication_verified(self, callback, response):
        # The OP must answer "is_valid:true" for the login to count.
        if response.error or u"is_valid:true" not in response.body:
            _log.warning("Invalid OpenID response: %s", response.error or
                         response.body)
            callback(None)
            return

        # Make sure we got back at least an email from attribute exchange
        ax_ns = None
        for name, values in self.request.arguments.iteritems():
            if name.startswith("openid.ns.") and \
               values[-1] == u"http://openid.net/srv/ax/1.0":
                ax_ns = name[10:]
                break

        def get_ax_arg(uri):
            # Map an AX type URI to its alias-specific value argument;
            # returns u"" when AX is absent or the attribute wasn't sent.
            if not ax_ns: return u""
            prefix = "openid." + ax_ns + ".type."
            ax_name = None
            for name, values in self.request.arguments.iteritems():
                if values[-1] == uri and name.startswith(prefix):
                    part = name[len(prefix):]
                    ax_name = "openid." + ax_ns + ".value." + part
                    break
            if not ax_name: return u""
            return self.get_argument(ax_name, u"")

        email = get_ax_arg("http://axschema.org/contact/email")
        name = get_ax_arg("http://axschema.org/namePerson")
        first_name = get_ax_arg("http://axschema.org/namePerson/first")
        last_name = get_ax_arg("http://axschema.org/namePerson/last")
        username = get_ax_arg("http://axschema.org/namePerson/friendly")
        locale = get_ax_arg("http://axschema.org/pref/language").lower()
        user = dict()
        name_parts = []
        if first_name:
            user["first_name"] = first_name
            name_parts.append(first_name)
        if last_name:
            user["last_name"] = last_name
            name_parts.append(last_name)
        if name:
            user["name"] = name
        elif name_parts:
            user["name"] = u" ".join(name_parts)
        elif email:
            # Last resort: use the local part of the email address.
            user["name"] = email.split("@")[0]
        if email: user["email"] = email
        if locale: user["locale"] = locale
        if username: user["username"] = username
        callback(user)
class OAuthMixin(object):
    """Abstract implementation of OAuth.

    See TwitterMixin and FriendFeedMixin below for example implementations.
    """
    def authorize_redirect(self, callback_uri=None):
        """Redirects the user to obtain OAuth authorization for this service.

        Twitter and FriendFeed both require that you register a Callback
        URL with your application. You should call this method to log the
        user in, and then call get_authenticated_user() in the handler
        you registered as your Callback URL to complete the authorization
        process.

        This method sets a cookie called _oauth_request_token which is
        subsequently used (and cleared) in get_authenticated_user for
        security purposes.
        """
        if callback_uri and getattr(self, "_OAUTH_NO_CALLBACKS", False):
            raise Exception("This service does not support oauth_callback")
        http = httpclient.AsyncHTTPClient()
        http.fetch(self._oauth_request_token_url(), self.async_callback(
            self._on_request_token, self._OAUTH_AUTHORIZE_URL, callback_uri))

    def get_authenticated_user(self, callback):
        """Gets the OAuth authorized user and access token on callback.

        This method should be called from the handler for your registered
        OAuth Callback URL to complete the registration process. We call
        callback with the authenticated user, which in addition to standard
        attributes like 'name' includes the 'access_key' attribute, which
        contains the OAuth access you can use to make authorized requests
        to this service on behalf of the user.
        """
        request_key = self.get_argument("oauth_token")
        request_cookie = self.get_cookie("_oauth_request_token")
        if not request_cookie:
            _log.warning("Missing OAuth request token cookie")
            callback(None)
            return
        # The cookie stores "key|secret"; the key must match the token the
        # service echoed back in the redirect.
        cookie_key, cookie_secret = request_cookie.split("|")
        if cookie_key != request_key:
            _log.warning("Request token does not match cookie")
            callback(None)
            return
        token = dict(key=cookie_key, secret=cookie_secret)
        http = httpclient.AsyncHTTPClient()
        http.fetch(self._oauth_access_token_url(token), self.async_callback(
            self._on_access_token, callback))

    def _oauth_request_token_url(self):
        # Build an HMAC-SHA1-signed URL for the OAuth 1.0 request-token step.
        consumer_token = self._oauth_consumer_token()
        url = self._OAUTH_REQUEST_TOKEN_URL
        args = dict(
            oauth_consumer_key=consumer_token["key"],
            oauth_signature_method="HMAC-SHA1",
            oauth_timestamp=str(int(time.time())),
            oauth_nonce=binascii.b2a_hex(uuid.uuid4().bytes),
            oauth_version="1.0",
        )
        signature = _oauth_signature(consumer_token, "GET", url, args)
        args["oauth_signature"] = signature
        return url + "?" + urllib.urlencode(args)

    def _on_request_token(self, authorize_url, callback_uri, response):
        if response.error:
            raise Exception("Could not get request token")
        request_token = _oauth_parse_response(response.body)
        # Persist key|secret in a cookie for the round trip back to us.
        data = "|".join([request_token["key"], request_token["secret"]])
        self.set_cookie("_oauth_request_token", data)
        args = dict(oauth_token=request_token["key"])
        if callback_uri:
            args["oauth_callback"] = urlparse.urljoin(
                self.request.full_url(), callback_uri)
        self.redirect(authorize_url + "?" + urllib.urlencode(args))

    def _oauth_access_token_url(self, request_token):
        # Build a signed URL for exchanging the request token for an
        # access token.
        consumer_token = self._oauth_consumer_token()
        url = self._OAUTH_ACCESS_TOKEN_URL
        args = dict(
            oauth_consumer_key=consumer_token["key"],
            oauth_token=request_token["key"],
            oauth_signature_method="HMAC-SHA1",
            oauth_timestamp=str(int(time.time())),
            oauth_nonce=binascii.b2a_hex(uuid.uuid4().bytes),
            oauth_version="1.0",
        )
        signature = _oauth_signature(consumer_token, "GET", url, args,
                                     request_token)
        args["oauth_signature"] = signature
        return url + "?" + urllib.urlencode(args)

    def _on_access_token(self, callback, response):
        if response.error:
            _log.warning("Could not fetch access token")
            callback(None)
            return
        access_token = _oauth_parse_response(response.body)
        # NOTE(review): the bound name `user` is never used — subclasses
        # deliver the user via the callback chain instead.
        user = self._oauth_get_user(access_token, self.async_callback(
            self._on_oauth_get_user, access_token, callback))

    def _oauth_get_user(self, access_token, callback):
        # Subclasses fetch the service-specific user record here.
        raise NotImplementedError()

    def _on_oauth_get_user(self, access_token, callback, user):
        if not user:
            callback(None)
            return
        # Attach the token so callers can make authorized requests later.
        user["access_token"] = access_token
        callback(user)

    def _oauth_request_parameters(self, url, access_token, parameters={},
                                  method="GET"):
        """Returns the OAuth parameters as a dict for the given request.

        parameters should include all POST arguments and query string arguments
        that will be sent with the request.
        """
        # NOTE(review): the mutable default dict `parameters` is only read,
        # never mutated, so it is safe as written.
        consumer_token = self._oauth_consumer_token()
        base_args = dict(
            oauth_consumer_key=consumer_token["key"],
            oauth_token=access_token["key"],
            oauth_signature_method="HMAC-SHA1",
            oauth_timestamp=str(int(time.time())),
            oauth_nonce=binascii.b2a_hex(uuid.uuid4().bytes),
            oauth_version="1.0",
        )
        # The signature covers the oauth_* args plus the request's own
        # parameters, but only the oauth_* args are returned to the caller.
        args = {}
        args.update(base_args)
        args.update(parameters)
        signature = _oauth_signature(consumer_token, method, url, args,
                                     access_token)
        base_args["oauth_signature"] = signature
        return base_args
class TwitterMixin(OAuthMixin):
    """Twitter OAuth authentication.

    To authenticate with Twitter, register your application with
    Twitter at http://twitter.com/apps. Then copy your Consumer Key and
    Consumer Secret to the application settings 'twitter_consumer_key' and
    'twitter_consumer_secret'. Use this Mixin on the handler for the URL
    you registered as your application's Callback URL.

    When your application is set up, you can use this Mixin like this
    to authenticate the user with Twitter and get access to their stream:

        class TwitterHandler(tornado.web.RequestHandler,
                             tornado.auth.TwitterMixin):
            @tornado.web.asynchronous
            def get(self):
                if self.get_argument("oauth_token", None):
                    self.get_authenticated_user(self.async_callback(self._on_auth))
                    return
                self.authorize_redirect()

            def _on_auth(self, user):
                if not user:
                    raise tornado.web.HTTPError(500, "Twitter auth failed")
                # Save the user using, e.g., set_secure_cookie()

    The user object returned by get_authenticated_user() includes the
    attributes 'username', 'name', and all of the custom Twitter user
    attributes describe at
    http://apiwiki.twitter.com/Twitter-REST-API-Method%3A-users%C2%A0show
    in addition to 'access_token'. You should save the access token with
    the user; it is required to make requests on behalf of the user later
    with twitter_request().
    """
    _OAUTH_REQUEST_TOKEN_URL = "http://twitter.com/oauth/request_token"
    _OAUTH_ACCESS_TOKEN_URL = "http://twitter.com/oauth/access_token"
    _OAUTH_AUTHORIZE_URL = "http://twitter.com/oauth/authorize"
    _OAUTH_AUTHENTICATE_URL = "http://twitter.com/oauth/authenticate"
    # Twitter requires the callback URL to be pre-registered.
    _OAUTH_NO_CALLBACKS = True

    def authenticate_redirect(self):
        """Just like authorize_redirect(), but auto-redirects if authorized.

        This is generally the right interface to use if you are using
        Twitter for single-sign on.
        """
        http = httpclient.AsyncHTTPClient()
        http.fetch(self._oauth_request_token_url(), self.async_callback(
            self._on_request_token, self._OAUTH_AUTHENTICATE_URL, None))

    def twitter_request(self, path, callback, access_token=None,
                        post_args=None, **args):
        """Fetches the given API path, e.g., "/statuses/user_timeline/btaylor"

        The path should not include the format (we automatically append
        ".json" and parse the JSON output).

        If the request is a POST, post_args should be provided. Query
        string arguments should be given as keyword arguments.

        All the Twitter methods are documented at
        http://apiwiki.twitter.com/Twitter-API-Documentation.

        Many methods require an OAuth access token which you can obtain
        through authorize_redirect() and get_authenticated_user(). The
        user returned through that process includes an 'access_token'
        attribute that can be used to make authenticated requests via
        this method. Example usage:

            class MainHandler(tornado.web.RequestHandler,
                              tornado.auth.TwitterMixin):
                @tornado.web.authenticated
                @tornado.web.asynchronous
                def get(self):
                    self.twitter_request(
                        "/statuses/update",
                        post_args={"status": "Testing Tornado Web Server"},
                        access_token=user["access_token"],
                        callback=self.async_callback(self._on_post))

                def _on_post(self, new_entry):
                    if not new_entry:
                        # Call failed; perhaps missing permission?
                        self.authorize_redirect()
                        return
                    self.finish("Posted a message!")
        """
        # Add the OAuth resource request signature if we have credentials
        url = "http://twitter.com" + path + ".json"
        if access_token:
            all_args = {}
            all_args.update(args)
            all_args.update(post_args or {})
            consumer_token = self._oauth_consumer_token()
            # Presence of post_args (even empty) means a POST request.
            method = "POST" if post_args is not None else "GET"
            oauth = self._oauth_request_parameters(
                url, access_token, all_args, method=method)
            args.update(oauth)
        if args: url += "?" + urllib.urlencode(args)
        callback = self.async_callback(self._on_twitter_request, callback)
        http = httpclient.AsyncHTTPClient()
        if post_args is not None:
            http.fetch(url, method="POST", body=urllib.urlencode(post_args),
                       callback=callback)
        else:
            http.fetch(url, callback=callback)

    def _on_twitter_request(self, callback, response):
        if response.error:
            _log.warning("Error response %s fetching %s", response.error,
                         response.request.url)
            callback(None)
            return
        callback(escape.json_decode(response.body))

    def _oauth_consumer_token(self):
        # Consumer key/secret come from the application settings.
        self.require_setting("twitter_consumer_key", "Twitter OAuth")
        self.require_setting("twitter_consumer_secret", "Twitter OAuth")
        return dict(
            key=self.settings["twitter_consumer_key"],
            secret=self.settings["twitter_consumer_secret"])

    def _oauth_get_user(self, access_token, callback):
        # Fetch the authenticated user's own profile record.
        callback = self.async_callback(self._parse_user_response, callback)
        self.twitter_request(
            "/users/show/" + access_token["screen_name"],
            access_token=access_token, callback=callback)

    def _parse_user_response(self, callback, user):
        # Normalize Twitter's 'screen_name' to the generic 'username' key.
        if user:
            user["username"] = user["screen_name"]
        callback(user)
class FriendFeedMixin(OAuthMixin):
"""FriendFeed OAuth authentication.
To authenticate with FriendFeed, register your application with
FriendFeed at http://friendfeed.com/api/applications. Then
copy your Consumer Key and Consumer Secret to the application settings
'friendfeed_consumer_key' and 'friendfeed_consumer_secret'. Use
this Mixin on the handler for the URL you registered as your
application's Callback URL.
When your application is set up, you can use this Mixin like this
to authenticate the user with FriendFeed and get access to their feed:
class FriendFeedHandler(tornado.web.RequestHandler,
tornado.auth.FriendFeedMixin):
@tornado.web.asynchronous
def get(self):
if self.get_argument("oauth_token", None):
self.get_authenticated_user(self.async_callback(self._on_auth))
return
self.authorize_redirect()
def _on_auth(self, user):
if not user:
raise tornado.web.HTTPError(500, "FriendFeed auth failed")
# Save the user using, e.g., set_secure_cookie()
The user object returned by get_authenticated_user() includes the
attributes 'username', 'name', and 'description' in addition to
'access_token'. You should save the access token with the user;
it is required to make requests on behalf of the user later with
friendfeed_request().
"""
_OAUTH_REQUEST_TOKEN_URL = "https://friendfeed.com/account/oauth/request_token"
_OAUTH_ACCESS_TOKEN_URL = "https://friendfeed.com/account/oauth/access_token"
_OAUTH_AUTHORIZE_URL = "https://friendfeed.com/account/oauth/authorize"
_OAUTH_NO_CALLBACKS = True
def friendfeed_request(self, path, callback, access_token=None,
post_args=None, **args):
"""Fetches the given relative API path, e.g., "/bret/friends"
If the request is a POST, post_args should be provided. Query
string arguments should be given as keyword arguments.
All the FriendFeed methods are documented at
http://friendfeed.com/api/documentation.
Many methods require an OAuth access token which you can obtain
through authorize_redirect() and get_authenticated_user(). The
user returned through that process includes an 'access_token'
attribute that can be used to make authenticated requests via
this method. Example usage:
class MainHandler(tornado.web.RequestHandler,
tornado.auth.FriendFeedMixin):
@tornado.web.authenticated
@tornado.web.asynchronous
def get(self):
self.friendfeed_request(
"/entry",
post_args={"body": "Testing Tornado Web Server"},
access_token=self.current_user["access_token"],
callback=self.async_callback(self._on_post))
def _on_post(self, new_entry):
if not new_entry:
# Call failed; perhaps missing permission?
self.authorize_redirect()
return
self.finish("Posted a message!")
"""
# Add the OAuth resource request signature if we have credentials
url = "http://friendfeed-api.com/v2" + path
if access_token:
all_args = {}
all_args.update(args)
all_args.update(post_args or {})
consumer_token = self._oauth_consumer_token()
method = "POST" if post_args is not None else "GET"
oauth = self._oauth_request_parameters(
url, access_token, all_args, method=method)
args.update(oauth)
if args: url += "?" + urllib.urlencode(args)
callback = self.async_callback(self._on_friendfeed_request, callback)
http = httpclient.AsyncHTTPClient()
if post_args is not None:
http.fetch(url, method="POST", body=urllib.urlencode(post_args),
callback=callback)
else:
http.fetch(url, callback=callback)
def _on_friendfeed_request(self, callback, response):
if response.error:
_log.warning("Error response %s fetching %s", response.error,
response.request.url)
callback(None)
return
callback(escape.json_decode(response.body))
def _oauth_consumer_token(self):
    """Return the FriendFeed OAuth consumer credentials from app settings.

    require_setting() is invoked first for each credential so a missing
    configuration is reported with a FriendFeed-specific message.
    """
    for name in ("friendfeed_consumer_key", "friendfeed_consumer_secret"):
        self.require_setting(name, "FriendFeed OAuth")
    return dict(
        key=self.settings["friendfeed_consumer_key"],
        secret=self.settings["friendfeed_consumer_secret"])
def _oauth_get_user(self, access_token, callback):
    """Fetch the authenticated user's feed info and pass the normalized
    user dict to ``callback`` (via _parse_user_response)."""
    self.friendfeed_request(
        "/feedinfo/" + access_token["username"],
        include="id,name,description", access_token=access_token,
        callback=self.async_callback(self._parse_user_response, callback))
def _parse_user_response(self, callback, user):
    """Mirror the FriendFeed "id" field as "username" and forward the
    (possibly None) user dict to ``callback``."""
    if not user:
        callback(user)
        return
    user["username"] = user["id"]
    callback(user)
class GoogleMixin(OpenIdMixin, OAuthMixin):
    """Google Open ID / OAuth authentication.

    No application registration is necessary to use Google for
    authentication or to access Google resources on behalf of a user. To
    authenticate with Google, redirect with authenticate_redirect(). On
    return, parse the response with get_authenticated_user(). We send a
    dict containing the values for the user, including 'email', 'name',
    and 'locale'. Example usage:

        class GoogleHandler(tornado.web.RequestHandler,
                            tornado.auth.GoogleMixin):
            @tornado.web.asynchronous
            def get(self):
                if self.get_argument("openid.mode", None):
                    self.get_authenticated_user(
                        self.async_callback(self._on_auth))
                    return
                self.authenticate_redirect()

            def _on_auth(self, user):
                if not user:
                    raise tornado.web.HTTPError(500, "Google auth failed")
                # Save the user with, e.g., set_secure_cookie()
    """
    # Endpoints consumed by the inherited OpenIdMixin / OAuthMixin machinery.
    _OPENID_ENDPOINT = "https://www.google.com/accounts/o8/ud"
    _OAUTH_ACCESS_TOKEN_URL = "https://www.google.com/accounts/OAuthGetAccessToken"

    def authorize_redirect(self, oauth_scope, callback_uri=None,
                           ax_attrs=["name","email","language","username"]):
        """Authenticates and authorizes for the given Google resource.

        Some of the available resources are:
            Gmail Contacts - http://www.google.com/m8/feeds/
            Calendar - http://www.google.com/calendar/feeds/
            Finance - http://finance.google.com/finance/feeds/

        You can authorize multiple resources by separating the resource
        URLs with a space.

        NOTE: ``ax_attrs`` has a mutable (list) default; it is only read
        here, never mutated, so this is safe in practice.
        """
        callback_uri = callback_uri or self.request.path
        args = self._openid_args(callback_uri, ax_attrs=ax_attrs,
                                 oauth_scope=oauth_scope)
        self.redirect(self._OPENID_ENDPOINT + "?" + urllib.urlencode(args))

    def get_authenticated_user(self, callback):
        """Fetches the authenticated user data upon redirect."""
        # Look to see if we are doing combined OpenID/OAuth: find the OpenID
        # namespace alias bound to the OAuth 1.0 extension URI.
        oauth_ns = ""
        for name, values in self.request.arguments.iteritems():
            if name.startswith("openid.ns.") and \
                    values[-1] == u"http://specs.openid.net/extensions/oauth/1.0":
                # Strip the "openid.ns." prefix (10 chars) to get the alias.
                oauth_ns = name[10:]
                break
        token = self.get_argument("openid." + oauth_ns + ".request_token", "")
        if token:
            # Combined flow: exchange the request token for an access token.
            http = httpclient.AsyncHTTPClient()
            token = dict(key=token, secret="")
            http.fetch(self._oauth_access_token_url(token),
                       self.async_callback(self._on_access_token, callback))
        else:
            # Plain OpenID-only flow.
            OpenIdMixin.get_authenticated_user(self, callback)

    def _oauth_consumer_token(self):
        """Return the Google OAuth consumer credentials from app settings."""
        self.require_setting("google_consumer_key", "Google OAuth")
        self.require_setting("google_consumer_secret", "Google OAuth")
        return dict(
            key=self.settings["google_consumer_key"],
            secret=self.settings["google_consumer_secret"])

    def _oauth_get_user(self, access_token, callback):
        # User data comes from the OpenID response itself, not from an extra
        # OAuth-authenticated request.
        OpenIdMixin.get_authenticated_user(self, callback)
class FacebookMixin(object):
    """Facebook Connect authentication.

    To authenticate with Facebook, register your application with
    Facebook at http://www.facebook.com/developers/apps.php. Then
    copy your API Key and Application Secret to the application settings
    'facebook_api_key' and 'facebook_secret'.

    When your application is set up, you can use this Mixin like this
    to authenticate the user with Facebook:

        class FacebookHandler(tornado.web.RequestHandler,
                              tornado.auth.FacebookMixin):
            @tornado.web.asynchronous
            def get(self):
                if self.get_argument("session", None):
                    self.get_authenticated_user(
                        self.async_callback(self._on_auth))
                    return
                self.authenticate_redirect()

            def _on_auth(self, user):
                if not user:
                    raise tornado.web.HTTPError(500, "Facebook auth failed")
                # Save the user using, e.g., set_secure_cookie()

    The user object returned by get_authenticated_user() includes the
    attributes 'facebook_uid' and 'name' in addition to session attributes
    like 'session_key'. You should save the session key with the user; it is
    required to make requests on behalf of the user later with
    facebook_request().
    """
    def authenticate_redirect(self, callback_uri=None, cancel_uri=None,
                              extended_permissions=None):
        """Authenticates/installs this app for the current user."""
        self.require_setting("facebook_api_key", "Facebook Connect")
        callback_uri = callback_uri or self.request.path
        args = {
            "api_key": self.settings["facebook_api_key"],
            "v": "1.0",
            "fbconnect": "true",
            "display": "page",
            # Facebook redirects here after login; must be absolute.
            "next": urlparse.urljoin(self.request.full_url(), callback_uri),
            "return_session": "true",
        }
        if cancel_uri:
            args["cancel_url"] = urlparse.urljoin(
                self.request.full_url(), cancel_uri)
        if extended_permissions:
            # Accept a single permission name or a list of names.
            if isinstance(extended_permissions, basestring):
                extended_permissions = [extended_permissions]
            args["req_perms"] = ",".join(extended_permissions)
        self.redirect("http://www.facebook.com/login.php?" +
                      urllib.urlencode(args))

    def authorize_redirect(self, extended_permissions, callback_uri=None,
                           cancel_uri=None):
        """Redirects to an authorization request for the given FB resource.

        The available resource names are listed at
        http://wiki.developers.facebook.com/index.php/Extended_permission.
        The most common resource types include:
            publish_stream
            read_stream
            email
            sms

        extended_permissions can be a single permission name or a list of
        names. To get the session secret and session key, call
        get_authenticated_user() just as you would with
        authenticate_redirect().
        """
        self.authenticate_redirect(callback_uri, cancel_uri,
                                   extended_permissions)

    def get_authenticated_user(self, callback):
        """Fetches the authenticated Facebook user.

        The authenticated user includes the special Facebook attributes
        'session_key' and 'facebook_uid' in addition to the standard
        user attributes like 'name'.
        """
        self.require_setting("facebook_api_key", "Facebook Connect")
        # The "session" request argument is a JSON blob Facebook passed back.
        session = escape.json_decode(self.get_argument("session"))
        self.facebook_request(
            method="facebook.users.getInfo",
            callback=self.async_callback(
                self._on_get_user_info, callback, session),
            session_key=session["session_key"],
            uids=session["uid"],
            fields="uid,first_name,last_name,name,locale,pic_square," \
                   "profile_url,username")

    def facebook_request(self, method, callback, **args):
        """Makes a Facebook API REST request.

        We automatically include the Facebook API key and signature, but
        it is the callers responsibility to include 'session_key' and any
        other required arguments to the method.

        The available Facebook methods are documented here:
        http://wiki.developers.facebook.com/index.php/API

        Here is an example for the stream.get() method:

            class MainHandler(tornado.web.RequestHandler,
                              tornado.auth.FacebookMixin):
                @tornado.web.authenticated
                @tornado.web.asynchronous
                def get(self):
                    self.facebook_request(
                        method="stream.get",
                        callback=self.async_callback(self._on_stream),
                        session_key=self.current_user["session_key"])

                def _on_stream(self, stream):
                    if stream is None:
                        # Not authorized to read the stream yet?
                        self.redirect(self.authorize_redirect("read_stream"))
                        return
                    self.render("stream.html", stream=stream)
        """
        self.require_setting("facebook_api_key", "Facebook Connect")
        self.require_setting("facebook_secret", "Facebook Connect")
        if not method.startswith("facebook."):
            method = "facebook." + method
        args["api_key"] = self.settings["facebook_api_key"]
        args["v"] = "1.0"
        args["method"] = method
        # Microsecond-resolution call id keeps successive requests unique.
        args["call_id"] = str(long(time.time() * 1e6))
        args["format"] = "json"
        # Signature must be computed over all of the above arguments.
        args["sig"] = self._signature(args)
        url = "http://api.facebook.com/restserver.php?" + \
              urllib.urlencode(args)
        http = httpclient.AsyncHTTPClient()
        http.fetch(url, callback=self.async_callback(
            self._parse_response, callback))

    def _on_get_user_info(self, callback, session, users):
        """Merge the users.getInfo result with session data into one dict
        and pass it to ``callback`` (None on failure)."""
        if users is None:
            callback(None)
            return
        callback({
            "name": users[0]["name"],
            "first_name": users[0]["first_name"],
            "last_name": users[0]["last_name"],
            "uid": users[0]["uid"],
            "locale": users[0]["locale"],
            "pic_square": users[0]["pic_square"],
            "profile_url": users[0]["profile_url"],
            "username": users[0].get("username"),
            "session_key": session["session_key"],
            "session_expires": session["expires"],
        })

    def _parse_response(self, callback, response):
        """Decode a Facebook REST response; callback(None) on any HTTP,
        JSON, or Facebook-reported error."""
        if response.error:
            _log.warning("HTTP error from Facebook: %s", response.error)
            callback(None)
            return
        try:
            # NOTE(review): local name shadows any module-level ``json``;
            # bare except below deliberately swallows all decode errors.
            json = escape.json_decode(response.body)
        except:
            _log.warning("Invalid JSON from Facebook: %r", response.body)
            callback(None)
            return
        if isinstance(json, dict) and json.get("error_code"):
            _log.warning("Facebook error: %d: %r", json["error_code"],
                         json.get("error_msg"))
            callback(None)
            return
        callback(json)

    def _signature(self, args):
        """Compute the Facebook 'sig' parameter: md5 hex digest of the
        sorted name=value concatenation followed by the app secret."""
        parts = ["%s=%s" % (n, args[n]) for n in sorted(args.keys())]
        body = "".join(parts) + self.settings["facebook_secret"]
        if isinstance(body, unicode): body = body.encode("utf-8")
        return hashlib.md5(body).hexdigest()
def _oauth_signature(consumer_token, method, url, parameters={}, token=None):
    """Calculates the HMAC-SHA1 OAuth signature for the given request.

    See http://oauth.net/core/1.0/#signing_process

    ``parameters`` is only read, so the mutable default is harmless.
    """
    scheme, netloc, path = urlparse.urlparse(url)[:3]
    normalized_url = scheme.lower() + "://" + netloc.lower() + path
    encoded_params = "&".join("%s=%s" % (k, _oauth_escape(str(v)))
                              for k, v in sorted(parameters.items()))
    # Signature base string: METHOD & url & params, each percent-escaped.
    base_string = "&".join(_oauth_escape(e) for e in
                           (method.upper(), normalized_url, encoded_params))
    # Signing key: consumer secret & token secret (empty if no token).
    key = "&".join([consumer_token["secret"],
                    token["secret"] if token else ""])
    digest = hmac.new(key, base_string, hashlib.sha1).digest()
    # b2a_base64 appends a trailing newline; strip it.
    return binascii.b2a_base64(digest)[:-1]
def _oauth_escape(val):
    """Percent-encode ``val`` for OAuth (RFC 3986 style: only '~' kept),
    UTF-8 encoding unicode input first."""
    encoded = val.encode("utf-8") if isinstance(val, unicode) else val
    return urllib.quote(encoded, safe="~")
def _oauth_parse_response(body):
    """Parse an OAuth token response body into a token dict.

    The dict always carries ``key``/``secret``; any extra parameters the
    provider included are merged in under their own names.
    """
    params = cgi.parse_qs(body, keep_blank_values=False)
    token = dict(key=params["oauth_token"][0],
                 secret=params["oauth_token_secret"][0])
    # Add the extra parameters the Provider included to the token
    for name, values in params.items():
        if name not in ("oauth_token", "oauth_token_secret"):
            token[name] = values[0]
    return token
| |
#!/usr/bin/env python
"""
Copyright (c) 2006-2016 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
import ntpath
import re
from lib.core.common import Backend
from lib.core.common import hashDBWrite
from lib.core.common import isStackingAvailable
from lib.core.common import normalizePath
from lib.core.common import ntToPosixSlashes
from lib.core.common import posixToNtSlashes
from lib.core.common import readInput
from lib.core.common import singleTimeDebugMessage
from lib.core.common import unArrayizeValue
from lib.core.data import conf
from lib.core.data import kb
from lib.core.data import logger
from lib.core.data import queries
from lib.core.enums import DBMS
from lib.core.enums import HASHDB_KEYS
from lib.core.enums import OS
from lib.core.exception import SqlmapNoneDataException
from lib.core.exception import SqlmapUnsupportedFeatureException
from lib.request import inject
class Miscellaneous:
    """
    This class defines miscellaneous functionalities for plugins.
    """

    def __init__(self):
        # Stateless mixin; all state lives in sqlmap's global conf/kb.
        pass

    def getRemoteTempPath(self):
        """Determine (once) the remote temporary directory sqlmap uses for
        its files, cache it in conf.tmpPath, and return it."""
        if not conf.tmpPath and Backend.isDbms(DBMS.MSSQL):
            # Prefer the MSSQL error-log directory: the service account is
            # guaranteed write access there.
            debugMsg = "identifying Microsoft SQL Server error log directory "
            debugMsg += "that sqlmap will use to store temporary files with "
            debugMsg += "commands' output"
            logger.debug(debugMsg)
            # ``_`` holds the full path of the current error log file (or None).
            _ = unArrayizeValue(inject.getValue("SELECT SERVERPROPERTY('ErrorLogFileName')", safeCharEncode=False))
            if _:
                conf.tmpPath = ntpath.dirname(_)
        if not conf.tmpPath:
            if Backend.isOs(OS.WINDOWS):
                if conf.direct:
                    # Direct connection: let the DBMS expand %TEMP% itself.
                    conf.tmpPath = "%TEMP%"
                else:
                    self.checkDbmsOs(detailed=True)
                    if Backend.getOsVersion() in ("2000", "NT"):
                        conf.tmpPath = "C:/WINNT/Temp"
                    elif Backend.isOs("XP"):
                        conf.tmpPath = "C:/Documents and Settings/All Users/Application Data/Temp"
                    else:
                        conf.tmpPath = "C:/Windows/Temp"
            else:
                conf.tmpPath = "/tmp"
        # A drive-letter prefix implies Windows even if OS detection failed.
        if re.search(r"\A[\w]:[\/\\]+", conf.tmpPath, re.I):
            Backend.setOs(OS.WINDOWS)
        conf.tmpPath = normalizePath(conf.tmpPath)
        conf.tmpPath = ntToPosixSlashes(conf.tmpPath)
        singleTimeDebugMessage("going to use '%s' as temporary files directory" % conf.tmpPath)
        # Persist for session resume.
        hashDBWrite(HASHDB_KEYS.CONF_TMP_PATH, conf.tmpPath)
        return conf.tmpPath

    def getVersionFromBanner(self):
        """Extract a compact version string from the DBMS banner into
        kb.bannerFp["dbmsVersion"] (no-op if already known)."""
        if "dbmsVersion" in kb.bannerFp:
            return
        infoMsg = "detecting back-end DBMS version from its banner"
        logger.info(infoMsg)
        # (first, last) are the SUBSTRING offset/length where each DBMS
        # keeps the version number inside its banner text.
        if Backend.isDbms(DBMS.MYSQL):
            first, last = 1, 6
        elif Backend.isDbms(DBMS.PGSQL):
            first, last = 12, 6
        elif Backend.isDbms(DBMS.MSSQL):
            first, last = 29, 9
        else:
            raise SqlmapUnsupportedFeatureException("unsupported DBMS")
        query = queries[Backend.getIdentifiedDbms()].substring.query % (queries[Backend.getIdentifiedDbms()].banner.query, first, last)
        if conf.direct:
            query = "SELECT %s" % query
        kb.bannerFp["dbmsVersion"] = unArrayizeValue(inject.getValue(query))
        # Strip separators so the version is a bare dotted number.
        kb.bannerFp["dbmsVersion"] = (kb.bannerFp["dbmsVersion"] or "").replace(",", "").replace("-", "").replace(" ", "")

    def delRemoteFile(self, filename):
        """Delete ``filename`` on the DBMS host via OS command execution."""
        if not filename:
            return
        self.checkDbmsOs()
        if Backend.isOs(OS.WINDOWS):
            filename = posixToNtSlashes(filename)
            cmd = "del /F /Q %s" % filename
        else:
            cmd = "rm -f %s" % filename
        self.execCmd(cmd, silent=True)

    def createSupportTbl(self, tblName, tblField, tblType):
        """(Re)create a single-column support table used by file/command
        features, dropping any previous instance first."""
        inject.goStacked("DROP TABLE %s" % tblName, silent=True)
        if Backend.isDbms(DBMS.MSSQL) and tblName == self.cmdTblName:
            # The MSSQL command table needs an IDENTITY id column so the
            # command output rows keep their original order.
            inject.goStacked("CREATE TABLE %s(id INT PRIMARY KEY IDENTITY, %s %s)" % (tblName, tblField, tblType))
        else:
            inject.goStacked("CREATE TABLE %s(%s %s)" % (tblName, tblField, tblType))

    def cleanup(self, onlyFileTbl=False, udfDict=None, web=False):
        """
        Cleanup file system and database from sqlmap create files, tables
        and functions
        """
        if web and self.webBackdoorFilePath:
            logger.info("cleaning up the web files uploaded")
            self.delRemoteFile(self.webStagerFilePath)
            self.delRemoteFile(self.webBackdoorFilePath)
        # DB-side cleanup needs stacked queries or a direct connection.
        if not isStackingAvailable() and not conf.direct:
            return
        if Backend.isOs(OS.WINDOWS):
            libtype = "dynamic-link library"
        elif Backend.isOs(OS.LINUX):
            libtype = "shared object"
        else:
            libtype = "shared library"
        if onlyFileTbl:
            logger.debug("cleaning up the database management system")
        else:
            logger.info("cleaning up the database management system")
        logger.debug("removing support tables")
        inject.goStacked("DROP TABLE %s" % self.fileTblName, silent=True)
        inject.goStacked("DROP TABLE %shex" % self.fileTblName, silent=True)
        if not onlyFileTbl:
            inject.goStacked("DROP TABLE %s" % self.cmdTblName, silent=True)
            if Backend.isDbms(DBMS.MSSQL):
                # NOTE: on MSSQL this overrides any caller-provided udfDict.
                udfDict = {"master..new_xp_cmdshell": None}
            if udfDict is None:
                udfDict = self.sysUdfs
            for udf, inpRet in udfDict.items():
                message = "do you want to remove UDF '%s'? [Y/n] " % udf
                output = readInput(message, default="Y")
                if not output or output in ("y", "Y"):
                    dropStr = "DROP FUNCTION %s" % udf
                    if Backend.isDbms(DBMS.PGSQL):
                        # PostgreSQL requires the argument type list to
                        # identify which overload to drop.
                        inp = ", ".join(i for i in inpRet["input"])
                        dropStr += "(%s)" % inp
                    logger.debug("removing UDF '%s'" % udf)
                    inject.goStacked(dropStr, silent=True)
            logger.info("database management system cleanup finished")
            warnMsg = "remember that UDF %s files " % libtype
            if conf.osPwn:
                warnMsg += "and Metasploit related files in the temporary "
                warnMsg += "folder "
            warnMsg += "saved on the file system can only be deleted "
            warnMsg += "manually"
            logger.warn(warnMsg)

    def likeOrExact(self, what):
        """Ask whether the provided name(s) should be matched with LIKE or
        exactly; return (choice, SQL condition template)."""
        message = "do you want sqlmap to consider provided %s(s):\n" % what
        message += "[1] as LIKE %s names (default)\n" % what
        message += "[2] as exact %s names" % what
        choice = readInput(message, default='1')
        if not choice or choice == '1':
            choice = '1'
            condParam = " LIKE '%%%s%%'"
        elif choice == '2':
            condParam = "='%s'"
        else:
            errMsg = "invalid value"
            raise SqlmapNoneDataException(errMsg)
        return choice, condParam
| |
# -*- coding: utf-8 -*-
from __future__ import division, absolute_import, print_function
import numpy as np
import pandas as pd
idx = pd.IndexSlice
from scipy import interpolate
from scipy import stats
from collections import OrderedDict
from kpfm.lockin import LockIn, FIRStateLockVarF, FIRStateLock
def masklh(x, l=None, r=None):
    """Boolean mask selecting the half-open interval ``[l, r)`` of ``x``.

    A missing bound is treated as unbounded on that side: only ``l`` given
    selects ``x >= l``; only ``r`` given selects ``x < r``.
    """
    if l is not None and r is not None:
        return (x >= l) & (x < r)
    if l is not None:
        return x >= l
    return x < r
def weight(tau, dt, N_sigma=5, N=None, style=stats.expon):
    """Sampled pdf weights: ``style.pdf`` evaluated at 0..N-1 with
    scale ``tau / dt``.

    When ``N`` is not given it defaults to ``round(N_sigma * tau / dt)``
    samples, i.e. roughly ``N_sigma`` scale lengths of the distribution.
    """
    scale = tau / dt
    n_points = int(round(scale * N_sigma)) if N is None else N
    return style.pdf(np.arange(n_points), scale=scale)
def get_plot_data(y, dt, t0, N_dec_plot, plot_fir, f0):
    """Demodulate ``y`` with a decimating FIR lock-in at fixed frequency
    ``f0`` and return amplitude / frequency traces for plotting.

    Returns a dict with keys ``t0`` (decimated start time), ``dt``
    (decimated sample spacing), ``A`` (amplitude) and ``f`` (frequency).
    """
    sample_rate = 1. / dt
    lockstate = FIRStateLock(plot_fir, N_dec_plot, f0, 0.0, t0=t0,
                             fs=sample_rate)
    lockstate.filt(y)
    # Unwrapped phase of the demodulated signal; its gradient (scaled to
    # Hz at the decimated rate) gives the frequency deviation from f0.
    lockstate.dphi = np.unwrap(np.angle(lockstate.z_out))
    freq_dev = np.gradient(lockstate.dphi) * (
        sample_rate / (N_dec_plot * 2*np.pi))
    return dict(t0=lockstate.t0_dec,
                dt=dt * N_dec_plot,
                A=np.abs(lockstate.z_out),
                f=freq_dev + f0)
def measure_dA_dphi(lock, li, tp, t_fit=2e-3,
                    dphi_weight_before=None,
                    dphi_weight_after=None):
    """Correct for impulsive phase shift at end of pulse time.

    Fits amplitude and phase linearly on both sides of ``tp`` and
    extrapolates each fit to ``tp`` to estimate the amplitude step ``dA``
    and phase step ``dphi`` there, plus the oscillator phase ``phi0``
    right at ``tp``.

    Parameters: ``lock`` carries the raw signal (``t``, ``x``); ``li``
    carries the decimated demodulation (``t``, ``df``, ``z_out``, ``f0``,
    ``fs``) — presumably LockIn-style objects from kpfm.lockin, TODO
    confirm. When explicit weights are given, the fit window ``t_fit`` is
    derived from the weight lengths instead of its argument value (so
    ``t_fit=None`` is then acceptable).

    Returns ``(phi0, dA, dphi)``; ``dphi`` is in cycles (see below).
    """
    fs = li.fs
    if dphi_weight_before is None:
        N_b = int(round(fs*t_fit))
    else:
        N_b = len(dphi_weight_before)
        t_fit = N_b / fs
    if dphi_weight_after is None:
        N_a = int(round(fs*t_fit))
    else:
        N_a = len(dphi_weight_after)
        t_fit = N_a / fs
    # Index of the last raw sample strictly before the pulse end.
    i_tp = np.arange(lock.t.size)[lock.t < tp][-1]
    # Use 20 data points for interpolating; this is slightly over one
    # cycle of our oscillation
    m = np.arange(-10, 11) + i_tp
    # This interpolator worked reasonably for similar, low-frequency sine waves
    interp = interpolate.KroghInterpolator(lock.t[m], lock.x[m])
    x0 = interp(tp)[()]
    # We only need t0 approximately; the precise value of f0 doesn't matter very much.
    t0 = li.t[(li.t < tp)][-1]
    f0 = li.df[(li.t < tp)][-1] + li.f0(t0)
    v0 = interp.derivative(tp)[()]
    # Quadrature component inferred from the instantaneous velocity.
    x2 = v0 / (2*np.pi*f0)
    phi0 = np.arctan2(-x2, x0)
    # Boolean windows [tp - t_fit, tp) and [tp, tp + t_fit) for the A fits.
    ml = masklh(li.t, tp-t_fit, tp)
    mr = masklh(li.t, tp, tp + t_fit)
    # Index windows (fixed sample counts) for the weighted phase fits.
    ml_phi = np.arange(li.t.size)[li.t <= tp][-N_b:]
    mr_phi = np.arange(li.t.size)[li.t > tp][:N_a]
    A = abs(li.z_out)
    # Phase in cycles (radians / 2*pi).
    phi = np.unwrap(np.angle(li.z_out))/(2*np.pi)
    # Degree-1 fits (slope, intercept) of amplitude and phase on each side.
    mbAl = np.polyfit(li.t[ml], A[ml], 1)
    mbAr = np.polyfit(li.t[mr], A[mr], 1)
    mb_phi_l = np.polyfit(li.t[ml_phi], phi[ml_phi], 1, w=dphi_weight_before)
    mb_phi_r = np.polyfit(li.t[mr_phi], phi[mr_phi], 1, w=dphi_weight_after)
    # Steps at tp: right-side extrapolation minus left-side extrapolation.
    dA = np.polyval(mbAr, tp) - np.polyval(mbAl, tp)
    dphi = np.polyval(mb_phi_r, tp) - np.polyval(mb_phi_l, tp)
    return phi0, dA, dphi
def individual_phasekick2(y, dt, t0, t1, t2, tp, N_dec, lockin_fir,
                          weight_before, weight_after, plot_funcs=None, **kwargs):
    """Measure the phase kick of a single pulse dataset.

    Demodulates ``y`` (sampled at ``1/dt``, starting at ``t0``), fits the
    frequency before and after the pulse ending at ``tp``, re-demodulates
    with a piecewise demodulation frequency, and returns a dict of fit
    results (``dphi``, ``dA``, ``fc0``, ``f1``, ``f2``, ...), floats only
    so it can be loaded into a DataFrame.

    Parameter provenance (from the original notes):
        lockin_fir     chosen by fp, fc
        N_dec          chosen by int(fs/fs_dec)
        weight_before  chosen by opt. filter
        weight_after   chosen by opt. filter
    ``plot_funcs`` and ``**kwargs`` are accepted but unused here.
    """
    fs = 1. / dt
    t = np.arange(y.size) * dt + t0
    lock = LockIn(t, y, fs)
    lock.run(fir=lockin_fir)
    # Reference phase/frequency from data before -t2.
    lock.phase(tf=-t2)
    fc0 = lock.f0corr
    N_b = weight_before.size
    N_a = weight_after.size
    # Find N_smooth points before begining of pulse
    # Flip filter coefficients for convolution
    # Could also fit phase to a line here
    # Degree-0 (weighted-mean) fit of the frequency deviation before t=0.
    df_before = np.polyfit(lock.t[lock.t < 0][-N_b:],
                           lock.df[lock.t < 0][-N_b:],
                           0,
                           w=weight_before[::-1])
    # Don't flip filter coefficients, this is 'anti-casual',
    # inferring f at tp from f at times t > tp
    df_after = np.polyfit(lock.t[lock.t > tp][:N_a],
                          lock.df[lock.t > tp][:N_a],
                          0,
                          w=weight_after)
    f1 = fc0 + df_before
    f2 = fc0 + df_after
    lock.phase(ti=-dt*N_b/5, tf=0)
    phi0 = -lock.phi[0]

    def f_var(t):
        # Piecewise demodulation frequency.
        # NOTE(review): for t <= t0 + t1 this evaluates to f2 (the
        # post-pulse frequency); confirm f1 was not intended there.
        return np.where(t > tp, f2,
                        np.where(t > t0 + t1, f1, f2)
                        )
    lockstate = FIRStateLockVarF(lockin_fir, N_dec, f_var, phi0, t0=t0, fs=fs)
    lockstate.filt(y)
    lockstate.dphi = np.unwrap(np.angle(lockstate.z_out))
    lockstate.df = np.gradient(lockstate.dphi) * (
        fs / (N_dec * 2*np.pi))
    lockstate.t = td = lockstate.get_t()
    # t_fit=None is safe: explicit weights make measure_dA_dphi derive the
    # fit window from the weight lengths.
    phi0_tp, dA, dphi_tp = measure_dA_dphi(lock, lockstate, tp, t_fit=None,
                                           dphi_weight_before=weight_before[::-1],
                                           dphi_weight_after=weight_after)
    # Linear phase fits before t=0 and after tp; the after-fit is shifted
    # by tp so its intercept is the phase at tp.
    mb_before = np.polyfit(td[td < 0][-N_b:],
                           lockstate.dphi[td < 0][-N_b:],
                           1,
                           w=weight_before[::-1])
    mb_after = np.polyfit(td[td > tp][:N_a] - tp,
                          lockstate.dphi[td > tp][:N_a],
                          1,
                          w=weight_after)
    # Both extrapolated phases are evaluated at tp.
    phi0 = mb_before[1] + tp * mb_before[0]
    phi1 = mb_after[1]
    # Enforce conversion to float to get rid of annoying 1 element arrays
    # that will cause problems in the DataFrame later.
    outd = dict(
        mb_before=mb_before,
        mb_after=mb_after,
        dphi=float(phi1 - phi0),
        phi0_tp=float(phi0_tp),
        dA=float(dA),
        dphi_tp=float(dphi_tp),
        fc0=float(fc0),
        f1=float(f1),
        f2=float(f2)
    )
    return outd
def pk_dictionary_to_dataframe(d):
    """Convert a mapping keyed by ``(expt, ds)`` tuples into a DataFrame.

    The dict keys become a two-level MultiIndex named ``('expt', 'ds')``;
    each value dict becomes one row.
    """
    df = pd.DataFrame.from_dict(d, orient='index')
    # FIX: the previous ``pd.MultiIndex(levels=..., labels=...)`` round-trip
    # relies on the ``labels`` keyword, which was deprecated and then
    # removed (renamed ``codes``) in modern pandas. Naming the existing
    # index levels in place is equivalent and version-proof.
    df.index = df.index.set_names(['expt', 'ds'])
    return df
def make_df_dict(d):
    """Reduce the keys in data, and turn them into the keys needed for
    the pandas dataframe.

    Each value of ``d`` is expected to hold ``'data'`` (acquisition
    metadata: ``tp``, ``relative_time``) and ``'out'`` (fit results from
    ``individual_phasekick2``). Returns ``{key: OrderedDict(row)}``.
    """
    two_pi = 2 * np.pi
    dout = {}
    for key, val in d.items():
        data = val['data']
        out = val['out']
        dout[key] = OrderedDict([
            ('tp', data['tp']),
            ('tp_ms', data['tp']*1e3),
            # FIX: the original wrote ``out['dphi'] / 2*np.pi`` which parses
            # as ``(dphi/2)*pi``; converting radians to cycles requires
            # dividing by (2*pi), exactly as 'dphi_tp_end [cyc]' does below.
            ('dphi [cyc]', out['dphi'] / two_pi),
            ('dphi [mcyc]', out['dphi'] / two_pi * 1e3),
            ('f0 [Hz]', out['fc0']),
            ('df_dV [Hz]', out['f1'] - out['fc0']),
            ('dA [nm]', out['dA']),
            ('dphi_tp_end [cyc]', out['dphi_tp'] / two_pi),
            ('phi_at_tp [rad]', out['phi0_tp']),
            ('relative time [s]', data['relative_time']),
            # NOTE(review): 'dphi' is in radians while 'dphi_tp' (from
            # measure_dA_dphi) appears to already be in cycles; this
            # subtraction mixes units exactly as the original did — confirm
            # the intended correction before changing it.
            ('dphi_corrected [cyc]', out['dphi'] - out['dphi_tp']),
            ('dphi_corrected [mcyc]', (out['dphi'] - out['dphi_tp']) * 1e3)
        ]
        )
    return dout
| |
from __future__ import unicode_literals
import boto3
import sure # noqa
import json
from moto.ec2 import utils as ec2_utils
from uuid import UUID
from moto import mock_ecs
from moto import mock_ec2
@mock_ecs
def test_create_cluster():
    """A freshly created cluster is ACTIVE with all counters at zero."""
    client = boto3.client('ecs', region_name='us-east-1')
    cluster = client.create_cluster(clusterName='test_ecs_cluster')['cluster']
    cluster['clusterName'].should.equal('test_ecs_cluster')
    cluster['clusterArn'].should.equal(
        'arn:aws:ecs:us-east-1:012345678910:cluster/test_ecs_cluster')
    cluster['status'].should.equal('ACTIVE')
    for counter in ('registeredContainerInstancesCount', 'runningTasksCount',
                    'pendingTasksCount', 'activeServicesCount'):
        cluster[counter].should.equal(0)
@mock_ecs
def test_list_clusters():
    """list_clusters reports the ARN of every created cluster."""
    client = boto3.client('ecs', region_name='us-east-1')
    for cluster_name in ('test_cluster0', 'test_cluster1'):
        client.create_cluster(clusterName=cluster_name)
    arns = client.list_clusters()['clusterArns']
    arns.should.contain('arn:aws:ecs:us-east-1:012345678910:cluster/test_cluster0')
    arns.should.contain('arn:aws:ecs:us-east-1:012345678910:cluster/test_cluster1')
@mock_ecs
def test_delete_cluster():
    """delete_cluster echoes the removed cluster and empties list_clusters."""
    client = boto3.client('ecs', region_name='us-east-1')
    client.create_cluster(clusterName='test_ecs_cluster')
    deleted = client.delete_cluster(cluster='test_ecs_cluster')['cluster']
    deleted['clusterName'].should.equal('test_ecs_cluster')
    deleted['clusterArn'].should.equal(
        'arn:aws:ecs:us-east-1:012345678910:cluster/test_ecs_cluster')
    deleted['status'].should.equal('ACTIVE')
    for counter in ('registeredContainerInstancesCount', 'runningTasksCount',
                    'pendingTasksCount', 'activeServicesCount'):
        deleted[counter].should.equal(0)
    len(client.list_clusters()['clusterArns']).should.equal(0)
@mock_ecs
def test_register_task_definition():
    """register_task_definition echoes back the container definition."""
    client = boto3.client('ecs', region_name='us-east-1')
    container = {
        'name': 'hello_world',
        'image': 'docker/hello-world:latest',
        'cpu': 1024,
        'memory': 400,
        'essential': True,
        'environment': [{
            'name': 'AWS_ACCESS_KEY_ID',
            'value': 'SOME_ACCESS_KEY'
        }],
        'logConfiguration': {'logDriver': 'json-file'}
    }
    response = client.register_task_definition(
        family='test_ecs_task',
        containerDefinitions=[container]
    )
    task_def = response['taskDefinition']
    type(task_def).should.be(dict)
    task_def['taskDefinitionArn'].should.equal(
        'arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1')
    registered = task_def['containerDefinitions'][0]
    registered['name'].should.equal('hello_world')
    registered['image'].should.equal('docker/hello-world:latest')
    registered['cpu'].should.equal(1024)
    registered['memory'].should.equal(400)
    registered['essential'].should.equal(True)
    registered['environment'][0]['name'].should.equal('AWS_ACCESS_KEY_ID')
    registered['environment'][0]['value'].should.equal('SOME_ACCESS_KEY')
    registered['logConfiguration']['logDriver'].should.equal('json-file')
@mock_ecs
def test_list_task_definitions():
    """Revisions of the same family get sequential :N ARN suffixes."""
    client = boto3.client('ecs', region_name='us-east-1')
    for container_name, image, access_key in (
            ('hello_world', 'docker/hello-world:latest', 'SOME_ACCESS_KEY'),
            ('hello_world2', 'docker/hello-world2:latest', 'SOME_ACCESS_KEY2')):
        client.register_task_definition(
            family='test_ecs_task',
            containerDefinitions=[{
                'name': container_name,
                'image': image,
                'cpu': 1024,
                'memory': 400,
                'essential': True,
                'environment': [{
                    'name': 'AWS_ACCESS_KEY_ID',
                    'value': access_key
                }],
                'logConfiguration': {'logDriver': 'json-file'}
            }]
        )
    arns = client.list_task_definitions()['taskDefinitionArns']
    len(arns).should.equal(2)
    arns[0].should.equal(
        'arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1')
    arns[1].should.equal(
        'arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:2')
@mock_ecs
def test_deregister_task_definition():
    """deregister_task_definition returns the full definition it removes."""
    client = boto3.client('ecs', region_name='us-east-1')
    container = {
        'name': 'hello_world',
        'image': 'docker/hello-world:latest',
        'cpu': 1024,
        'memory': 400,
        'essential': True,
        'environment': [{
            'name': 'AWS_ACCESS_KEY_ID',
            'value': 'SOME_ACCESS_KEY'
        }],
        'logConfiguration': {'logDriver': 'json-file'}
    }
    client.register_task_definition(
        family='test_ecs_task',
        containerDefinitions=[container]
    )
    response = client.deregister_task_definition(
        taskDefinition='test_ecs_task:1'
    )
    task_def = response['taskDefinition']
    type(task_def).should.be(dict)
    task_def['taskDefinitionArn'].should.equal(
        'arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1')
    removed = task_def['containerDefinitions'][0]
    removed['name'].should.equal('hello_world')
    removed['image'].should.equal('docker/hello-world:latest')
    removed['cpu'].should.equal(1024)
    removed['memory'].should.equal(400)
    removed['essential'].should.equal(True)
    removed['environment'][0]['name'].should.equal('AWS_ACCESS_KEY_ID')
    removed['environment'][0]['value'].should.equal('SOME_ACCESS_KEY')
    removed['logConfiguration']['logDriver'].should.equal('json-file')
@mock_ecs
def test_create_service():
    """create_service links cluster + task definition and starts ACTIVE."""
    client = boto3.client('ecs', region_name='us-east-1')
    client.create_cluster(clusterName='test_ecs_cluster')
    client.register_task_definition(
        family='test_ecs_task',
        containerDefinitions=[{
            'name': 'hello_world',
            'image': 'docker/hello-world:latest',
            'cpu': 1024,
            'memory': 400,
            'essential': True,
            'environment': [{
                'name': 'AWS_ACCESS_KEY_ID',
                'value': 'SOME_ACCESS_KEY'
            }],
            'logConfiguration': {'logDriver': 'json-file'}
        }]
    )
    service = client.create_service(
        cluster='test_ecs_cluster',
        serviceName='test_ecs_service',
        taskDefinition='test_ecs_task',
        desiredCount=2
    )['service']
    service['clusterArn'].should.equal(
        'arn:aws:ecs:us-east-1:012345678910:cluster/test_ecs_cluster')
    service['desiredCount'].should.equal(2)
    len(service['events']).should.equal(0)
    len(service['loadBalancers']).should.equal(0)
    service['pendingCount'].should.equal(0)
    service['runningCount'].should.equal(0)
    service['serviceArn'].should.equal(
        'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service')
    service['serviceName'].should.equal('test_ecs_service')
    service['status'].should.equal('ACTIVE')
    service['taskDefinition'].should.equal(
        'arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1')
@mock_ecs
def test_list_services():
    """list_services returns the ARNs of every service in the cluster."""
    client = boto3.client('ecs', region_name='us-east-1')
    client.create_cluster(clusterName='test_ecs_cluster')
    client.register_task_definition(
        family='test_ecs_task',
        containerDefinitions=[{
            'name': 'hello_world',
            'image': 'docker/hello-world:latest',
            'cpu': 1024,
            'memory': 400,
            'essential': True,
            'environment': [{
                'name': 'AWS_ACCESS_KEY_ID',
                'value': 'SOME_ACCESS_KEY'
            }],
            'logConfiguration': {'logDriver': 'json-file'}
        }]
    )
    for service_name in ('test_ecs_service1', 'test_ecs_service2'):
        client.create_service(
            cluster='test_ecs_cluster',
            serviceName=service_name,
            taskDefinition='test_ecs_task',
            desiredCount=2
        )
    service_arns = client.list_services(cluster='test_ecs_cluster')['serviceArns']
    len(service_arns).should.equal(2)
    service_arns[0].should.equal(
        'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service1')
    service_arns[1].should.equal(
        'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service2')
@mock_ecs
def test_update_service():
    """update_service changes desiredCount on an existing service."""
    client = boto3.client('ecs', region_name='us-east-1')
    client.create_cluster(clusterName='test_ecs_cluster')
    client.register_task_definition(
        family='test_ecs_task',
        containerDefinitions=[{
            'name': 'hello_world',
            'image': 'docker/hello-world:latest',
            'cpu': 1024,
            'memory': 400,
            'essential': True,
            'environment': [{
                'name': 'AWS_ACCESS_KEY_ID',
                'value': 'SOME_ACCESS_KEY'
            }],
            'logConfiguration': {'logDriver': 'json-file'}
        }]
    )
    created = client.create_service(
        cluster='test_ecs_cluster',
        serviceName='test_ecs_service',
        taskDefinition='test_ecs_task',
        desiredCount=2
    )
    created['service']['desiredCount'].should.equal(2)
    updated = client.update_service(
        cluster='test_ecs_cluster',
        service='test_ecs_service',
        desiredCount=0
    )
    updated['service']['desiredCount'].should.equal(0)
@mock_ecs
def test_delete_service():
    """A service scaled to zero can be deleted; the response echoes it."""
    client = boto3.client('ecs', region_name='us-east-1')
    client.create_cluster(clusterName='test_ecs_cluster')
    client.register_task_definition(
        family='test_ecs_task',
        containerDefinitions=[{
            'name': 'hello_world',
            'image': 'docker/hello-world:latest',
            'cpu': 1024,
            'memory': 400,
            'essential': True,
            'environment': [{
                'name': 'AWS_ACCESS_KEY_ID',
                'value': 'SOME_ACCESS_KEY'
            }],
            'logConfiguration': {'logDriver': 'json-file'}
        }]
    )
    client.create_service(
        cluster='test_ecs_cluster',
        serviceName='test_ecs_service',
        taskDefinition='test_ecs_task',
        desiredCount=2
    )
    client.update_service(
        cluster='test_ecs_cluster',
        service='test_ecs_service',
        desiredCount=0
    )
    deleted = client.delete_service(
        cluster='test_ecs_cluster',
        service='test_ecs_service'
    )['service']
    deleted['clusterArn'].should.equal(
        'arn:aws:ecs:us-east-1:012345678910:cluster/test_ecs_cluster')
    deleted['desiredCount'].should.equal(0)
    len(deleted['events']).should.equal(0)
    len(deleted['loadBalancers']).should.equal(0)
    deleted['pendingCount'].should.equal(0)
    deleted['runningCount'].should.equal(0)
    deleted['serviceArn'].should.equal(
        'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service')
    deleted['serviceName'].should.equal('test_ecs_service')
    deleted['status'].should.equal('ACTIVE')
    deleted['taskDefinition'].should.equal(
        'arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1')
@mock_ec2
@mock_ecs
def test_register_container_instance():
    """Registering an EC2 instance with a cluster returns its full description."""
    ecs_client = boto3.client('ecs', region_name='us-east-1')
    ec2 = boto3.resource('ec2', region_name='us-east-1')
    test_cluster_name = 'test_ecs_cluster'
    _ = ecs_client.create_cluster(
        clusterName=test_cluster_name
    )
    test_instance = ec2.create_instances(
        ImageId="ami-1234abcd",
        MinCount=1,
        MaxCount=1,
    )[0]
    # The ECS agent normally supplies the instance identity document; build
    # one from the mocked EC2 instance.
    instance_id_document = json.dumps(
        ec2_utils.generate_instance_identity_document(test_instance)
    )
    response = ecs_client.register_container_instance(
        cluster=test_cluster_name,
        instanceIdentityDocument=instance_id_document
    )
    response['containerInstance']['ec2InstanceId'].should.equal(test_instance.id)
    # The container-instance ARN ends in a server-generated UUID.
    full_arn = response['containerInstance']['containerInstanceArn']
    arn_part = full_arn.split('/')
    arn_part[0].should.equal('arn:aws:ecs:us-east-1:012345678910:container-instance')
    arn_part[1].should.equal(str(UUID(arn_part[1])))
    response['containerInstance']['status'].should.equal('ACTIVE')
    len(response['containerInstance']['registeredResources']).should.equal(0)
    len(response['containerInstance']['remainingResources']).should.equal(0)
    response['containerInstance']['agentConnected'].should.equal(True)
    response['containerInstance']['versionInfo']['agentVersion'].should.equal('1.0.0')
    response['containerInstance']['versionInfo']['agentHash'].should.equal('4023248')
    response['containerInstance']['versionInfo']['dockerVersion'].should.equal('DockerVersion: 1.5.0')
@mock_ec2
@mock_ecs
def test_list_container_instances():
    """All registered container instances are listed for the cluster."""
    ecs = boto3.client('ecs', region_name='us-east-1')
    ec2_resource = boto3.resource('ec2', region_name='us-east-1')
    cluster = 'test_ecs_cluster'
    ecs.create_cluster(clusterName=cluster)
    expected_arns = []
    for _ in range(3):
        instance = ec2_resource.create_instances(
            ImageId="ami-1234abcd",
            MinCount=1,
            MaxCount=1,
        )[0]
        identity_doc = json.dumps(
            ec2_utils.generate_instance_identity_document(instance))
        registered = ecs.register_container_instance(
            cluster=cluster,
            instanceIdentityDocument=identity_doc)
        expected_arns.append(
            registered['containerInstance']['containerInstanceArn'])
    listed = ecs.list_container_instances(cluster=cluster)
    len(listed['containerInstanceArns']).should.equal(3)
    for arn in expected_arns:
        listed['containerInstanceArns'].should.contain(arn)
@mock_ec2
@mock_ecs
def test_describe_container_instances():
    """describe_container_instances returns every requested instance, no failures."""
    ecs_client = boto3.client('ecs', region_name='us-east-1')
    ec2 = boto3.resource('ec2', region_name='us-east-1')
    test_cluster_name = 'test_ecs_cluster'
    _ = ecs_client.create_cluster(
        clusterName=test_cluster_name
    )
    instance_to_create = 3
    test_instance_arns = []
    for i in range(0, instance_to_create):
        test_instance = ec2.create_instances(
            ImageId="ami-1234abcd",
            MinCount=1,
            MaxCount=1,
        )[0]
        instance_id_document = json.dumps(
            ec2_utils.generate_instance_identity_document(test_instance)
        )
        response = ecs_client.register_container_instance(
            cluster=test_cluster_name,
            instanceIdentityDocument=instance_id_document)
        test_instance_arns.append(response['containerInstance']['containerInstanceArn'])
    # describe_container_instances takes bare instance ids, not full ARNs.
    test_instance_ids = list(map((lambda x: x.split('/')[1]), test_instance_arns))
    response = ecs_client.describe_container_instances(cluster=test_cluster_name, containerInstances=test_instance_ids)
    len(response['failures']).should.equal(0)
    len(response['containerInstances']).should.equal(instance_to_create)
    response_arns = [ci['containerInstanceArn'] for ci in response['containerInstances']]
    for arn in test_instance_arns:
        response_arns.should.contain(arn)
| |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import ast
import os
from buildbot.changes.filter import ChangeFilter
from buildbot.schedulers.basic import SingleBranchScheduler
from buildbot.schedulers.timed import Nightly
from buildbot.status.mail import MailNotifier
from buildbot import util
from config_bootstrap import Master
from common import chromium_utils
from master import gitiles_poller
from master import master_utils
from master import repo_poller
from master import slaves_list
from master.factory import annotator_factory
def PopulateBuildmasterConfig(BuildmasterConfig, builders_path,
                              active_master_cls):
    """Read builders_path and populate a build master config dict.

    Args:
      BuildmasterConfig: the dict buildbot expects the master module to fill
          in; mutated in place by _Populate.
      builders_path: path to a builders file readable by
          chromium_utils.ReadBuildersFile.
      active_master_cls: master class (from config_bootstrap) providing the
          project name, URLs, and production-host flag.
    """
    builders = chromium_utils.ReadBuildersFile(builders_path)
    _Populate(BuildmasterConfig, builders, active_master_cls)
def _Populate(BuildmasterConfig, builders, active_master_cls):
    """Fill in the buildbot master config dict from the parsed builders data.

    Mutates BuildmasterConfig in place: project identity, database settings,
    builders, schedulers, change sources, slaves and status plugins.
    """
    m_annotator = annotator_factory.AnnotatorFactory(active_master_cls)

    c = BuildmasterConfig
    c['logCompressionLimit'] = False
    c['projectName'] = active_master_cls.project_name
    c['projectURL'] = Master.project_url
    c['buildbotURL'] = active_master_cls.buildbot_url

    # This sets c['db_url'] to the database connect string found in
    # the .dbconfig in the master directory, if it exists. If this is
    # a production host, it must exist.
    chromium_utils.DatabaseSetup(
        c,
        require_dbconfig=active_master_cls.is_production_host)

    c['builders'] = _ComputeBuilders(builders, m_annotator)
    c['schedulers'] = _ComputeSchedulers(builders)
    c['change_source'], tag_comparator = _ComputeChangeSourceAndTagComparator(
        builders)

    # The 'slaves' list defines the set of allowable buildslaves. List all the
    # slaves registered to a builder. Remove dupes.
    c['slaves'] = master_utils.AutoSetupSlaves(
        c['builders'],
        Master.GetBotPassword())

    # This does some sanity checks on the configuration.
    slaves = slaves_list.BaseSlavesList(
        chromium_utils.GetSlavesFromBuilders(builders),
        builders['master_classname'])
    master_utils.VerifySetup(c, slaves)

    default_public_html = os.path.join(chromium_utils.BUILD_DIR,
                                       'masters', 'master.chromium',
                                       'public_html')
    # Builders files may override the public_html directory.
    public_html = builders.get('public_html', default_public_html)

    # Adds common status and tools to this master.
    # TODO: Look at the logic in this routine to see if any of the logic
    # in this routine can be moved there to simplify things.
    master_utils.AutoSetupMaster(c, active_master_cls,
                                 public_html=public_html,
                                 templates=builders['templates'],
                                 tagComparator=tag_comparator,
                                 enable_http_status_push=active_master_cls.is_production_host)

    # TODO: AutoSetupMaster's settings for the following are too low to be
    # useful for most projects. We should fix that.
    c['buildHorizon'] = 3000
    c['logHorizon'] = 3000
    # Must be at least 2x the number of slaves.
    c['eventHorizon'] = 200
def _ComputeBuilders(builders, m_annotator):
    """Return the buildbot builder dicts for every builder in the config.

    Builders are ordered by (category, name). The original hand-written
    cmp function is replaced with an equivalent sort key, which is the
    idiomatic (and faster) form of the same ordering.
    """
    def sort_key(name):
        # Primary key: category (may be None); secondary key: builder name.
        return (builders['builders'][name].get('category'), name)

    actual_builders = []
    for builder_name in sorted(builders['builders'], key=sort_key):
        builder_data = builders['builders'][builder_name]
        has_schedulers = bool(
            builder_data.get('scheduler', builder_data.get('schedulers')))

        # We will automatically merge all build requests for any
        # builder that can be scheduled; this is normally the behavior
        # we want for repo-triggered builders and cron-triggered builders.
        # You can override this behavior by setting the mergeRequests field.
        merge_requests = builder_data.get('mergeRequests', has_schedulers)

        slavebuilddir = builder_data.get('slavebuilddir',
                                         util.safeTranslate(builder_name))
        factory = m_annotator.BaseFactory(
            recipe=builder_data['recipe'],
            max_time=builder_data.get('builder_timeout_s'),
            factory_properties=builder_data.get('properties')
        )
        actual_builders.append({
            'auto_reboot': builder_data.get('auto_reboot', True),
            'mergeRequests': merge_requests,
            'name': builder_name,
            'factory': factory,
            'slavebuilddir': slavebuilddir,
            'slavenames': chromium_utils.GetSlaveNamesForBuilder(builders,
                                                                 builder_name),
            'category': builder_data.get('category'),
            'trybot': builder_data.get('trybot'),
        })
    return actual_builders
def _ComputeSchedulers(builders):
scheduler_to_builders = {}
for builder_name, builder_data in builders['builders'].items():
scheduler_names = builder_data.get('schedulers', [])
if 'scheduler' in builder_data:
scheduler_names.append(builder_data['scheduler'])
for scheduler_name in scheduler_names:
if scheduler_name:
if scheduler_name not in builders['schedulers']:
raise ValueError('unknown scheduler "%s"' % scheduler_name)
scheduler_to_builders.setdefault(
scheduler_name, []).append(builder_name)
schedulers = []
for scheduler_name, scheduler_values in builders['schedulers'].items():
scheduler_type = scheduler_values['type']
builder_names = scheduler_to_builders[scheduler_name]
if scheduler_type == 'git_poller':
# git_poller pollers group changes, so we match on our specific repository
# to ensure that we only pick up changes from our poller.
schedulers.append(SingleBranchScheduler(
name=scheduler_name,
change_filter=ChangeFilter(
repository=scheduler_values['git_repo_url'],
branch=scheduler_values.get('branch', 'master'),
),
treeStableTimer=scheduler_values.get('treeStableTimer', 60),
builderNames=builder_names))
elif scheduler_type == 'repo_poller':
# repo_poller pollers punt changes that use the scheduler name as their
# category (see _ComputeChangeSourceAndTagComparator). Matching on this
# ensures that we only match changes from our poller.
schedulers.append(SingleBranchScheduler(
name=scheduler_name,
change_filter=ChangeFilter(
category=str(scheduler_name),
),
treeStableTimer=scheduler_values.get('treeStableTimer', 60),
builderNames=builder_names))
elif scheduler_type == 'cron':
schedulers.append(Nightly(
name=scheduler_name,
branch='master',
minute=scheduler_values['minute'],
hour=scheduler_values['hour'],
builderNames=builder_names))
else:
raise ValueError('unsupported scheduler type "%s"' % scheduler_type)
return schedulers
def _ComputeChangeSourceAndTagComparator(builders):
change_source = []
tag_comparator = None
git_urls_to_branches = {}
for scheduler_config in builders['schedulers'].values():
if scheduler_config['type'] != 'git_poller':
continue
url = scheduler_config['git_repo_url']
branch = scheduler_config.get('branch', 'master')
git_urls_to_branches.setdefault(url, set()).add(branch)
for url, branches in git_urls_to_branches.iteritems():
change_source.append(
gitiles_poller.GitilesPoller(url, branches=list(branches)))
for scheduler_name, scheduler_config in builders['schedulers'].iteritems():
if scheduler_config['type'] != 'repo_poller':
continue
rev_link_template = scheduler_config.get('rev_link_template')
branch = scheduler_config.get('branch')
branches = [branch] if branch is not None else None
change_source.append(repo_poller.RepoPoller(
repo_url=scheduler_config['repo_url'],
manifest='manifest',
category=str(scheduler_name),
repo_branches=branches,
pollInterval=300,
revlinktmpl=rev_link_template))
# We have to set the tag_comparator to something, but if we have multiple
# repos, the tag_comparator will not work properly (it's meaningless).
# It's not clear if there's a good answer to this.
if change_source:
tag_comparator = change_source[0].comparator
return change_source, tag_comparator
| |
from __future__ import unicode_literals
from random import randint
from django.template import Template
from django.template.loader import render_to_string
from django.template.defaultfilters import slugify
from .compatibility import text_type
from .layout import LayoutObject, Field, Div
from .utils import render_field, flatatt, TEMPLATE_PACK
class PrependedAppendedText(Field):
    """Layout object rendering a field with text prepended and/or appended."""
    template = "%s/layout/prepended_appended_text.html"

    def __init__(self, field, prepended_text=None, appended_text=None, *args, **kwargs):
        self.field = field
        self.appended_text = appended_text
        self.prepended_text = prepended_text
        if 'active' in kwargs:
            self.active = kwargs.pop('active')

        # Pick up an explicit Bootstrap input-size class if one was supplied
        # (input-sm wins when both are present, as before).
        self.input_size = None
        css_class = kwargs.get('css_class', '')
        for size in ('input-lg', 'input-sm'):
            if size in css_class:
                self.input_size = size

        super(PrependedAppendedText, self).__init__(field, *args, **kwargs)

    def render(self, form, form_style, context, template_pack=TEMPLATE_PACK, extra_context=None, **kwargs):
        # The incoming extra_context is replaced wholesale with this
        # object's own context.
        extra_context = {
            'crispy_appended_text': self.appended_text,
            'crispy_prepended_text': self.prepended_text,
            'input_size': self.input_size,
            'active': getattr(self, "active", False),
        }
        return render_field(
            self.field, form, form_style, context,
            template=self.get_template_name(template_pack),
            attrs=self.attrs,
            template_pack=template_pack,
            extra_context=extra_context,
            **kwargs
        )
class AppendedText(PrependedAppendedText):
    """Layout object rendering a field with *text* appended after it."""

    def __init__(self, field, text, *args, **kwargs):
        # Discard any conflicting prepend/append kwargs; only `text` is used.
        for key in ('appended_text', 'prepended_text'):
            kwargs.pop(key, None)
        self.text = text
        super(AppendedText, self).__init__(field, appended_text=text, **kwargs)
class PrependedText(PrependedAppendedText):
    """Layout object rendering a field with *text* prepended before it."""

    def __init__(self, field, text, *args, **kwargs):
        # Discard any conflicting prepend/append kwargs; only `text` is used.
        for key in ('appended_text', 'prepended_text'):
            kwargs.pop(key, None)
        self.text = text
        super(PrependedText, self).__init__(field, prepended_text=text, **kwargs)
class FormActions(LayoutObject):
    """
    Bootstrap layout object. It wraps fields in a <div class="form-actions">

    Example::

        FormActions(
            HTML('<span style="display: hidden;">Information Saved</span>'),
            Submit('Save', 'Save', css_class='btn-primary')
        )
    """
    template = "%s/layout/formactions.html"

    def __init__(self, *fields, **kwargs):
        self.fields = list(fields)
        self.template = kwargs.pop('template', self.template)
        # Remaining kwargs become HTML attributes on the wrapping div.
        self.attrs = kwargs
        # Translate crispy's css_class kwarg into a plain HTML class attr.
        if 'css_class' in self.attrs:
            self.attrs['class'] = self.attrs.pop('css_class')

    def render(self, form, form_style, context, template_pack=TEMPLATE_PACK, **kwargs):
        html = self.get_rendered_fields(form, form_style, context, template_pack, **kwargs)
        template = self.get_template_name(template_pack)
        context.update({
            'formactions': self,
            'fields_output': html
        })
        return render_to_string(template, context.flatten())

    def flat_attrs(self):
        # Attributes serialized for direct interpolation in the template.
        return flatatt(self.attrs)
class InlineCheckboxes(Field):
    """
    Layout object for rendering checkboxes inline::

        InlineCheckboxes('field_name')
    """
    template = "%s/layout/checkboxselectmultiple_inline.html"

    def render(self, form, form_style, context, template_pack=TEMPLATE_PACK, **kwargs):
        # Delegate to Field.render, tagging the widget with the 'inline' class.
        parent_render = super(InlineCheckboxes, self).render
        return parent_render(form, form_style, context,
                             template_pack=template_pack,
                             extra_context={'inline_class': 'inline'})
class InlineRadios(Field):
    """
    Layout object for rendering radiobuttons inline::

        InlineRadios('field_name')
    """
    template = "%s/layout/radioselect_inline.html"

    def render(self, form, form_style, context, template_pack=TEMPLATE_PACK, **kwargs):
        # Delegate to Field.render, tagging the widget with the 'inline' class.
        parent_render = super(InlineRadios, self).render
        return parent_render(form, form_style, context,
                             template_pack=template_pack,
                             extra_context={'inline_class': 'inline'})
class FieldWithButtons(Div):
    """Layout object rendering a field with buttons attached to it.

    The first element of ``self.fields`` is the field; every remaining
    element is rendered as a button next to it.
    """
    template = '%s/layout/field_with_buttons.html'
    field_template = '%s/layout/field.html'

    def render(self, form, form_style, context, template_pack=TEMPLATE_PACK, extra_context=None, **kwargs):
        # We first render the buttons
        field_template = self.field_template % template_pack
        buttons = ''.join(
            render_field(
                field, form, form_style, context,
                field_template, layout_object=self,
                template_pack=template_pack, **kwargs
            ) for field in self.fields[1:]
        )
        # NOTE: the incoming extra_context argument is replaced wholesale.
        extra_context = {'div': self, 'buttons': buttons}
        template = self.get_template_name(template_pack)

        if isinstance(self.fields[0], Field):
            # FieldWithButtons(Field('field_name'), StrictButton("go"))
            # We render the field passing its name and attributes
            return render_field(
                self.fields[0][0], form, form_style, context,
                template, attrs=self.fields[0].attrs,
                template_pack=template_pack, extra_context=extra_context, **kwargs
            )
        else:
            return render_field(
                self.fields[0], form, form_style, context, template,
                extra_context=extra_context, **kwargs
            )
class StrictButton(object):
    """
    Layout object for rendering an HTML button::

        Button("button content", css_class="extra")
    """
    template = '%s/layout/button.html'
    field_classes = 'btn'

    def __init__(self, content, **kwargs):
        self.content = content
        self.template = kwargs.pop('template', self.template)
        kwargs.setdefault('type', 'button')

        # We turn css_id and css_class into id and class
        if 'css_id' in kwargs:
            kwargs['id'] = kwargs.pop('css_id')
        kwargs['class'] = self.field_classes
        if 'css_class' in kwargs:
            kwargs['class'] += " %s" % kwargs.pop('css_class')

        # Remaining kwargs become flat HTML attributes on the button tag.
        self.flat_attrs = flatatt(kwargs)

    def render(self, form, form_style, context, template_pack=TEMPLATE_PACK, **kwargs):
        # The button content may itself contain Django template syntax,
        # rendered against the current context.
        self.content = Template(text_type(self.content)).render(context)
        template = self.template % template_pack
        context.update({'button': self})
        return render_to_string(template, context.flatten())
class Container(Div):
    """
    Base class used for `Tab` and `AccordionGroup`; represents a basic
    container concept.
    """
    css_class = ""

    def __init__(self, name, *fields, **kwargs):
        super(Container, self).__init__(*fields, **kwargs)
        self.template = kwargs.pop('template', self.template)
        self.name = name
        # Remember whether the caller explicitly passed an 'active' state so
        # holders can distinguish "default False" from "explicitly False".
        self._active_originally_included = "active" in kwargs
        self.active = kwargs.pop("active", False)
        if not self.css_id:
            self.css_id = slugify(self.name)

    def __contains__(self, field_name):
        """Check whether field_name is contained within this container."""
        return field_name in (pointer[1] for pointer in self.get_field_names())

    def render(self, form, form_style, context, template_pack=TEMPLATE_PACK, **kwargs):
        # Toggle the 'active' CSS class to mirror self.active before rendering.
        if self.active:
            if 'active' not in self.css_class:
                self.css_class += ' active'
        else:
            self.css_class = self.css_class.replace('active', '')
        return super(Container, self).render(form, form_style, context, template_pack)
class ContainerHolder(Div):
    """
    Base class used for `TabHolder` and `Accordion`; groups containers.
    """

    def first_container_with_errors(self, errors):
        """
        Return the first container holding any of *errors*, or None.
        """
        return next(
            (tab for tab in self.fields
             if any(error in tab for error in errors)),
            None)

    def open_target_group_for_form(self, form):
        """
        Make sure that the first group that should be open is open.

        This is either the first group with errors or the first group in
        the container, unless that first group was originally set to
        active=False.
        """
        target = self.first_container_with_errors(form.errors.keys())
        if target is not None:
            target.active = True
            return target

        # No errors: open the first group, unless it explicitly opted out.
        target = self.fields[0]
        if not target._active_originally_included:
            target.active = True
        return target
class Tab(Container):
    """
    Tab object. It wraps fields in a div whose default class is "tab-pane"
    and takes a name as first argument. Example::

        Tab('tab_name', 'form_field_1', 'form_field_2', 'form_field_3')
    """
    css_class = 'tab-pane'
    link_template = '%s/layout/tab-link.html'

    def render_link(self, template_pack=TEMPLATE_PACK, **kwargs):
        """
        Render the link for the tab-pane. It must be called after render so
        css_class is updated with 'active' if needed.
        """
        return render_to_string(self.link_template % template_pack,
                                {'link': self})
class TabHolder(ContainerHolder):
    """
    TabHolder object. It wraps Tab objects in a container. Requires bootstrap-tab.js::

        TabHolder(
            Tab('form_field_1', 'form_field_2'),
            Tab('form_field_3')
        )
    """
    template = '%s/layout/tab.html'

    def render(self, form, form_style, context, template_pack=TEMPLATE_PACK, **kwargs):
        # Deactivate every tab, then re-open the one that should be active
        # (first tab with errors, else the first tab).
        for tab in self.fields:
            tab.active = False

        # Open the group that should be open.
        self.open_target_group_for_form(form)
        content = self.get_rendered_fields(form, form_style, context, template_pack)
        # Tab links must be rendered after the panes so their css_class
        # reflects the active state.
        links = ''.join(tab.render_link(template_pack) for tab in self.fields)

        context.update({
            'tabs': self,
            'links': links,
            'content': content
        })
        template = self.get_template_name(template_pack)
        return render_to_string(template, context.flatten())
class AccordionGroup(Container):
    """
    Accordion Group (pane) object. It wraps given fields inside an accordion
    tab. It takes accordion tab name as first argument::

        AccordionGroup("group name", "form_field_1", "form_field_2")
    """
    template = "%s/accordion-group.html"
    # id of the parent accordion div; filled in by Accordion.render.
    data_parent = ""
class Accordion(ContainerHolder):
    """
    Accordion menu object. It wraps `AccordionGroup` objects in a container::

        Accordion(
            AccordionGroup("group name", "form_field_1", "form_field_2"),
            AccordionGroup("another group name", "form_field")
        )
    """
    template = "%s/accordion.html"

    def render(self, form, form_style, context, template_pack=TEMPLATE_PACK, **kwargs):
        content = ''

        # accordion group needs the parent div id to set `data-parent` (I don't
        # know why). This needs to be a unique id
        if not self.css_id:
            self.css_id = "-".join(["accordion", text_type(randint(1000, 9999))])

        # Open the group that should be open.
        self.open_target_group_for_form(form)

        for group in self.fields:
            group.data_parent = self.css_id
            content += render_field(
                group, form, form_style, context, template_pack=template_pack, **kwargs
            )

        template = self.get_template_name(template_pack)
        context.update({'accordion': self, 'content': content})
        return render_to_string(template, context.flatten())
class Alert(Div):
    """
    `Alert` generates markup in the form of an alert dialog::

        Alert(content="<strong>Warning!</strong> Best check yo self, you're not looking too good.")
    """
    template = "%s/layout/alert.html"
    css_class = "alert"

    def __init__(self, content, dismiss=True, block=False, **kwargs):
        # `block` only toggles the 'alert-block' class; no child fields are
        # ever passed to Div.
        fields = []
        if block:
            self.css_class += ' alert-block'
        Div.__init__(self, *fields, **kwargs)
        self.template = kwargs.pop('template', self.template)
        self.content = content
        self.dismiss = dismiss

    def render(self, form, form_style, context, template_pack=TEMPLATE_PACK, **kwargs):
        template = self.get_template_name(template_pack)
        context.update({'alert': self, 'content': self.content, 'dismiss': self.dismiss})
        return render_to_string(template, context.flatten())
class UneditableField(Field):
    """
    Layout object for rendering fields as uneditable in bootstrap

    Example::

        UneditableField('field_name', css_class="input-xlarge")
    """
    template = "%s/layout/uneditable_input.html"

    def __init__(self, field, *args, **kwargs):
        # Force bootstrap's 'uneditable-input' class; set before delegating
        # to Field.__init__.
        self.attrs = {'class': 'uneditable-input'}
        super(UneditableField, self).__init__(field, *args, **kwargs)
class InlineField(Field):
    """Layout object rendering a field with the inline-field template."""
    template = "%s/layout/inline_field.html"
| |
# -*- coding: utf-8 -*-
"""
Shot-O-matic
~~~~~~
A simple screenshot upload/showcase website,
using the Flask microframework (http://flask.pocoo.org).
:copyright: (c) 2010 by Aljoscha Krettek.
:license: BSD, see LICENSE for more details.
"""
from __future__ import with_statement
import os
import shutil
import glob
import sqlite3
from contextlib import closing
# for our decorators
from functools import wraps
from flask import Flask, request, session, g, redirect, url_for, abort, \
render_template, flash, send_file
from werkzeug import SharedDataMiddleware
from werkzeug import secure_filename
from werkzeug import generate_password_hash, check_password_hash
from werkzeug.contrib.sessions import FilesystemSessionStore
# configuration
import config
# Flask application object; all settings come from the local config module.
app = Flask(__name__)
app.secret_key = config.SECRET_KEY  # used by Flask to sign the session cookie
app.debug = config.DEBUG
################################################################################
# DB stuff
def connect_db():
    """Returns a new connection to the database.

    Callers are responsible for closing the connection (the per-request
    connection is closed in after_request).
    """
    return sqlite3.connect(config.DATABASE)
def query_db(query, args=(), one=False, db=None):
    """Run *query* with *args* and return rows as dicts keyed by column name.

    Uses the per-request connection ``g.db`` unless *db* is given.  With
    ``one=True``, returns just the first row, or None when nothing matched.
    """
    if db is None:
        db = g.db
    cur = db.execute(query, args)
    columns = [desc[0] for desc in cur.description]
    rows = [dict(zip(columns, row)) for row in cur.fetchall()]
    if one:
        return rows[0] if rows else None
    return rows
def user_exists(name):
    """Return True when a user row with the given name exists."""
    user = query_db('select * from users where name=?', [name], one=True)
    return user is not None
def _create_user(name, password, admin=False, db=None):
    """Insert a user row (hashed password) and create its screenshot dir.

    *db* defaults to the per-request connection g.db; init_db passes an
    explicit connection since no request context exists there.
    """
    if db is None:
        db = g.db
    # Fourth column (screenshots_dir) is filled in below once the row exists.
    db.execute('insert into users VALUES(?, ?, ?, NULL)',
               [name, generate_password_hash(password),admin])
    user = query_db("select * from users where name=?", [name], one=True,
                    db=db)
    screenshots_dir = user['name']
    abs_path = os.path.join(config.SCREENSHOTS_DIR, screenshots_dir)
    if not os.path.exists(abs_path):
        os.mkdir(abs_path)
    db.execute("update users set screenshots_dir = ? where name=?",
               [screenshots_dir, name])
    db.commit()
def _delete_user(name, db=None):
    """Remove a user row and its screenshot directory; no-op when absent."""
    if db is None:
        db = g.db
    user = query_db("select * from users where name=?", [name], one=True,
                    db=db)
    if user is None:
        return
    # Delete the user's uploaded screenshots along with the account.
    abs_path = os.path.join(config.SCREENSHOTS_DIR, user['screenshots_dir'])
    if os.path.exists(abs_path):
        shutil.rmtree(abs_path)
    db.execute('delete from users where name=?', [name])
    db.commit()
def init_db():
    """Creates the database tables and the default admin user."""
    # print as a function call so this line also parses under Python 3
    # (identical output for a single argument on Python 2).
    print("Creating database in '{0}'".format(config.DATABASE))
    with closing(connect_db()) as db:
        with app.open_resource('schema.sql') as f:
            db.cursor().executescript(f.read())
        _create_user(config.DEFAULT_USERNAME,
                     config.DEFAULT_PASSWORD,
                     admin=True,
                     db=db)
################################################################################
# Per request stuff
@app.before_request
def before_request():
    """
    Make sure we are connected to the database each request and also
    do the session handling.
    """
    g.db = connect_db()
    session_store = FilesystemSessionStore(config.SESSIONS_DIR)
    # 'sid' in the Flask cookie points at our server-side werkzeug session;
    # g.user mirrors the user stored in that session, if any.
    if 'sid' in session:
        sid = session.get('sid')
        g.session = session_store.get(sid)
        if 'user' in g.session:
            g.user = g.session['user']
        else:
            g.user = None
    else:
        g.session = session_store.new()
        g.user = None
@app.after_request
def after_request(response):
    """
    Closes the database again at the end of the request and store the
    session if necessary.
    """
    session_store = FilesystemSessionStore(config.SESSIONS_DIR)
    if g.session.should_save:
        session_store.save(g.session)
        session['sid'] = g.session.sid
        session.permanent = True
        # we have to do this because Flask
        # stores the SecureCookie containing the "Session"
        # before calling the "after_request" functions
        app.save_session(session, response)
    g.db.close()
    return response
################################################################################
# Decorators
def admin_required(message="Admin status required to access this section."):
    """Decorator factory: redirect non-admin users away, flashing *message*.

    Must be applied after the request has populated g.user (see
    before_request / login_required).
    """
    # Typo fix: the default flash message read "acccess".
    def decorator(f):
        @wraps(f)
        def decorated_function(*args, **kwargs):
            if g.user is None or not g.user['admin']:
                flash(message, 'error')
                return redirect(url_for('show_screenshots'))
            return f(*args, **kwargs)
        return decorated_function
    return decorator
def login_required(message="You must be logged in to access this section."):
    """Decorator factory requiring a logged-in user.

    Also accepts credentials posted directly as form fields (username /
    password), so upload clients can authenticate per request without a
    session.
    """
    def decorator(f):
        @wraps(f)
        def decorated_function(*args, **kwargs):
            if g.user is None:
                # allow for direct file upload with client
                if (request.form.get('username', False) and
                    request.form.get('password', False)):
                    user = query_db('select * from users where name = ?',
                                    [request.form['username']],
                                    one=True)
                    if user and check_password_hash(user['password'],
                                                    request.form['password']):
                        g.user = user
                        return f(*args, **kwargs)
                # Neither session nor form credentials: bounce to login.
                flash(message, 'notice')
                return redirect(url_for('login', next=request.url))
            return f(*args, **kwargs)
        return decorated_function
    return decorator
################################################################################
# Views
@app.route('/<user>')
@app.route('/')
def show_screenshots(user=None):
    """Show the newest screenshots, for one user or for everybody.

    Without the ``all`` query parameter only the ten most recent
    screenshots are shown.
    """
    if user is None:
        # No user given: every directory under SCREENSHOTS_DIR is a user.
        users = glob.glob(os.path.join(config.SCREENSHOTS_DIR, '*'))
        users = [os.path.basename(name) for name in users]
    else:
        users = [user]
    screenshots = []
    for user in users:
        user_shots = glob.glob(os.path.join(config.SCREENSHOTS_DIR, user, '*'))
        for user_shot in user_shots:
            screenshots.append((user, os.path.basename(user_shot)))
    # Newest first, by filename. key= replaces the Python-2-only cmp=
    # argument with the identical ordering.
    screenshots.sort(reverse=True, key=lambda shot: shot[1])
    show_all = request.args.get('all', 0)
    # NOTE(review): request.args values are strings, so any present ?all=
    # value (even '0') disables truncation; only the absent-default int 0
    # matches here. Presumably intended — confirm.
    if show_all == 0:
        screenshots = screenshots[:10]
    return render_template('show_screenshots.html', screenshots=screenshots)
@app.route('/<user>/shot/<shot>')
def screenshot(user, shot):
    """Serve a single screenshot file belonging to *user*."""
    # secure_filename guards against path traversal via the URL parts.
    user = secure_filename(user)
    shot = secure_filename(shot)
    filename = os.path.join(config.SCREENSHOTS_DIR, user, shot)
    if not os.path.exists(filename):
        flash("User {0} has not uploaded {1}.".format(user, shot), 'error')
        return redirect(url_for('show_screenshots'))
    return send_file(filename)
def allowed_file(filename):
    """Return True when *filename* has an extension in ALLOWED_EXTENSIONS."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1].lower()
    return extension in config.ALLOWED_EXTENSIONS
@app.route('/upload', methods=['POST', 'GET'])
@login_required("You need to be logged in in order to upload screenshots.")
def upload_screenshot():
    """Upload form (GET) and upload handler (POST) for screenshots."""
    if request.method == 'POST':
        file = request.files['screenshot']
        if file and allowed_file(file.filename):
            # secure_filename strips path separators and other unsafe parts.
            filename = secure_filename(file.filename)
            file.save(os.path.join(config.SCREENSHOTS_DIR,
                                   g.user['screenshots_dir'],
                                   filename))
            flash('Screenshot uploaded.', 'success')
            return redirect(url_for('show_screenshots'))
        else:
            flash('Uploads of this filetype not allowed.', 'error')
    return render_template('upload_screenshot.html')
@app.route('/<user>/delete/<shot>')
@login_required("You need to be logged in in order to delete screenshots.")
def delete_screenshot(user, shot):
    """Delete one screenshot; admins may delete anyone's, users their own."""
    # secure_filename guards against path traversal via the URL parts.
    user = secure_filename(user)
    shot = secure_filename(shot)
    filename = os.path.join(config.SCREENSHOTS_DIR, user, shot)
    if not g.user['admin'] and user != g.user['name']:
        flash("You can only delete your own screenshots.", 'notice')
        return redirect(url_for('show_screenshots'))
    if not os.path.exists(filename):
        flash("Screenshot '{0}' does not exist.".format(shot), 'error')
        return redirect(url_for('show_screenshots'))
    os.remove(filename)
    flash('Screenshot removed.', 'success')
    return redirect(url_for('show_screenshots'))
@app.route('/users')
@login_required()
@admin_required()
def show_users():
    """Admin-only listing of all user accounts."""
    users = query_db('select * from users')
    return render_template('show_users.html', users=users)
@app.route('/users/add', methods=['POST'])
@login_required()
@admin_required()
def add_user():
    """Admin-only: create a user from the posted name and password."""
    _create_user(request.form['name'], request.form['password'])
    flash('User added.', 'success')
    return redirect(url_for('show_users'))
@app.route('/users/delete/<name>', methods=['POST', 'GET'])
@login_required()
@admin_required()
def delete_user(name):
    """Admin-only: delete the named user and all their screenshots."""
    user = query_db('select * from users where name=?', [name])
    if len(user) <= 0:
        flash('User does not exist.', 'error')
    else:
        _delete_user(name)
        flash('User deleted.', 'success')
    return redirect(url_for('show_users'))
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Log a user in, storing the user row in the server-side session."""
    error = None
    if request.method == 'POST':
        user = query_db('select * from users where name = ?',
                        [request.form['username']],
                        one=True)
        if user is None:
            error = 'Invalid username.'
        elif not check_password_hash(user['password'],request.form['password']):
            error = 'Invalid password.'
        else:
            g.session['user'] = user
            flash('You were logged in.', 'success')
            # Honor the ?next= redirect set by login_required.
            if 'next' in request.args:
                return redirect(request.args['next'])
            else:
                return redirect(url_for('show_screenshots'))
    return render_template('login.html', error=error)
@app.route('/logout')
def logout():
    """Forget the server-side session id and return to the front page."""
    session.pop('sid', None)
    flash('You were logged out.', 'success')
    return redirect(url_for('show_screenshots'))
if __name__ == '__main__':
    # Local development entry point; production deployments import `app`.
    app.debug = True
    app.run()
| |
import logging
import json
from django.test import TestCase
from builds.models import Version
from projects.models import Project
from projects import tasks
log = logging.getLogger(__name__)
class PrivacyTests(TestCase):
fixtures = ["eric"]
def tearDown(self):
tasks.update_docs = self.old_bd
def setUp(self):
self.old_bd = tasks.update_docs
def mock(*args, **kwargs):
pass
#log.info("Mocking for great profit and speed.")
tasks.update_docs.delay = mock
    def _create_kong(self, privacy_level='private',
                     version_privacy_level='private'):
        """Create the django-kong project through the import form.

        Returns the created Project; asserts the redirect and detail page
        along the way.
        """
        self.client.login(username='eric', password='test')
        log.info(("Making kong with privacy: %s and version privacy: %s"
                  % (privacy_level, version_privacy_level)))
        r = self.client.post(
            '/dashboard/import/',
            {'repo_type': 'git', 'name': 'Django Kong', 'language': 'en',
             'tags': 'big, fucking, monkey', 'default_branch': '',
             'project_url': 'http://django-kong.rtfd.org',
             'repo': 'https://github.com/ericholscher/django-kong',
             'csrfmiddlewaretoken': '34af7c8a5ba84b84564403a280d9a9be',
             'default_version': 'latest',
             'python_interpreter': 'python',
             'privacy_level': privacy_level,
             'version_privacy_level': version_privacy_level,
             'description': 'OOHHH AH AH AH KONG SMASH',
             'documentation_type': 'sphinx'})
        self.assertEqual(r.status_code, 302)
        self.assertAlmostEqual(Project.objects.count(), 1)
        r = self.client.get('/projects/django-kong/')
        self.assertEqual(r.status_code, 200)
        return Project.objects.get(slug='django-kong')
def test_private_repo(self):
"""Check that private projects don't show up in: builds, downloads,
detail, homepage
"""
self._create_kong('private', 'private')
self.client.login(username='eric', password='test')
r = self.client.get('/')
self.assertTrue('Django Kong' in r.content)
r = self.client.get('/projects/django-kong/')
self.assertEqual(r.status_code, 200)
r = self.client.get('/builds/django-kong/')
self.assertEqual(r.status_code, 200)
r = self.client.get('/projects/django-kong/downloads/')
self.assertEqual(r.status_code, 200)
self.client.login(username='tester', password='test')
r = self.client.get('/')
self.assertTrue('Django Kong' not in r.content)
r = self.client.get('/projects/django-kong/')
self.assertEqual(r.status_code, 404)
r = self.client.get('/builds/django-kong/')
self.assertEqual(r.status_code, 404)
r = self.client.get('/projects/django-kong/downloads/')
self.assertEqual(r.status_code, 404)
def test_protected_repo(self):
"""Check that protected projects don't show up in: builds, downloads,
detail, project list
"""
self._create_kong('protected', 'protected')
self.client.login(username='eric', password='test')
r = self.client.get('/')
self.assertTrue('Django Kong' in r.content)
r = self.client.get('/projects/')
self.assertTrue('Django Kong' in r.content)
r = self.client.get('/projects/django-kong/')
self.assertEqual(r.status_code, 200)
r = self.client.get('/builds/django-kong/')
self.assertEqual(r.status_code, 200)
r = self.client.get('/projects/django-kong/downloads/')
self.assertEqual(r.status_code, 200)
self.client.login(username='tester', password='test')
r = self.client.get('/')
self.assertTrue('Django Kong' not in r.content)
r = self.client.get('/projects/')
self.assertTrue('Django Kong' not in r.content)
r = self.client.get('/projects/django-kong/')
self.assertEqual(r.status_code, 200)
r = self.client.get('/builds/django-kong/')
self.assertEqual(r.status_code, 200)
r = self.client.get('/projects/django-kong/downloads/')
self.assertEqual(r.status_code, 200)
def test_public_repo(self):
"""Check that public projects show up in: builds, downloads, detail,
homepage
"""
self._create_kong('public', 'public')
self.client.login(username='eric', password='test')
r = self.client.get('/')
self.assertTrue('Django Kong' in r.content)
r = self.client.get('/projects/django-kong/')
self.assertEqual(r.status_code, 200)
r = self.client.get('/builds/django-kong/')
self.assertEqual(r.status_code, 200)
r = self.client.get('/projects/django-kong/downloads/')
self.assertEqual(r.status_code, 200)
self.client.login(username='tester', password='test')
r = self.client.get('/')
self.assertTrue('Django Kong' in r.content)
r = self.client.get('/projects/django-kong/')
self.assertEqual(r.status_code, 200)
r = self.client.get('/builds/django-kong/')
self.assertEqual(r.status_code, 200)
r = self.client.get('/projects/django-kong/downloads/')
self.assertEqual(r.status_code, 200)
def test_private_branch(self):
kong = self._create_kong('public', 'private')
self.client.login(username='eric', password='test')
Version.objects.create(project=kong, identifier='test id',
verbose_name='test verbose', slug='test-slug')
r = self.client.post('/dashboard/django-kong/versions/',
{'version-test-slug': 'on',
'privacy-test-slug': 'private'})
self.assertEqual(Version.objects.count(), 1)
self.assertEqual(Version.objects.all()[0].privacy_level, 'private')
r = self.client.get('/projects/django-kong/')
self.assertTrue('test-slug' in r.content)
# Make sure it doesn't show up as tester
self.client.login(username='tester', password='test')
r = self.client.get('/projects/django-kong/')
self.assertTrue('test-slug' not in r.content)
def test_protected_branch(self):
kong = self._create_kong('public', 'protected')
self.client.login(username='eric', password='test')
Version.objects.create(project=kong, identifier='test id',
verbose_name='test verbose', slug='test-slug')
r = self.client.post('/dashboard/django-kong/versions/',
{'version-test-slug': 'on',
'privacy-test-slug': 'protected'})
self.assertEqual(Version.objects.count(), 1)
self.assertEqual(Version.objects.all()[0].privacy_level, 'protected')
r = self.client.get('/projects/django-kong/')
self.assertTrue('test-slug' in r.content)
# Make sure it doesn't show up as tester
self.client.login(username='tester', password='test')
r = self.client.get('/projects/django-kong/')
self.assertTrue('test-slug' not in r.content)
def test_public_branch(self):
kong = self._create_kong('public', 'public')
self.client.login(username='eric', password='test')
Version.objects.create(project=kong, identifier='test id',
verbose_name='test verbose', slug='test-slug')
r = self.client.post('/dashboard/django-kong/versions/',
{'version-test-slug': 'on',
'privacy-test-slug': 'public'})
self.assertEqual(Version.objects.count(), 1)
self.assertEqual(Version.objects.all()[0].privacy_level, 'public')
r = self.client.get('/projects/django-kong/')
self.assertTrue('test-slug' in r.content)
# Make sure it doesn't show up as tester
self.client.login(username='tester', password='test')
r = self.client.get('/projects/django-kong/')
self.assertTrue('test-slug' in r.content)
def test_public_repo_api(self):
self._create_kong('public', 'public')
self.client.login(username='eric', password='test')
resp = self.client.get("http://testserver/api/v1/project/django-kong/",
data={"format": "json"})
self.assertEqual(resp.status_code, 200)
resp = self.client.get("http://testserver/api/v1/project/",
data={"format": "json"})
self.assertEqual(resp.status_code, 200)
data = json.loads(resp.content)
self.assertEqual(data['meta']['total_count'], 1)
self.client.login(username='tester', password='test')
resp = self.client.get("http://testserver/api/v1/project/django-kong/",
data={"format": "json"})
self.assertEqual(resp.status_code, 200)
resp = self.client.get("http://testserver/api/v1/project/",
data={"format": "json"})
self.assertEqual(resp.status_code, 200)
data = json.loads(resp.content)
self.assertEqual(data['meta']['total_count'], 1)
def test_protected_repo_api(self):
self._create_kong('protected', 'protected')
self.client.login(username='eric', password='test')
resp = self.client.get("http://testserver/api/v1/project/django-kong/",
data={"format": "json"})
self.assertEqual(resp.status_code, 200)
resp = self.client.get("http://testserver/api/v1/project/",
data={"format": "json"})
data = json.loads(resp.content)
self.assertEqual(data['meta']['total_count'], 1)
self.client.login(username='tester', password='test')
resp = self.client.get("http://testserver/api/v1/project/",
data={"format": "json"})
data = json.loads(resp.content)
self.assertEqual(data['meta']['total_count'], 0)
# Need to figure out how to properly filter the detail view in
# tastypie. Protected stuff won't show up in detail pages on the API
# currently.
"""
resp = self.client.get("http://testserver/api/v1/project/django-kong/",
data={"format": "json"})
self.assertEqual(resp.status_code, 200)
"""
def test_private_repo_api(self):
self._create_kong('private', 'private')
self.client.login(username='eric', password='test')
resp = self.client.get("http://testserver/api/v1/project/django-kong/",
data={"format": "json"})
self.assertEqual(resp.status_code, 200)
resp = self.client.get("http://testserver/api/v1/project/",
data={"format": "json"})
self.assertEqual(resp.status_code, 200)
data = json.loads(resp.content)
self.assertEqual(data['meta']['total_count'], 1)
self.client.login(username='tester', password='test')
resp = self.client.get("http://testserver/api/v1/project/django-kong/",
data={"format": "json"})
self.assertEqual(resp.status_code, 404)
resp = self.client.get("http://testserver/api/v1/project/",
data={"format": "json"})
self.assertEqual(resp.status_code, 200)
data = json.loads(resp.content)
self.assertEqual(data['meta']['total_count'], 0)
def test_private_doc_serving(self):
kong = self._create_kong('public', 'private')
self.client.login(username='eric', password='test')
Version.objects.create(project=kong, identifier='test id',
verbose_name='test verbose', slug='test-slug')
self.client.post('/dashboard/django-kong/versions/',
{'version-test-slug': 'on',
'privacy-test-slug': 'private'})
r = self.client.get('/docs/django-kong/en/test-slug/')
self.client.login(username='eric', password='test')
self.assertEqual(r.status_code, 200)
# Make sure it doesn't show up as tester
self.client.login(username='tester', password='test')
r = self.client.get('/docs/django-kong/en/test-slug/')
self.assertEqual(r.status_code, 401)
| |
#!/usr/bin/env python
from __future__ import print_function
import fileinput
import glob
import os
import shutil
import sys
### Begin compatibility block for pre-v2.6: ###
#
# ignore_patterns and copytree funtions are copies of what is included
# in shutil.copytree of python v2.6 and later.
#
### When compatibility is no-longer needed, this block
### can be replaced with:
###
### from shutil import ignore_patterns, copytree
###
### or the "shutil." qualifier can be prepended to the function
### names where they are used.
# Python 2 only defines WindowsError on Windows; bind it to None elsewhere so
# the isinstance() check inside copytree() below works unconditionally.
try:
    WindowsError
except NameError:
    WindowsError = None
def ignore_patterns(*patterns):
    """Build a callable suitable as the ``ignore`` argument of copytree().

    Each pattern is a glob-style expression; any name matching at least one
    pattern is excluded from the copy.
    """
    import fnmatch
    def _ignore_patterns(path, names):
        # Collect every name matching any of the patterns, deduplicated.
        excluded = set()
        for pat in patterns:
            excluded.update(fnmatch.filter(names, pat))
        return excluded
    return _ignore_patterns
def copytree(src, dst, symlinks=False, ignore=None):
    """Recursively copy a directory tree using copy2().

    The destination directory must not already exist.
    If exception(s) occur, an Error is raised with a list of reasons.

    If the optional symlinks flag is true, symbolic links in the
    source tree result in symbolic links in the destination tree; if
    it is false, the contents of the files pointed to by symbolic
    links are copied.

    The optional ignore argument is a callable. If given, it
    is called with the `src` parameter, which is the directory
    being visited by copytree(), and `names` which is the list of
    `src` contents, as returned by os.listdir():

        callable(src, names) -> ignored_names

    Since copytree() is called recursively, the callable will be
    called once for each directory that is copied. It returns a
    list of names relative to the `src` directory that should
    not be copied.

    XXX Consider this example code rather than the ultimate tool.
    """
    from shutil import copy2, Error, copystat
    names = os.listdir(src)
    if ignore is not None:
        ignored_names = ignore(src, names)
    else:
        ignored_names = set()
    os.makedirs(dst)
    errors = []
    for name in names:
        if name in ignored_names:
            continue
        srcname = os.path.join(src, name)
        dstname = os.path.join(dst, name)
        try:
            if symlinks and os.path.islink(srcname):
                linkto = os.readlink(srcname)
                os.symlink(linkto, dstname)
            elif os.path.isdir(srcname):
                copytree(srcname, dstname, symlinks, ignore)
            else:
                # Will raise a SpecialFileError for unsupported file types
                copy2(srcname, dstname)
        # catch the Error from the recursive copytree so that we can
        # continue with other files
        except Error as err:
            errors.extend(err.args[0])
        except EnvironmentError as why:
            errors.append((srcname, dstname, str(why)))
    try:
        copystat(src, dst)
    except OSError as why:
        if WindowsError is not None and isinstance(why, WindowsError):
            # Copying file access times may fail on Windows
            pass
        else:
            # BUG FIX: was errors.extend(...), which flattened the tuple into
            # three separate list entries; append keeps (src, dst, reason) as
            # one record, consistent with the per-file error handling above
            # (and with the fixed stdlib shutil.copytree).
            errors.append((src, dst, str(why)))
    if errors:
        raise Error(errors)
### End compatibility block for pre-v2.6 ###
def copy_if_out_of_date(original, derived):
    """Copy *original* over *derived* when the copy is missing or stale."""
    up_to_date = (os.path.exists(derived) and
                  os.stat(derived).st_mtime >= os.stat(original).st_mtime)
    if up_to_date:
        return
    try:
        shutil.copyfile(original, derived)
    except IOError:
        # A missing matplotlibrc usually means the package was never built;
        # give a more actionable message in that one case.
        if os.path.basename(original) != 'matplotlibrc':
            raise
        raise IOError("'%s' not found. Did you run `python setup.py build`?"
                      % original)
def check_build():
    """Create the sphinx output directories under the cwd, ignoring any
    that already exist."""
    for directory in ('build', 'build/doctrees', 'build/html', 'build/latex',
                      'build/texinfo', '_static', '_templates'):
        try:
            os.mkdir(directory)
        except OSError:
            # Already present (or parent race) — either way, keep going.
            pass
def doctest():
    # Run sphinx's doctest builder over the docs.
    # NOTE: this function shadows the stdlib ``doctest`` module name
    # within this script; it is dispatched via the ``funcd`` table below.
    os.system('sphinx-build -b doctest -d build/doctrees . build/doctest')
def linkcheck():
    # Verify external links in the docs using sphinx's linkcheck builder.
    os.system('sphinx-build -b linkcheck -d build/doctrees . build/linkcheck')
def html():
    """Build the HTML docs, then copy the pyplot figure sources and the
    changelog into the build tree."""
    check_build()
    copy_if_out_of_date('../lib/matplotlib/mpl-data/matplotlibrc', '_static/matplotlibrc')
    if small_docs:
        # Quick build: render plots as low-dpi PNG only.
        options = "-D plot_formats=\"[('png', 80)]\""
    else:
        options = ''
    # os.system returns nonzero on failure.
    if os.system('sphinx-build %s -b html -d build/doctrees . build/html' % options):
        raise SystemExit("Building HTML failed.")
    figures_dest_path = 'build/html/pyplots'
    if os.path.exists(figures_dest_path):
        shutil.rmtree(figures_dest_path)
    copytree(
        'pyplots', figures_dest_path,
        ignore=ignore_patterns("*.pyc"))
    # Clean out PDF files from the _images directory
    for filename in glob.glob('build/html/_images/*.pdf'):
        os.remove(filename)
    shutil.copy('../CHANGELOG', 'build/html/_static/CHANGELOG')
def latex():
    """Build the LaTeX docs and render them to PDF via sphinx's makefile
    (skipped on Windows)."""
    check_build()
    #figs()
    if sys.platform != 'win32':
        # LaTeX format.
        if os.system('sphinx-build -b latex -d build/doctrees . build/latex'):
            raise SystemExit("Building LaTeX failed.")
        # Produce pdf.
        os.chdir('build/latex')
        # Call the makefile produced by sphinx...
        if os.system('make'):
            raise SystemExit("Rendering LaTeX failed.")
        os.chdir('../..')
    else:
        print('latex build has not been tested on windows')
def texinfo():
    """Build the Texinfo docs and render the info file via sphinx's
    makefile (skipped on Windows)."""
    check_build()
    #figs()
    if sys.platform != 'win32':
        # Texinfo format.
        if os.system(
                'sphinx-build -b texinfo -d build/doctrees . build/texinfo'):
            raise SystemExit("Building Texinfo failed.")
        # Produce info file.
        os.chdir('build/texinfo')
        # Call the makefile produced by sphinx...
        if os.system('make'):
            raise SystemExit("Rendering Texinfo failed.")
        os.chdir('../..')
    else:
        print('texinfo build has not been tested on windows')
def clean():
    """Remove the build output and all generated files so the next build
    starts from scratch."""
    for tree in ("build", "examples"):
        shutil.rmtree(tree, ignore_errors=True)
    generated = ['mpl_examples/api/*.png',
                 'mpl_examples/pylab_examples/*.png',
                 'mpl_examples/pylab_examples/*.pdf',
                 'mpl_examples/units/*.png',
                 'pyplots/tex_demo.png',
                 '_static/matplotlibrc',
                 '_templates/gallery.html',
                 'users/installing.rst']
    for pattern in generated:
        for path in glob.glob(pattern):
            if os.path.exists(path):
                os.remove(path)
def all():
    """Build the default target set: html then latex.

    NOTE: shadows the builtin ``all()`` in this script's namespace; it is
    only invoked through the ``funcd`` dispatch table / no-arg default.
    """
    #figs()
    html()
    latex()
# Dispatch table mapping command-line target names to build functions.
funcd = {
    'html' : html,
    'latex' : latex,
    'texinfo' : texinfo,
    'clean' : clean,
    'all' : all,
    'doctest' : doctest,
    'linkcheck': linkcheck,
    }
small_docs = False
# Change directory to the one containing this file
current_dir = os.getcwd()
os.chdir(os.path.dirname(os.path.join(current_dir, __file__)))
copy_if_out_of_date('../INSTALL', 'users/installing.rst')
# Create the examples symlink, if it doesn't exist
required_symlinks = [
    ('mpl_examples', '../examples/'),
    ('mpl_toolkits/axes_grid/examples', '../../../examples/axes_grid/')
    ]
for link, target in required_symlinks:
    if not os.path.exists(link):
        if hasattr(os, 'symlink'):
            os.symlink(target, link)
        else:
            # Fallback for platforms without symlinks.
            # NOTE(review): joining link/'..'/ with a relative target looks
            # fragile — confirm this path resolves correctly on Windows.
            shutil.copytree(os.path.join(link, '..', target), link)
if len(sys.argv)>1:
    # '--small' is a modifier, not a target: strip it before dispatching.
    if '--small' in sys.argv[1:]:
        small_docs = True
        sys.argv.remove('--small')
    for arg in sys.argv[1:]:
        func = funcd.get(arg)
        if func is None:
            raise SystemExit('Do not know how to handle %s; valid args are %s'%(
                arg, funcd.keys()))
        func()
else:
    # No arguments: build everything at full quality.
    small_docs = False
    all()
# Restore the caller's working directory.
os.chdir(current_dir)
| |
from __future__ import print_function
# import numpy in two ways, both uses needed
import numpy as np
import numpy
import numba.unittest_support as unittest
from numba import njit, jit, testing, utils
from numba.errors import (TypingError, LoweringError, UnsupportedError)
from .support import TestCase, tag
from numba.six import exec_
class TestClosure(TestCase):
    """Closure-variable capture under @jit: values are captured at compile
    time (like globals in Numba), in both object and nopython modes."""
    def run_jit_closure_variable(self, **jitargs):
        Y = 10
        def add_Y(x):
            return x + Y
        c_add_Y = jit('i4(i4)', **jitargs)(add_Y)
        self.assertEqual(c_add_Y(1), 11)
        # Like globals in Numba, the value of the closure is captured
        # at time of JIT
        Y = 12  # should not affect function
        self.assertEqual(c_add_Y(1), 11)
    def test_jit_closure_variable(self):
        self.run_jit_closure_variable(forceobj=True)
    def test_jit_closure_variable_npm(self):
        self.run_jit_closure_variable(nopython=True)
    def run_rejitting_closure(self, **jitargs):
        # Re-jitting after the closure variable changes must pick up the
        # new value without disturbing the previously compiled function.
        Y = 10
        def add_Y(x):
            return x + Y
        c_add_Y = jit('i4(i4)', **jitargs)(add_Y)
        self.assertEqual(c_add_Y(1), 11)
        # Redo the jit
        Y = 12
        c_add_Y_2 = jit('i4(i4)', **jitargs)(add_Y)
        self.assertEqual(c_add_Y_2(1), 13)
        Y = 13  # should not affect function
        self.assertEqual(c_add_Y_2(1), 13)
        self.assertEqual(c_add_Y(1), 11)  # Test first function again
    def test_rejitting_closure(self):
        self.run_rejitting_closure(forceobj=True)
    def test_rejitting_closure_npm(self):
        self.run_rejitting_closure(nopython=True)
    def run_jit_multiple_closure_variables(self, **jitargs):
        Y = 10
        Z = 2
        def add_Y_mult_Z(x):
            return (x + Y) * Z
        c_add_Y_mult_Z = jit('i4(i4)', **jitargs)(add_Y_mult_Z)
        self.assertEqual(c_add_Y_mult_Z(1), 22)
    def test_jit_multiple_closure_variables(self):
        self.run_jit_multiple_closure_variables(forceobj=True)
    def test_jit_multiple_closure_variables_npm(self):
        self.run_jit_multiple_closure_variables(nopython=True)
    def run_jit_inner_function(self, **jitargs):
        # A jitted function calling another jitted function must not leak
        # references; assertRefCount guards against that.
        def mult_10(a):
            return a * 10
        c_mult_10 = jit('intp(intp)', **jitargs)(mult_10)
        c_mult_10.disable_compile()
        def do_math(x):
            return c_mult_10(x + 4)
        c_do_math = jit('intp(intp)', **jitargs)(do_math)
        c_do_math.disable_compile()
        with self.assertRefCount(c_do_math, c_mult_10):
            self.assertEqual(c_do_math(1), 50)
    def test_jit_inner_function(self):
        self.run_jit_inner_function(forceobj=True)
    def test_jit_inner_function_npm(self):
        self.run_jit_inner_function(nopython=True)
    @testing.allow_interpreter_mode
    def test_return_closure(self):
        # Returning a closure requires interpreter (object) mode.
        def outer(x):
            def inner():
                return x + 1
            return inner
        cfunc = jit(outer)
        self.assertEqual(cfunc(10)(), outer(10)())
class TestInlinedClosure(TestCase):
    """
    Tests for (partial) closure support in njit. The support is partial
    because it only works for closures that can be successfully inlined
    at compile time.
    """
    @tag('important')
    def test_inner_function(self):
        def outer(x):
            def inner(x):
                return x * x
            return inner(x) + inner(x)
        cfunc = njit(outer)
        self.assertEqual(cfunc(10), outer(10))
    @tag('important')
    def test_inner_function_with_closure(self):
        def outer(x):
            y = x + 1
            def inner(x):
                return x * x + y
            return inner(x) + inner(x)
        cfunc = njit(outer)
        self.assertEqual(cfunc(10), outer(10))
    @tag('important')
    def test_inner_function_with_closure_2(self):
        # The free variable is rebound between the two calls to inner().
        def outer(x):
            y = x + 1
            def inner(x):
                return x * y
            y = inner(x)
            return y + inner(x)
        cfunc = njit(outer)
        self.assertEqual(cfunc(10), outer(10))
    @unittest.skipIf(utils.PYVERSION < (3, 0), "needs Python 3")
    def test_inner_function_with_closure_3(self):
        # 'nonlocal' is py3-only syntax, hence the exec_ of a source string.
        code = """
            def outer(x):
                y = x + 1
                z = 0
                def inner(x):
                    nonlocal z
                    z += x * x
                    return z + y
                return inner(x) + inner(x) + z
            """
        ns = {}
        exec_(code.strip(), ns)
        cfunc = njit(ns['outer'])
        self.assertEqual(cfunc(10), ns['outer'](10))
    @tag('important')
    def test_inner_function_nested(self):
        def outer(x):
            def inner(y):
                def innermost(z):
                    return x + y + z
                s = 0
                for i in range(y):
                    s += innermost(i)
                return s
            return inner(x * x)
        cfunc = njit(outer)
        self.assertEqual(cfunc(10), outer(10))
    @tag('important')
    def test_bulk_use_cases(self):
        """ Tests the large number of use cases defined below """
        # jitted function used in some tests
        @njit
        def fib3(n):
            if n < 2:
                return n
            return fib3(n - 1) + fib3(n - 2)
        def outer1(x):
            """ Test calling recursive function from inner """
            def inner(x):
                return fib3(x)
            return inner(x)
        def outer2(x):
            """ Test calling recursive function from closure """
            z = x + 1
            def inner(x):
                return x + fib3(z)
            return inner(x)
        def outer3(x):
            """ Test recursive inner """
            def inner(x):
                if x < 2:
                    return 10
                else:
                    inner(x - 1)
            return inner(x)
        def outer4(x):
            """ Test recursive closure """
            y = x + 1
            def inner(x):
                if x + y < 2:
                    return 10
                else:
                    inner(x - 1)
            return inner(x)
        def outer5(x):
            """ Test nested closure """
            y = x + 1
            def inner1(x):
                z = y + x + 2
                def inner2(x):
                    return x + z
                return inner2(x) + y
            return inner1(x)
        def outer6(x):
            """ Test closure with list comprehension in body """
            y = x + 1
            def inner1(x):
                z = y + x + 2
                return [t for t in range(z)]
            return inner1(x)
        _OUTER_SCOPE_VAR = 9
        def outer7(x):
            """ Test use of outer scope var, no closure """
            z = x + 1
            return x + z + _OUTER_SCOPE_VAR
        _OUTER_SCOPE_VAR = 9
        def outer8(x):
            """ Test use of outer scope var, with closure """
            z = x + 1
            def inner(x):
                return x + z + _OUTER_SCOPE_VAR
            return inner(x)
        def outer9(x):
            """ Test closure assignment"""
            z = x + 1
            def inner(x):
                return x + z
            f = inner
            return f(x)
        def outer10(x):
            """ Test two inner, one calls other """
            z = x + 1
            def inner(x):
                return x + z
            def inner2(x):
                return inner(x)
            return inner2(x)
        def outer11(x):
            """ return the closure """
            z = x + 1
            def inner(x):
                return x + z
            return inner
        def outer12(x):
            """ closure with kwarg"""
            z = x + 1
            def inner(x, kw=7):
                return x + z + kw
            return inner(x)
        def outer13(x, kw=7):
            """ outer with kwarg no closure"""
            z = x + 1 + kw
            return z
        def outer14(x, kw=7):
            """ outer with kwarg used in closure"""
            z = x + 1
            def inner(x):
                return x + z + kw
            return inner(x)
        def outer15(x, kw=7):
            """ outer with kwarg as arg to closure"""
            z = x + 1
            def inner(x, kw):
                return x + z + kw
            return inner(x, kw)
        def outer16(x):
            """ closure is generator, consumed locally """
            z = x + 1
            def inner(x):
                yield x + z
            return list(inner(x))
        def outer17(x):
            """ closure is generator, returned """
            z = x + 1
            def inner(x):
                yield x + z
            return inner(x)
        def outer18(x):
            """ closure is generator, consumed in loop """
            z = x + 1
            def inner(x):
                yield x + z
            for i in inner(x):
                t = i
            return t
        def outer19(x):
            """ closure as arg to another closure """
            z1 = x + 1
            z2 = x + 2
            def inner(x):
                return x + z1
            def inner2(f, x):
                return f(x) + z2
            return inner2(inner, x)
        def outer20(x):
            #""" Test calling numpy in closure """
            z = x + 1
            def inner(x):
                return x + numpy.cos(z)
            return inner(x)
        def outer21(x):
            #""" Test calling numpy import as in closure """
            z = x + 1
            def inner(x):
                return x + np.cos(z)
            return inner(x)
        # functions to test that are expected to pass
        f = [outer1, outer2, outer5, outer6, outer7, outer8,
             outer9, outer10, outer12, outer13, outer14,
             outer15, outer19, outer20, outer21]
        for ref in f:
            cfunc = njit(ref)
            var = 10
            self.assertEqual(cfunc(var), ref(var))
        # test functions that are expected to fail, each with the specific
        # error message numba emits for that unsupported closure shape
        with self.assertRaises(NotImplementedError) as raises:
            cfunc = jit(nopython=True)(outer3)
            cfunc(var)
        msg = "Unsupported use of op_LOAD_CLOSURE encountered"
        self.assertIn(msg, str(raises.exception))
        with self.assertRaises(NotImplementedError) as raises:
            cfunc = jit(nopython=True)(outer4)
            cfunc(var)
        msg = "Unsupported use of op_LOAD_CLOSURE encountered"
        self.assertIn(msg, str(raises.exception))
        with self.assertRaises(UnsupportedError) as raises:
            cfunc = jit(nopython=True)(outer11)
            cfunc(var)
        msg = "make_function"
        self.assertIn(msg, str(raises.exception))
        with self.assertRaises(TypingError) as raises:
            cfunc = jit(nopython=True)(outer16)
            cfunc(var)
        msg = "with argument(s) of type(s): (none)"
        self.assertIn(msg, str(raises.exception))
        with self.assertRaises(LoweringError) as raises:
            cfunc = jit(nopython=True)(outer17)
            cfunc(var)
        msg = "'NoneType' object has no attribute 'yield_points'"
        self.assertIn(msg, str(raises.exception))
        with self.assertRaises(TypingError) as raises:
            cfunc = jit(nopython=True)(outer18)
            cfunc(var)
        msg = "Invalid use of getiter with parameters (none)"
        self.assertIn(msg, str(raises.exception))
class TestObjmodeFallback(TestCase):
    # These are all based on tests from real life issues where, predominantly,
    # the object mode fallback compilation path would fail as a result of the IR
    # being mutated by closure inlining in npm. Tests are named after issues,
    # all of which failed to compile as of 0.44.
    # Each case is run under both plain @jit and @jit(forceobj=True).
    decorators = [jit, jit(forceobj=True)]
    def test_issue2955(self):
        def numbaFailure(scores, cooc):
            rows, cols = scores.shape
            for i in range(rows):
                coxv = scores[i]
                groups = sorted(set(coxv), reverse=True)
                [set(np.argwhere(coxv == x).flatten()) for x in groups]
        x = np.random.random((10, 10))
        y = np.abs((np.random.randn(10, 10) * 1.732)).astype(np.int)
        for d in self.decorators:
            d(numbaFailure)(x, y)
    def test_issue3239(self):
        def fit(X, y):
            if type(X) is not np.ndarray:
                X = np.array(X)
            if type(y) is not np.ndarray:
                y = np.array(y)
            m, _ = X.shape
            X = np.hstack((
                np.array([[1] for _ in range(m)]),
                X
            ))
            res = np.dot(np.dot(X, X.T), y)
            intercept = res[0]
            coefs = res[1:]
            return intercept, coefs
        for d in self.decorators:
            res = d(fit)(np.arange(10).reshape(1, 10),
                         np.arange(10).reshape(1, 10))
            exp = fit(np.arange(10).reshape(1, 10),
                      np.arange(10).reshape(1, 10))
            np.testing.assert_equal(res, exp)
    def test_issue3289(self):
        b = [(5, 124), (52, 5)]
        def a():
            [b[index] for index in [0, 1]]
            for x in range(5):
                pass
        for d in self.decorators:
            d(a)()
    def test_issue3413(self):
        def foo(data):
            # commenting out this line prevents the crash:
            t = max([len(m) for m in data['y']])
            mask = data['x'] == 0
            if any(mask):
                z = 15
            return t, z
        data = {'x': np.arange(5), 'y': [[1], [2, 3]]}
        for d in self.decorators:
            res = d(foo)(data)
            np.testing.assert_allclose(res, foo(data))
    def test_issue3659(self):
        def main():
            a = np.array(((1, 2), (3, 4)))
            return np.array([x for x in a])
        for d in self.decorators:
            res = d(main)()
            np.testing.assert_allclose(res, main())
    def test_issue3803(self):
        def center(X):
            np.array([np.float_(x) for x in X.T])
            np.array([np.float_(1) for _ in X.T])
            return X
        X = np.zeros((10,))
        for d in self.decorators:
            res = d(center)(X)
            np.testing.assert_allclose(res, center(X))
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| |
# pylint: disable-msg=E1101,W0613,W0603
from itertools import islice
import os
import numpy as np
import pandas._libs.json as json
from pandas._libs.tslibs import iNaT
from pandas.compat import StringIO, long, u, to_str
from pandas import compat, isna
from pandas import Series, DataFrame, to_datetime, MultiIndex
from pandas.io.common import (get_filepath_or_buffer, _get_handle,
_infer_compression, _stringify_path,
BaseIterator)
from pandas.io.parsers import _validate_integer
import pandas.core.common as com
from pandas.core.reshape.concat import concat
from pandas.io.formats.printing import pprint_thing
from .normalize import _convert_to_line_delimits
from .table_schema import build_table_schema, parse_table_schema
from pandas.core.dtypes.common import is_period_dtype
# Shorthand aliases for the C-accelerated pandas JSON codec.
loads = json.loads
dumps = json.dumps
# Version of the Table Schema spec emitted for orient='table'.
TABLE_SCHEMA_VERSION = '0.20.0'
# interface to/from
def to_json(path_or_buf, obj, orient=None, date_format='epoch',
            double_precision=10, force_ascii=True, date_unit='ms',
            default_handler=None, lines=False, compression='infer',
            index=True):
    """Serialize *obj* (Series or DataFrame) to JSON.

    Writes to *path_or_buf* (path string or file-like); returns the JSON
    string instead when *path_or_buf* is None.  Raises ValueError for
    option combinations the format cannot express, NotImplementedError
    for unsupported *obj* types.
    """
    # 'split' and 'table' are the only orients able to drop the index.
    if not index and orient not in ['split', 'table']:
        raise ValueError("'index=False' is only valid when 'orient' is "
                         "'split' or 'table'")
    path_or_buf = _stringify_path(path_or_buf)
    # line-delimited output only makes sense for a list of records
    if lines and orient != 'records':
        raise ValueError(
            "'lines' keyword only valid when 'orient' is records")
    # Table Schema serialization works on frames; promote a Series first.
    if orient == 'table' and isinstance(obj, Series):
        obj = obj.to_frame(name=obj.name or 'values')
    # Pick the writer class for the object / orient combination.
    if orient == 'table' and isinstance(obj, DataFrame):
        writer = JSONTableWriter
    elif isinstance(obj, Series):
        writer = SeriesWriter
    elif isinstance(obj, DataFrame):
        writer = FrameWriter
    else:
        raise NotImplementedError("'obj' should be a Series or a DataFrame")
    s = writer(
        obj, orient=orient, date_format=date_format,
        double_precision=double_precision, ensure_ascii=force_ascii,
        date_unit=date_unit, default_handler=default_handler,
        index=index).write()
    if lines:
        s = _convert_to_line_delimits(s)
    if isinstance(path_or_buf, compat.string_types):
        fh, handles = _get_handle(path_or_buf, 'w', compression=compression)
        try:
            fh.write(s)
        finally:
            fh.close()
    elif path_or_buf is None:
        return s
    else:
        path_or_buf.write(s)
class Writer(object):
    """Base serializer that turns a pandas object into a JSON string.

    Subclasses set ``_default_orient`` and implement ``_format_axes`` to
    validate their axes; ``write()`` drives the underlying C encoder.
    """
    def __init__(self, obj, orient, date_format, double_precision,
                 ensure_ascii, date_unit, index, default_handler=None):
        self.obj = obj
        # Fall back to the subclass default when no orient was requested.
        self.orient = self._default_orient if orient is None else orient
        self.date_format = date_format
        self.double_precision = double_precision
        self.ensure_ascii = ensure_ascii
        self.date_unit = date_unit
        self.default_handler = default_handler
        self.index = index
        self.is_copy = None
        self._format_axes()
    def _format_axes(self):
        # Subclasses must validate/format their own axes.
        raise com.AbstractMethodError(self)
    def write(self):
        iso_dates = self.date_format == 'iso'
        return self._write(self.obj, self.orient, self.double_precision,
                           self.ensure_ascii, self.date_unit,
                           iso_dates, self.default_handler)
    def _write(self, obj, orient, double_precision, ensure_ascii,
               date_unit, iso_dates, default_handler):
        # Thin wrapper around the C-level encoder; subclasses may massage
        # ``obj`` before delegating here.
        return dumps(obj,
                     orient=orient,
                     double_precision=double_precision,
                     ensure_ascii=ensure_ascii,
                     date_unit=date_unit,
                     iso_dates=iso_dates,
                     default_handler=default_handler)
class SeriesWriter(Writer):
    """JSON writer for Series; defaults to orient='index'."""
    _default_orient = 'index'
    def _format_axes(self):
        # orient='index' keys rows by index label, so duplicates would
        # silently collide — reject them up front.
        if self.orient == 'index' and not self.obj.index.is_unique:
            raise ValueError("Series index must be unique for orient="
                             "'{orient}'".format(orient=self.orient))
    def _write(self, obj, orient, double_precision, ensure_ascii,
               date_unit, iso_dates, default_handler):
        # index=False is only honoured for 'split': emit name + data only.
        if orient == 'split' and not self.index:
            obj = {"name": obj.name, "data": obj.values}
        return super(SeriesWriter, self)._write(obj, orient,
                                                double_precision,
                                                ensure_ascii, date_unit,
                                                iso_dates, default_handler)
class FrameWriter(Writer):
    """JSON writer for DataFrame; defaults to orient='columns'."""
    _default_orient = 'columns'
    def _format_axes(self):
        """Raise if an axis is non-unique where the chosen orient requires
        unique labels (duplicates would silently collide in the output)."""
        if not self.obj.index.is_unique and self.orient in (
                'index', 'columns'):
            raise ValueError("DataFrame index must be unique for orient="
                             "'{orient}'.".format(orient=self.orient))
        if not self.obj.columns.is_unique and self.orient in (
                'index', 'columns', 'records'):
            raise ValueError("DataFrame columns must be unique for orient="
                             "'{orient}'.".format(orient=self.orient))
    def _write(self, obj, orient, double_precision, ensure_ascii,
               date_unit, iso_dates, default_handler):
        # index=False is only honoured for 'split': drop the index entry
        # from the dict representation before encoding.
        if not self.index and orient == 'split':
            obj = obj.to_dict(orient='split')
            del obj["index"]
        return super(FrameWriter, self)._write(obj, orient,
                                               double_precision,
                                               ensure_ascii, date_unit,
                                               iso_dates, default_handler)
class JSONTableWriter(FrameWriter):
    """JSON writer for orient='table' (Table Schema) output."""
    _default_orient = 'records'
    def __init__(self, obj, orient, date_format, double_precision,
                 ensure_ascii, date_unit, index, default_handler=None):
        """
        Adds a `schema` attribute with the Table Schema and resets the
        index (can't do it in the caller, because the schema inference
        needs to know what the index is).  Forces orient to 'records'
        and date_format to 'iso'.
        """
        super(JSONTableWriter, self).__init__(
            obj, orient, date_format, double_precision, ensure_ascii,
            date_unit, index, default_handler=default_handler)
        if date_format != 'iso':
            msg = ("Trying to write with `orient='table'` and "
                   "`date_format='{fmt}'`. Table Schema requires dates "
                   "to be formatted with `date_format='iso'`"
                   .format(fmt=date_format))
            raise ValueError(msg)
        self.schema = build_table_schema(obj, index=self.index)
        # NotImplemented on a column MultiIndex
        if obj.ndim == 2 and isinstance(obj.columns, MultiIndex):
            raise NotImplementedError(
                "orient='table' is not supported for MultiIndex")
        # TODO: Do this timedelta properly in objToJSON.c See GH #15137
        if ((obj.ndim == 1) and (obj.name in set(obj.index.names)) or
                len(obj.columns & obj.index.names)):
            msg = "Overlapping names between the index and columns"
            raise ValueError(msg)
        obj = obj.copy()
        # Timedeltas are emitted as ISO-8601 duration strings.
        timedeltas = obj.select_dtypes(include=['timedelta']).columns
        if len(timedeltas):
            obj[timedeltas] = obj[timedeltas].applymap(
                lambda x: x.isoformat())
        # Convert PeriodIndex to datetimes before serializing
        if is_period_dtype(obj.index):
            obj.index = obj.index.to_timestamp()
        # exclude index from obj if index=False
        if not self.index:
            self.obj = obj.reset_index(drop=True)
        else:
            self.obj = obj.reset_index(drop=False)
        self.date_format = 'iso'
        self.orient = 'records'
        self.index = index
    def _write(self, obj, orient, double_precision, ensure_ascii,
               date_unit, iso_dates, default_handler):
        # Wrap the encoded records together with the schema in one document.
        data = super(JSONTableWriter, self)._write(obj, orient,
                                                   double_precision,
                                                   ensure_ascii, date_unit,
                                                   iso_dates,
                                                   default_handler)
        serialized = '{{"schema": {schema}, "data": {data}}}'.format(
            schema=dumps(self.schema), data=data)
        return serialized
def read_json(path_or_buf=None, orient=None, typ='frame', dtype=True,
              convert_axes=True, convert_dates=True, keep_default_dates=True,
              numpy=False, precise_float=False, date_unit=None, encoding=None,
              lines=False, chunksize=None, compression='infer'):
    """
    Convert a JSON string to pandas object

    Parameters
    ----------
    path_or_buf : a valid JSON string or file-like, default: None
        The string could be a URL. Valid URL schemes include http, ftp, s3,
        gcs, and file. For file URLs, a host is expected. For instance, a local
        file could be ``file://localhost/path/to/table.json``

    orient : string,
        Indication of expected JSON string format.
        Compatible JSON strings can be produced by ``to_json()`` with a
        corresponding orient value.
        The set of possible orients is:

        - ``'split'`` : dict like
          ``{index -> [index], columns -> [columns], data -> [values]}``
        - ``'records'`` : list like
          ``[{column -> value}, ... , {column -> value}]``
        - ``'index'`` : dict like ``{index -> {column -> value}}``
        - ``'columns'`` : dict like ``{column -> {index -> value}}``
        - ``'values'`` : just the values array

        The allowed and default values depend on the value
        of the `typ` parameter.

        * when ``typ == 'series'``,

          - allowed orients are ``{'split','records','index'}``
          - default is ``'index'``
          - The Series index must be unique for orient ``'index'``.

        * when ``typ == 'frame'``,

          - allowed orients are ``{'split','records','index',
            'columns','values', 'table'}``
          - default is ``'columns'``
          - The DataFrame index must be unique for orients ``'index'`` and
            ``'columns'``.
          - The DataFrame columns must be unique for orients ``'index'``,
            ``'columns'``, and ``'records'``.

        .. versionadded:: 0.23.0
           'table' as an allowed value for the ``orient`` argument

    typ : type of object to recover (series or frame), default 'frame'
    dtype : boolean or dict, default True
        If True, infer dtypes, if a dict of column to dtype, then use those,
        if False, then don't infer dtypes at all, applies only to the data.
    convert_axes : boolean, default True
        Try to convert the axes to the proper dtypes.
    convert_dates : boolean, default True
        List of columns to parse for dates; If True, then try to parse
        datelike columns default is True; a column label is datelike if

        * it ends with ``'_at'``,
        * it ends with ``'_time'``,
        * it begins with ``'timestamp'``,
        * it is ``'modified'``, or
        * it is ``'date'``

    keep_default_dates : boolean, default True
        If parsing dates, then parse the default datelike columns
    numpy : boolean, default False
        Direct decoding to numpy arrays. Supports numeric data only, but
        non-numeric column and index labels are supported. Note also that the
        JSON ordering MUST be the same for each term if numpy=True.
    precise_float : boolean, default False
        Set to enable usage of higher precision (strtod) function when
        decoding string to double values. Default (False) is to use fast but
        less precise builtin functionality
    date_unit : string, default None
        The timestamp unit to detect if converting dates. The default behaviour
        is to try and detect the correct precision, but if this is not desired
        then pass one of 's', 'ms', 'us' or 'ns' to force parsing only seconds,
        milliseconds, microseconds or nanoseconds respectively.
    lines : boolean, default False
        Read the file as a json object per line.

        .. versionadded:: 0.19.0

    encoding : str, default is 'utf-8'
        The encoding to use to decode py3 bytes.

        .. versionadded:: 0.19.0

    chunksize: integer, default None
        Return JsonReader object for iteration.
        See the `line-delimited json docs
        <http://pandas.pydata.org/pandas-docs/stable/io.html#io-jsonl>`_
        for more information on ``chunksize``.
        This can only be passed if `lines=True`.
        If this is None, the file will be read into memory all at once.

        .. versionadded:: 0.21.0

    compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, default 'infer'
        For on-the-fly decompression of on-disk data. If 'infer', then use
        gzip, bz2, zip or xz if path_or_buf is a string ending in
        '.gz', '.bz2', '.zip', or 'xz', respectively, and no decompression
        otherwise. If using 'zip', the ZIP file must contain only one data
        file to be read in. Set to None for no decompression.

        .. versionadded:: 0.21.0

    Returns
    -------
    result : Series or DataFrame, depending on the value of `typ`.

    Notes
    -----
    Specific to ``orient='table'``, if a :class:`DataFrame` with a literal
    :class:`Index` name of `index` gets written with :func:`to_json`, the
    subsequent read operation will incorrectly set the :class:`Index` name to
    ``None``. This is because `index` is also used by :func:`DataFrame.to_json`
    to denote a missing :class:`Index` name, and the subsequent
    :func:`read_json` operation cannot distinguish between the two. The same
    limitation is encountered with a :class:`MultiIndex` and any names
    beginning with ``'level_'``.

    See Also
    --------
    DataFrame.to_json

    Examples
    --------

    >>> df = pd.DataFrame([['a', 'b'], ['c', 'd']],
    ...                   index=['row 1', 'row 2'],
    ...                   columns=['col 1', 'col 2'])

    Encoding/decoding a Dataframe using ``'split'`` formatted JSON:

    >>> df.to_json(orient='split')
    '{"columns":["col 1","col 2"],
      "index":["row 1","row 2"],
      "data":[["a","b"],["c","d"]]}'
    >>> pd.read_json(_, orient='split')
          col 1 col 2
    row 1     a     b
    row 2     c     d

    Encoding/decoding a Dataframe using ``'index'`` formatted JSON:

    >>> df.to_json(orient='index')
    '{"row 1":{"col 1":"a","col 2":"b"},"row 2":{"col 1":"c","col 2":"d"}}'
    >>> pd.read_json(_, orient='index')
          col 1 col 2
    row 1     a     b
    row 2     c     d

    Encoding/decoding a Dataframe using ``'records'`` formatted JSON.
    Note that index labels are not preserved with this encoding.

    >>> df.to_json(orient='records')
    '[{"col 1":"a","col 2":"b"},{"col 1":"c","col 2":"d"}]'
    >>> pd.read_json(_, orient='records')
      col 1 col 2
    0     a     b
    1     c     d

    Encoding with Table Schema

    >>> df.to_json(orient='table')
    '{"schema": {"fields": [{"name": "index", "type": "string"},
                            {"name": "col 1", "type": "string"},
                            {"name": "col 2", "type": "string"}],
                 "primaryKey": "index",
                 "pandas_version": "0.20.0"},
      "data": [{"index": "row 1", "col 1": "a", "col 2": "b"},
               {"index": "row 2", "col 1": "c", "col 2": "d"}]}'
    """

    compression = _infer_compression(path_or_buf, compression)
    filepath_or_buffer, _, compression, should_close = get_filepath_or_buffer(
        path_or_buf, encoding=encoding, compression=compression,
    )

    json_reader = JsonReader(
        filepath_or_buffer, orient=orient, typ=typ, dtype=dtype,
        convert_axes=convert_axes, convert_dates=convert_dates,
        keep_default_dates=keep_default_dates, numpy=numpy,
        precise_float=precise_float, date_unit=date_unit, encoding=encoding,
        lines=lines, chunksize=chunksize, compression=compression,
    )

    if chunksize:
        # caller iterates the JsonReader; it closes the handle itself
        return json_reader

    result = json_reader.read()
    if should_close:
        try:
            filepath_or_buffer.close()
        # Bug fix: the original bare `except:` also swallowed
        # KeyboardInterrupt/SystemExit; close() is best-effort only.
        except Exception:
            pass
    return result
class JsonReader(BaseIterator):
    """
    JsonReader provides an interface for reading in a JSON file.

    If initialized with ``lines=True`` and ``chunksize``, can be iterated over
    ``chunksize`` lines at a time. Otherwise, calling ``read`` reads in the
    whole document.
    """
    def __init__(self, filepath_or_buffer, orient, typ, dtype, convert_axes,
                 convert_dates, keep_default_dates, numpy, precise_float,
                 date_unit, encoding, lines, chunksize, compression):

        self.path_or_buf = filepath_or_buffer
        self.orient = orient
        self.typ = typ
        self.dtype = dtype
        self.convert_axes = convert_axes
        self.convert_dates = convert_dates
        self.keep_default_dates = keep_default_dates
        self.numpy = numpy
        self.precise_float = precise_float
        self.date_unit = date_unit
        self.encoding = encoding
        self.compression = compression
        self.lines = lines
        self.chunksize = chunksize
        self.nrows_seen = 0          # rows emitted so far across chunks
        self.should_close = False    # True when we opened the stream ourselves

        if self.chunksize is not None:
            # chunked reading only makes sense for line-delimited JSON
            self.chunksize = _validate_integer("chunksize", self.chunksize, 1)
            if not self.lines:
                raise ValueError("chunksize can only be passed if lines=True")

        data = self._get_data_from_filepath(filepath_or_buffer)
        self.data = self._preprocess_data(data)

    def _preprocess_data(self, data):
        """
        At this point, the data either has a `read` attribute (e.g. a file
        object or a StringIO) or is a string that is a JSON document.

        If self.chunksize, we prepare the data for the `__next__` method.
        Otherwise, we read it into memory for the `read` method.
        """
        if hasattr(data, 'read') and not self.chunksize:
            data = data.read()
        if not hasattr(data, 'read') and self.chunksize:
            data = StringIO(data)

        return data

    def _get_data_from_filepath(self, filepath_or_buffer):
        """
        read_json accepts three input types:
            1. filepath (string-like)
            2. file-like object (e.g. open file object, StringIO)
            3. JSON string

        This method turns (1) into (2) to simplify the rest of the processing.
        It returns input types (2) and (3) unchanged.
        """
        data = filepath_or_buffer

        exists = False
        if isinstance(data, compat.string_types):
            try:
                exists = os.path.exists(filepath_or_buffer)
            # gh-5874: if the filepath is too long will raise here
            except (TypeError, ValueError):
                pass

        if exists or self.compression is not None:
            data, _ = _get_handle(filepath_or_buffer, 'r',
                                  encoding=self.encoding,
                                  compression=self.compression)
            # remember the handle so close() can release it later
            self.should_close = True
            self.open_stream = data

        return data

    def _combine_lines(self, lines):
        """Combines a list of JSON objects into one JSON object"""
        # drop empty lines, then join the objects into one JSON array
        lines = filter(None, map(lambda x: x.strip(), lines))
        return '[' + ','.join(lines) + ']'

    def read(self):
        """Read the whole JSON input into a pandas object"""
        if self.lines and self.chunksize:
            # delegate to __next__ chunk by chunk, then concatenate
            obj = concat(self)
        elif self.lines:
            data = to_str(self.data)
            obj = self._get_object_parser(
                self._combine_lines(data.split('\n'))
            )
        else:
            obj = self._get_object_parser(self.data)

        self.close()
        return obj

    def _get_object_parser(self, json):
        """parses a json document into a pandas object"""
        typ = self.typ
        dtype = self.dtype
        kwargs = {
            "orient": self.orient, "dtype": self.dtype,
            "convert_axes": self.convert_axes,
            "convert_dates": self.convert_dates,
            "keep_default_dates": self.keep_default_dates, "numpy": self.numpy,
            "precise_float": self.precise_float, "date_unit": self.date_unit
        }
        obj = None
        if typ == 'frame':
            obj = FrameParser(json, **kwargs).parse()

        # fall back to a Series when asked for one, or when frame
        # parsing produced nothing
        if typ == 'series' or obj is None:
            if not isinstance(dtype, bool):
                kwargs['dtype'] = dtype
            obj = SeriesParser(json, **kwargs).parse()

        return obj

    def close(self):
        """
        If we opened a stream earlier, in _get_data_from_filepath, we should
        close it. If an open stream or file was passed, we leave it open.
        """
        if self.should_close:
            try:
                self.open_stream.close()
            except (IOError, AttributeError):
                pass

    def __next__(self):
        # Pull the next `chunksize` lines and parse them as one document.
        lines = list(islice(self.data, self.chunksize))
        if lines:
            lines_json = self._combine_lines(lines)
            obj = self._get_object_parser(lines_json)

            # Make sure that the returned objects have the right index.
            obj.index = range(self.nrows_seen, self.nrows_seen + len(obj))
            self.nrows_seen += len(obj)

            return obj

        # exhausted: release any handle we opened, then stop iteration
        self.close()
        raise StopIteration
class Parser(object):
    """Base class turning a JSON document into a pandas object.

    Subclasses provide ``_default_orient``, ``_split_keys`` and the
    ``_parse_numpy`` / ``_parse_no_numpy`` / ``_try_convert_types`` /
    ``_try_convert_dates`` hooks.
    """

    _STAMP_UNITS = ('s', 'ms', 'us', 'ns')
    # Smallest epoch value per unit (~1 year past the epoch) treated as a
    # plausible timestamp when sniffing date-like data.
    _MIN_STAMPS = {
        's': long(31536000),
        'ms': long(31536000000),
        'us': long(31536000000000),
        'ns': long(31536000000000000)}

    def __init__(self, json, orient, dtype=True, convert_axes=True,
                 convert_dates=True, keep_default_dates=False, numpy=False,
                 precise_float=False, date_unit=None):
        self.json = json

        if orient is None:
            orient = self._default_orient
        self.orient = orient

        self.dtype = dtype

        # the numpy fast path cannot decode orient="split" documents
        if orient == "split":
            numpy = False

        if date_unit is not None:
            date_unit = date_unit.lower()
            if date_unit not in self._STAMP_UNITS:
                raise ValueError('date_unit must be one of {units}'
                                 .format(units=self._STAMP_UNITS))
            self.min_stamp = self._MIN_STAMPS[date_unit]
        else:
            # no unit forced: use the most permissive (seconds) threshold
            self.min_stamp = self._MIN_STAMPS['s']

        self.numpy = numpy
        self.precise_float = precise_float
        self.convert_axes = convert_axes
        self.convert_dates = convert_dates
        self.date_unit = date_unit
        self.keep_default_dates = keep_default_dates
        self.obj = None

    def check_keys_split(self, decoded):
        "checks that dict has only the appropriate keys for orient='split'"
        bad_keys = set(decoded.keys()).difference(set(self._split_keys))
        if bad_keys:
            bad_keys = ", ".join(bad_keys)
            raise ValueError(u("JSON data had unexpected key(s): {bad_keys}")
                             .format(bad_keys=pprint_thing(bad_keys)))

    def parse(self):
        """Decode ``self.json``, apply the configured conversions and
        return the resulting object (or None if nothing was parsed)."""

        # try numpy
        numpy = self.numpy
        if numpy:
            self._parse_numpy()
        else:
            self._parse_no_numpy()

        if self.obj is None:
            return None
        if self.convert_axes:
            self._convert_axes()
        self._try_convert_types()
        return self.obj

    def _convert_axes(self):
        """ try to convert axes """
        for axis in self.obj._AXIS_NUMBERS.keys():
            new_axis, result = self._try_convert_data(
                axis, self.obj._get_axis(axis), use_dtypes=False,
                convert_dates=True)
            if result:
                setattr(self.obj, axis, new_axis)

    def _try_convert_types(self):
        raise com.AbstractMethodError(self)

    def _try_convert_data(self, name, data, use_dtypes=True,
                          convert_dates=True):
        """ try to parse a ndarray like into a column by inferring dtype """

        # don't try to coerce, unless a force conversion
        if use_dtypes:
            if self.dtype is False:
                return data, False
            elif self.dtype is True:
                pass
            else:
                # dtype to force
                dtype = (self.dtype.get(name)
                         if isinstance(self.dtype, dict) else self.dtype)
                if dtype is not None:
                    try:
                        dtype = np.dtype(dtype)
                        return data.astype(dtype), True
                    except (TypeError, ValueError):
                        return data, False

        if convert_dates:
            new_data, result = self._try_convert_to_date(data)
            if result:
                return new_data, True

        result = False

        if data.dtype == 'object':

            # try float
            try:
                data = data.astype('float64')
                result = True
            except (TypeError, ValueError):
                pass

        if data.dtype.kind == 'f':

            if data.dtype != 'float64':

                # coerce floats to 64
                try:
                    data = data.astype('float64')
                    result = True
                except (TypeError, ValueError):
                    pass

        # don't coerce 0-len data
        if len(data) and (data.dtype == 'float' or data.dtype == 'object'):

            # coerce ints if we can (only when the values survive an exact
            # round-trip through int64)
            try:
                new_data = data.astype('int64')
                if (new_data == data).all():
                    data = new_data
                    result = True
            except (TypeError, ValueError):
                pass

        # coerce ints to 64
        if data.dtype == 'int':

            # coerce ints to 64
            try:
                data = data.astype('int64')
                result = True
            except (TypeError, ValueError):
                pass

        return data, result

    def _try_convert_to_date(self, data):
        """ try to parse a ndarray like into a date column

        try to coerce object in epoch/iso formats and
        integer/float in epoch formats, return a boolean if parsing
        was successful """

        # no conversion on empty
        if not len(data):
            return data, False

        new_data = data
        if new_data.dtype == 'object':
            try:
                new_data = data.astype('int64')
            except (TypeError, ValueError, OverflowError):
                pass

        # ignore numbers that are out of range
        if issubclass(new_data.dtype.type, np.number):
            in_range = (isna(new_data.values) | (new_data > self.min_stamp) |
                        (new_data.values == iNaT))
            if not in_range.all():
                return data, False

        # try each candidate unit until one parses cleanly
        date_units = (self.date_unit,) if self.date_unit else self._STAMP_UNITS
        for date_unit in date_units:
            try:
                new_data = to_datetime(new_data, errors='raise',
                                       unit=date_unit)
            except ValueError:
                continue
            except Exception:
                break
            return new_data, True
        return data, False

    def _try_convert_dates(self):
        raise com.AbstractMethodError(self)
class SeriesParser(Parser):
    """Parser that materializes a JSON document as a Series."""

    _default_orient = 'index'
    _split_keys = ('name', 'index', 'data')

    def _parse_no_numpy(self):
        # Pure-Python decode path.
        raw = self.json
        if self.orient == "split":
            payload = loads(raw, precise_float=self.precise_float)
            decoded = dict((str(key), value)
                           for key, value in compat.iteritems(payload))
            self.check_keys_split(decoded)
            self.obj = Series(dtype=None, **decoded)
            return
        self.obj = Series(loads(raw, precise_float=self.precise_float),
                          dtype=None)

    def _parse_numpy(self):
        # Decode straight into numpy arrays.
        raw = self.json
        orient = self.orient
        if orient == "split":
            payload = loads(raw, dtype=None, numpy=True,
                            precise_float=self.precise_float)
            decoded = dict((str(key), value)
                           for key, value in compat.iteritems(payload))
            self.check_keys_split(decoded)
            self.obj = Series(**decoded)
        elif orient in ("columns", "index"):
            labelled = loads(raw, dtype=None, numpy=True, labelled=True,
                             precise_float=self.precise_float)
            self.obj = Series(*labelled)
        else:
            self.obj = Series(loads(raw, dtype=None, numpy=True,
                                    precise_float=self.precise_float))

    def _try_convert_types(self):
        # Coerce dtypes (and optionally dates) on the parsed Series.
        if self.obj is None:
            return
        converted, changed = self._try_convert_data(
            'data', self.obj, convert_dates=self.convert_dates)
        if changed:
            self.obj = converted
class FrameParser(Parser):
    """Parser that materializes a JSON document as a DataFrame."""

    _default_orient = 'columns'
    _split_keys = ('columns', 'index', 'data')

    def _parse_numpy(self):
        # decode directly into numpy arrays where the orient allows it
        json = self.json
        orient = self.orient

        if orient == "columns":
            args = loads(json, dtype=None, numpy=True, labelled=True,
                         precise_float=self.precise_float)
            if len(args):
                # reorder/transpose the labelled result into the
                # (data, index, columns) positional form DataFrame expects
                args = (args[0].T, args[2], args[1])
            self.obj = DataFrame(*args)
        elif orient == "split":
            decoded = loads(json, dtype=None, numpy=True,
                            precise_float=self.precise_float)
            decoded = {str(k): v for k, v in compat.iteritems(decoded)}
            self.check_keys_split(decoded)
            self.obj = DataFrame(**decoded)
        elif orient == "values":
            self.obj = DataFrame(loads(json, dtype=None, numpy=True,
                                       precise_float=self.precise_float))
        else:
            self.obj = DataFrame(*loads(json, dtype=None, numpy=True,
                                        labelled=True,
                                        precise_float=self.precise_float))

    def _parse_no_numpy(self):
        # pure-Python decode path
        json = self.json
        orient = self.orient

        if orient == "columns":
            self.obj = DataFrame(
                loads(json, precise_float=self.precise_float), dtype=None)
        elif orient == "split":
            decoded = {str(k): v for k, v in compat.iteritems(
                loads(json, precise_float=self.precise_float))}
            self.check_keys_split(decoded)
            self.obj = DataFrame(dtype=None, **decoded)
        elif orient == "index":
            # index-oriented input decodes column-wise; transpose to fix
            self.obj = DataFrame(
                loads(json, precise_float=self.precise_float), dtype=None).T
        elif orient == 'table':
            self.obj = parse_table_schema(json,
                                          precise_float=self.precise_float)
        else:
            self.obj = DataFrame(
                loads(json, precise_float=self.precise_float), dtype=None)

    def _process_converter(self, f, filt=None):
        """ take a conversion function and possibly recreate the frame """

        if filt is None:
            filt = lambda col, c: True

        needs_new_obj = False
        new_obj = dict()
        for i, (col, c) in enumerate(self.obj.iteritems()):
            if filt(col, c):
                new_data, result = f(col, c)
                if result:
                    c = new_data
                    needs_new_obj = True
            # key on position (not label) so duplicate column labels
            # don't collapse into one entry
            new_obj[i] = c

        if needs_new_obj:

            # possibly handle dup columns
            new_obj = DataFrame(new_obj, index=self.obj.index)
            new_obj.columns = self.obj.columns
            self.obj = new_obj

    def _try_convert_types(self):
        if self.obj is None:
            return
        if self.convert_dates:
            self._try_convert_dates()

        self._process_converter(
            lambda col, c: self._try_convert_data(col, c, convert_dates=False))

    def _try_convert_dates(self):
        if self.obj is None:
            return

        # our columns to parse
        convert_dates = self.convert_dates
        if convert_dates is True:
            convert_dates = []
        convert_dates = set(convert_dates)

        def is_ok(col):
            """ return if this col is ok to try for a date parse """
            if not isinstance(col, compat.string_types):
                return False

            col_lower = col.lower()
            if (col_lower.endswith('_at') or
                    col_lower.endswith('_time') or
                    col_lower == 'modified' or
                    col_lower == 'date' or
                    col_lower == 'datetime' or
                    col_lower.startswith('timestamp')):
                return True
            return False

        self._process_converter(
            lambda col, c: self._try_convert_to_date(c),
            lambda col, c: ((self.keep_default_dates and is_ok(col)) or
                            col in convert_dates))
| |
# BOM_SOLAR_ASCII_TO_NETCDF.PY
#
# The input proprietary format supported by this example is a Bureau of
# Meteorology (BoM) ArcView ASCII grid file. The format contains a header
# (defines the shape and size of the grid), space-separated ASCII data, and
# tail metadata that contains internal BoM processing information.
#
# The ASCII data is converted to a NumPy array. The precision of values within
# the grid are reduced to 1 decimal place (for consistency between variables),
# and the missing value is redefined to -999.0.
#
# File-specific metadata (from the header, the tail, and from the processing in
# this script) is written to a JSON file. In a second-step, the JSON file is
# added to the NetCDF file by netcdf_json_wrapper.py. This two-step
# process allows manual editing or checking of the metadata if required.
#
# By default the following files are created:
# - [verbose filename].json
# - [verbose filename].nc
# The selection of outputs can be customised by parameters to
# bom_ascii_to_flt_nc().
import os
import numpy as np
from scipy import interpolate
import re
import hashlib
from datetime import timedelta
from datetime import datetime
from collections import OrderedDict
import netcdf_builder as nb
import json_handler as jh
import numpy_routines as nr
from solar_obstime_lookup import get_lookup_list
def split_bom_file(fname):
    """
    Split a BoM ASCII grid file into its header, data and tail components.

    End-of-line characters and whitespace at either end of a line are
    stripped off. The head and tail components are returned as a list of
    lines. The data component is returned as a list of lists, with each
    inner list being a list of (string) values in a line.

    The number of rows and columns in the grid are determined from the
    head component as it is read ('ncols' and 'nrows', respectively).

    A ValueError is raised if either the number of data elements in a line
    is not equal to the expected number of columns, or if the number of
    data lines is not equal to the expected number of rows.
    """
    headmeta = []
    datarows = []
    tailmeta = []
    ncols = -1
    nrows = -1
    isheader = True
    # `with` guarantees the file is closed even if a ValueError is raised
    with open(fname, 'r') as f:
        for line in f:
            line = line.strip()
            if not line:
                continue
            tokens = line.split()
            # Assume header lines contain only 2 whitespace-split elements
            if len(tokens) == 2 and isheader:
                headmeta.append(line)
                key = tokens[0].lower()
                if key == 'ncols':
                    ncols = float(tokens[1])
                elif key == 'nrows':
                    nrows = float(tokens[1])
            # Else if the number of elements==ncols the line is probably data
            elif len(tokens) == ncols:
                isheader = False
                datarows.append(tokens)
            # Else if we don't have the expected number of data rows there is
            # an error
            elif len(datarows) != nrows:
                # Bug fix: the original concatenated str + float
                # ("..."+ncols), which raised TypeError instead of the
                # intended ValueError message.
                raise ValueError(
                    "Line contains data but not of length %s" % ncols)
            # Anything else is tail metadata
            else:
                tailmeta.append(line)
    return (headmeta, datarows, tailmeta)
def resample_data(datarows, metalist):
    """
    Convert a list of lists of (string) values to a 2D NumPy array. Create
    dimension vectors based on extent and cell-size information in 'metalist'.
    Create target dimension vectors based on pre-defined extents.

    Check the correctness of the given missing value (in metalist) against
    the data. Replace all missing values with a standard missing value.

    Copy data from the input array to an output array defined by the
    target dimension vectors. The precision of values in the output array are
    reduced to 1 decimal place (for consistency). Return the output array,
    target dimension vectors and a dict that gives the extents of the target
    dimension vectors and the new missing value.
    """
    # Parse metalist ("key value" lines) into a dict of floats
    meta = {y[0].lower(): float(y[1]) for y in [x.split() for x in metalist]}
    miss = -999.0

    # ASCII dimensions: cell-centre vectors, hence the half-cell offset
    input_lon = nr.create_vector(meta['xllcorner'] + meta['cellsize'] / 2.0,
                                 meta['ncols'],
                                 meta['cellsize'])
    input_lat = nr.create_vector(meta['yllcorner'] + meta['cellsize'] / 2.0,
                                 meta['nrows'],
                                 meta['cellsize'])
    input_lat = input_lat[::-1]  # reverse elements (north-to-south order)

    # Output dimensions - not remapping, so commented out
    #(xs, xn, xc) = (112, 841, 0.05) # start, number, cellsize
    #(ys, yn, yc) = (-44, 681, 0.05) # start, number, cellsize
    #output_lon = nr.create_vector(xs, xn, xc)
    #output_lat = nr.create_vector(ys, yn, yc)
    #output_lat = output_lat[::-1] # reverse elements

    # Copy datarows into a 2D array
    input_data = np.array(datarows, dtype=np.float64)
    # Sanity-check the declared missing value against the first row
    meta['nodata_value'] = \
        check_bom_missing(input_data[0, :], 99999.9, meta['nodata_value'])
    if meta['nodata_value'] != miss:
        input_data = nr.replace_values(input_data, meta['nodata_value'], miss)
        # Bug fix: use the single-argument print() form, which behaves
        # identically under Python 2 and Python 3, instead of the
        # Python 2-only print statement.
        print("Replaced missing data %s with %s" % (meta['nodata_value'],
                                                    miss))

    # Create output array
    #output_data = np.zeros((output_lat.size,output_lon.size))+miss
    # Copy data onto output grid
    #output_data = nr.copy_grids(input_data,input_lon,input_lat,
    #                            output_data,output_lon,output_lat)
    # Reduce precision of values to 1 decimal place and convert to f32
    #output_data = output_data.round(decimals=1)
    #output_data = np.float32(output_data)
    input_data = input_data.round(decimals=1)
    input_data = np.float32(input_data)

    input_dict = {'xmin': min(input_lon).round(decimals=3),
                  'xmax': max(input_lon).round(decimals=3),
                  'xstep': meta['cellsize'],
                  'xnum': meta['ncols'],
                  'xunits': 'degrees_east',
                  'ymin': min(input_lat).round(decimals=3),
                  'ymax': max(input_lat).round(decimals=3),
                  'ystep': meta['cellsize'],
                  'ynum': meta['nrows'],
                  'yunits': 'degrees_north',
                  'missing': miss}
    return (input_data, input_lat, input_lon, input_dict)
def check_bom_missing(arr, test, default):
    """
    Test for the possibility of a different missing value in 'arr'
    rather than assuming that the given missing value is correct.

    If occurrence of 'test' is > 70% of size of 'arr' then return 'test'.
    >>>check_bom_missing(numpy.zeros(5)+99999.9, 99999.9, -99.99)
    99999.9
    >>>
    If occurrence of 'test' is <= 70% of size of 'arr' then return 'default'.
    >>>check_bom_missing(numpy.zeros(5), 99999.9, -99.99)
    -99.99
    >>>
    """
    # Count how many elements equal the candidate missing value.
    frequency = np.count_nonzero(arr == test)
    if frequency > 0.7 * arr.size:
        return test
    return default
def create_obs_time(latvec, lonvec, d1):
    """
    Create a data layer of the observation time for each pixel from a lookup
    table. The lookup table defines the minutes after the start hour (d1) at
    five degree increments of latitude from -10 to -44 N.

    Linearly interpolate from the five degree increment values to the latitude
    vector.

    A 2D data layer the same size as latvec,lonvec is returned.

    Raises ValueError if no lookup table covers the date-hour `d1`.
    """
    # Choose lookup table based on date and hour
    xlook, ylook = None, None
    for obj in get_lookup_list():
        if obj.is_applicable(d1):
            xlook, ylook = obj.get_lookup()
            break
    if xlook is None:
        # Bug fixes: the strftime format contained a stray "Y"
        # ("%Y-Ym-%dT%HZ"), and an unreachable `return None` followed
        # the raise.
        raise ValueError(
            "Date-Hour not found in solar_observation_time_lookup: " +
            d1.strftime("%Y-%m-%dT%HZ"))
    # The lookup table values are nearly linear but not quite (R^2>=0.9944)
    # so choose cubic spline interpolation.
    # http://docs.scipy.org/doc/scipy/reference/tutorial/interpolate.html
    # splrep wants monotonically increasing xlook (latitudes).
    xlook = xlook[::-1]
    ylook = ylook[::-1]
    tck = interpolate.splrep(xlook, ylook, s=0)
    obsvec = interpolate.splev(latvec, tck, der=0)
    # Replicate obsvec along width of lonvec
    X, obsarr = np.meshgrid(lonvec, obsvec)
    # Reduce precision to that given by the lookup table and convert to float
    obsarr = obsarr.round(decimals=1)
    obsarr = np.float32(obsarr)
    return obsarr
def create_meta(d1, d2, datadict):
    """
    Assemble an OrderedDict of global and variable metadata specific to this
    file, drawing on the start/end dates, the data-extents dict and the
    processing-history information.
    """
    # Modification timestamp (UTC), used both as an attribute and in history
    dmodify = datetime.utcnow().strftime("%Y%m%dT%H%M%S")

    # Multiline free-text processing history
    history = """
{:s}: Reformatted to NetCDF
Solar data are written with 1 decimal precision although all the original data
values are integers.
Observation times were interpolated from the date-satellite-latitude lookup
table to the latitude vector with a cubic spline method. The interpolated
vector was then replicated across the grid for each longitude. Observation
times were written with 1 decimal precision for consistency with the lookup
table.
Solar and Observation time data have a consistent no-data value of {:.0f}.
""".format(dmodify, datadict['missing'])

    # The date*, geospatial* and time* attributes come from the Attribute
    # Convention for Dataset Discovery (ACDD). See,
    # http://www.unidata.ucar.edu/software/netcdf/conventions.html
    attributes = [
        ('history', history),
        ('date_created', "unknown"),
        ('date_modified', dmodify),
        ('geospatial_lat_min', "{:.2f}".format(datadict['ymin'])),
        ('geospatial_lat_max', "{:.2f}".format(datadict['ymax'])),
        ('geospatial_lat_step', "{:.2f}".format(datadict['ystep'])),
        ('geospatial_lat_units', datadict['yunits']),
        ('geospatial_lon_min', "{:.2f}".format(datadict['xmin'])),
        ('geospatial_lon_max', "{:.2f}".format(datadict['xmax'])),
        ('geospatial_lon_step', "{:.2f}".format(datadict['xstep'])),
        ('geospatial_lon_units', datadict['xunits']),
        ('time_coverage_start', d1.strftime("%Y-%m-%dT%HZ")),
        ('time_coverage_end', d2.strftime("%Y-%m-%dT%HZ")),
        ('time_coverage_duration', "P1D"),
        ('time_coverage_resolution', "PT1H"),
        ('solar_dni:long_name', "Gridded hourly solar direct normal irradiance"),
        ('solar_dni:standard_name', "surface_downwelling_shortwave_flux_in_air_due_to_direct_fraction"),
        ('solar_dni:units', "W m-2"),
        ('solar_dni:grid_mapping', 'crs'),
        ('solar_ghi:long_name', "Gridded hourly solar global horizontal irradiance"),
        ('solar_ghi:standard_name', "surface_downwelling_shortwave_flux_in_air"),
        ('solar_ghi:units', "W m-2"),
        ('solar_ghi:grid_mapping', 'crs'),
        ('obs_time:long_name', 'Interpolated instantaneous pixel observation time relative to time dimension value'),
        ('obs_time:standard_name', 'time'),
        ('obs_time:units', 'minutes'),
        ('obs_time:grid_mapping', 'crs'),
        ('crs:grid_mapping_name', 'latitude_longitude'),
        ('crs:long_name', 'WGS 1984 datum'),
        ('crs:longitude_of_prime_meridian', str(0.0)),
        ('crs:semi_major_axis', str(6378137.0)),
        ('crs:inverse_flattening', str(298.257223563)),
    ]
    return OrderedDict(attributes)
def bom_ascii_to_nc(year,dates,froot):
    """
    Convert the hourly BoM ASCII solar grids for `dates` (within `year`)
    into one NetCDF file `froot`.nc, and dump the file-specific metadata
    to `froot`.json. Returns `froot`.

    NOTE: this module is Python 2 code (print statements).
    """
    debug = False
    latvec = None          # latitude vector, taken from the first valid file
    lonvec = None          # longitude vector, taken from the first valid file
    ncobj = None           # NetCDF handle, created lazily below
    latslice = slice(None,None,None)
    lonslice = slice(None,None,None)
    missed_dates = []      # dates seen before the grid shape was known
    adict = {}             # extents/missing-value dict from the first file
    for dti,dt in enumerate(dates):
        dni,ghi = get_solar_files(str(year),dt)
        # `index` is only needed by the commented-out nc_add_data calls below
        index = (slice(dti,None,None),latslice,lonslice)
        if dni is not None:
            # Split the input file into metadata and data components
            dni_head,dni_rows,dni_history = split_bom_file(dni)
            # Resample the data
            dni_data,dni_lat,dni_lon,dni_dict = resample_data(dni_rows,dni_head)
        else:
            if debug: print "No dni data:",dt
        if ghi is not None:
            # Split the input file into metadata and data components
            ghi_head,ghi_rows,ghi_history = split_bom_file(ghi)
            # Resample the data
            ghi_data,ghi_lat,ghi_lon,ghi_dict = resample_data(ghi_rows,ghi_head)
        else:
            if debug: print "No ghi data:",dt
        # Skip initial dates until we get a valid file because we need lat,lon
        if latvec is None and dni is not None:
            if debug: print "Using dni_lat:",dt
            latvec = dni_lat
            lonvec = dni_lon
            adict = dni_dict
            miss = adict['missing']
        if latvec is None and ghi is not None:
            if debug: print "Using ghi_lat:",dt
            latvec = ghi_lat
            lonvec = ghi_lon
            adict = ghi_dict
            miss = adict['missing']
        if latvec is None:
            # Still no grid definition: remember the date for later
            if debug: print "Save miss:",dt
            missed_dates.append(dt)
            continue
        # Initialise the netcdf object
        if ncobj is None:
            if debug: print "Define ncobj:",dt
            dni_var = 'solar_dni'
            ghi_var = 'solar_ghi'
            obs_var = 'obs_time'
            ncobj = nb.nc_open(froot+'.nc','w',format='NETCDF4_CLASSIC')
            nb.nc_set_timelatlon(ncobj,None,len(latvec),len(lonvec))
            nb.nc_set_var(ncobj,dni_var,fill=miss,zlib=True)
            nb.nc_set_var(ncobj,ghi_var,fill=miss,zlib=True)
            nb.nc_set_var(ncobj,obs_var,fill=miss,zlib=True)
            nb.nc_set_var(ncobj,'crs',dims=(),dtype="i4") # Grid mapping container
            nb.nc_add_data(ncobj,'latitude',latvec)
            nb.nc_add_data(ncobj,'longitude',lonvec)
            dni_nc = ncobj.variables[dni_var]
            ghi_nc = ncobj.variables[ghi_var]
            obs_nc = ncobj.variables[obs_var]
            # Add observation time layers for any missed dates
            for mi,md in enumerate(missed_dates):
                if debug: print "Add missed:",md
                obs_data = create_obs_time(latvec,lonvec,md)
                #nb.nc_add_data(ncobj,obs_var,obs_data,
                #    index=(slice(mi,None,None),latslice,lonslice))
                obs_nc[mi,:,:] = obs_data
            missed_dates = []
        # Calculate the observation time layer
        obs_data = create_obs_time(latvec,lonvec,dt)
        obs_nc[dti,:,:] = obs_data
        # Add data
        if dni is not None:
            #nb.nc_add_data(ncobj,dni_var,dni_data,index=index)
            if debug: print "Add dni:",dni
            dni_nc[dti,:,:] = dni_data
        if ghi is not None:
            if debug: print "Add ghi:",ghi
            #nb.nc_add_data(ncobj,ghi_var,ghi_data,index=index)
            ghi_nc[dti,:,:] = ghi_data
    # Add time values
    nb.nc_add_time(ncobj,dates)
    # Create an ordered metadata dictionary
    meta = create_meta(dates[0],dates[-1],adict)
    # Output the file-specific metadata to a JSON file regardless of the
    # requested output formats
    jh.json_dump(meta, froot+'.json')
    # Setting attributes here is optional in this example because it is
    # known that netcdf_json_wrapper.py will be called next with
    # 'froot+".json"' as one of the input files.
    nb.nc_set_attributes(ncobj,meta)
    if debug: print "Added attributes"
    nb.nc_close(ncobj)
    print "Wrote: "+froot+".nc"
    return froot
def get_dates(year, month, day):
    """Return the hourly datetimes with available ascii files for one day.

    There are no ascii files for 12-17UT, so the returned list runs from
    18UT on the previous day through 11UT on the given day, skipping the
    12-17UT gap.
    """
    window_start = datetime(year, month, day, 0) - timedelta(hours=6)
    window_end = datetime(year, month, day, 12)
    n_hours = int((window_end - window_start).total_seconds()) // 3600
    candidates = (window_start + timedelta(hours=h) for h in range(n_hours))
    # Keep only hours outside the 12-17UT gap.
    return [t for t in candidates if t.hour <= 11 or t.hour >= 18]
def get_solar_files(year, dt):
    """Return the (dni, ghi) ascii file paths for the hour dt.

    Either entry is None when the corresponding file does not exist.

    :param year: per-year subdirectory name; int or str accepted.
    :param dt: datetime of the observation hour (UT).
    """
    base = '/data/remotesensing/MTSAT-BoM/ascii-20121220'
    dni_path = os.path.join(base, 'time_series_hourly_dni')
    ghi_path = os.path.join(base, 'time_series_hourly_ghi')
    dtstr = dt.strftime('%Y%m%d_%HUT')
    # os.path.join requires string components; the __main__ block parses the
    # year with int(), so coerce here instead of failing with a TypeError.
    year = str(year)
    dni = os.path.join(dni_path, year, 'solar_dni_'+dtstr+'.txt')
    ghi = os.path.join(ghi_path, year, 'solar_ghi_'+dtstr+'.txt')
    if not os.path.exists(dni): dni = None
    if not os.path.exists(ghi): ghi = None
    return dni, ghi
if __name__ == '__main__':
import sys
if len(sys.argv) < 4:
print "Usage:"
print " ", sys.argv[0], "year month day [out_path]"
exit()
else:
outpath = '.'
year,month,day = int(sys.argv[1]),int(sys.argv[2]),int(sys.argv[3])
if len(sys.argv) >= 5:
outpath = sys.argv[4]
if not os.path.exists(outpath): os.makedirs(outpath)
if not os.path.exists(outpath):
exit("Could not create path", outpath)
outpath = re.sub('/$','',outpath)
dates = get_dates(year,month,day)
outroot = 'solar_dni_ghi_{:04d}{:02d}{:02d}'.format(year,month,day)
outroot = os.path.join(outpath,outroot)
bom_ascii_to_nc(year,dates,outroot)
| |
from __future__ import unicode_literals
from django import forms
from django.contrib import admin
from django.contrib.admin import AdminSite
from django.contrib.contenttypes.admin import GenericStackedInline
from django.core import checks
from django.test import SimpleTestCase, override_settings
from .models import Album, Book, City, Influence, Song, State, TwoAlbumFKAndAnE
class SongForm(forms.ModelForm):
    """Plain ModelForm used as the base form in the admin check tests."""
    pass
class ValidFields(admin.ModelAdmin):
    """ModelAdmin combining a custom form with an explicit 'fields' list."""
    form = SongForm
    fields = ['title']
class ValidFormFieldsets(admin.ModelAdmin):
    """ModelAdmin whose fieldsets reference a field that exists only on the
    form class returned by the overridden get_form()."""
    def get_form(self, request, obj=None, **kwargs):
        # 'name' is not a model field; it is injected by this form subclass.
        class ExtraFieldForm(SongForm):
            name = forms.CharField(max_length=50)
        return ExtraFieldForm
    fieldsets = (
        (None, {
            'fields': ('name',),
        }),
    )
class MyAdmin(admin.ModelAdmin):
    """ModelAdmin whose check() always reports one error, so tests can verify
    that ModelAdmin.check() results surface through checks.run_checks()."""
    def check(self, **kwargs):
        return ['error!']
@override_settings(
    SILENCED_SYSTEM_CHECKS=['fields.W342'], # ForeignKey(unique=True)
    INSTALLED_APPS=['django.contrib.auth', 'django.contrib.contenttypes', 'admin_checks']
)
class SystemChecksTestCase(SimpleTestCase):
    """Tests for the admin system-check framework: validation of ModelAdmin
    and InlineModelAdmin options and the admin.Exxx errors they report."""
    @override_settings(DEBUG=True)
    def test_checks_are_performed(self):
        """Errors from a registered ModelAdmin.check() surface in run_checks()."""
        admin.site.register(Song, MyAdmin)
        try:
            errors = checks.run_checks()
            expected = ['error!']
            self.assertEqual(errors, expected)
        finally:
            # Restore global admin-site state for subsequent tests.
            admin.site.unregister(Song)
            admin.sites.system_check_errors = []
    @override_settings(DEBUG=True)
    def test_custom_adminsite(self):
        """Checks also run for ModelAdmins registered on a custom AdminSite."""
        class CustomAdminSite(admin.AdminSite):
            pass
        custom_site = CustomAdminSite()
        custom_site.register(Song, MyAdmin)
        try:
            errors = checks.run_checks()
            expected = ['error!']
            self.assertEqual(errors, expected)
        finally:
            custom_site.unregister(Song)
            admin.sites.system_check_errors = []
    def test_field_name_not_in_list_display(self):
        """list_editable entries missing from list_display raise admin.E122."""
        class SongAdmin(admin.ModelAdmin):
            list_editable = ["original_release"]
        errors = SongAdmin(Song, AdminSite()).check()
        expected = [
            checks.Error(
                "The value of 'list_editable[0]' refers to 'original_release', "
                "which is not contained in 'list_display'.",
                hint=None,
                obj=SongAdmin,
                id='admin.E122',
            )
        ]
        self.assertEqual(errors, expected)
    def test_readonly_and_editable(self):
        """A field both readonly and list_editable raises admin.E125."""
        class SongAdmin(admin.ModelAdmin):
            readonly_fields = ["original_release"]
            list_display = ["pk", "original_release"]
            list_editable = ["original_release"]
            fieldsets = [
                (None, {
                    "fields": ["title", "original_release"],
                }),
            ]
        errors = SongAdmin(Song, AdminSite()).check()
        expected = [
            checks.Error(
                ("The value of 'list_editable[0]' refers to 'original_release', "
                 "which is not editable through the admin."),
                hint=None,
                obj=SongAdmin,
                id='admin.E125',
            )
        ]
        self.assertEqual(errors, expected)
    def test_editable(self):
        """A consistent list_display/list_editable/fieldsets combo is clean."""
        class SongAdmin(admin.ModelAdmin):
            list_display = ["pk", "title"]
            list_editable = ["title"]
            fieldsets = [
                (None, {
                    "fields": ["title", "original_release"],
                }),
            ]
        errors = SongAdmin(Song, AdminSite()).check()
        self.assertEqual(errors, [])
    def test_custom_modelforms_with_fields_fieldsets(self):
        """
        # Regression test for #8027: custom ModelForms with fields/fieldsets
        """
        errors = ValidFields(Song, AdminSite()).check()
        self.assertEqual(errors, [])
    def test_custom_get_form_with_fieldsets(self):
        """
        Ensure that the fieldsets checks are skipped when the ModelAdmin.get_form() method
        is overridden.
        Refs #19445.
        """
        errors = ValidFormFieldsets(Song, AdminSite()).check()
        self.assertEqual(errors, [])
    def test_fieldsets_fields_non_tuple(self):
        """
        Tests for a tuple/list for the first fieldset's fields.
        """
        class NotATupleAdmin(admin.ModelAdmin):
            list_display = ["pk", "title"]
            list_editable = ["title"]
            fieldsets = [
                (None, {
                    "fields": "title" # not a tuple
                }),
            ]
        errors = NotATupleAdmin(Song, AdminSite()).check()
        expected = [
            checks.Error(
                "The value of 'fieldsets[0][1]['fields']' must be a list or tuple.",
                hint=None,
                obj=NotATupleAdmin,
                id='admin.E008',
            )
        ]
        self.assertEqual(errors, expected)
    def test_nonfirst_fieldset(self):
        """
        Tests for a tuple/list for the second fieldset's fields.
        """
        class NotATupleAdmin(admin.ModelAdmin):
            fieldsets = [
                (None, {
                    "fields": ("title",)
                }),
                ('foo', {
                    "fields": "author" # not a tuple
                }),
            ]
        errors = NotATupleAdmin(Song, AdminSite()).check()
        expected = [
            checks.Error(
                "The value of 'fieldsets[1][1]['fields']' must be a list or tuple.",
                hint=None,
                obj=NotATupleAdmin,
                id='admin.E008',
            )
        ]
        self.assertEqual(errors, expected)
    def test_exclude_values(self):
        """
        Tests for basic system checks of 'exclude' option values (#12689)
        """
        class ExcludedFields1(admin.ModelAdmin):
            exclude = 'foo'
        errors = ExcludedFields1(Book, AdminSite()).check()
        expected = [
            checks.Error(
                "The value of 'exclude' must be a list or tuple.",
                hint=None,
                obj=ExcludedFields1,
                id='admin.E014',
            )
        ]
        self.assertEqual(errors, expected)
    def test_exclude_duplicate_values(self):
        """Duplicate entries in 'exclude' raise admin.E015."""
        class ExcludedFields2(admin.ModelAdmin):
            exclude = ('name', 'name')
        errors = ExcludedFields2(Book, AdminSite()).check()
        expected = [
            checks.Error(
                "The value of 'exclude' contains duplicate field(s).",
                hint=None,
                obj=ExcludedFields2,
                id='admin.E015',
            )
        ]
        self.assertEqual(errors, expected)
    def test_exclude_in_inline(self):
        """A non-list 'exclude' on an inline raises admin.E014 on the inline."""
        class ExcludedFieldsInline(admin.TabularInline):
            model = Song
            exclude = 'foo'
        class ExcludedFieldsAlbumAdmin(admin.ModelAdmin):
            model = Album
            inlines = [ExcludedFieldsInline]
        errors = ExcludedFieldsAlbumAdmin(Album, AdminSite()).check()
        expected = [
            checks.Error(
                "The value of 'exclude' must be a list or tuple.",
                hint=None,
                obj=ExcludedFieldsInline,
                id='admin.E014',
            )
        ]
        self.assertEqual(errors, expected)
    def test_exclude_inline_model_admin(self):
        """
        Regression test for #9932 - exclude in InlineModelAdmin should not
        contain the ForeignKey field used in ModelAdmin.model
        """
        class SongInline(admin.StackedInline):
            model = Song
            exclude = ['album']
        class AlbumAdmin(admin.ModelAdmin):
            model = Album
            inlines = [SongInline]
        errors = AlbumAdmin(Album, AdminSite()).check()
        expected = [
            checks.Error(
                ("Cannot exclude the field 'album', because it is the foreign key "
                 "to the parent model 'admin_checks.Album'."),
                hint=None,
                obj=SongInline,
                id='admin.E201',
            )
        ]
        self.assertEqual(errors, expected)
    def test_valid_generic_inline_model_admin(self):
        """
        Regression test for #22034 - check that generic inlines don't look for
        normal ForeignKey relations.
        """
        class InfluenceInline(GenericStackedInline):
            model = Influence
        class SongAdmin(admin.ModelAdmin):
            inlines = [InfluenceInline]
        errors = SongAdmin(Song, AdminSite()).check()
        self.assertEqual(errors, [])
    def test_generic_inline_model_admin_non_generic_model(self):
        """
        Ensure that a model without a GenericForeignKey raises problems if it's included
        in an GenericInlineModelAdmin definition.
        """
        class BookInline(GenericStackedInline):
            model = Book
        class SongAdmin(admin.ModelAdmin):
            inlines = [BookInline]
        errors = SongAdmin(Song, AdminSite()).check()
        expected = [
            checks.Error(
                "'admin_checks.Book' has no GenericForeignKey.",
                hint=None,
                obj=BookInline,
                id='admin.E301',
            )
        ]
        self.assertEqual(errors, expected)
    def test_generic_inline_model_admin_bad_ct_field(self):
        "A GenericInlineModelAdmin raises problems if the ct_field points to a non-existent field."
        class InfluenceInline(GenericStackedInline):
            model = Influence
            ct_field = 'nonexistent'
        class SongAdmin(admin.ModelAdmin):
            inlines = [InfluenceInline]
        errors = SongAdmin(Song, AdminSite()).check()
        expected = [
            checks.Error(
                "'ct_field' references 'nonexistent', which is not a field on 'admin_checks.Influence'.",
                hint=None,
                obj=InfluenceInline,
                id='admin.E302',
            )
        ]
        self.assertEqual(errors, expected)
    def test_generic_inline_model_admin_bad_fk_field(self):
        "A GenericInlineModelAdmin raises problems if the ct_fk_field points to a non-existent field."
        class InfluenceInline(GenericStackedInline):
            model = Influence
            ct_fk_field = 'nonexistent'
        class SongAdmin(admin.ModelAdmin):
            inlines = [InfluenceInline]
        errors = SongAdmin(Song, AdminSite()).check()
        expected = [
            checks.Error(
                "'ct_fk_field' references 'nonexistent', which is not a field on 'admin_checks.Influence'.",
                hint=None,
                obj=InfluenceInline,
                id='admin.E303',
            )
        ]
        self.assertEqual(errors, expected)
    def test_generic_inline_model_admin_non_gfk_ct_field(self):
        "A GenericInlineModelAdmin raises problems if the ct_field points to a field that isn't part of a GenericForeignKey"
        class InfluenceInline(GenericStackedInline):
            model = Influence
            ct_field = 'name'
        class SongAdmin(admin.ModelAdmin):
            inlines = [InfluenceInline]
        errors = SongAdmin(Song, AdminSite()).check()
        expected = [
            checks.Error(
                "'admin_checks.Influence' has no GenericForeignKey using content type field 'name' and object ID field 'object_id'.",
                hint=None,
                obj=InfluenceInline,
                id='admin.E304',
            )
        ]
        self.assertEqual(errors, expected)
    def test_generic_inline_model_admin_non_gfk_fk_field(self):
        "A GenericInlineModelAdmin raises problems if the ct_fk_field points to a field that isn't part of a GenericForeignKey"
        class InfluenceInline(GenericStackedInline):
            model = Influence
            ct_fk_field = 'name'
        class SongAdmin(admin.ModelAdmin):
            inlines = [InfluenceInline]
        errors = SongAdmin(Song, AdminSite()).check()
        expected = [
            checks.Error(
                "'admin_checks.Influence' has no GenericForeignKey using content type field 'content_type' and object ID field 'name'.",
                hint=None,
                obj=InfluenceInline,
                id='admin.E304',
            )
        ]
        self.assertEqual(errors, expected)
    def test_app_label_in_admin_checks(self):
        """
        Regression test for #15669 - Include app label in admin system check messages
        """
        class RawIdNonexistingAdmin(admin.ModelAdmin):
            raw_id_fields = ('nonexisting',)
        errors = RawIdNonexistingAdmin(Album, AdminSite()).check()
        expected = [
            checks.Error(
                ("The value of 'raw_id_fields[0]' refers to 'nonexisting', which is "
                 "not an attribute of 'admin_checks.Album'."),
                hint=None,
                obj=RawIdNonexistingAdmin,
                id='admin.E002',
            )
        ]
        self.assertEqual(errors, expected)
    def test_fk_exclusion(self):
        """
        Regression test for #11709 - when testing for fk excluding (when exclude is
        given) make sure fk_name is honored or things blow up when there is more
        than one fk to the parent model.
        """
        class TwoAlbumFKAndAnEInline(admin.TabularInline):
            model = TwoAlbumFKAndAnE
            exclude = ("e",)
            fk_name = "album1"
        class MyAdmin(admin.ModelAdmin):
            inlines = [TwoAlbumFKAndAnEInline]
        errors = MyAdmin(Album, AdminSite()).check()
        self.assertEqual(errors, [])
    def test_inline_self_check(self):
        """Two FKs to the parent model without fk_name raise admin.E202."""
        class TwoAlbumFKAndAnEInline(admin.TabularInline):
            model = TwoAlbumFKAndAnE
        class MyAdmin(admin.ModelAdmin):
            inlines = [TwoAlbumFKAndAnEInline]
        errors = MyAdmin(Album, AdminSite()).check()
        expected = [
            checks.Error(
                "'admin_checks.TwoAlbumFKAndAnE' has more than one ForeignKey to 'admin_checks.Album'.",
                hint=None,
                obj=TwoAlbumFKAndAnEInline,
                id='admin.E202',
            )
        ]
        self.assertEqual(errors, expected)
    def test_inline_with_specified(self):
        """Specifying fk_name resolves the two-FK ambiguity of admin.E202."""
        class TwoAlbumFKAndAnEInline(admin.TabularInline):
            model = TwoAlbumFKAndAnE
            fk_name = "album1"
        class MyAdmin(admin.ModelAdmin):
            inlines = [TwoAlbumFKAndAnEInline]
        errors = MyAdmin(Album, AdminSite()).check()
        self.assertEqual(errors, [])
    def test_readonly(self):
        """A model field name in readonly_fields is valid."""
        class SongAdmin(admin.ModelAdmin):
            readonly_fields = ("title",)
        errors = SongAdmin(Song, AdminSite()).check()
        self.assertEqual(errors, [])
    def test_readonly_on_method(self):
        """A plain callable in readonly_fields is valid."""
        def my_function(obj):
            pass
        class SongAdmin(admin.ModelAdmin):
            readonly_fields = (my_function,)
        errors = SongAdmin(Song, AdminSite()).check()
        self.assertEqual(errors, [])
    def test_readonly_on_modeladmin(self):
        """A ModelAdmin method name in readonly_fields is valid."""
        class SongAdmin(admin.ModelAdmin):
            readonly_fields = ("readonly_method_on_modeladmin",)
            def readonly_method_on_modeladmin(self, obj):
                pass
        errors = SongAdmin(Song, AdminSite()).check()
        self.assertEqual(errors, [])
    def test_readonly_dynamic_attribute_on_modeladmin(self):
        """An attribute resolved via __getattr__ in readonly_fields is valid."""
        class SongAdmin(admin.ModelAdmin):
            readonly_fields = ("dynamic_method",)
            def __getattr__(self, item):
                if item == "dynamic_method":
                    def method(obj):
                        pass
                    return method
                raise AttributeError
        errors = SongAdmin(Song, AdminSite()).check()
        self.assertEqual(errors, [])
    def test_readonly_method_on_model(self):
        """A model method name in readonly_fields is valid."""
        class SongAdmin(admin.ModelAdmin):
            readonly_fields = ("readonly_method_on_model",)
        errors = SongAdmin(Song, AdminSite()).check()
        self.assertEqual(errors, [])
    def test_nonexistent_field(self):
        """An unresolvable name in readonly_fields raises admin.E035."""
        class SongAdmin(admin.ModelAdmin):
            readonly_fields = ("title", "nonexistent")
        errors = SongAdmin(Song, AdminSite()).check()
        expected = [
            checks.Error(
                ("The value of 'readonly_fields[1]' is not a callable, an attribute "
                 "of 'SongAdmin', or an attribute of 'admin_checks.Song'."),
                hint=None,
                obj=SongAdmin,
                id='admin.E035',
            )
        ]
        self.assertEqual(errors, expected)
    def test_nonexistent_field_on_inline(self):
        """admin.E035 is also raised for readonly_fields on inlines."""
        class CityInline(admin.TabularInline):
            model = City
            readonly_fields = ['i_dont_exist'] # Missing attribute
        errors = CityInline(State, AdminSite()).check()
        expected = [
            checks.Error(
                ("The value of 'readonly_fields[0]' is not a callable, an attribute "
                 "of 'CityInline', or an attribute of 'admin_checks.City'."),
                hint=None,
                obj=CityInline,
                id='admin.E035',
            )
        ]
        self.assertEqual(errors, expected)
    def test_extra(self):
        """Extra, unreferenced methods on a ModelAdmin are not flagged."""
        class SongAdmin(admin.ModelAdmin):
            def awesome_song(self, instance):
                if instance.title == "Born to Run":
                    return "Best Ever!"
                return "Status unknown."
        errors = SongAdmin(Song, AdminSite()).check()
        self.assertEqual(errors, [])
    def test_readonly_lambda(self):
        """A lambda in readonly_fields is valid."""
        class SongAdmin(admin.ModelAdmin):
            readonly_fields = (lambda obj: "test",)
        errors = SongAdmin(Song, AdminSite()).check()
        self.assertEqual(errors, [])
    def test_graceful_m2m_fail(self):
        """
        Regression test for #12203/#12237 - Fail more gracefully when a M2M field that
        specifies the 'through' option is included in the 'fields' or the 'fieldsets'
        ModelAdmin options.
        """
        class BookAdmin(admin.ModelAdmin):
            fields = ['authors']
        errors = BookAdmin(Book, AdminSite()).check()
        expected = [
            checks.Error(
                ("The value of 'fields' cannot include the ManyToManyField 'authors', "
                 "because that field manually specifies a relationship model."),
                hint=None,
                obj=BookAdmin,
                id='admin.E013',
            )
        ]
        self.assertEqual(errors, expected)
    def test_cannot_include_through(self):
        """admin.E013 is also raised when the M2M appears inside fieldsets."""
        class FieldsetBookAdmin(admin.ModelAdmin):
            fieldsets = (
                ('Header 1', {'fields': ('name',)}),
                ('Header 2', {'fields': ('authors',)}),
            )
        errors = FieldsetBookAdmin(Book, AdminSite()).check()
        expected = [
            checks.Error(
                ("The value of 'fieldsets[1][1][\"fields\"]' cannot include the ManyToManyField "
                 "'authors', because that field manually specifies a relationship model."),
                hint=None,
                obj=FieldsetBookAdmin,
                id='admin.E013',
            )
        ]
        self.assertEqual(errors, expected)
    def test_nested_fields(self):
        """Nested field tuples inside 'fields' are valid."""
        class NestedFieldsAdmin(admin.ModelAdmin):
            fields = ('price', ('name', 'subtitle'))
        errors = NestedFieldsAdmin(Book, AdminSite()).check()
        self.assertEqual(errors, [])
    def test_nested_fieldsets(self):
        """Nested field tuples inside fieldsets are valid."""
        class NestedFieldsetAdmin(admin.ModelAdmin):
            fieldsets = (
                ('Main', {'fields': ('price', ('name', 'subtitle'))}),
            )
        errors = NestedFieldsetAdmin(Book, AdminSite()).check()
        self.assertEqual(errors, [])
    def test_explicit_through_override(self):
        """
        Regression test for #12209 -- If the explicitly provided through model
        is specified as a string, the admin should still be able use
        Model.m2m_field.through
        """
        class AuthorsInline(admin.TabularInline):
            model = Book.authors.through
        class BookAdmin(admin.ModelAdmin):
            inlines = [AuthorsInline]
        errors = BookAdmin(Book, AdminSite()).check()
        self.assertEqual(errors, [])
    def test_non_model_fields(self):
        """
        Regression for ensuring ModelAdmin.fields can contain non-model fields
        that broke with r11737
        """
        class SongForm(forms.ModelForm):
            extra_data = forms.CharField()
        class FieldsOnFormOnlyAdmin(admin.ModelAdmin):
            form = SongForm
            fields = ['title', 'extra_data']
        errors = FieldsOnFormOnlyAdmin(Song, AdminSite()).check()
        self.assertEqual(errors, [])
    def test_non_model_first_field(self):
        """
        Regression for ensuring ModelAdmin.field can handle first elem being a
        non-model field (test fix for UnboundLocalError introduced with r16225).
        """
        class SongForm(forms.ModelForm):
            extra_data = forms.CharField()
            class Meta:
                model = Song
                fields = '__all__'
        class FieldsOnFormOnlyAdmin(admin.ModelAdmin):
            form = SongForm
            fields = ['extra_data', 'title']
        errors = FieldsOnFormOnlyAdmin(Song, AdminSite()).check()
        self.assertEqual(errors, [])
    def test_check_sublists_for_duplicates(self):
        """Duplicates across 'fields' sublists raise admin.E006."""
        class MyModelAdmin(admin.ModelAdmin):
            fields = ['state', ['state']]
        errors = MyModelAdmin(Song, AdminSite()).check()
        expected = [
            checks.Error(
                "The value of 'fields' contains duplicate field(s).",
                hint=None,
                obj=MyModelAdmin,
                id='admin.E006'
            )
        ]
        self.assertEqual(errors, expected)
    def test_check_fieldset_sublists_for_duplicates(self):
        """Duplicates across fieldset sublists raise admin.E012."""
        class MyModelAdmin(admin.ModelAdmin):
            fieldsets = [
                (None, {
                    'fields': ['title', 'album', ('title', 'album')]
                }),
            ]
        errors = MyModelAdmin(Song, AdminSite()).check()
        expected = [
            checks.Error(
                "There are duplicate field(s) in 'fieldsets[0][1]'.",
                hint=None,
                obj=MyModelAdmin,
                id='admin.E012'
            )
        ]
        self.assertEqual(errors, expected)
    def test_list_filter_works_on_through_field_even_when_apps_not_ready(self):
        """
        Ensure list_filter can access reverse fields even when the app registry
        is not ready; refs #24146.
        """
        class BookAdminWithListFilter(admin.ModelAdmin):
            list_filter = ['authorsbooks__featured']
        # Temporarily pretending apps are not ready yet. This issue can happen
        # if the value of 'list_filter' refers to a 'through__field'.
        Book._meta.apps.ready = False
        try:
            errors = BookAdminWithListFilter(Book, AdminSite()).check()
            self.assertEqual(errors, [])
        finally:
            Book._meta.apps.ready = True
| |
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import collections
import imp
import os
from oslo_config import cfg
from oslo_log import log as logging
from oslo_middleware import base
import routes
import six
import webob.dec
import webob.exc
from neutron._i18n import _, _LE, _LI, _LW
from neutron.common import exceptions
import neutron.extensions
from neutron import manager
from neutron.plugins.common import constants as const
from neutron.services import provider_configuration
from neutron import wsgi
LOG = logging.getLogger(__name__)
# Registry of per-alias callables used to decide whether an extension is
# supported; later registrations for the same alias replace earlier ones.
EXTENSION_SUPPORTED_CHECK_MAP = {}
# Aliases whose support does not need to be claimed by any plugin.
_PLUGIN_AGNOSTIC_EXTENSIONS = set()


def register_custom_supported_check(alias, f, plugin_agnostic=False):
    """Register a custom function to determine if extension is supported.

    Consequent calls for the same alias replace the registered function.

    :param alias: API extension alias name.
    :param f: custom check function that returns True if extension is
        supported.
    :param plugin_agnostic: if False, don't require a plugin to claim support
        with supported_extension_aliases. If True, a plugin must claim the
        extension is supported.
    """
    if plugin_agnostic:
        _PLUGIN_AGNOSTIC_EXTENSIONS.add(alias)
    EXTENSION_SUPPORTED_CHECK_MAP[alias] = f
@six.add_metaclass(abc.ABCMeta)
class PluginInterface(object):
    @classmethod
    def __subclasshook__(cls, klass):
        """Checking plugin class.
        The __subclasshook__ method is a class method
        that will be called every time a class is tested
        using issubclass(klass, PluginInterface).
        In that case, it will check that every method
        marked with the abstractmethod decorator is
        provided by the plugin class.
        """
        if not cls.__abstractmethods__:
            # No abstract methods to verify; defer to the default mechanism.
            return NotImplemented
        for method in cls.__abstractmethods__:
            if any(method in base.__dict__ for base in klass.__mro__):
                continue
            # klass is missing at least one required abstract method.
            return NotImplemented
        return True
@six.add_metaclass(abc.ABCMeta)
class ExtensionDescriptor(object):
    """Base class that defines the contract for extensions."""
    @abc.abstractmethod
    def get_name(self):
        """The name of the extension.
        e.g. 'Fox In Socks'
        """
    @abc.abstractmethod
    def get_alias(self):
        """The alias for the extension.
        e.g. 'FOXNSOX'
        """
    @abc.abstractmethod
    def get_description(self):
        """Friendly description for the extension.
        e.g. 'The Fox In Socks Extension'
        """
    @abc.abstractmethod
    def get_updated(self):
        """The timestamp when the extension was last updated.
        e.g. '2011-01-22T13:25:27-06:00'
        """
    # NOTE(justinsb): Not sure of the purpose of this is, vs the XML NS
    def get_resources(self):
        """List of extensions.ResourceExtension extension objects.
        Resources define new nouns, and are accessible through URLs.
        """
        resources = []
        return resources
    def get_actions(self):
        """List of extensions.ActionExtension extension objects.
        Actions are verbs callable from the API.
        """
        actions = []
        return actions
    def get_request_extensions(self):
        """List of extensions.RequestExtension extension objects.
        Request extensions are used to handle custom request data.
        """
        request_exts = []
        return request_exts
    def get_extended_resources(self, version):
        """Retrieve extended resources or attributes for core resources.
        Extended attributes are implemented by a core plugin similarly
        to the attributes defined in the core, and can appear in
        request and response messages. Their names are scoped with the
        extension's prefix. The core API version is passed to this
        function, which must return a
        map[<resource_name>][<attribute_name>][<attribute_property>]
        specifying the extended resource attribute properties required
        by that API version.
        Extension can add resources and their attr definitions too.
        The returned map can be integrated into RESOURCE_ATTRIBUTE_MAP.
        """
        return {}
    def get_plugin_interface(self):
        """Returns an abstract class which defines contract for the plugin.
        The abstract class should inherit from extensions.PluginInterface,
        Methods in this abstract class should be decorated as abstractmethod
        """
        return None
    def get_required_extensions(self):
        """Returns a list of extensions to be processed before this one."""
        return []
    def get_optional_extensions(self):
        """Returns a list of extensions to be processed before this one.
        Unlike get_required_extensions. This will not fail the loading of
        the extension if one of these extensions is not present. This is
        useful for an extension that extends multiple resources across
        other extensions that should still work for the remaining extensions
        when one is missing.
        """
        return []
    def update_attributes_map(self, extended_attributes,
                              extension_attrs_map=None):
        """Update attributes map for this extension.
        This is default method for extending an extension's attributes map.
        An extension can use this method and supplying its own resource
        attribute map in extension_attrs_map argument to extend all its
        attributes that needs to be extended.
        If an extension does not implement update_attributes_map, the method
        does nothing and just return.
        """
        if not extension_attrs_map:
            return
        # Merge this extension's per-resource attrs into the global map,
        # only for resources that already exist in extended_attributes.
        for resource, attrs in six.iteritems(extension_attrs_map):
            extended_attrs = extended_attributes.get(resource)
            if extended_attrs:
                attrs.update(extended_attrs)
    def get_pecan_resources(self):
        """List of PecanResourceExtension extension objects.
        Resources define new nouns, and are accessible through URLs.
        The controllers associated with each instance of
        extensions.ResourceExtension should be a subclass of
        neutron.pecan_wsgi.controllers.utils.NeutronPecanController.
        If a resource is defined in both get_resources and get_pecan_resources,
        the resource defined in get_pecan_resources will take precedence.
        """
        return []
class ActionExtensionController(wsgi.Controller):
    """Dispatches extended POST actions to their registered handlers."""

    def __init__(self, application):
        self.application = application
        self.action_handlers = {}

    def add_action(self, action_name, handler):
        # A later registration for the same action name replaces the earlier.
        self.action_handlers[action_name] = handler

    def action(self, request, id):
        body = self._deserialize(request.body,
                                 request.get_content_type())
        for name, handler in six.iteritems(self.action_handlers):
            if name in body:
                return handler(body, request, id)
        # no action handler found (bump to downstream application)
        return self.application
class RequestExtensionController(wsgi.Controller):
    """Runs registered request handlers against the downstream response."""
    def __init__(self, application):
        # Wrapped WSGI application that produces the base response.
        self.application = application
        self.handlers = []
    def add_handler(self, handler):
        # handler is called as handler(request, response).
        self.handlers.append(handler)
    def process(self, request, *args, **kwargs):
        """Fetch the downstream response and pass it to each handler.

        NOTE(review): each handler receives the *original* response and only
        the last handler's return value is returned, so handlers do not
        chain; with zero handlers registered this raises NameError on
        'response'. Confirm both behaviors are intended.
        """
        res = request.get_response(self.application)
        # currently request handlers are un-ordered
        for handler in self.handlers:
            response = handler(request, res)
        return response
class ExtensionController(wsgi.Controller):
    """REST controller exposing the loaded extensions to API clients."""

    def __init__(self, extension_manager):
        self.extension_manager = extension_manager

    @staticmethod
    def _translate(ext):
        # Serialize a single extension descriptor into a plain dict.
        return {
            'name': ext.get_name(),
            'alias': ext.get_alias(),
            'description': ext.get_description(),
            'updated': ext.get_updated(),
            'links': [],  # TODO(dprince): implement extension links
        }

    def index(self, request):
        """List every loaded extension."""
        translated = [self._translate(ext)
                      for ext in self.extension_manager.extensions.values()]
        return dict(extensions=translated)

    def show(self, request, id):
        """Show a single extension, looked up by its alias."""
        # NOTE(dprince): the extensions alias is used as the 'id' for show
        ext = self.extension_manager.extensions.get(id, None)
        if not ext:
            raise webob.exc.HTTPNotFound(
                _("Extension with alias %s does not exist") % id)
        return dict(extension=self._translate(ext))

    def delete(self, request, id):
        """Extensions cannot be removed through the API."""
        raise webob.exc.HTTPNotFound(_('Resource not found.'))

    def create(self, request):
        """Extensions cannot be created through the API."""
        raise webob.exc.HTTPNotFound(_('Resource not found.'))
class ExtensionMiddleware(base.ConfigurableMiddleware):
    """Extensions middleware for WSGI."""
    def __init__(self, application,
                 ext_mgr=None):
        # Fall back to a manager loaded from the configured extension path.
        self.ext_mgr = (ext_mgr
                        or ExtensionManager(get_extensions_path()))
        mapper = routes.Mapper()
        # extended resources
        for resource in self.ext_mgr.get_resources():
            path_prefix = resource.path_prefix
            if resource.parent:
                # Nest the resource under its parent collection, e.g.
                # <prefix>/<parent_collection>/{<parent_member>_id}/...
                path_prefix = (resource.path_prefix +
                               "/%s/{%s_id}" %
                               (resource.parent["collection_name"],
                                resource.parent["member_name"]))
            LOG.debug('Extended resource: %s',
                      resource.collection)
            # Routes for custom collection-level actions (one per action).
            for action, method in six.iteritems(resource.collection_actions):
                conditions = dict(method=[method])
                path = "/%s/%s" % (resource.collection, action)
                with mapper.submapper(controller=resource.controller,
                                      action=action,
                                      path_prefix=path_prefix,
                                      conditions=conditions) as submap:
                    submap.connect(path_prefix + path, path)
                    submap.connect(path_prefix + path + "_format",
                                   "%s.:(format)" % path)
            # Routes for extra HTTP methods on the collection itself.
            for action, method in resource.collection_methods.items():
                conditions = dict(method=[method])
                path = "/%s" % resource.collection
                with mapper.submapper(controller=resource.controller,
                                      action=action,
                                      path_prefix=path_prefix,
                                      conditions=conditions) as submap:
                    submap.connect(path_prefix + path, path)
                    submap.connect(path_prefix + path + "_format",
                                   "%s.:(format)" % path)
            # Standard RESTful routes for the resource collection.
            mapper.resource(resource.collection, resource.collection,
                            controller=resource.controller,
                            member=resource.member_actions,
                            parent_resource=resource.parent,
                            path_prefix=path_prefix)
        # extended actions
        action_controllers = self._action_ext_controllers(application,
                                                          self.ext_mgr, mapper)
        for action in self.ext_mgr.get_actions():
            LOG.debug('Extended action: %s', action.action_name)
            controller = action_controllers[action.collection]
            controller.add_action(action.action_name, action.handler)
        # extended requests
        req_controllers = self._request_ext_controllers(application,
                                                        self.ext_mgr, mapper)
        for request_ext in self.ext_mgr.get_request_extensions():
            LOG.debug('Extended request: %s', request_ext.key)
            controller = req_controllers[request_ext.key]
            controller.add_handler(request_ext.handler)
        self._router = routes.middleware.RoutesMiddleware(self._dispatch,
                                                          mapper)
        super(ExtensionMiddleware, self).__init__(application)
    @classmethod
    def factory(cls, global_config, **local_config):
        """Paste factory.

        NOTE(review): ``global_config`` is passed positionally into
        ``cls(app, global_config, **local_config)``, where it binds to the
        ``ext_mgr`` parameter of __init__; a non-empty (truthy) paste config
        dict would then be used as the extension manager. Confirm callers
        always pass a falsy global_config, or that this should be
        ``cls(app, **local_config)``.
        """
        def _factory(app):
            return cls(app, global_config, **local_config)
        return _factory
    def _action_ext_controllers(self, application, ext_mgr, mapper):
        """Return a dict of ActionExtensionController-s by collection."""
        action_controllers = {}
        for action in ext_mgr.get_actions():
            # One shared controller (and route pair) per collection.
            if action.collection not in action_controllers.keys():
                controller = ActionExtensionController(application)
                mapper.connect("/%s/:(id)/action.:(format)" %
                               action.collection,
                               action='action',
                               controller=controller,
                               conditions=dict(method=['POST']))
                mapper.connect("/%s/:(id)/action" % action.collection,
                               action='action',
                               controller=controller,
                               conditions=dict(method=['POST']))
                action_controllers[action.collection] = controller
        return action_controllers
    def _request_ext_controllers(self, application, ext_mgr, mapper):
        """Returns a dict of RequestExtensionController-s by collection."""
        request_ext_controllers = {}
        for req_ext in ext_mgr.get_request_extensions():
            # One shared controller (and route pair) per request-ext key.
            if req_ext.key not in request_ext_controllers.keys():
                controller = RequestExtensionController(application)
                mapper.connect(req_ext.url_route + '.:(format)',
                               action='process',
                               controller=controller,
                               conditions=req_ext.conditions)
                mapper.connect(req_ext.url_route,
                               action='process',
                               controller=controller,
                               conditions=req_ext.conditions)
                request_ext_controllers[req_ext.key] = controller
        return request_ext_controllers
    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        """Route the incoming request with router."""
        # Stash the wrapped app so _dispatch can fall back to it; returning
        # the router WSGI app makes wsgify invoke it for this request.
        req.environ['extended.app'] = self.application
        return self._router
    @staticmethod
    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def _dispatch(req):
        """Dispatch the request.
        Returns the routed WSGI app's response or defers to the extended
        application.
        """
        match = req.environ['wsgiorg.routing_args'][1]
        if not match:
            # No extension route matched: hand off to the wrapped app.
            return req.environ['extended.app']
        app = match['controller']
        return app
def plugin_aware_extension_middleware_factory(global_config, **local_config):
    """Paste factory."""
    def _factory(app):
        # Use the process-wide plugin-aware manager for extension discovery.
        mgr = PluginAwareExtensionManager.get_instance()
        return ExtensionMiddleware(app, ext_mgr=mgr)
    return _factory
class ExtensionManager(object):
    """Load extensions from the configured extension path.

    See tests/unit/extensions/foxinsocks.py for an
    example extension implementation.
    """

    def __init__(self, path):
        # ``path`` is a ':'-separated list of directories to scan.
        LOG.info(_LI('Initializing extension manager.'))
        self.path = path
        # Maps extension alias -> extension instance.
        self.extensions = {}
        self._load_all_extensions()

    def get_resources(self):
        """Returns a list of ResourceExtension objects."""
        resources = []
        # The 'extensions' meta-resource is always exposed.
        resources.append(ResourceExtension('extensions',
                                           ExtensionController(self)))
        for ext in self.extensions.values():
            resources.extend(ext.get_resources())
        return resources

    def get_pecan_resources(self):
        """Returns a list of PecanResourceExtension objects."""
        resources = []
        for ext in self.extensions.values():
            # TODO(blogan): this is being called because there are side effects
            # that the get_resources method does, like registering plural
            # mappings and quotas. The side effects that get_resources does
            # should probably be moved to another extension method, but that
            # should be done some other time.
            ext.get_resources()
            resources.extend(ext.get_pecan_resources())
        return resources

    def get_actions(self):
        """Returns a list of ActionExtension objects."""
        actions = []
        for ext in self.extensions.values():
            actions.extend(ext.get_actions())
        return actions

    def get_request_extensions(self):
        """Returns a list of RequestExtension objects."""
        request_exts = []
        for ext in self.extensions.values():
            request_exts.extend(ext.get_request_extensions())
        return request_exts

    def extend_resources(self, version, attr_map):
        """Extend resources with additional resources or attributes.

        :param attr_map: the existing mapping from resource name to
            attrs definition.

        After this function, we will extend the attr_map if an extension
        wants to extend this map.

        Extensions are processed in dependency order: an extension is
        only applied once everything it requires (and, on the first
        pass, everything it optionally wants) has been applied.
        """
        processed_exts = {}
        exts_to_process = self.extensions.copy()
        check_optionals = True
        # Iterate until there are unprocessed extensions or if no progress
        # is made in a whole iteration
        while exts_to_process:
            processed_ext_count = len(processed_exts)
            for ext_name, ext in list(exts_to_process.items()):
                # Process extension only if all required extensions
                # have been processed already
                required_exts_set = set(ext.get_required_extensions())
                if required_exts_set - set(processed_exts):
                    continue
                optional_exts_set = set(ext.get_optional_extensions())
                if check_optionals and optional_exts_set - set(processed_exts):
                    continue
                extended_attrs = ext.get_extended_resources(version)
                for res, resource_attrs in six.iteritems(extended_attrs):
                    attr_map.setdefault(res, {}).update(resource_attrs)
                processed_exts[ext_name] = ext
                del exts_to_process[ext_name]
            if len(processed_exts) == processed_ext_count:
                # if we hit here, it means there are unsatisfied
                # dependencies. try again without optionals since optionals
                # are only necessary to set order if they are present.
                if check_optionals:
                    check_optionals = False
                    continue
                # Exit loop as no progress was made
                break
        if exts_to_process:
            unloadable_extensions = set(exts_to_process.keys())
            LOG.error(_LE("Unable to process extensions (%s) because "
                          "the configured plugins do not satisfy "
                          "their requirements. Some features will not "
                          "work as expected."),
                      ', '.join(unloadable_extensions))
            self._check_faulty_extensions(unloadable_extensions)
        # Extending extensions' attributes map.
        for ext in processed_exts.values():
            ext.update_attributes_map(attr_map)

    def _check_faulty_extensions(self, faulty_extensions):
        """Raise for non-default faulty extensions.

        Gracefully fail for defective default extensions, which will be
        removed from the list of loaded extensions.
        """
        default_extensions = set(const.DEFAULT_SERVICE_PLUGINS.values())
        if not faulty_extensions <= default_extensions:
            raise exceptions.ExtensionsNotFound(
                extensions=list(faulty_extensions))
        else:
            # Remove the faulty extensions so that they do not show during
            # ext-list
            for ext in faulty_extensions:
                try:
                    del self.extensions[ext]
                except KeyError:
                    pass

    def _check_extension(self, extension):
        """Checks for required methods in extension objects."""
        try:
            # Each accessor below raises AttributeError when missing,
            # which marks the extension as invalid.
            LOG.debug('Ext name: %s', extension.get_name())
            LOG.debug('Ext alias: %s', extension.get_alias())
            LOG.debug('Ext description: %s', extension.get_description())
            LOG.debug('Ext updated: %s', extension.get_updated())
        except AttributeError:
            LOG.exception(_LE("Exception loading extension"))
            return False
        return isinstance(extension, ExtensionDescriptor)

    def _load_all_extensions(self):
        """Load extensions from the configured path.

        The extension name is constructed from the module_name. If your
        extension module is named widgets.py, the extension class within that
        module should be 'Widgets'.

        See tests/unit/extensions/foxinsocks.py for an example extension
        implementation.
        """
        for path in self.path.split(':'):
            if os.path.exists(path):
                self._load_all_extensions_from_path(path)
            else:
                LOG.error(_LE("Extension path '%s' doesn't exist!"), path)

    def _load_all_extensions_from_path(self, path):
        # Sorting the extension list makes the order in which they
        # are loaded predictable across a cluster of load-balanced
        # Neutron Servers
        for f in sorted(os.listdir(path)):
            try:
                LOG.debug('Loading extension file: %s', f)
                mod_name, file_ext = os.path.splitext(os.path.split(f)[-1])
                ext_path = os.path.join(path, f)
                if file_ext.lower() == '.py' and not mod_name.startswith('_'):
                    mod = imp.load_source(mod_name, ext_path)
                    # The expected class name is the module name with its
                    # first letter capitalized (widgets.py -> Widgets).
                    ext_name = mod_name[0].upper() + mod_name[1:]
                    new_ext_class = getattr(mod, ext_name, None)
                    if not new_ext_class:
                        LOG.warning(_LW('Did not find expected name '
                                        '"%(ext_name)s" in %(file)s'),
                                    {'ext_name': ext_name,
                                     'file': ext_path})
                        continue
                    new_ext = new_ext_class()
                    self.add_extension(new_ext)
            except Exception as exception:
                # A broken extension file must not abort loading the rest.
                LOG.warning(_LW("Extension file %(f)s wasn't loaded due to "
                                "%(exception)s"),
                            {'f': f, 'exception': exception})

    def add_extension(self, ext):
        # Do nothing if the extension doesn't check out
        if not self._check_extension(ext):
            return
        alias = ext.get_alias()
        LOG.info(_LI('Loaded extension: %s'), alias)
        if alias in self.extensions:
            raise exceptions.DuplicatedExtension(alias=alias)
        self.extensions[alias] = ext
class PluginAwareExtensionManager(ExtensionManager):
    """ExtensionManager that only admits extensions a loaded plugin supports."""

    # Process-wide singleton; see get_instance()/clear_instance().
    _instance = None

    def __init__(self, path, plugins):
        self.plugins = plugins
        super(PluginAwareExtensionManager, self).__init__(path)
        self.check_if_plugin_extensions_loaded()

    def _check_extension(self, extension):
        """Check if an extension is supported by any plugin."""
        extension_is_valid = super(PluginAwareExtensionManager,
                                   self)._check_extension(extension)
        if not extension_is_valid:
            return False
        alias = extension.get_alias()
        # Some aliases have a dedicated support-check callable that
        # overrides plugin-based discovery.
        if alias in EXTENSION_SUPPORTED_CHECK_MAP:
            return EXTENSION_SUPPORTED_CHECK_MAP[alias]()
        return (self._plugins_support(extension) and
                self._plugins_implement_interface(extension))

    def _plugins_support(self, extension):
        # True when some loaded plugin advertises the extension's alias.
        alias = extension.get_alias()
        supports_extension = alias in self.get_supported_extension_aliases()
        if not supports_extension:
            LOG.info(_LI("Extension %s not supported by any of loaded "
                         "plugins"),
                     alias)
        return supports_extension

    def _plugins_implement_interface(self, extension):
        # Extensions that declare no plugin interface pass trivially.
        if extension.get_plugin_interface() is None:
            return True
        for plugin in self.plugins.values():
            if isinstance(plugin, extension.get_plugin_interface()):
                return True
        LOG.warning(_LW("Loaded plugins do not implement extension "
                        "%s interface"),
                    extension.get_alias())
        return False

    @classmethod
    def get_instance(cls):
        # Lazily build the singleton from the currently loaded
        # service plugins.
        if cls._instance is None:
            service_plugins = manager.NeutronManager.get_service_plugins()
            cls._instance = cls(get_extensions_path(service_plugins),
                                service_plugins)
        return cls._instance

    def get_plugin_supported_extension_aliases(self, plugin):
        """Return extension aliases supported by a given plugin"""
        aliases = set()
        # we also check all classes that the plugins inherit to see if they
        # directly provide support for an extension
        for item in [plugin] + plugin.__class__.mro():
            try:
                aliases |= set(
                    getattr(item, "supported_extension_aliases", []))
            except TypeError:
                # we land here if a class has a @property decorator for
                # supported extension aliases. They only work on objects.
                pass
        return aliases

    def get_supported_extension_aliases(self):
        """Gets extension aliases supported by all plugins."""
        aliases = set()
        for plugin in self.plugins.values():
            aliases |= self.get_plugin_supported_extension_aliases(plugin)
        # Also include aliases whose dedicated support check passes now.
        aliases |= {
            alias
            for alias, func in EXTENSION_SUPPORTED_CHECK_MAP.items()
            if func()
        }
        return aliases

    @classmethod
    def clear_instance(cls):
        cls._instance = None

    def check_if_plugin_extensions_loaded(self):
        """Check if an extension supported by a plugin has been loaded."""
        plugin_extensions = self.get_supported_extension_aliases()
        missing_aliases = plugin_extensions - set(self.extensions)
        # Plugin-agnostic extensions are allowed to be absent.
        missing_aliases -= _PLUGIN_AGNOSTIC_EXTENSIONS
        if missing_aliases:
            raise exceptions.ExtensionsNotFound(
                extensions=list(missing_aliases))
class RequestExtension(object):
    """Extend requests and responses of core Neutron OpenStack API controllers.

    Provide a way to add data to responses and handle custom request data
    that is sent to core Neutron OpenStack API controllers.
    """

    def __init__(self, method, url_route, handler):
        # Restrict routing to the single HTTP method this extension serves.
        self.conditions = dict(method=[method])
        self.url_route = url_route
        self.handler = handler
        # Identity used to de-duplicate controllers per (method, route).
        self.key = "%s-%s" % (method, url_route)
class ActionExtension(object):
    """Add custom actions to core Neutron OpenStack API controllers."""

    def __init__(self, collection, action_name, handler):
        # The resource collection the action hangs off, the action's
        # name, and the callable implementing it.
        self.handler = handler
        self.action_name = action_name
        self.collection = collection
class ResourceExtension(object):
    """Add top level resources to the OpenStack API in Neutron."""

    def __init__(self, collection, controller, parent=None, path_prefix="",
                 collection_actions=None, member_actions=None, attr_map=None,
                 collection_methods=None):
        self.collection = collection
        self.controller = controller
        self.parent = parent
        self.path_prefix = path_prefix
        # Falsy mapping arguments become fresh empty dicts so a mutable
        # default is never shared between instances.
        self.collection_actions = collection_actions or {}
        self.collection_methods = collection_methods or {}
        self.member_actions = member_actions or {}
        self.attr_map = attr_map or {}
# Returns the extension paths from a config entry and the __path__
# of neutron.extensions
def get_extensions_path(service_plugins=None):
    """Return a ':'-joined, de-duplicated string of extension directories.

    The OrderedDict is used as an ordered set: insertion order is kept
    and duplicate paths collapse onto their first occurrence.
    """
    paths = collections.OrderedDict()
    # Add Neutron core extensions
    paths[neutron.extensions.__path__[0]] = 1
    if service_plugins:
        # Add Neutron *-aas extensions
        for plugin in service_plugins.values():
            neutron_mod = provider_configuration.NeutronModule(
                plugin.__module__.split('.')[0])
            try:
                paths[neutron_mod.module().extensions.__path__[0]] = 1
            except AttributeError:
                # Occurs normally if module has no extensions sub-module
                pass
    # Add external/other plugins extensions
    if cfg.CONF.api_extensions_path:
        for path in cfg.CONF.api_extensions_path.split(":"):
            paths[path] = 1
    LOG.debug("get_extension_paths = %s", paths)
    # Re-build the extension string
    path = ':'.join(paths)
    return path
def append_api_extensions_path(paths):
    """Merge *paths* into cfg.CONF.api_extensions_path, de-duplicated."""
    merged = set(paths)
    merged.add(cfg.CONF.api_extensions_path)
    # Empty entries are dropped before rebuilding the ':'-separated string.
    cfg.CONF.set_override('api_extensions_path',
                          ':'.join(p for p in merged if p))
| |
from otp.ai.AIBase import *
from toontown.toonbase import ToontownGlobals
from direct.distributed.ClockDelta import *
from ElevatorConstants import *
from direct.distributed import DistributedObjectAI
from direct.fsm import ClassicFSM, State
from direct.fsm import State
from direct.task import Task
from direct.directnotify import DirectNotifyGlobal
class DistributedElevatorAI(DistributedObjectAI.DistributedObjectAI):
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedElevatorAI')
def __init__(self, air, bldg, numSeats = 4, antiShuffle = 0, minLaff = 0):
DistributedObjectAI.DistributedObjectAI.__init__(self, air)
self.type = ELEVATOR_NORMAL
self.countdownTime = ElevatorData[self.type]['countdown']
self.bldg = bldg
self.bldgDoId = bldg.getDoId()
self.seats = []
self.setAntiShuffle(antiShuffle)
self.setMinLaff(minLaff)
if self.antiShuffle:
if not hasattr(simbase.air, 'elevatorTripId'):
simbase.air.elevatorTripId = 1
self.elevatorTripId = simbase.air.elevatorTripId
simbase.air.elevatorTripId += 1
else:
self.elevatorTripId = 0
for seat in xrange(numSeats):
self.seats.append(None)
self.accepting = 0
self.fsm = ClassicFSM.ClassicFSM('DistributedElevatorAI', [State.State('off', self.enterOff, self.exitOff, ['opening', 'closed']),
State.State('opening', self.enterOpening, self.exitOpening, ['waitEmpty', 'waitCountdown']),
State.State('waitEmpty', self.enterWaitEmpty, self.exitWaitEmpty, ['waitCountdown']),
State.State('waitCountdown', self.enterWaitCountdown, self.exitWaitCountdown, ['waitEmpty', 'allAboard']),
State.State('allAboard', self.enterAllAboard, self.exitAllAboard, ['closing', 'waitEmpty']),
State.State('closing', self.enterClosing, self.exitClosing, ['closed', 'waitEmpty']),
State.State('closed', self.enterClosed, self.exitClosed, ['opening'])], 'off', 'off')
self.fsm.enterInitialState()
self.boardingParty = None
return
def delete(self):
self.fsm.requestFinalState()
del self.fsm
del self.bldg
self.ignoreAll()
DistributedObjectAI.DistributedObjectAI.delete(self)
def setBoardingParty(self, party):
self.boardingParty = party
def generate(self):
self.start()
DistributedObjectAI.DistributedObjectAI.generate(self)
def getBldgDoId(self):
return self.bldgDoId
def findAvailableSeat(self):
for i in xrange(len(self.seats)):
if self.seats[i] == None:
return i
def findAvatar(self, avId):
for i in xrange(len(self.seats)):
if self.seats[i] == avId:
return i
def countFullSeats(self):
avCounter = 0
for i in self.seats:
if i:
avCounter += 1
return avCounter
def countOpenSeats(self):
openSeats = 0
for i in xrange(len(self.seats)):
if self.seats[i] is None:
openSeats += 1
return openSeats
def rejectingBoardersHandler(self, avId, reason = 0, wantBoardingShow = 0):
self.rejectBoarder(avId, reason)
def rejectBoarder(self, avId, reason = 0):
self.sendUpdateToAvatarId(avId, 'rejectBoard', [avId, reason])
def acceptingBoardersHandler(self, avId, reason = 0, wantBoardingShow = 0):
self.notify.debug('acceptingBoardersHandler')
seatIndex = self.findAvailableSeat()
if seatIndex == None:
self.rejectBoarder(avId, REJECT_NOSEAT)
else:
self.acceptBoarder(avId, seatIndex, wantBoardingShow)
return
def acceptBoarder(self, avId, seatIndex, wantBoardingShow = 0):
self.notify.debug('acceptBoarder')
if self.findAvatar(avId) != None:
return
self.seats[seatIndex] = avId
self.timeOfBoarding = globalClock.getRealTime()
if wantBoardingShow:
self.timeOfGroupBoarding = globalClock.getRealTime()
self.sendUpdate('fillSlot' + str(seatIndex), [avId, wantBoardingShow])
if self.fsm.getCurrentState().getName() == 'waitEmpty':
self.fsm.request('waitCountdown')
elif self.fsm.getCurrentState().getName() == 'waitCountdown' and self.findAvailableSeat() is None:
self.fsm.request('allAboard')
return
def rejectingExitersHandler(self, avId):
self.rejectExiter(avId)
def rejectExiter(self, avId):
pass
def acceptingExitersHandler(self, avId):
self.acceptExiter(avId)
def clearEmptyNow(self, seatIndex):
self.sendUpdate('emptySlot' + str(seatIndex), [0,
0,
globalClockDelta.getRealNetworkTime(),
0])
def clearFullNow(self, seatIndex):
avId = self.seats[seatIndex]
if avId == None:
self.notify.warning('Clearing an empty seat index: ' + str(seatIndex) + ' ... Strange...')
else:
self.seats[seatIndex] = None
self.sendUpdate('fillSlot' + str(seatIndex), [0, 0])
self.ignore(self.air.getAvatarExitEvent(avId))
return
def d_setState(self, state):
self.sendUpdate('setState', [state, globalClockDelta.getRealNetworkTime()])
def getState(self):
return self.fsm.getCurrentState().getName()
def avIsOKToBoard(self, av):
return av.hp > self.minLaff and self.accepting
def checkBoard(self, av):
if av.hp < self.minLaff:
return REJECT_MINLAFF
return 0
def requestBoard(self, *args):
self.notify.debug('requestBoard')
avId = self.air.getAvatarIdFromSender()
if self.findAvatar(avId) != None:
self.notify.warning('Ignoring multiple requests from %s to board.' % avId)
return
av = self.air.doId2do.get(avId)
if av:
boardResponse = self.checkBoard(av)
newArgs = (avId,) + args + (boardResponse,)
if self.boardingParty and self.boardingParty.hasActiveGroup(avId) and self.boardingParty.getGroupLeader(avId) != avId:
self.notify.warning('Rejecting %s from boarding the elevator because he is already part of a Boarding Group.' % avId)
self.rejectingBoardersHandler(*newArgs)
return
if boardResponse == 0:
self.acceptingBoardersHandler(*newArgs)
else:
self.rejectingBoardersHandler(*newArgs)
else:
self.notify.warning('avid: %s does not exist, but tried to board an elevator' % avId)
return
def partyAvatarBoard(self, avatar, wantBoardingShow = 0):
av = avatar
avId = avatar.doId
if self.findAvatar(avId) != None:
self.notify.warning('Ignoring multiple requests from %s to board.' % avId)
return
if av:
boardResponse = self.checkBoard(av)
newArgs = (avId,) + (boardResponse,) + (wantBoardingShow,)
if boardResponse == 0:
self.acceptingBoardersHandler(*newArgs)
else:
self.rejectingBoardersHandler(*newArgs)
else:
self.notify.warning('avid: %s does not exist, but tried to board an elevator' % avId)
return
def requestExit(self, *args):
self.notify.debug('requestExit')
avId = self.air.getAvatarIdFromSender()
av = self.air.doId2do.get(avId)
if av:
newArgs = (avId,) + args
if self.accepting:
self.acceptingExitersHandler(*newArgs)
else:
self.rejectingExitersHandler(*newArgs)
else:
self.notify.warning('avId: %s does not exist, but tried to exit an elevator' % avId)
def start(self):
self.open()
def enterOff(self):
self.accepting = 0
self.timeOfBoarding = None
self.timeOfGroupBoarding = None
if hasattr(self, 'doId'):
for seatIndex in xrange(len(self.seats)):
taskMgr.remove(self.uniqueName('clearEmpty-' + str(seatIndex)))
return
def exitOff(self):
self.accepting = 0
def open(self):
self.fsm.request('opening')
def enterOpening(self):
self.d_setState('opening')
self.accepting = 0
for seat in self.seats:
seat = None
def exitOpening(self):
self.accepting = 0
taskMgr.remove(self.uniqueName('opening-timer'))
def enterWaitCountdown(self):
self.d_setState('waitCountdown')
self.accepting = 1
def exitWaitCountdown(self):
print 'exit wait countdown'
self.accepting = 0
taskMgr.remove(self.uniqueName('countdown-timer'))
self.newTrip()
def enterAllAboard(self):
self.accepting = 0
def exitAllAboard(self):
self.accepting = 0
taskMgr.remove(self.uniqueName('waitForAllAboard'))
def enterClosing(self):
self.d_setState('closing')
self.accepting = 0
def exitClosing(self):
self.accepting = 0
taskMgr.remove(self.uniqueName('closing-timer'))
def enterClosed(self):
self.d_setState('closed')
def exitClosed(self):
pass
def enterWaitEmpty(self):
self.d_setState('waitEmpty')
self.accepting = 1
def exitWaitEmpty(self):
self.accepting = 0
def setElevatorTripId(self, id):
self.elevatorTripId = id
def getElevatorTripId(self):
return self.elevatorTripId
def newTrip(self):
if self.antiShuffle:
self.elevatorTripId = simbase.air.elevatorTripId
if simbase.air.elevatorTripId > 2100000000:
simbase.air.elevatorTripId = 1
simbase.air.elevatorTripId += 1
self.sendUpdate('setElevatorTripId', [self.elevatorTripId])
def setAntiShuffle(self, antiShuffle):
self.antiShuffle = antiShuffle
def getAntiShuffle(self):
return self.antiShuffle
def setMinLaff(self, minLaff):
self.minLaff = minLaff
def getMinLaff(self):
return self.minLaff
| |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from airflow.exceptions import AirflowException
from airflow.contrib.hooks.aws_hook import AwsHook
from six import BytesIO
from urllib.parse import urlparse
import re
import fnmatch
class S3Hook(AwsHook):
    """
    Interact with AWS S3, using the boto3 library.
    """

    def get_conn(self):
        return self.get_client_type('s3')

    @staticmethod
    def parse_s3_url(s3url):
        """
        Splits an ``s3://bucket/key`` style url into (bucket_name, key).

        :raises AirflowException: if the url carries no bucket (netloc).
        """
        parsed_url = urlparse(s3url)
        if not parsed_url.netloc:
            raise AirflowException('Please provide a bucket_name instead of "%s"' % s3url)
        else:
            bucket_name = parsed_url.netloc
            key = parsed_url.path.strip('/')
            return (bucket_name, key)

    def check_for_bucket(self, bucket_name):
        """
        Check if bucket_name exists.

        :param bucket_name: the name of the bucket
        :type bucket_name: str
        """
        try:
            self.get_conn().head_bucket(Bucket=bucket_name)
            return True
        except Exception:
            # Narrowed from a bare "except:", which also swallowed
            # SystemExit/KeyboardInterrupt. A raising head_bucket is how
            # boto3 reports a missing or inaccessible bucket.
            return False

    def get_bucket(self, bucket_name):
        """
        Returns a boto3.S3.Bucket object

        :param bucket_name: the name of the bucket
        :type bucket_name: str
        """
        s3 = self.get_resource_type('s3')
        return s3.Bucket(bucket_name)

    def check_for_prefix(self, bucket_name, prefix, delimiter):
        """
        Checks that a prefix exists in a bucket
        """
        # Normalize so the prefix always ends with the delimiter, then
        # look for it among its parent level's common prefixes.
        prefix = prefix + delimiter if prefix[-1] != delimiter else prefix
        prefix_split = re.split(r'(\w+[{d}])$'.format(d=delimiter), prefix, 1)
        previous_level = prefix_split[0]
        plist = self.list_prefixes(bucket_name, previous_level, delimiter)
        return False if plist is None else prefix in plist

    def list_prefixes(self, bucket_name, prefix='', delimiter=''):
        """
        Lists prefixes in a bucket under prefix

        Returns None (not an empty list) when nothing matches.

        :param bucket_name: the name of the bucket
        :type bucket_name: str
        :param prefix: a key prefix
        :type prefix: str
        :param delimiter: the delimiter marks key hierarchy.
        :type delimiter: str
        """
        paginator = self.get_conn().get_paginator('list_objects_v2')
        response = paginator.paginate(Bucket=bucket_name,
                                      Prefix=prefix,
                                      Delimiter=delimiter)
        has_results = False
        prefixes = []
        for page in response:
            if 'CommonPrefixes' in page:
                has_results = True
                for p in page['CommonPrefixes']:
                    prefixes.append(p['Prefix'])
        if has_results:
            return prefixes

    def list_keys(self, bucket_name, prefix='', delimiter=''):
        """
        Lists keys in a bucket under prefix and not containing delimiter

        Returns None (not an empty list) when nothing matches.

        :param bucket_name: the name of the bucket
        :type bucket_name: str
        :param prefix: a key prefix
        :type prefix: str
        :param delimiter: the delimiter marks key hierarchy.
        :type delimiter: str
        """
        paginator = self.get_conn().get_paginator('list_objects_v2')
        response = paginator.paginate(Bucket=bucket_name,
                                      Prefix=prefix,
                                      Delimiter=delimiter)
        has_results = False
        keys = []
        for page in response:
            if 'Contents' in page:
                has_results = True
                for k in page['Contents']:
                    keys.append(k['Key'])
        if has_results:
            return keys

    def check_for_key(self, key, bucket_name=None):
        """
        Checks if a key exists in a bucket

        :param key: S3 key that will point to the file
        :type key: str
        :param bucket_name: Name of the bucket in which the file is stored
        :type bucket_name: str
        """
        if not bucket_name:
            (bucket_name, key) = self.parse_s3_url(key)
        try:
            self.get_conn().head_object(Bucket=bucket_name, Key=key)
            return True
        except Exception:
            # Narrowed from a bare "except:" (see check_for_bucket).
            return False

    def get_key(self, key, bucket_name=None):
        """
        Returns a boto3.s3.Object

        :param key: the path to the key
        :type key: str
        :param bucket_name: the name of the bucket
        :type bucket_name: str
        """
        if not bucket_name:
            (bucket_name, key) = self.parse_s3_url(key)
        obj = self.get_resource_type('s3').Object(bucket_name, key)
        # Force the HEAD request now so a missing key raises here,
        # not lazily on first attribute access.
        obj.load()
        return obj

    def read_key(self, key, bucket_name=None):
        """
        Reads a key from S3

        :param key: S3 key that will point to the file
        :type key: str
        :param bucket_name: Name of the bucket in which the file is stored
        :type bucket_name: str
        """
        obj = self.get_key(key, bucket_name)
        return obj.get()['Body'].read().decode('utf-8')

    def select_key(self, key, bucket_name=None,
                   expression='SELECT * FROM S3Object',
                   expression_type='SQL',
                   input_serialization=None,
                   output_serialization=None):
        """
        Reads a key with S3 Select.

        :param key: S3 key that will point to the file
        :type key: str
        :param bucket_name: Name of the bucket in which the file is stored
        :type bucket_name: str
        :param expression: S3 Select expression
        :type expression: str
        :param expression_type: S3 Select expression type
        :type expression_type: str
        :param input_serialization: S3 Select input data serialization format,
            defaults to ``{'CSV': {}}``
        :type input_serialization: dict
        :param output_serialization: S3 Select output data serialization
            format, defaults to ``{'CSV': {}}``
        :type output_serialization: dict

        .. seealso::
            For more details about S3 Select parameters:
            http://boto3.readthedocs.io/en/latest/reference/services/s3.html#S3.Client.select_object_content
        """
        # Bug fix: the defaults were mutable dicts shared across calls;
        # use None sentinels and build fresh dicts per call.
        if input_serialization is None:
            input_serialization = {'CSV': {}}
        if output_serialization is None:
            output_serialization = {'CSV': {}}
        if not bucket_name:
            (bucket_name, key) = self.parse_s3_url(key)
        response = self.get_conn().select_object_content(
            Bucket=bucket_name,
            Key=key,
            Expression=expression,
            ExpressionType=expression_type,
            InputSerialization=input_serialization,
            OutputSerialization=output_serialization)
        # The response payload is an event stream; concatenate the
        # 'Records' events into one string.
        return ''.join(event['Records']['Payload']
                       for event in response['Payload']
                       if 'Records' in event)

    def check_for_wildcard_key(self,
                               wildcard_key, bucket_name=None, delimiter=''):
        """
        Checks that a key matching a wildcard expression exists in a bucket
        """
        return self.get_wildcard_key(wildcard_key=wildcard_key,
                                     bucket_name=bucket_name,
                                     delimiter=delimiter) is not None

    def get_wildcard_key(self, wildcard_key, bucket_name=None, delimiter=''):
        """
        Returns a boto3.s3.Object object matching the wildcard expression

        :param wildcard_key: the path to the key
        :type wildcard_key: str
        :param bucket_name: the name of the bucket
        :type bucket_name: str
        """
        if not bucket_name:
            (bucket_name, wildcard_key) = self.parse_s3_url(wildcard_key)
        # List everything under the literal part before the first '*',
        # then fnmatch against the full pattern.
        prefix = re.split(r'[*]', wildcard_key, 1)[0]
        klist = self.list_keys(bucket_name, prefix=prefix, delimiter=delimiter)
        if klist:
            key_matches = [k for k in klist if fnmatch.fnmatch(k, wildcard_key)]
            if key_matches:
                return self.get_key(key_matches[0], bucket_name)

    def load_file(self,
                  filename,
                  key,
                  bucket_name=None,
                  replace=False,
                  encrypt=False):
        """
        Loads a local file to S3

        :param filename: name of the file to load.
        :type filename: str
        :param key: S3 key that will point to the file
        :type key: str
        :param bucket_name: Name of the bucket in which to store the file
        :type bucket_name: str
        :param replace: A flag to decide whether or not to overwrite the key
            if it already exists. If replace is False and the key exists, an
            error will be raised.
        :type replace: bool
        :param encrypt: If True, the file will be encrypted on the server-side
            by S3 and will be stored in an encrypted form while at rest in S3.
        :type encrypt: bool
        """
        if not bucket_name:
            (bucket_name, key) = self.parse_s3_url(key)
        if not replace and self.check_for_key(key, bucket_name):
            raise ValueError("The key {key} already exists.".format(key=key))
        extra_args = {}
        if encrypt:
            extra_args['ServerSideEncryption'] = "AES256"
        client = self.get_conn()
        client.upload_file(filename, bucket_name, key, ExtraArgs=extra_args)

    def load_string(self,
                    string_data,
                    key,
                    bucket_name=None,
                    replace=False,
                    encrypt=False,
                    encoding='utf-8'):
        """
        Loads a string to S3

        This is provided as a convenience to drop a string in S3. It uses the
        boto infrastructure to ship a file to s3.

        :param string_data: string to set as content for the key.
        :type string_data: str
        :param key: S3 key that will point to the file
        :type key: str
        :param bucket_name: Name of the bucket in which to store the file
        :type bucket_name: str
        :param replace: A flag to decide whether or not to overwrite the key
            if it already exists
        :type replace: bool
        :param encrypt: If True, the file will be encrypted on the server-side
            by S3 and will be stored in an encrypted form while at rest in S3.
        :type encrypt: bool
        """
        self.load_bytes(string_data.encode(encoding),
                        key=key,
                        bucket_name=bucket_name,
                        replace=replace,
                        encrypt=encrypt)

    def load_bytes(self,
                   bytes_data,
                   key,
                   bucket_name=None,
                   replace=False,
                   encrypt=False):
        """
        Loads bytes to S3

        This is provided as a convenience to drop a string in S3. It uses the
        boto infrastructure to ship a file to s3.

        :param bytes_data: bytes to set as content for the key.
        :type bytes_data: bytes
        :param key: S3 key that will point to the file
        :type key: str
        :param bucket_name: Name of the bucket in which to store the file
        :type bucket_name: str
        :param replace: A flag to decide whether or not to overwrite the key
            if it already exists
        :type replace: bool
        :param encrypt: If True, the file will be encrypted on the server-side
            by S3 and will be stored in an encrypted form while at rest in S3.
        :type encrypt: bool
        """
        if not bucket_name:
            (bucket_name, key) = self.parse_s3_url(key)
        if not replace and self.check_for_key(key, bucket_name):
            raise ValueError("The key {key} already exists.".format(key=key))
        extra_args = {}
        if encrypt:
            extra_args['ServerSideEncryption'] = "AES256"
        filelike_buffer = BytesIO(bytes_data)
        client = self.get_conn()
        client.upload_fileobj(filelike_buffer, bucket_name, key, ExtraArgs=extra_args)
| |
__author__ = 'deathowl'
import uuid
import hmac
from hashlib import sha1
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import python_2_unicode_compatible
from django.contrib.auth.models import User
from uuidfield import UUIDField
from django.core.exceptions import ValidationError
from schedule.models import Calendar
from django.contrib.auth import models as auth_models
from django.db.models import signals
from django.conf import settings
# Honor a project-configured custom user model, defaulting to Django's
# built-in auth.User.
AUTH_USER_MODEL = getattr(settings, 'AUTH_USER_MODEL', 'auth.User')
@python_2_unicode_compatible
class Token(models.Model):
    """
    The default authorization token model.
    """
    # 40-char hex digest (sha1) doubling as the primary key.
    key = models.CharField(max_length=40, primary_key=True)
    created = models.DateTimeField(auto_now_add=True)

    def save(self, *args, **kwargs):
        # Generate the key lazily on first save.
        if not self.key:
            self.key = self.generate_key()
        return super(Token, self).save(*args, **kwargs)

    def generate_key(self):
        # HMAC-SHA1 over random UUID bytes.
        unique = uuid.uuid4()
        return hmac.new(unique.bytes, digestmod=sha1).hexdigest()

    def __unicode__(self):
        return self.key

    def __str__(self):
        return self.key
@python_2_unicode_compatible
class SchedulePolicy(models.Model):
    """
    Schedule policy
    """
    name = models.CharField(max_length=80, unique=True)
    repeat_times = models.IntegerField()

    class Meta:
        verbose_name = _('schedule_policy')
        verbose_name_plural = _('schedule_policies')

    def __str__(self):
        return self.name

    def natural_key(self):
        # Bug fix: "(self.name)" is just a parenthesized expression, so
        # the method returned a bare string. Django natural keys must be
        # tuples.
        return (self.name,)
@python_2_unicode_compatible
class Service(models.Model):
    """
    A monitored service that incidents are reported against.
    """
    name = models.CharField(max_length=80, unique=True)
    # UUID primary key, auto-generated on creation.
    id = UUIDField(primary_key=True, auto=True)
    retry = models.IntegerField(blank=True, null=True)
    policy = models.ForeignKey(SchedulePolicy, blank=True, null=True)
    escalate_after = models.IntegerField(blank=True, null=True)
    notifications_disabled = models.BooleanField(default=False)

    class Meta:
        verbose_name = _('service')
        verbose_name_plural = _('service')

    def __str__(self):
        return self.name

    def natural_key(self):
        # Bug fix: "(self.id)" is not a tuple; Django natural keys must
        # be tuples.
        return (self.id,)
@python_2_unicode_compatible
class EventLog(models.Model):
    """
    Event Log
    """
    # (stored value, human-readable label) pairs for the action field.
    ACTIONS = (('acknowledge', 'acknowledge'),
               ('resolve', 'resolve'),
               ('silence_service', 'silence service'),
               ('unsilence_service', 'unsilence service'),
               ('silence_incident', 'silence incident'),
               ('unsilence_incident', 'unsilence incident'),
               ('forward', 'forward'),
               ('log', 'log'),
               ('notified', 'notified'),
               ('notification_failed', 'notification failed'),
               ('trigger', 'trigger'))

    @property
    def color(self):
        # Maps an action to a CSS class (presumably Bootstrap table
        # classes -- confirm against the templates).
        colort_dict = {'acknowledge': 'warning',
                       'resolve': 'success',
                       'silence_service': 'active',
                       'unsilence_service': 'active',
                       'silence_incident': 'active',
                       'unsilence_incident': 'active',
                       'forward': 'info',
                       'trigger': 'trigger',
                       'notified': 'success',
                       'notification_failed': 'danger',
                       'log': ''}
        return colort_dict[self.action]

    user = models.ForeignKey(User, blank=True, default=None, null=True, related_name='users')
    incident_key = models.ForeignKey('Incident', blank=True, null=True)
    # Bug fix: max_length must be an int, not the string "100".
    action = models.CharField(choices=ACTIONS, default='log', max_length=100)
    service_key = models.ForeignKey(Service)
    data = models.TextField()
    occurred_at = models.DateTimeField()

    class Meta:
        verbose_name = _('eventlog')
        verbose_name_plural = _('eventlog')

    def __str__(self):
        return self.data

    def natural_key(self):
        return (self.service_key, self.id)
@python_2_unicode_compatible
class Incident(models.Model):
    # Event types accepted by the API (mirrored in clean() below).
    TRIGGER = "trigger"
    RESOLVE = "resolve"
    ACKNOWLEDGE = "acknowledge"
    """
    Incidents are representations of a malfunction in the system.
    """
    # NOTE(review): the string above is not the class docstring (the
    # constants precede it), so it is ignored at runtime.
    service_key = models.ForeignKey(Service)
    incident_key = models.CharField(max_length=200)
    event_type = models.CharField(max_length=15)
    description = models.CharField(max_length=200)
    details = models.TextField()
    occurred_at = models.DateTimeField()

    @property
    def color(self):
        # Maps the event type to a CSS class (presumably Bootstrap --
        # confirm against the templates).
        colort_dict = {'acknowledge': 'warning',
                       'resolve': 'success',
                       'silence_service': 'active',
                       'silence_incident': 'active',
                       'forward': 'info',
                       'trigger': 'trigger',
                       'log': ''}
        return colort_dict[self.event_type]

    class Meta:
        verbose_name = _('incidents')
        verbose_name_plural = _('incidents')
        # One incident per (service, incident key) pair.
        unique_together = (("service_key", "incident_key"),)

    def __str__(self):
        return self.incident_key

    def natural_key(self):
        return (self.service_key, self.incident_key)

    def clean(self):
        # Only the three API event types are valid on save via forms.
        if self.event_type not in ['trigger', 'acknowledge', 'resolve']:
            raise ValidationError("'%s' is an invalid event type, valid values are 'trigger', 'acknowledge' and 'resolve'" % self.event_type)
@python_2_unicode_compatible
class ServiceTokens(models.Model):
    """
    Service tokens

    Join table mapping an API token to the service it may act on.
    """
    name = models.CharField(max_length=80)
    service_id = models.ForeignKey(Service)
    token_id = models.ForeignKey(Token)

    class Meta:
        verbose_name = _('service_tokens')
        verbose_name_plural = _('service_tokens')

    def __str__(self):
        return self.name
@python_2_unicode_compatible
class SchedulePolicyRule(models.Model):
    """
    Schedule rule

    One step of an escalation policy: either a single user or a whole
    calendar of users, tried in ``position`` order.
    """
    schedule_policy = models.ForeignKey(SchedulePolicy, related_name='rules')
    # Ordering of this rule within its policy.
    position = models.IntegerField()
    # Either a single user or a schedule (calendar) may be set.
    user_id = models.ForeignKey(User, blank=True, null=True)
    schedule = models.ForeignKey(Calendar, blank=True, null=True)
    # Delay before escalating past this rule -- unit not shown here,
    # TODO confirm against the escalation engine.
    escalate_after = models.IntegerField()

    class Meta:
        verbose_name = _('schedule_policy_rule')
        verbose_name_plural = _('schedule_policy_rules')

    def __str__(self):
        return str(self.id)

    @classmethod
    def getRulesForService(cls, service):
        # All rules belonging to the escalation policy of ``service``.
        return cls.objects.filter(schedule_policy=service.policy)
class UserProfile(models.Model):
    """Per-user notification settings (phone, pushover, slack, prowl, ...)."""
    user = models.OneToOneField('auth.User', related_name='profile')
    phone_number = models.CharField(max_length=50)
    pushover_user_key = models.CharField(max_length=50)
    pushover_app_key = models.CharField(max_length=50)
    slack_room_name = models.CharField(max_length=50)
    prowl_api_key = models.CharField(max_length=50, blank=True)
    prowl_application = models.CharField(max_length=256, blank=True)
    prowl_url = models.CharField(max_length=512, blank=True)
    rocket_webhook_url = models.CharField(max_length=512, blank=True)
class ServiceSilenced(models.Model):
    """Whether a service's notifications are silenced, and until when."""
    service = models.ForeignKey(Service)
    silenced = models.BooleanField(default=False)
    silenced_until = models.DateTimeField()
class IncidentSilenced(models.Model):
    """Whether a single incident is silenced, and until when."""
    incident = models.ForeignKey(Incident)
    silenced = models.BooleanField(default=False)
    silenced_until = models.DateTimeField()
def create_user_profile(sender, instance, created, **kwargs):
    """post_save hook: attach an empty profile to every newly created user."""
    if not created:
        return
    UserProfile.objects.create(user=instance)
# Create a profile automatically whenever a new User row is inserted.
signals.post_save.connect(create_user_profile, sender=User)
# Detach Django's interactive create_superuser handler from syncdb
# (identified by its dispatch_uid) so automated runs are not prompted.
# NOTE(review): post_syncdb no longer exists in modern Django -- this file
# targets a legacy release.
signals.post_syncdb.disconnect(
    sender=auth_models,
    dispatch_uid='django.contrib.auth.management.create_superuser')
| |
import numpy as np
import unittest
from numba.core.compiler import compile_isolated, Flags
from numba.core import errors, types
from numba import typeof
from numba.tests.support import TestCase, MemoryLeakMixin, tag
# Compiler flag presets shared by all tests in this module.
enable_pyobj_flags = Flags()
# Allow fallback to object mode when type inference fails.
enable_pyobj_flags.enable_pyobject = True
force_pyobj_flags = Flags()
# Always compile in object mode.
force_pyobj_flags.force_pyobject = True
no_pyobj_flags = Flags()
# nopython mode with NRT enabled (needed by functions that allocate arrays).
no_pyobj_flags.nrt = True
# ---------------------------------------------------------------------------
# Fixture functions compiled by TestUnpack below.  The *shape* of each
# function (its unpacking statement) is the feature under test, so the
# bodies must stay exactly as written.
# ---------------------------------------------------------------------------
def unpack_list(l):
    a, b, c = l
    return (a, b, c)

def unpack_shape(a):
    # Unpack an array's shape tuple.
    x, y, z = a.shape
    return x + y + z

def unpack_range():
    a, b, c = range(3)
    return a + b + c

def unpack_range_too_small():
    # Deliberately unpacks too few values: must raise.
    a, b, c = range(2)
    return a + b + c

def unpack_range_too_large():
    # Deliberately unpacks too many values: must raise.
    a, b, c = range(4)
    return a + b + c

def unpack_tuple():
    a, b, c = (1, 2, 3)
    return a + b + c

def unpack_tuple_too_small():
    a, b, c = (1, 2)
    return a + b + c

def unpack_tuple_too_large():
    a, b, c = (1, 2, 3, 4)
    return a + b + c

def unpack_heterogeneous_tuple_too_small():
    a, b, c = (1, 2.5j)
    return a + b + c

def unpack_heterogeneous_tuple_too_large():
    a, b, c = (1, 2.5, 3j, 4)
    return a + b + c

def unpack_heterogeneous_tuple():
    # Mixed int/float/complex element types.
    a, b, c = (1, 2.5, 3j)
    return a + b + c

def unpack_nested_heterogeneous_tuple():
    a, (b, c) = (1, (2.5, 3j))
    return a + b + c

def unpack_arbitrary(seq):
    a, b = seq
    return b, a

def unpack_nrt():
    # Requires NRT: allocates arrays and unpacks a tuple of them.
    a = np.zeros(1)
    b = np.zeros(2)
    tup = b, a
    alpha, beta = tup
    return alpha, beta

def chained_unpack_assign1(x, y):
    # Used to fail in object mode (issue #580)
    a = (b, c) = (x, y)
    (d, e) = a
    return d + e + b + c

def conditional_swap(x, y):
    # Used to produce invalid code (issue #977)
    if x > 0:
        x, y = y, x
    return x, y
class TestUnpack(MemoryLeakMixin, TestCase):
    """Tests of iterable unpacking, in both object and nopython mode."""

    def test_unpack_list(self):
        pyfunc = unpack_list
        cr = compile_isolated(pyfunc, (), flags=force_pyobj_flags)
        cfunc = cr.entry_point
        l = [1, 2, 3]
        self.assertEqual(cfunc(l), pyfunc(l))

    def test_unpack_shape(self, flags=force_pyobj_flags):
        pyfunc = unpack_shape
        cr = compile_isolated(pyfunc, [types.Array(dtype=types.int32,
                                                   ndim=3,
                                                   layout='C')],
                              flags=flags)
        cfunc = cr.entry_point
        a = np.zeros(shape=(1, 2, 3)).astype(np.int32)
        self.assertPreciseEqual(cfunc(a), pyfunc(a))

    def test_unpack_shape_npm(self):
        self.test_unpack_shape(flags=no_pyobj_flags)

    def test_unpack_range(self, flags=force_pyobj_flags):
        self.run_nullary_func(unpack_range, flags)

    def test_unpack_range_npm(self):
        self.test_unpack_range(flags=no_pyobj_flags)

    def test_unpack_tuple(self, flags=force_pyobj_flags):
        self.run_nullary_func(unpack_tuple, flags)

    def test_unpack_tuple_npm(self):
        self.test_unpack_tuple(flags=no_pyobj_flags)

    def test_unpack_heterogeneous_tuple(self, flags=force_pyobj_flags):
        self.run_nullary_func(unpack_heterogeneous_tuple, flags)

    def test_unpack_heterogeneous_tuple_npm(self):
        self.test_unpack_heterogeneous_tuple(flags=no_pyobj_flags)

    def test_unpack_nested_heterogeneous_tuple(self, flags=force_pyobj_flags):
        self.run_nullary_func(unpack_nested_heterogeneous_tuple, flags)

    def test_unpack_nested_heterogeneous_tuple_npm(self):
        self.test_unpack_nested_heterogeneous_tuple(flags=no_pyobj_flags)

    def test_chained_unpack_assign(self, flags=force_pyobj_flags):
        pyfunc = chained_unpack_assign1
        cr = compile_isolated(pyfunc, [types.int32, types.int32],
                              flags=flags)
        cfunc = cr.entry_point
        args = (4, 5)
        self.assertPreciseEqual(cfunc(*args), pyfunc(*args))

    def test_chained_unpack_assign_npm(self):
        self.test_chained_unpack_assign(flags=no_pyobj_flags)

    def check_unpack_error(self, pyfunc, flags=force_pyobj_flags, exc=ValueError):
        """Compile *pyfunc* and check that compiling or calling it raises *exc*."""
        with self.assertRaises(exc):
            cr = compile_isolated(pyfunc, (), flags=flags)
            cfunc = cr.entry_point
            cfunc()

    def test_unpack_tuple_too_small(self):
        self.check_unpack_error(unpack_tuple_too_small)
        self.check_unpack_error(unpack_heterogeneous_tuple_too_small)

    def test_unpack_tuple_too_small_npm(self):
        # In nopython mode the arity mismatch is caught at typing time.
        self.check_unpack_error(unpack_tuple_too_small, no_pyobj_flags,
                                errors.TypingError)
        self.check_unpack_error(unpack_heterogeneous_tuple_too_small,
                                no_pyobj_flags, errors.TypingError)

    def test_unpack_tuple_too_large(self):
        self.check_unpack_error(unpack_tuple_too_large)
        self.check_unpack_error(unpack_heterogeneous_tuple_too_large)

    def test_unpack_tuple_too_large_npm(self):
        self.check_unpack_error(unpack_tuple_too_large, no_pyobj_flags,
                                errors.TypingError)
        self.check_unpack_error(unpack_heterogeneous_tuple_too_large,
                                no_pyobj_flags, errors.TypingError)

    def test_unpack_range_too_small(self):
        self.check_unpack_error(unpack_range_too_small)

    def test_unpack_range_too_small_npm(self):
        self.check_unpack_error(unpack_range_too_small, no_pyobj_flags)

    def test_unpack_range_too_large(self):
        self.check_unpack_error(unpack_range_too_large)

    def test_unpack_range_too_large_npm(self):
        self.check_unpack_error(unpack_range_too_large, no_pyobj_flags)

    def check_conditional_swap(self, flags=force_pyobj_flags):
        """Check conditional_swap both when the swap happens and when it does not."""
        cr = compile_isolated(conditional_swap, (types.int32, types.int32),
                              flags=flags)
        cfunc = cr.entry_point
        self.assertPreciseEqual(cfunc(4, 5), (5, 4))
        self.assertPreciseEqual(cfunc(0, 5), (0, 5))

    def test_conditional_swap(self):
        self.check_conditional_swap()

    def test_conditional_swap_npm(self):
        self.check_conditional_swap(no_pyobj_flags)

    def test_unpack_tuple_of_arrays(self):
        tup = tuple(np.zeros(i + 1) for i in range(2))
        tupty = typeof(tup)
        pyfunc = unpack_arbitrary
        cr = compile_isolated(pyfunc, (tupty,),
                              flags=no_pyobj_flags)
        cfunc = cr.entry_point
        self.assertPreciseEqual(cfunc(tup), pyfunc(tup))

    def test_unpack_nrt(self):
        pyfunc = unpack_nrt
        cr = compile_isolated(pyfunc, (), flags=no_pyobj_flags)
        cfunc = cr.entry_point
        self.assertPreciseEqual(cfunc(), pyfunc())

    def test_invalid_unpack(self):
        # Unpacking a scalar must be rejected at typing time.
        pyfunc = unpack_arbitrary
        with self.assertRaises(errors.TypingError) as raises:
            compile_isolated(pyfunc, (types.int32,), flags=no_pyobj_flags)
        self.assertIn("failed to unpack int32", str(raises.exception))
# Allow running this test file directly.
if __name__ == '__main__':
    unittest.main()
| |
# -*- coding: utf-8 -*-
"""
Launches a GUI mask builder for each file associated with the targets specified
by the 'targets' variable.
When the script is launched, a three pane plot of the science images will be
displayed. The center pane in this triptych is the active pane for which you are
building a mask. The following look-up table indicates which button clicks and
key presses are associated with a given action in the GUI.
--------------------------------------------------------------------------------
Event | Effect
--------------------------------------------------------------------------------
Left Click | Apply a circular aperture mask to the clicked region
Right Click | Delete a circular aperture mask from the clicked region
1 - 6 | Set the size of the circular aperture
Enter | Save the current mask for the center pane to disk
Backspace | Reset the current mask to a blank slate
Left/Right | Change the active pane to the previous/next image
--------------------------------------------------------------------------------
To build a mask, simply click on the regions of the active pane which need to be
masked. The mask will be displayed in the center pane as a semi-opaque white
outline. You can delete regions of the mask with right clicks. Once you are
satisfied with the mask you've built, you can save it to disk with a single
stroke of the Enter key. Press the left or right arrow keys to scroll through
the images for the specified target(s), and press the Backspace key to clear the
current mask to be completely blank. Simply close the GUI window plot to end the
script.
"""
# Imports
import os
import sys
import numpy as np
from matplotlib import pyplot as plt
from astropy.table import Table, Column
from astropy.visualization import ZScaleInterval
# TODO: build a "MaskBuilder" class to manage all these variables and actions.
# Define the mask directory as a global variable
global maskDir
# Add the AstroImage class
import astroimage as ai
#==============================================================================
# *********************** CUSTOM USER CODE ************************************
# this is where the user specifies where the raw data is stored
# and some of the subdirectory structure to find the actual .FITS images
#==============================================================================
# This is the location of all pyBDP data (index, calibration images, reduced...)
pyBDP_data = 'C:\\Users\\Jordan\\FITS_data\\PRISM_data\\pyBDP_data\\201612'
# This is the location where all pyPol data will be saved
pyPol_data = 'C:\\Users\\Jordan\\FITS_data\\PRISM_data\\pyPol_data\\201612'
# The user can speed up the process by defining the "Target" values from
# the fileIndex to be considered for masking.
# Masks can only be produced for targets in this list.
targets = ['NGC2023', 'NGC7023']
# This is the location of the pyBDP processed Data
pyBDP_reducedDir = os.path.join(pyBDP_data, 'pyBDP_reduced_images')
# Setup new directory for polarimetry data
maskDir = os.path.join(pyPol_data, 'Masks')
if (not os.path.isdir(maskDir)):
os.mkdir(maskDir, 0o755)
# Read in the indexFile data and select the filenames
print('\nReading file index from disk')
indexFile = os.path.join(pyPol_data, 'reducedFileIndex.csv')
fileIndex = Table.read(indexFile, format='ascii.csv')
# Determine which parts of the fileIndex pertain to on-target science images
useFiles = np.logical_and(
fileIndex['USE'] == 1,
fileIndex['DITHER_TYPE'] == 'ABBA'
)
useFiles = np.logical_and(
useFiles,
fileIndex['AB'] == 'A'
)
# Further restrict the selection to only include the selected targets
targetFiles = np.zeros((len(fileIndex),), dtype=bool)
for target in targets:
targetFiles = np.logical_or(
targetFiles,
fileIndex['TARGET'] == target
)
# Cull the fileIndex to ONLY include the specified targets
goodTargetRows = np.logical_and(useFiles, targetFiles)
targetRowInds = np.where(goodTargetRows)
fileIndex = fileIndex[targetRowInds]
#******************************************************************************
# Define the event handlers for clicking and keying on the image display
#******************************************************************************
def on_click(event):
    """Mouse handler: paint (left click) or erase (middle/right click) a
    circular region of the mask centred on the clicked pixel."""
    global xList, yList, xx, yy
    global fig, brushSize, axarr, maskImg, thisAxImg

    # Clicks outside the axes have no data coordinates (xdata/ydata are
    # None); ignore them to avoid a TypeError in the distance computation.
    if event.xdata is None or event.ydata is None:
        return
    x, y = event.xdata, event.ydata

    # Compute distances from the click and update mask array.
    # The brush radius is (5 * brushSize) pixels.
    dist = np.sqrt((xx - x)**2 + (yy - y)**2)
    maskInds = np.where(dist < brushSize*5)
    if event.button == 1:
        # Left click: mark the region as masked (1).
        tmpData = maskImg.data
        tmpData[maskInds] = 1
        maskImg.data = tmpData
    if (event.button == 2) or (event.button == 3):
        # Middle/right click: clear the region (0).
        tmpData = maskImg.data
        tmpData[maskInds] = 0
        maskImg.data = tmpData

    # Update contour plot: remove the old contour artists, then redraw.
    # (Assigning a new list to Axes.collections breaks on matplotlib >= 3.5,
    # where the artist sublists are read-only.)
    for coll in list(axarr[1].collections):
        coll.remove()
    axarr[1].contour(xx, yy, maskImg.data, levels=[0.5], colors='white', alpha=0.2)

    # Update the display
    fig.canvas.draw()
def on_key(event):
    """Keyboard handler for the mask-building GUI.

    Keys:
      1-6        set the brush size
      left/right step to the previous/next image (indices wrap around)
      enter      save the current mask to the mask directory
      backspace  reset the current mask to all zeros
    """
    global fileList, targetList, fig, imgNum, brushSize
    global maskDir, maskImg
    global prevImg, thisImg, nextImg
    global prevAxImg, thisAxImg, nextAxImg
    global prevTarget, thisTarget, nextTarget
    global prevMin, thisMin, nextMin
    global prevMax, thisMax, nextMax
    global prevLabel, thisLabel, nextLabel

    # Handle brush sizing
    if event.key == '1':
        brushSize = 1
    elif event.key == '2':
        brushSize = 2
    elif event.key == '3':
        brushSize = 3
    elif event.key == '4':
        brushSize = 4
    elif event.key == '5':
        brushSize = 5
    elif event.key == '6':
        brushSize = 6

    # Increment the image number
    if event.key == 'right' or event.key == 'left':
        if event.key == 'right':
            # Advance to the next image
            imgNum += 1

            # Read in the new files
            prevImg = thisImg
            thisImg = nextImg
            nextImg = ai.ReducedScience.read(fileList[(imgNum + 1) % len(fileList)])

            # Update target info
            prevTarget = thisTarget
            thisTarget = nextTarget
            nextTarget = targetList[(imgNum + 1) % len(fileList)]

            # Build the image scaling intervals
            zScaleGetter = ZScaleInterval()

            # Compute new image display minima
            prevMin = thisMin
            thisMin = nextMin
            nextMin, _ = zScaleGetter.get_limits(nextImg.data)

            # Compute new image display maxima
            prevMax = thisMax
            thisMax = nextMax
            _, nextMax = zScaleGetter.get_limits(nextImg.data)

        if event.key == 'left':
            # Move back to the previous image
            imgNum -= 1

            # Read in the new files
            nextImg = thisImg
            thisImg = prevImg
            prevImg = ai.ReducedScience.read(fileList[(imgNum - 1) % len(fileList)])

            # Update target info
            nextTarget = thisTarget
            thisTarget = prevTarget
            prevTarget = targetList[(imgNum - 1) % len(fileList)]

            # Build the image scaling intervals
            zScaleGetter = ZScaleInterval()

            # Compute new image display minima
            nextMin = thisMin
            thisMin = prevMin
            prevMin, _ = zScaleGetter.get_limits(prevImg.data)

            # Compute new image display maxima
            nextMax = thisMax
            thisMax = prevMax
            _, prevMax = zScaleGetter.get_limits(prevImg.data)

        #*******************************
        # Update the displayed mask
        #*******************************
        # Check which mask files might be usable...
        prevMaskFile = os.path.join(maskDir,
                                    os.path.basename(prevImg.filename))
        thisMaskFile = os.path.join(maskDir,
                                    os.path.basename(thisImg.filename))
        nextMaskFile = os.path.join(maskDir,
                                    os.path.basename(nextImg.filename))
        if os.path.isfile(thisMaskFile):
            # If the mask for this file exists, use it
            print('using this mask: ', os.path.basename(thisMaskFile))
            maskImg = ai.ReducedScience.read(thisMaskFile)
        elif os.path.isfile(prevMaskFile) and (prevTarget == thisTarget):
            # Otherwise check for the mask for the previous file
            print('using previous mask: ', os.path.basename(prevMaskFile))
            maskImg = ai.ReducedScience.read(prevMaskFile)
        elif os.path.isfile(nextMaskFile) and (nextTarget == thisTarget):
            # Then check for the mask of the next file
            print('using next mask: ', os.path.basename(nextMaskFile))
            maskImg = ai.ReducedScience.read(nextMaskFile)
        else:
            # If none of those files exist, build a blank slate
            # Build a mask template (0 = not masked, 1 = masked)
            maskImg = thisImg.copy()
            maskImg.filename = thisMaskFile
            maskImg = maskImg.astype(np.int16)

            # Make sure the uncertainty array is removed from the image.
            # (Narrowed from a bare except: we only expect the attribute
            # to possibly be absent.)
            try:
                del maskImg.uncertainty
            except Exception:
                pass

        # Update contour plot: remove the old contour artists, then redraw.
        # (Assigning a new list to Axes.collections breaks on
        # matplotlib >= 3.5, where the artist sublists are read-only.)
        for coll in list(axarr[1].collections):
            coll.remove()
        axarr[1].contour(xx, yy, maskImg.data, levels=[0.5], colors='white', alpha=0.2)

        # Reassign image display limits
        prevAxImg.set_clim(vmin=prevMin, vmax=prevMax)
        thisAxImg.set_clim(vmin=thisMin, vmax=thisMax)
        nextAxImg.set_clim(vmin=nextMin, vmax=nextMax)

        # Display the new images
        prevAxImg.set_data(prevImg.data)
        thisAxImg.set_data(thisImg.data)
        nextAxImg.set_data(nextImg.data)

        # Update the annotation
        axList = fig.get_axes()
        axList[1].set_title(os.path.basename(thisImg.filename))

        prevStr = (str(prevImg.header['OBJECT']) + '\n' +
                   str(prevImg.header['FILTNME3'] + '\n' +
                   str(prevImg.header['POLPOS'])))
        thisStr = (str(thisImg.header['OBJECT']) + '\n' +
                   str(thisImg.header['FILTNME3'] + '\n' +
                   str(thisImg.header['POLPOS'])))
        nextStr = (str(nextImg.header['OBJECT']) + '\n' +
                   str(nextImg.header['FILTNME3'] + '\n' +
                   str(nextImg.header['POLPOS'])))
        prevLabel.set_text(prevStr)
        thisLabel.set_text(thisStr)
        nextLabel.set_text(nextStr)

        # Update the display
        fig.canvas.draw()

    # Save the generated mask
    if event.key == 'enter':
        # Make sure the header has the right values
        maskImg.header = thisImg.header

        # TODO: make sure the mask ONLY has what it needs
        # i.e., remove uncertainty and convert to np.ubyte type.

        # Write the mask to disk
        maskBasename = os.path.basename(thisImg.filename)
        maskFullname = os.path.join(maskDir, maskBasename)
        print('Writing mask for file {}'.format(maskBasename))
        maskImg.write(maskFullname, clobber=True)

    # Clear out the mask values
    if event.key == 'backspace':
        # Clear out the mask array
        maskImg.data = maskImg.data * np.byte(0)

        # Update contour plot: remove the old contour artists, then redraw.
        for coll in list(axarr[1].collections):
            coll.remove()
        axarr[1].contour(xx, yy, maskImg.data, levels=[0.5], colors='white', alpha=0.2)

        # Update the display
        fig.canvas.draw()
#******************************************************************************
#******************************************************************************
# This is the main script that will load in file names and prepare for plotting
#******************************************************************************
# Declare global variables
#global xList, yList
global xx, yy
global fileList, targetList, fig, imgNum, maskImg
global prevImg, thisImg, nextImg
global prevTarget, thisTarget, nextTarget
global prevAxImg, thisAxImg, nextAxImg
global prevMin, thisMin, nextMin
global prevMax, thisMax, nextMax
global prevLabel, thisLabel, nextLabel
xList = []
yList = []
imgNum = 0 # This number will be the FIRST image to be displayed center...
brushSize = 3 # (5xbrushSize pix) is the size of the region masked
#******************************************************************************
# This script will run the mask building step of the pyPol reduction
#******************************************************************************
# Group the fileIndex by...
# 1. Target
# 2. Waveband
# 3. Dither (pattern)
# 4. Polaroid Angle
fileIndexByTarget = fileIndex.group_by(
['TARGET', 'FILTER', 'POLPOS']
)
# Add the information to the fileList and targetList variables
fileList = fileIndexByTarget['FILENAME'].data.tolist()
targetList = fileIndexByTarget['TARGET'].data.tolist()
#*************************************
# Now prepare to plot the first images
#*************************************
# Read in an image for masking
prevImg = ai.ReducedScience.read(fileList[imgNum - 1])
thisImg = ai.ReducedScience.read(fileList[imgNum])
nextImg = ai.ReducedScience.read(fileList[imgNum + 1])
# Log the targets of the current panes
prevTarget = targetList[imgNum - 1]
thisTarget = targetList[imgNum]
nextTarget = targetList[imgNum + 1]
###
# For some reason the prevTarget, thisTarget, and nextTarget
# variables are not accessible from the event managers the way that
# prevImg, thisImg, and nextImg are.
# I have definitely declared them to be global variables...
# Perhaps they're getting treated as local variables
# because they are modified elsewhere???
# Test if a mask has already been generated for this image
maskFile = os.path.join(maskDir, os.path.basename(thisImg.filename))
if os.path.isfile(maskFile):
# If the mask file exists, use it
maskImg = ai.ReducedScience.read(maskFile)
else:
# If the mask file does not exist, build a blank slate
# Build a mask template (0 = not masked, 1 = masked)
maskImg = thisImg.copy()
maskImg.filename = maskFile
maskImg = maskImg.astype(np.int16)
# Generate 2D X and Y position maps
maskShape = maskImg.shape
grids = np.mgrid[0:maskShape[0], 0:maskShape[1]]
xx = grids[1]
yy = grids[0]
# Build the image displays
# Start by preparing a 1x3 plotting area
fig, axarr = plt.subplots(1, 3, sharey=True)
# Build the image scaling intervals
zScaleGetter = ZScaleInterval()
# Compute image count scaling
prevMin, prevMax = zScaleGetter.get_limits(prevImg.data)
thisMin, thisMax = zScaleGetter.get_limits(thisImg.data)
nextMin, nextMax = zScaleGetter.get_limits(nextImg.data)
# prevMin = np.median(prevImg.data) - 0.25*np.std(prevImg.data)
# prevMax = np.median(prevImg.data) + 2*np.std(prevImg.data)
# thisMin = np.median(thisImg.data) - 0.25*np.std(thisImg.data)
# thisMax = np.median(thisImg.data) + 2*np.std(thisImg.data)
# nextMin = np.median(nextImg.data) - 0.25*np.std(nextImg.data)
# nextMax = np.median(nextImg.data) + 2*np.std(nextImg.data)
# Populate each axis with its image
prevAxImg = prevImg.show(axes = axarr[0], cmap='viridis',
vmin = prevMin, vmax = prevMax, noShow = True)
thisAxImg = thisImg.show(axes = axarr[1], cmap='viridis',
vmin = thisMin, vmax = thisMax, noShow = True)
nextAxImg = nextImg.show(axes = axarr[2], cmap='viridis',
vmin = nextMin, vmax = nextMax, noShow = True)
# Add a contour of the mask array
maskContour = axarr[1].contour(xx, yy, maskImg.data,
levels=[0.5], origin='lower', colors='white', alpha = 0.2)
# Rescale the figure and setup the spacing between images
#fig.set_figheight(5.575, forward=True)
#fig.set_figwidth(17.0, forward=True)
fig.set_size_inches(17, 5.675, forward=True)
plt.subplots_adjust(left = 0.04, bottom = 0.04, right = 0.98, top = 0.96,
wspace = 0.02, hspace = 0.02)
# Add some figure annotation
thisTitle = axarr[1].set_title(os.path.basename(thisImg.filename))
prevStr = (str(prevImg.header['OBJECT']) + '\n' +
str(prevImg.header['FILTNME3'] + '\n' +
str(prevImg.header['POLPOS'])))
thisStr = (str(thisImg.header['OBJECT']) + '\n' +
str(thisImg.header['FILTNME3'] + '\n' +
str(thisImg.header['POLPOS'])))
nextStr = (str(nextImg.header['OBJECT']) + '\n' +
str(nextImg.header['FILTNME3'] + '\n' +
str(nextImg.header['POLPOS'])))
prevLabel = axarr[0].text(20, 875, prevStr,
color = 'white', size = 'medium')
thisLabel = axarr[1].text(20, 875, thisStr,
color = 'white', size = 'medium')
nextLabel = axarr[2].text(20, 875, nextStr,
color = 'white', size = 'medium')
thisShape = thisImg.shape
redLines = axarr[1].plot([thisShape[0]/2, thisShape[0]/2], [0, thisShape[1]],
'-r',
[0, thisShape[0]], [thisShape[1]/2, thisShape[1]/2],
'-r', alpha = 0.4)
#********************************************
#log this for future use!
#********************************************
# A more standard way to handle mouse clicks?
#xyList = fig.ginput(n=-1, timeout=-30, show_clicks=True,
# mouse_add=1, mouse_pop=3, mouse_stop=2)
#********************************************
# Connect the event manager...
cid1 = fig.canvas.mpl_connect('button_press_event',on_click)
cid2 = fig.canvas.mpl_connect('key_press_event', on_key)
# NOW show the image (without continuing execution)
# plt.ion()
plt.show()
# plt.ioff()
#
# pdb.set_trace()
# Disconnect the event manager and close the figure
fig.canvas.mpl_disconnect(cid1)
fig.canvas.mpl_disconnect(cid2)
# Close the plot
plt.close()
| |
########
# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
import json
import urllib
import tarfile
import tempfile
from contextlib import closing
import sh
import fabric.api
from fabric.api import run, sudo
from fabric.contrib.files import exists, sed
from cosmo_tester.framework.testenv import TestCase
from cosmo_tester.framework.util import get_actual_keypath
class RebootManagerTest(TestCase):
def _update_fabric_env(self):
fabric_env = fabric.api.env
fabric_env.update({
'timeout': 30,
'user': self.env.centos_7_image_user,
'key_filename': get_actual_keypath(
self.env, self.env.management_key_path),
'host_string': self.env.management_ip
})
    def _reboot_server(self):
        """Schedule a reboot of the manager VM in one minute (returns
        immediately; the actual reboot happens asynchronously)."""
        self._update_fabric_env()
        return run('sudo shutdown -r +1')
def _get_undefined_services(self):
return [each['display_name']
for each in self.status if 'name' not in each]
def _get_service_names(self):
return [each['display_name']
for each in self.status]
def _get_stopped_services(self):
return [each['display_name'] for each in self.status
if each and 'instances' not in each]
    def setUp(self, *args, **kwargs):
        # Snapshot the manager's service status once; the tests below
        # assert against this cached list.
        super(RebootManagerTest, self).setUp(*args, **kwargs)
        self.status = self.client.manager.get_status()['services']
def is_docker_manager(self):
services = self._get_service_names()
if services.__contains__('ssh'):
return False
return True
    def test_00_pre_reboot(self):
        """Before reboot: every service is defined and running.

        Only meaningful for non-docker managers, where per-service
        detail is available in the status report.
        """
        is_docker_manager = self.is_docker_manager()
        if not is_docker_manager:
            undefined = self._get_undefined_services()
            self.assertEqual(undefined, [],
                             'undefined services: {0}'
                             .format(','.join(undefined)))
            stopped = self._get_stopped_services()
            self.assertEqual(stopped, [], 'stopped services: {0}'
                             .format(','.join(stopped)))
def test_01_during_reboot(self):
is_docker_manager = self.is_docker_manager()
pre_reboot_status = self.status
self._reboot_server()
self._wait_for_management(self.env.management_ip, timeout=180)
post_reboot_status = self.client.manager.get_status()['services']
self.assertEqual(len(pre_reboot_status), len(post_reboot_status),
"number of jobs before reboot isn\'t equal to \
number of jobs after reboot")
zipped = zip(pre_reboot_status, post_reboot_status)
for pre, post in zipped:
if is_docker_manager:
pre_display_name = pre.get('display_name')
post_display_name = pre.get('display_name')
self.assertEqual(pre_display_name, post_display_name,
'pre and post reboot service names '
'should be identical')
self.assertEqual(pre.get('instances')[0].get('state'),
post.get('instances')[0].get('state'),
'pre and post reboot status is not '
'equal:{0}\n{1}'
.format(pre.get('display_name'),
post.get('display_name')))
else:
self.assertEqual(pre.get('name'), post.get('name'),
'pre and post reboot status is not equal:'
'{0}\n {1}'.format(pre.get('name'),
post.get('name')))
    def _wait_for_management(self, ip, timeout, port=80):
        """Wait for url to become available
        :param ip: the manager IP
        :param timeout: in seconds
        :param port: port used by the rest service.
        :return: True or False
        """
        validation_url = 'http://{0}:{1}/blueprints'.format(ip, port)
        end = time.time() + timeout
        while end - time.time() >= 0:
            try:
                # Python 2 urllib; raises IOError while the REST service
                # is unreachable (e.g. during the reboot).
                status = urllib.urlopen(validation_url).getcode()
                if status == 200:
                    return True
            except IOError:
                time.sleep(5)
        return False
    def test_02_post_reboot(self):
        """After reboot: re-run the same checks as test_00 -- every service
        is defined and running (non-docker managers only)."""
        is_docker_manager = self.is_docker_manager()
        if not is_docker_manager:
            undefined = self._get_undefined_services()
            self.assertEqual(undefined, [],
                             'undefined services: {0}'
                             .format(','.join(undefined)))
            stopped = self._get_stopped_services()
            self.assertEqual(stopped, [], 'stopped services: {0}'
                             .format(','.join(stopped)))
    def test_03_cfy_logs(self):
        """Exercise `cfy logs get`, `cfy logs backup` and `cfy logs purge`."""
        self._update_fabric_env()
        fd, tmp_log_archive = tempfile.mkstemp()
        os.close(fd)
        self.logger.info('Testing `cfy logs get`')
        try:
            self.cfy.get_logs(destination_path=tmp_log_archive)
            with closing(tarfile.open(name=tmp_log_archive)) as tar:
                files = [f.name for f in tar.getmembers()]
                self.assertIn('cloudify/journalctl.log', files)
                self.assertIn('cloudify/nginx/cloudify.access.log', files)
            self.logger.info('Success!')
        finally:
            os.remove(tmp_log_archive)
        self.logger.info('Testing `cfy logs backup`')
        self.cfy.backup_logs()
        self.assertTrue(
            sudo('tar -xzvf /var/log/cloudify-manager-logs_*').succeeded)
        self.logger.info('Success!')
        self.logger.info('Testing `cfy logs purge`')
        self.cfy.purge_logs()
        # After a purge the access log should exist but be empty
        # (`[ ! -s file ]` succeeds only for an empty file).
        self.assertTrue(run(
            '[ ! -s /var/log/cloudify/nginx/cloudify.access.log ]',).succeeded)
        self.logger.info('Success!')
    def test_04_tmux_session(self):
        """`cfy ssh` tmux integration: listing sessions (with and without
        tmux installed) and running a remote command."""
        self._update_fabric_env()
        self.logger.info('Test list without tmux installed...')
        try:
            self.cfy.ssh_list()
        except sh.ErrorReturnCode_1 as ex:
            self.assertIn('tmux executable not found on Manager', str(ex))
        self.logger.info('Installing tmux...')
        sudo('yum install tmux -y')
        self.logger.info('Test listing sessions when non are available..')
        output = self.cfy.ssh_list().stdout.splitlines()[-1]
        self.assertIn('No sessions are available.', output)
        sudo('yum remove tmux -y')
        self.logger.info('Test running ssh command...')
        self.cfy.ssh_run_command('echo yay! > /tmp/ssh_test_output_file')
        self._check_remote_file_content('/tmp/ssh_test_output_file', 'yay!')
    def _check_remote_file_content(self, remote_path, desired_content):
        """Fetch *remote_path* from the manager and assert its content
        (trailing newlines stripped); cleans up the local copy and the
        remote file afterwards."""
        fd, temp_file = tempfile.mkstemp()
        os.close(fd)
        try:
            fabric.api.get(remote_path, temp_file)
            with open(temp_file) as f:
                self.assertEqual(f.read().rstrip('\n\r'), desired_content)
        finally:
            os.remove(temp_file)
            run('rm {0}'.format(remote_path))
    def test_05_no_es_clustering(self):
        """Tests that when bootstrapping we don't cluster two elasticsearch
        nodes.
        This test mainly covers the use case where a user bootstraps two
        managers on the same network.
        The test runs two nodes on the same machine. If they're not clustered,
        two nodes on different servers will definitely not be clustered.
        """
        self._update_fabric_env()
        self.logger.info('Duplicating elasticsearch config...')
        sudo('mkdir /etc/es_test')
        sudo('cp /etc/elasticsearch/elasticsearch.yml /etc/es_test/es.yml')
        self.logger.info('Replacing ES REST port for second node...')
        sed('/etc/es_test/es.yml', 'http.port: 9200', 'http.port: 9201',
            use_sudo=True)
        self.logger.info('Running second node...')
        es_cmd = "/usr/share/elasticsearch/bin/elasticsearch \
        -Des.pidfile='/var/run/elasticsearch/es_test.pid' \
        -Des.default.path.home='/usr/share/elasticsearch' \
        -Des.default.path.logs='/var/log/elasticsearch' \
        -Des.default.path.data='/var/lib/elasticsearch' \
        -Des.default.config='/etc/es_test/es.yml' \
        -Des.default.path.conf='/etc/es_test'"
        sudo('nohup {0} >& /dev/null < /dev/null &'.format(es_cmd),
             pty=False)
        # this is a good approximation of how much
        # time it will take to load the node.
        time.sleep(20)
        node1_url = 'http://localhost:9200/_nodes'
        node2_url = 'http://localhost:9201/_nodes'

        def get_node_count(url):
            # in case the node has not been loaded yet, this will retry.
            curl_nodes = 'curl --retry 10 --show-error {0}'.format(url)
            return len(json.loads(run(curl_nodes).stdout)['nodes'])

        self.logger.info(
            'Verifying that both nodes are running but not clustered...')
        # Each node must see only itself; a count > 1 would mean clustering.
        self.assertEqual(get_node_count(node1_url), 1)
        self.assertEqual(get_node_count(node2_url), 1)
def test_06_logrotation(self):
    """Tests logrotation configuration on the manager.

    This goes over some of the logs but for each of services
    and performs logrotation based on the manager blueprint's provided
    logrotate configuration. It then validates that logrotation occurs.
    """
    self._update_fabric_env()
    logs_dir = '/var/log/cloudify'
    # One representative log file per manager service.
    test_log_files = [
        'elasticsearch/elasticsearch.log',
        'influxdb/log.txt',
        'mgmtworker/logs/test.log',
        'rabbitmq/rabbit@cloudifyman.log',
        'rest/cloudify-rest-service.log',
        'logstash/logstash.log',
        'nginx/cloudify.access.log',
        'riemann/riemann.log',
        'webui/backend.log'
    ]
    # the mgmtworker doesn't create a log file upon loading so we're
    # generating one for him.
    sudo('touch /var/log/cloudify/mgmtworker/logs/test.log')
    self.logger.info('Cancelling date suffix on rotation...')
    # Rotated files must be named <log>.<N> (numeric suffix, not a date
    # stamp) so the assertions below can predict the file names.
    sed('/etc/logrotate.conf', 'dateext', '#dateext', use_sudo=True)
    for rotation in range(1, 9):
        for log_file in test_log_files:
            full_log_path = os.path.join(logs_dir, log_file)
            self.logger.info('fallocating 101M in {0}...'.format(
                full_log_path))
            # 101M exceeds the configured size threshold and forces a
            # rotation on the next logrotate run.
            sudo('fallocate -l 101M {0}'.format(full_log_path))
            self.logger.info('Running cron.hourly to apply rotation...')
            sudo('run-parts /etc/cron.hourly')
            rotated_log_path = full_log_path + '.{0}'.format(rotation)
            compressed_log_path = rotated_log_path + '.gz'
            with fabric.api.settings(warn_only=True):
                if rotation == 8:
                    # Only 7 rotations are kept; the 8th must not exist.
                    self.logger.info(
                        'Verifying overshot rotation did not occur: {0}...'
                        .format(compressed_log_path))
                    self.assertFalse(exists(compressed_log_path))
                elif rotation == 1:
                    # The first rotated copy is not compressed yet.
                    self.logger.info(
                        'Verifying rotated log exists: {0}...'.format(
                            rotated_log_path))
                    self.assertTrue(exists(rotated_log_path))
                else:
                    # Later rotations are gzipped.
                    self.logger.info(
                        'Verifying compressed log exists: {0}...'.format(
                            compressed_log_path))
                    self.assertTrue(exists(compressed_log_path))
def test_07_rabbitmq_policies(self):
    """Tests that rabbitmq policies are set accordingly.

    The order of the policies matter. We create them according to a certain
    order in the manager blueprint and that it is the order in which they
    are tested. If we change the order in the policy creation mechanism, we
    need to note that in the test to verify that they are kept in order.
    """
    self._update_fabric_env()
    # Every policy is expected to define both a message TTL and a
    # maximum queue length.
    default_policy = r'{"message-ttl":\d+,"max-length":\d+}'
    # First line of 'rabbitmqctl list_policies' is a header; skip it.
    policies = sudo('rabbitmqctl list_policies').stdout.split('\n')[1:]
    self.logger.info('Verifying pattern in {0}...'.format(policies[0]))
    self.assertIn('^cloudify-events$', policies[0])
    self.assertRegexpMatches(policies[0], default_policy)
    self.logger.info('Verifying pattern in {0}...'.format(policies[1]))
    self.assertIn('^cloudify-logs$', policies[1])
    self.assertRegexpMatches(policies[1], default_policy)
    self.logger.info('Verifying pattern in {0}...'.format(policies[2]))
    self.assertIn('^amq\\\\.gen.*$', policies[2])
    self.assertRegexpMatches(policies[2], default_policy)
    self.logger.info('Verifying pattern in {0}...'.format(policies[3]))
    self.assertIn('^.*-riemann$', policies[3])
    self.assertRegexpMatches(policies[3], default_policy)
| |
#
# peepdf is a tool to analyse and modify PDF files
# http://peepdf.eternal-todo.com
# By Jose Miguel Esparza <jesparza AT eternal-todo.com>
#
# Copyright (C) 2014 Jose Miguel Esparza
#
# This file is part of peepdf.
#
# peepdf is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# peepdf is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with peepdf. If not, see <http://www.gnu.org/licenses/>.
#
# Python version of the jjdecode function written by Syed Zainudeen
# http://csc.cs.utm.my/syed/images/files/jjdecode/jjdecode.html
# +NCR/CRC! [ReVeRsEr] - crackinglandia@gmail.com
#
# The original algorithm was written in Javascript by Yosuke Hasegawa (http://utf-8.jp/public/jjencode.html)
#
# Modified to integrate it with peepdf
import re, sys
class JJDecoder(object):
    """Decoder for JavaScript obfuscated with jjencode.

    Python port of Syed Zainudeen's jjdecode. The decoder scans the
    encoded payload for the fixed token patterns jjencode emits and maps
    them back to the characters they encode.
    """

    def __init__(self, jj_encoded_data):
        # Raw jjencoded JavaScript text to be decoded.
        self.encoded_str = jj_encoded_data

    def clean(self):
        # Strip leading/trailing whitespace in place.
        self.encoded_str = re.sub('^\s+|\s+$', '', self.encoded_str)

    def checkPalindrome(self):
        """Locate the payload boundaries and the jjencode global variable.

        Returns a (startpos, endpos, gv, gvl) tuple where gv is the name
        of the global variable the encoder used and gvl is its length.
        """
        startpos = -1
        endpos = -1
        gv, gvl = -1, -1
        # The "palindrome" encoding variant starts with this marker.
        index = self.encoded_str.find('"\'\\"+\'+",')
        if index == 0:
            startpos = self.encoded_str.find('$$+"\\""+') + 8
            endpos = self.encoded_str.find('"\\"")())()')
            gv = self.encoded_str[index+9:self.encoded_str.find('=~[]')]
            gvl = len(gv)
        else:
            # Plain variant: the global variable is assigned up front.
            gv = self.encoded_str[0:self.encoded_str.find('=')]
            gvl = len(gv)
            startpos = self.encoded_str.find('"\\""+') + 5
            endpos = self.encoded_str.find('"\\"")())()')
        return (startpos, endpos, gv, gvl)

    def decode(self):
        """Decode the stored payload.

        Returns (0, decoded_text) on success, or (-1, error_message) when
        the input does not follow the expected jjencode structure.
        """
        self.clean()
        startpos, endpos, gv, gvl = self.checkPalindrome()
        if startpos == endpos:
            return (-1,'There is no data to decode')
        data = self.encoded_str[startpos:endpos]
        # b[i] is the jjencode token for the hex digit i.
        b = ['___+', '__$+', '_$_+', '_$$+', '$__+', '$_$+', '$$_+', '$$$+', '$___+', '$__$+', '$_$_+', '$_$$+', '$$__+', '$$_$+', '$$$_+', '$$$$+']
        # Tokens for the letters jjencode can emit directly.
        str_l = '(![]+"")[' + gv + '._$_]+'
        str_o = gv + '._$+'
        str_t = gv + '.__+'
        str_u = gv + '._+'
        str_hex = gv + '.'
        str_s = '"'
        gvsig = gv + '.'
        # Escaped characters inside string ("s") blocks.
        str_quote = '\\\\\\"'
        str_slash = '\\\\\\\\'
        str_lower = '\\\\"+'
        str_upper = '\\\\"+' + gv + '._+'
        str_end = '"+'
        out = ''
        # Repeatedly strip the recognized token from the front of `data`
        # and append the decoded character to `out`.
        while data != '':
            # l o t u
            if data.find(str_l) == 0:
                data = data[len(str_l):]
                out += 'l'
                continue
            elif data.find(str_o) == 0:
                data = data[len(str_o):]
                out += 'o'
                continue
            elif data.find(str_t) == 0:
                data = data[len(str_t):]
                out += 't'
                continue
            elif data.find(str_u) == 0:
                data = data[len(str_u):]
                out += 'u'
                continue
            # 0123456789abcdef
            if data.find(str_hex) == 0:
                data = data[len(str_hex):]
                for i in range(len(b)):
                    if data.find(b[i]) == 0:
                        data = data[len(b[i]):]
                        out += '%x' % i
                        break
                continue
            # start of s block
            if data.find(str_s) == 0:
                data = data[len(str_s):]
                # check if "R
                if data.find(str_upper) == 0: # r4 n >= 128
                    data = data[len(str_upper):] # skip sig
                    ch_str = ''
                    for i in range(2): # shouldn't be more than 2 hex chars
                        # gv + "."+b[ c ]
                        if data.find(gvsig) == 0:
                            data = data[len(gvsig):]
                            for k in range(len(b)): # for every entry in b
                                if data.find(b[k]) == 0:
                                    data = data[len(b[k]):]
                                    ch_str = '%x' % k
                                    break
                        else:
                            break
                    out += chr(int(ch_str, 16))
                    continue
                elif data.find(str_lower) == 0: # r3 check if "R // n < 128
                    data = data[len(str_lower):] # skip sig
                    ch_str = ''
                    ch_lotux = ''
                    temp = ''
                    b_checkR1 = 0
                    for j in range(3): # shouldn't be more than 3 octal chars
                        if j > 1: # lotu check
                            if data.find(str_l) == 0:
                                data = data[len(str_l):]
                                ch_lotux = 'l'
                                break
                            elif data.find(str_o) == 0:
                                data = data[len(str_o):]
                                ch_lotux = 'o'
                                break
                            elif data.find(str_t) == 0:
                                data = data[len(str_t):]
                                ch_lotux = 't'
                                break
                            elif data.find(str_u) == 0:
                                data = data[len(str_u):]
                                ch_lotux = 'u'
                                break
                        # gv + "."+b[ c ]
                        if data.find(gvsig) == 0:
                            temp = data[len(gvsig):]
                            for k in range(8): # for every entry in b octal
                                if temp.find(b[k]) == 0:
                                    if int(ch_str + str(k), 8) > 128:
                                        b_checkR1 = 1
                                        break
                                    ch_str += str(k)
                                    data = data[len(gvsig):] # skip gvsig
                                    data = data[len(b[k]):]
                                    break
                            if b_checkR1 == 1:
                                if data.find(str_hex) == 0: # 0123456789abcdef
                                    data = data[len(str_hex):]
                                    # check every element of hex decode string for a match
                                    for i in range(len(b)):
                                        if data.find(b[i]) == 0:
                                            data = data[len(b[i]):]
                                            ch_lotux = '%x' % i
                                            break
                                    break
                        else:
                            break
                    out += chr(int(ch_str,8)) + ch_lotux
                    continue
                else: # "S ----> "SR or "S+
                    # if there is, loop s until R 0r +
                    # if there is no matching s block, throw error
                    match = 0;
                    n = None
                    # searching for matching pure s block
                    while True:
                        n = ord(data[0])
                        if data.find(str_quote) == 0:
                            data = data[len(str_quote):]
                            out += '"'
                            match += 1
                            continue
                        elif data.find(str_slash) == 0:
                            data = data[len(str_slash):]
                            out += '\\'
                            match += 1
                            continue
                        elif data.find(str_end) == 0: # reached end off S block ? +
                            if match == 0:
                                return (-1,'+ No match S block')
                            data = data[len(str_end):]
                            break # step out of the while loop
                        elif data.find(str_upper) == 0: # r4 reached end off S block ? - check if "R n >= 128z
                            if match == 0:
                                return (-1,'No match S block n>128')
                            data = data[len(str_upper):] # skip sig
                            ch_str = ''
                            ch_lotux = ''
                            for j in range(10): # shouldn't be more than 10 hex chars
                                if j > 1: # lotu check
                                    if data.find(str_l) == 0:
                                        data = data[len(str_l):]
                                        ch_lotux = 'l'
                                        break
                                    elif data.find(str_o) == 0:
                                        data = data[len(str_o):]
                                        ch_lotux = 'o'
                                        break
                                    elif data.find(str_t) == 0:
                                        data = data[len(str_t):]
                                        ch_lotux = 't'
                                        break
                                    elif data.find(str_u) == 0:
                                        data = data[len(str_u):]
                                        ch_lotux = 'u'
                                        break
                                # gv + "."+b[ c ]
                                if data.find(gvsig) == 0:
                                    data = data[len(gvsig):] # skip gvsig
                                    for k in range(len(b)): # for every entry in b
                                        if data.find(b[k]) == 0:
                                            data = data[len(b[k]):]
                                            ch_str += '%x' % k
                                            break
                                else:
                                    break # done
                            out += chr(int(ch_str, 16))
                            break # step out of the while loop
                        elif data.find(str_lower) == 0: # r3 check if "R // n < 128
                            if match == 0:
                                return (-1,'No match S block n<128!!')
                            data = data[len(str_lower):] # skip sig
                            ch_str = ''
                            ch_lotux = ''
                            temp = ''
                            b_checkR1 = 0
                            for j in range(3): # shouldn't be more than 3 octal chars
                                if j > 1: # lotu check
                                    if data.find(str_l) == 0:
                                        data = data[len(str_l):]
                                        ch_lotux = 'l'
                                        break
                                    elif data.find(str_o) == 0:
                                        data = data[len(str_o):]
                                        ch_lotux = 'o'
                                        break
                                    elif data.find(str_t) == 0:
                                        data = data[len(str_t):]
                                        ch_lotux = 't'
                                        break
                                    elif data.find(str_u) == 0:
                                        data = data[len(str_u):]
                                        ch_lotux = 'u'
                                        break
                                # gv + "."+b[ c ]
                                if data.find(gvsig) == 0:
                                    temp = data[len(gvsig):]
                                    for k in range(8): # for every entry in b octal
                                        if temp.find(b[k]) == 0:
                                            if int(ch_str + str(k), 8) > 128:
                                                b_checkR1 = 1
                                                break
                                            ch_str += str(k)
                                            data = data[len(gvsig):] # skip gvsig
                                            data = data[len(b[k]):]
                                            break
                                    if b_checkR1 == 1:
                                        if data.find(str_hex) == 0: # 0123456789abcdef
                                            data = data[len(str_hex):]
                                            # check every element of hex decode string for a match
                                            for i in range(len(b)):
                                                if data.find(b[i]) == 0:
                                                    data = data[len(b[i]):]
                                                    ch_lotux = '%x' % i
                                                    break
                                            break
                                else:
                                    break
                            out += chr(int(ch_str, 8)) + ch_lotux
                            break # step out of the while loop
                        elif (0x21 <= n and n <= 0x2f) or (0x3A <= n and n <= 0x40) or ( 0x5b <= n and n <= 0x60 ) or ( 0x7b <= n and n <= 0x7f ):
                            # printable ASCII punctuation passes through as-is
                            out += data[0]
                            data = data[1:]
                            match += 1
                            continue
                        return (-1,'No match in the code!!')
                    # NOTE(review): this break ends decoding after the first
                    # pure-s block -- verify against the upstream jjdecode
                    # algorithm that no further data can follow here.
                    break
        return (0, out)
| |
# Copyright (c) 2015 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib import constants as lib_constants
from oslo_log import log as logging
from neutron.agent.l3 import dvr_local_router
from neutron.agent.l3 import dvr_snat_ns
from neutron.agent.l3 import router_info as router
from neutron.agent.linux import ip_lib
from neutron.agent.linux import iptables_manager
LOG = logging.getLogger(__name__)
class DvrEdgeRouter(dvr_local_router.DvrLocalRouter):
    """DVR router flavor that additionally hosts centralized SNAT.

    Extends DvrLocalRouter with management of the SNAT namespace, which
    exists only on the agent scheduled as the router's snat host (the
    host named by router['gw_port_host']).
    """

    def __init__(self, host, *args, **kwargs):
        super(DvrEdgeRouter, self).__init__(host, *args, **kwargs)
        # The wrapper object always exists; the namespace itself is only
        # created on the snat host (see _create_snat_namespace).
        self.snat_namespace = dvr_snat_ns.SnatNamespace(
            self.router_id, self.agent_conf, self.driver, self.use_ipv6)
        # None until _create_dvr_gateway runs; several methods use this as
        # a "no snat rules handled here" marker.
        self.snat_iptables_manager = None

    def get_gw_ns_name(self):
        # For edge routers the external gateway lives in the snat namespace.
        return self.snat_namespace.name

    def external_gateway_added(self, ex_gw_port, interface_name):
        """Plug the gateway; create the SNAT namespace when hosted here."""
        super(DvrEdgeRouter, self).external_gateway_added(
            ex_gw_port, interface_name)
        if self._is_this_snat_host():
            self._create_dvr_gateway(ex_gw_port, interface_name)
            # NOTE: When a router is created without a gateway the routes get
            # added to the router namespace, but if we wanted to populate
            # the same routes to the snat namespace after the gateway port
            # is added, we need to call routes_updated here.
            self.routes_updated([], self.router['routes'])
        elif self.snat_namespace.exists():
            # This is the case where the snat was moved manually or
            # rescheduled to a different agent when the agent was dead.
            LOG.debug("SNAT was moved or rescheduled to a different host "
                      "and does not match with the current host. This is "
                      "a stale namespace %s and will be cleared from the "
                      "current dvr_snat host.", self.snat_namespace.name)
            self.external_gateway_removed(ex_gw_port, interface_name)

    def external_gateway_updated(self, ex_gw_port, interface_name):
        """Refresh the gateway port, handling snat (re)scheduling."""
        if not self._is_this_snat_host():
            # no centralized SNAT gateway for this node/agent
            LOG.debug("not hosting snat for router: %s", self.router['id'])
            if self.snat_namespace.exists():
                # Namespace left behind from a previous scheduling.
                LOG.debug("SNAT was rescheduled to host %s. Clearing snat "
                          "namespace.", self.router.get('gw_port_host'))
                return self.external_gateway_removed(
                    ex_gw_port, interface_name)
            return
        if not self.snat_namespace.exists():
            # SNAT might be rescheduled to this agent; need to process like
            # newly created gateway
            return self.external_gateway_added(ex_gw_port, interface_name)
        else:
            self._external_gateway_added(ex_gw_port,
                                         interface_name,
                                         self.snat_namespace.name,
                                         preserve_ips=[])

    def _external_gateway_removed(self, ex_gw_port, interface_name):
        """Unplug the gateway device from the SNAT namespace if present."""
        super(DvrEdgeRouter, self).external_gateway_removed(ex_gw_port,
                                                            interface_name)
        if not self._is_this_snat_host() and not self.snat_namespace.exists():
            # no centralized SNAT gateway for this node/agent
            LOG.debug("not hosting snat for router: %s", self.router['id'])
            return
        self.driver.unplug(interface_name,
                           bridge=self.agent_conf.external_network_bridge,
                           namespace=self.snat_namespace.name,
                           prefix=router.EXTERNAL_DEV_PREFIX)

    def external_gateway_removed(self, ex_gw_port, interface_name):
        """Remove the gateway and tear down the SNAT namespace."""
        self._external_gateway_removed(ex_gw_port, interface_name)
        if self.snat_namespace.exists():
            self.snat_namespace.delete()

    def internal_network_added(self, port):
        """Plug the matching snat port when an internal network is added."""
        super(DvrEdgeRouter, self).internal_network_added(port)
        # TODO(gsagie) some of this checks are already implemented
        # in the base class, think how to avoid re-doing them
        if not self._is_this_snat_host():
            return
        sn_port = self.get_snat_port_for_internal_port(port)
        if not sn_port:
            return
        ns_name = dvr_snat_ns.SnatNamespace.get_snat_ns_name(self.router['id'])
        interface_name = self._get_snat_int_device_name(sn_port['id'])
        self._internal_network_added(
            ns_name,
            sn_port['network_id'],
            sn_port['id'],
            sn_port['fixed_ips'],
            sn_port['mac_address'],
            interface_name,
            lib_constants.SNAT_INT_DEV_PREFIX,
            mtu=sn_port.get('mtu'))

    def _dvr_internal_network_removed(self, port):
        """Unplug the snat-side device of a removed internal network."""
        super(DvrEdgeRouter, self)._dvr_internal_network_removed(port)
        if not self.ex_gw_port:
            return
        sn_port = self.get_snat_port_for_internal_port(port, self.snat_ports)
        if not sn_port:
            return
        if not self._is_this_snat_host():
            return
        snat_interface = self._get_snat_int_device_name(sn_port['id'])
        ns_name = self.snat_namespace.name
        prefix = lib_constants.SNAT_INT_DEV_PREFIX
        if ip_lib.device_exists(snat_interface, namespace=ns_name):
            self.driver.unplug(snat_interface, namespace=ns_name,
                               prefix=prefix)

    def _plug_snat_port(self, port):
        """Plug a single internal port into the SNAT namespace."""
        interface_name = self._get_snat_int_device_name(port['id'])
        self._internal_network_added(
            self.snat_namespace.name, port['network_id'],
            port['id'], port['fixed_ips'],
            port['mac_address'], interface_name,
            lib_constants.SNAT_INT_DEV_PREFIX,
            mtu=port.get('mtu'))

    def _create_dvr_gateway(self, ex_gw_port, gw_interface_name):
        """Create the SNAT namespace, plug its ports and set up iptables."""
        snat_ns = self._create_snat_namespace()
        # connect snat_ports to br_int from SNAT namespace
        for port in self.get_snat_interfaces():
            self._plug_snat_port(port)
        self._external_gateway_added(ex_gw_port, gw_interface_name,
                                     snat_ns.name, preserve_ips=[])
        self.snat_iptables_manager = iptables_manager.IptablesManager(
            namespace=snat_ns.name,
            use_ipv6=self.use_ipv6)
        self._initialize_address_scope_iptables(self.snat_iptables_manager)

    def _create_snat_namespace(self):
        """Create SNAT namespace."""
        # TODO(mlavalle): in the near future, this method should contain the
        # code in the L3 agent that creates a gateway for a dvr. The first step
        # is to move the creation of the snat namespace here
        self.snat_namespace.create()
        return self.snat_namespace

    def _get_snat_int_device_name(self, port_id):
        """Return the length-limited snat-side device name for a port id."""
        long_name = lib_constants.SNAT_INT_DEV_PREFIX + port_id
        return long_name[:self.driver.DEV_NAME_LEN]

    def _is_this_snat_host(self):
        """True when this agent's host is the router's scheduled snat host."""
        host = self.router.get('gw_port_host')
        if not host:
            LOG.debug("gw_port_host missing from router: %s",
                      self.router['id'])
        return host == self.host

    def _handle_router_snat_rules(self, ex_gw_port, interface_name):
        """Rebuild the SNAT iptables rules inside the snat namespace."""
        super(DvrEdgeRouter, self)._handle_router_snat_rules(
            ex_gw_port, interface_name)
        if not self._is_this_snat_host():
            return
        if not self.get_ex_gw_port():
            return
        if not self.snat_iptables_manager:
            LOG.debug("DVR router: no snat rules to be handled")
            return
        with self.snat_iptables_manager.defer_apply():
            self._empty_snat_chains(self.snat_iptables_manager)
            # NOTE: float-snat should be added for the
            # centralized floating-ips supported by the
            # snat namespace.
            self.snat_iptables_manager.ipv4['nat'].add_rule(
                'snat', '-j $float-snat')
            self._add_snat_rules(ex_gw_port, self.snat_iptables_manager,
                                 interface_name)

    def update_routing_table(self, operation, route):
        """Apply a static route in both the snat and router namespaces."""
        if self.get_ex_gw_port() and self._is_this_snat_host():
            ns_name = self.snat_namespace.name
            # NOTE: For now let us apply the static routes both in SNAT
            # namespace and Router Namespace, to reduce the complexity.
            if self.snat_namespace.exists():
                super(DvrEdgeRouter, self)._update_routing_table(
                    operation, route, namespace=ns_name)
            else:
                LOG.error("The SNAT namespace %s does not exist for "
                          "the router.", ns_name)
        super(DvrEdgeRouter, self).update_routing_table(operation, route)

    def delete(self):
        super(DvrEdgeRouter, self).delete()
        # Make sure the snat namespace is removed along with the router.
        if self.snat_namespace.exists():
            self.snat_namespace.delete()

    def process_address_scope(self):
        """Install address-scope marking rules in the snat namespace."""
        super(DvrEdgeRouter, self).process_address_scope()
        if not self._is_this_snat_host():
            return
        if not self.snat_iptables_manager:
            LOG.debug("DVR router: no snat rules to be handled")
            return
        # Prepare address scope iptables rule for dvr snat interfaces
        internal_ports = self.get_snat_interfaces()
        ports_scopemark = self._get_port_devicename_scopemark(
            internal_ports, self._get_snat_int_device_name)
        # Prepare address scope iptables rule for external port
        external_port = self.get_ex_gw_port()
        if external_port:
            external_port_scopemark = self._get_port_devicename_scopemark(
                [external_port], self.get_external_device_name)
            for ip_version in (lib_constants.IP_VERSION_4,
                               lib_constants.IP_VERSION_6):
                ports_scopemark[ip_version].update(
                    external_port_scopemark[ip_version])
        with self.snat_iptables_manager.defer_apply():
            self._add_address_scope_mark(
                self.snat_iptables_manager, ports_scopemark)

    def _delete_stale_external_devices(self, interface_name):
        """Unplug external devices other than interface_name from snat ns."""
        if not self.snat_namespace.exists():
            return
        ns_ip = ip_lib.IPWrapper(namespace=self.snat_namespace.name)
        for d in ns_ip.get_devices():
            if (d.name.startswith(router.EXTERNAL_DEV_PREFIX) and
                    d.name != interface_name):
                LOG.debug('Deleting stale external router device: %s', d.name)
                self.driver.unplug(
                    d.name,
                    bridge=self.agent_conf.external_network_bridge,
                    namespace=self.snat_namespace.name,
                    prefix=router.EXTERNAL_DEV_PREFIX)

    def get_snat_external_device_interface_name(self, ex_gw_port):
        """Return the length-limited external device name in the snat ns."""
        long_name = router.EXTERNAL_DEV_PREFIX + ex_gw_port['id']
        return long_name[:self.driver.DEV_NAME_LEN]

    def _get_centralized_fip_cidr_set(self):
        """Returns the fip_cidr set for centralized floatingips."""
        interface_name = self.get_snat_external_device_interface_name(
            self.get_ex_gw_port())
        device = ip_lib.IPDevice(
            interface_name, namespace=self.snat_namespace.name)
        return set([addr['cidr'] for addr in device.addr.list()])

    def get_router_cidrs(self, device):
        """Over-ride the get_router_cidrs function to return the list.

        This function is overridden to provide the complete list of
        floating_ip cidrs that the router hosts.
        This includes the centralized floatingip cidr list and the
        regular floatingip cidr list that are bound to fip namespace.
        """
        fip_cidrs = super(DvrEdgeRouter, self).get_router_cidrs(device)
        centralized_cidrs = set()
        # Call _get_centralized_fip_cidr only when snat_namespace exists
        if self.get_ex_gw_port() and self.snat_namespace.exists():
            centralized_cidrs = self._get_centralized_fip_cidr_set()
        existing_centralized_cidrs = self.centralized_floatingips_set
        return fip_cidrs | centralized_cidrs | existing_centralized_cidrs

    def remove_centralized_floatingip(self, fip_cidr):
        """Function to handle the centralized Floatingip remove."""
        if not self.get_ex_gw_port():
            return
        if not self._is_this_snat_host():
            return
        interface_name = self.get_snat_external_device_interface_name(
            self.get_ex_gw_port())
        device = ip_lib.IPDevice(
            interface_name, namespace=self.snat_namespace.name)
        device.delete_addr_and_conntrack_state(fip_cidr)
        self.process_floating_ip_nat_rules_for_centralized_floatingip()

    def add_centralized_floatingip(self, fip, fip_cidr):
        """Function to handle the centralized Floatingip addition."""
        if not self.get_ex_gw_port():
            return
        if not self._is_this_snat_host():
            return
        interface_name = self.get_snat_external_device_interface_name(
            self.get_ex_gw_port())
        device = ip_lib.IPDevice(
            interface_name, namespace=self.snat_namespace.name)
        try:
            device.addr.add(fip_cidr)
        except RuntimeError:
            LOG.warning("Unable to configure IP address for centralized "
                        "floating IP: %s", fip['id'])
            return lib_constants.FLOATINGIP_STATUS_ERROR
        self.process_floating_ip_nat_rules_for_centralized_floatingip()
        # Send a GARP message on the external interface for the
        # centralized floatingip configured.
        ip_lib.send_ip_addr_adv_notif(self.snat_namespace.name,
                                      interface_name,
                                      fip['floating_ip_address'])
        return lib_constants.FLOATINGIP_STATUS_ACTIVE

    def _centralized_floating_forward_rules(self, floating_ip, fixed_ip):
        """Return the DNAT/SNAT iptables rules for one centralized fip."""
        return [('PREROUTING', '-d %s/32 -j DNAT --to-destination %s' %
                 (floating_ip, fixed_ip)),
                ('OUTPUT', '-d %s/32 -j DNAT --to-destination %s' %
                 (floating_ip, fixed_ip)),
                ('float-snat', '-s %s/32 -j SNAT --to-source %s' %
                 (fixed_ip, floating_ip))]

    def _set_floating_ip_nat_rules_for_centralized_floatingip(self, fip):
        """Install the NAT rules for one snat-bound floating IP."""
        if fip.get(lib_constants.DVR_SNAT_BOUND):
            fixed = fip['fixed_ip_address']
            fip_ip = fip['floating_ip_address']
            for chain, rule in self._centralized_floating_forward_rules(
                    fip_ip, fixed):
                self.snat_iptables_manager.ipv4['nat'].add_rule(
                    chain, rule, tag='floating_ip')

    def process_floating_ip_nat_rules_for_centralized_floatingip(self):
        """Resync all centralized floating-IP NAT rules and apply them."""
        self.snat_iptables_manager.ipv4['nat'].clear_rules_by_tag(
            'floating_ip')
        floating_ips = self.get_floating_ips()
        for fip in floating_ips:
            self._set_floating_ip_nat_rules_for_centralized_floatingip(fip)
        self.snat_iptables_manager.apply()
| |
#!/usr/bin/env python3
import argparse as ap
import os
import sys
import re
# all the warnings GKLEE can emit as well as the program's name
# dict(string -> tuple of regular expressions)
# NOTE: the patterns below are non-raw strings; escapes like \( survive
# because Python passes unknown escape sequences through unchanged.
GKLEE = { "prog" : "GKLEE",
          "volatile" : (
              re.compile("These two threads access common memory location, it is "+
                         "better to set shared variables as volatile!"), ),
          "read write" : (
              re.compile("Across different warps, threads \\d+ and \\d+ incur a "+
                         "Read-Write race \(Actual\) on "),
              re.compile("Under the pure canonical schedule, thread \\d+ and \\d+ "+
                         "incur a Write-Read race \(Actual\) on "),
              re.compile("Within a warp, because of branch divergence, threads "+
                         "\\d+ and \\d+ incur a Read-Write race \(Actual\) on"),
              re.compile("One thread at BI \\d+ of Block \\d+ incurs a read-write "+
                         "race with the thread at BI \\d+ of Block \\d+"),
              re.compile("One thread at BI \\d+ of Block \\d+ incurs a write-read "+
                         "race with the thread at BI \\d+ of Block \\d+"),
              re.compile("Under pure canonical schedule, a read-write race is "
                         "found from BI \\d+ of the block \\d+") ),
          "benign read write" : (
              re.compile("incur a Write-Read race with the "+
                         "same value \(Benign\) on"), ),
          "write write" : (
              re.compile("Within a warp, threads \\d+ and \\d+ incur a "+
                         "Write-Write race \(Actual\) on "),
              re.compile("Across different warps, threads \\d+ and \\d+ incur a "+
                         "Write-Write race \(Actual\) on "),
              re.compile("Under the pure canonical schedule, within a block, "+
                         "thread \\d+ and \\d+ incur a Write-Write race \(Actual\) "+
                         "on "),
              re.compile("Within a warp, because of branch divergence, threads "+
                         "\\d+ and \\d+ incur a Write-Write race \(Actual\) on "),
              re.compile("Under pure canonical schedule, a write-write race is "+
                         "found from BI \\d+ of the block \\d+"),
              re.compile("One thread at BI \\d+ of Block \\d+ incurs a "+
                         "write-write race with the thread at BI \\d+ of Block "+
                         "\\d+") ),
          "benign write write" : (
              re.compile("Within a warp, threads \\d+ and \\d+ incur a "+
                         "Write-Write race with the same value \(Benign\) on "),
              re.compile("Across different warps, threads \\d+ and \\d+ incur a "+
                         "Write-Write race with same value\(Benign\) on "),
              re.compile("Across different warps, threads \\d+ and \\d+ incur a "+
                         "Write-Write race with same value \(Benign\) on "),
              re.compile("Under the pure canonical schedule, within a block, "+
                         "thread \\d+ and \\d+ incur a Write-Write race with the "+
                         "same value \(Benign\) on "),
              re.compile("Under the pure canonical schedule, across different "+
                         "blocks, thread \\d+ and \\d+ incur a Write-Write race "+
                         "with the same value \(Benign\) on "),
              re.compile("Within a warp, because of branch divergence, threads "+
                         "\\d+ and \\d+ incur a Write-Write race with the same "+
                         "value \(Benign\) on "),
              re.compile("incur a Write-Write race with the same value \(Benign\)"), ),
          "barrier" : (
              re.compile("violating the property that barriers have to be "+
                         "textually aligned"),
              re.compile("execution halts on a barrier mismatch") ),
          "bounds" : (
              re.compile("memory error: out of bound pointer"), ),
          }
# all the warnings GKLEEp can emit as well as the program's name
# dict string -> tuple of regualr expressions
GKLEEp = { "prog" : "GKLEEp",
           "volatile" : (
               re.compile("so 'volatile' qualifier required"), ),
           "read write" : (
               re.compile("incur the \(Actual\) read-write race"), ),
           # NOTE(review): this pattern says "write-write" although the key is
           # "benign read write" -- looks copied; confirm against GKLEEp output.
           "benign read write" : (
               re.compile("incur the \(Benign\) write-write race"), ),
           "write write" : (
               re.compile("incur the \(Actual\) write-write race"), ),
           "benign write write" : (), # no benign write write from GKLEEp ?
           "barrier" : (
               re.compile(" encounter different barrier sequences"),
               re.compile("violating the property that barriers have to be "+
                          "textually aligned"),
               re.compile("execution halts on a barrier mismatch") ),
           "bounds" : (
               re.compile("memory error: out of bound pointer"), ),
           }
def read_expected(directory, gklee_dict):
    """
    Reads the expected.txt file and extracts the warnings that GKLEE and
    co should produce. A KeyError is raised on unknown warning names.
    * directory : path (string) of the directory containing expected.txt
    * gklee_dict : a dict from warning name to tuple of regular
          expressions for the warning
    returns : a list of tuples containing the warning name to tuples of the
          regular expressions
    """
    # BUG FIX: the parameter used to be named 'gklee_tanslator' while the
    # body referenced 'gklee_dict'; that only worked by accident because
    # the caller's loop variable was a module-level global of that name.
    # os.path.join also makes the path work with or without a trailing
    # separator, instead of the old 'directory + "expected.txt"' concat.
    with open(os.path.join(directory, "expected.txt")) as e:
        lines = [line.strip() for line in e if line.strip() != ""]
    return [(line, gklee_dict[line]) for line in lines if line != "prog"]
def generate_not_expected(expected, gklee_dict):
    """
    Generates the inverse of the expected list.
    * expected : a list of tuples which contain a warning name and a tuple of
          regular expressions for the warnings
    * gklee_dict : a dict from warning name to tuple of regular
          expressions for the warning
    returns : a list of tuples containing the warning name to tuples of the
          regular expressions
    """
    # every known warning name minus the ones that were expected
    expected_names = {name for name, _ in expected}
    remaining = set(gklee_dict) - expected_names
    # "prog" is metadata, not a warning category
    return [(name, gklee_dict[name]) for name in remaining if name != "prog"]
def read_actual(directory, gklee_dict):
    """
    Reads in the log file for the given Gklee* implementation.
    * directory : path (string) of the directory containing the log file
    * gklee_dict : a dict from warning name to tuple of regular
          expressions for the warning; its "prog" entry names the tool,
          so GKLEE reads "gklee_log.txt" and GKLEEp "gkleep_log.txt"
    returns : the text of the file
    """
    # os.path.join tolerates paths with or without a trailing separator,
    # unlike the previous 'directory + filename' concatenation (kept
    # consistent with read_expected).
    log_name = gklee_dict["prog"].lower() + "_log.txt"
    with open(os.path.join(directory, log_name)) as a:
        return a.read()
CORE_DUMP_RE = re.compile("Segmentation fault")
def verify(expected, actual, gklee_dict):
    """
    Verifies that the output of Gklee* matches the expected output.
    * expected : a list of tuples which contain a warning name and a tuple of
          regular expressions for the warnings
    * actual : the plaintext of Gklee*'s output
    returns : nothing
    """
    passed = True
    # a core dump is reported but (as before) does not mark the test failed
    if CORE_DUMP_RE.search(actual) is not None:
        print("FATAL ERROR IN {} CORE DUMPED".format(gklee_dict["prog"]))
    for name, regexes in expected:
        # a warning counts as emitted when any of its patterns matched
        if all(reg.search(actual) is None for reg in regexes):
            passed = False
            print("Omission by {} : expected {}".format(gklee_dict["prog"],
                                                        name))
    for name, regexes in generate_not_expected(expected, gklee_dict):
        # any match for a warning that was not expected is a false alarm
        if any(reg.search(actual) is not None for reg in regexes):
            passed = False
            print("False alarm by {} : recieved {}".format(gklee_dict["prog"],
                                                           name))
    if passed:
        print("Test passed by {}".format(gklee_dict["prog"]))
if __name__ == "__main__":
    parser = ap.ArgumentParser(description="Verifies if the output of GKLEE "+
                               "and GKLEEp match the expected values")
    parser.add_argument("directory",
                        nargs='?',
                        help="the directory where the test was ran,"+
                        " defaults to current",
                        default="./")
    args = parser.parse_args()
    if not os.path.isdir(args.directory):
        print("ERROR: {} is not a directory".format(args.directory))
        sys.exit(-1)
    # Check each implementation's log against the shared expected.txt.
    for gklee_dict in [GKLEE, GKLEEp]:
        expected = read_expected(args.directory, gklee_dict)
        actual = read_actual(args.directory, gklee_dict)
        verify(expected, actual, gklee_dict)
| |
# -*- coding: utf-8 -*-
"""
test_invalid_headers.py
~~~~~~~~~~~~~~~~~~~~~~~
This module contains tests that use invalid header blocks, and validates that
they fail appropriately.
"""
import itertools
import pytest
import h2.config
import h2.connection
import h2.errors
import h2.events
import h2.exceptions
import h2.settings
import h2.utilities
import hyperframe.frame
from hypothesis import given
from hypothesis.strategies import binary, lists, tuples
HEADERS_STRATEGY = lists(tuples(binary(min_size=1), binary()))
class TestInvalidFrameSequences(object):
"""
Invalid header sequences cause ProtocolErrors to be thrown when received.
"""
# A well-formed HTTP/2 request header block used as the starting point
# for every invalid variation below.
base_request_headers = [
    (':authority', 'example.com'),
    (':path', '/'),
    (':scheme', 'https'),
    (':method', 'GET'),
    ('user-agent', 'someua/0.0.1'),
]
# Each entry is the valid base block with exactly one violation added:
# an uppercase name, a misplaced/duplicate pseudo-header, a
# connection-specific header, surrounding whitespace, a missing
# :authority, or an unexpected :protocol pseudo-header.
invalid_header_blocks = [
    base_request_headers + [('Uppercase', 'name')],
    base_request_headers + [(':late', 'pseudo-header')],
    [(':path', 'duplicate-pseudo-header')] + base_request_headers,
    base_request_headers + [('connection', 'close')],
    base_request_headers + [('proxy-connection', 'close')],
    base_request_headers + [('keep-alive', 'close')],
    base_request_headers + [('transfer-encoding', 'gzip')],
    base_request_headers + [('upgrade', 'super-protocol/1.1')],
    base_request_headers + [('te', 'chunked')],
    base_request_headers + [('host', 'notexample.com')],
    base_request_headers + [(' name', 'name with leading space')],
    base_request_headers + [('name ', 'name with trailing space')],
    base_request_headers + [('name', ' value with leading space')],
    base_request_headers + [('name', 'value with trailing space ')],
    [header for header in base_request_headers
     if header[0] != ':authority'],
    [(':protocol', 'websocket')] + base_request_headers,
]
# Server-side connection configuration shared by the tests below.
server_config = h2.config.H2Configuration(
    client_side=False, header_encoding='utf-8'
)
@pytest.mark.parametrize('headers', invalid_header_blocks)
def test_headers_event(self, frame_factory, headers):
    """
    An invalid request header block on a HEADERS frame is rejected with a
    connection-level PROTOCOL_ERROR and a matching GOAWAY frame is sent.
    """
    conn = h2.connection.H2Connection(config=self.server_config)
    conn.receive_data(frame_factory.preamble())
    conn.clear_outbound_data_buffer()
    wire_data = frame_factory.build_headers_frame(headers).serialize()
    with pytest.raises(h2.exceptions.ProtocolError):
        conn.receive_data(wire_data)
    goaway = frame_factory.build_goaway_frame(
        last_stream_id=1, error_code=h2.errors.ErrorCodes.PROTOCOL_ERROR
    )
    assert conn.data_to_send() == goaway.serialize()
@pytest.mark.parametrize('headers', invalid_header_blocks)
def test_push_promise_event(self, frame_factory, headers):
"""
If a PUSH_PROMISE header frame is received with an invalid header block
it is rejected with a PROTOCOL_ERROR.
"""
c = h2.connection.H2Connection()
c.initiate_connection()
c.send_headers(
stream_id=1, headers=self.base_request_headers, end_stream=True
)
c.clear_outbound_data_buffer()
f = frame_factory.build_push_promise_frame(
stream_id=1,
promised_stream_id=2,
headers=headers
)
data = f.serialize()
with pytest.raises(h2.exceptions.ProtocolError):
c.receive_data(data)
expected_frame = frame_factory.build_goaway_frame(
last_stream_id=0, error_code=h2.errors.ErrorCodes.PROTOCOL_ERROR
)
assert c.data_to_send() == expected_frame.serialize()
@pytest.mark.parametrize('headers', invalid_header_blocks)
def test_push_promise_skipping_validation(self, frame_factory, headers):
"""
If we have ``validate_inbound_headers`` disabled, then invalid header
blocks in push promise frames are allowed to pass.
"""
config = h2.config.H2Configuration(
client_side=True,
validate_inbound_headers=False,
header_encoding='utf-8'
)
c = h2.connection.H2Connection(config=config)
c.initiate_connection()
c.send_headers(
stream_id=1, headers=self.base_request_headers, end_stream=True
)
c.clear_outbound_data_buffer()
f = frame_factory.build_push_promise_frame(
stream_id=1,
promised_stream_id=2,
headers=headers
)
data = f.serialize()
events = c.receive_data(data)
assert len(events) == 1
pp_event = events[0]
assert pp_event.headers == headers
@pytest.mark.parametrize('headers', invalid_header_blocks)
def test_headers_event_skipping_validation(self, frame_factory, headers):
"""
If we have ``validate_inbound_headers`` disabled, then all of these
invalid header blocks are allowed to pass.
"""
config = h2.config.H2Configuration(
client_side=False,
validate_inbound_headers=False,
header_encoding='utf-8'
)
c = h2.connection.H2Connection(config=config)
c.receive_data(frame_factory.preamble())
f = frame_factory.build_headers_frame(headers)
data = f.serialize()
events = c.receive_data(data)
assert len(events) == 1
request_event = events[0]
assert request_event.headers == headers
def test_te_trailers_is_valid(self, frame_factory):
"""
`te: trailers` is allowed by the filter.
"""
headers = (
self.base_request_headers + [('te', 'trailers')]
)
c = h2.connection.H2Connection(config=self.server_config)
c.receive_data(frame_factory.preamble())
f = frame_factory.build_headers_frame(headers)
data = f.serialize()
events = c.receive_data(data)
assert len(events) == 1
request_event = events[0]
assert request_event.headers == headers
def test_pseudo_headers_rejected_in_trailer(self, frame_factory):
"""
Ensure we reject pseudo headers included in trailers
"""
trailers = [(':path', '/'), ('extra', 'value')]
c = h2.connection.H2Connection(config=self.server_config)
c.receive_data(frame_factory.preamble())
c.clear_outbound_data_buffer()
header_frame = frame_factory.build_headers_frame(
self.base_request_headers
)
trailer_frame = frame_factory.build_headers_frame(
trailers, flags=["END_STREAM"]
)
head = header_frame.serialize()
trailer = trailer_frame.serialize()
c.receive_data(head)
# Raise exception if pseudo header in trailer
with pytest.raises(h2.exceptions.ProtocolError) as e:
c.receive_data(trailer)
assert "pseudo-header in trailer" in str(e.value)
# Test appropriate response frame is generated
expected_frame = frame_factory.build_goaway_frame(
last_stream_id=1, error_code=h2.errors.ErrorCodes.PROTOCOL_ERROR
)
assert c.data_to_send() == expected_frame.serialize()
class TestSendingInvalidFrameSequences(object):
    """
    Trying to send invalid header sequences cause ProtocolErrors to
    be thrown.
    """
    # A complete, valid GET request header block used as the baseline.
    base_request_headers = [
        (':authority', 'example.com'),
        (':path', '/'),
        (':scheme', 'https'),
        (':method', 'GET'),
        ('user-agent', 'someua/0.0.1'),
    ]
    # Blocks that must be rejected outright on the send path.
    invalid_header_blocks = [
        base_request_headers + [(':late', 'pseudo-header')],
        [(':path', 'duplicate-pseudo-header')] + base_request_headers,
        base_request_headers + [('te', 'chunked')],
        base_request_headers + [('host', 'notexample.com')],
        [header for header in base_request_headers
         if header[0] != ':authority'],
    ]
    # Connection-specific headers that are silently stripped (not rejected)
    # during outbound normalization.
    strippable_header_blocks = [
        base_request_headers + [('connection', 'close')],
        base_request_headers + [('proxy-connection', 'close')],
        base_request_headers + [('keep-alive', 'close')],
        base_request_headers + [('transfer-encoding', 'gzip')],
        base_request_headers + [('upgrade', 'super-protocol/1.1')]
    ]
    all_header_blocks = invalid_header_blocks + strippable_header_blocks
    server_config = h2.config.H2Configuration(client_side=False)
    @pytest.mark.parametrize('headers', invalid_header_blocks)
    def test_headers_event(self, frame_factory, headers):
        """
        Test sending invalid headers raise a ProtocolError.
        """
        c = h2.connection.H2Connection()
        c.initiate_connection()
        # Clear the data, then try to send headers.
        c.clear_outbound_data_buffer()
        with pytest.raises(h2.exceptions.ProtocolError):
            c.send_headers(1, headers)
    @pytest.mark.parametrize('headers', invalid_header_blocks)
    def test_send_push_promise(self, frame_factory, headers):
        """
        Sending invalid headers in a push promise raises a ProtocolError.
        """
        c = h2.connection.H2Connection(config=self.server_config)
        c.initiate_connection()
        c.receive_data(frame_factory.preamble())
        header_frame = frame_factory.build_headers_frame(
            self.base_request_headers
        )
        c.receive_data(header_frame.serialize())
        # Clear the data, then try to send a push promise.
        c.clear_outbound_data_buffer()
        with pytest.raises(h2.exceptions.ProtocolError):
            c.push_stream(
                stream_id=1, promised_stream_id=2, request_headers=headers
            )
    @pytest.mark.parametrize('headers', all_header_blocks)
    def test_headers_event_skipping_validation(self, frame_factory, headers):
        """
        If we have ``validate_outbound_headers`` disabled, then all of these
        invalid header blocks are allowed to pass.
        """
        config = h2.config.H2Configuration(
            validate_outbound_headers=False
        )
        c = h2.connection.H2Connection(config=config)
        c.initiate_connection()
        # Clear the data, then send headers.
        c.clear_outbound_data_buffer()
        c.send_headers(1, headers)
        # Ensure headers are still normalized.
        norm_headers = h2.utilities.normalize_outbound_headers(headers, None)
        f = frame_factory.build_headers_frame(norm_headers)
        assert c.data_to_send() == f.serialize()
    @pytest.mark.parametrize('headers', all_header_blocks)
    def test_push_promise_skipping_validation(self, frame_factory, headers):
        """
        If we have ``validate_outbound_headers`` disabled, then all of these
        invalid header blocks are allowed to pass.
        """
        config = h2.config.H2Configuration(
            client_side=False,
            validate_outbound_headers=False,
        )
        c = h2.connection.H2Connection(config=config)
        c.initiate_connection()
        c.receive_data(frame_factory.preamble())
        header_frame = frame_factory.build_headers_frame(
            self.base_request_headers
        )
        c.receive_data(header_frame.serialize())
        # Create push promise frame with normalized headers.
        frame_factory.refresh_encoder()
        norm_headers = h2.utilities.normalize_outbound_headers(headers, None)
        pp_frame = frame_factory.build_push_promise_frame(
            stream_id=1, promised_stream_id=2, headers=norm_headers
        )
        # Clear the data, then send a push promise.
        c.clear_outbound_data_buffer()
        c.push_stream(
            stream_id=1, promised_stream_id=2, request_headers=headers
        )
        assert c.data_to_send() == pp_frame.serialize()
    @pytest.mark.parametrize('headers', all_header_blocks)
    def test_headers_event_skip_normalization(self, frame_factory, headers):
        """
        If we have ``normalize_outbound_headers`` disabled, then all of these
        invalid header blocks are sent through unmodified.
        """
        config = h2.config.H2Configuration(
            validate_outbound_headers=False,
            normalize_outbound_headers=False
        )
        c = h2.connection.H2Connection(config=config)
        c.initiate_connection()
        f = frame_factory.build_headers_frame(
            headers,
            stream_id=1,
        )
        # Clear the data, then send headers.
        c.clear_outbound_data_buffer()
        c.send_headers(1, headers)
        assert c.data_to_send() == f.serialize()
    @pytest.mark.parametrize('headers', all_header_blocks)
    def test_push_promise_skip_normalization(self, frame_factory, headers):
        """
        If we have ``normalize_outbound_headers`` disabled, then all of these
        invalid header blocks are allowed to pass unmodified.
        """
        config = h2.config.H2Configuration(
            client_side=False,
            validate_outbound_headers=False,
            normalize_outbound_headers=False,
        )
        c = h2.connection.H2Connection(config=config)
        c.initiate_connection()
        c.receive_data(frame_factory.preamble())
        header_frame = frame_factory.build_headers_frame(
            self.base_request_headers
        )
        c.receive_data(header_frame.serialize())
        frame_factory.refresh_encoder()
        pp_frame = frame_factory.build_push_promise_frame(
            stream_id=1, promised_stream_id=2, headers=headers
        )
        # Clear the data, then send a push promise.
        c.clear_outbound_data_buffer()
        c.push_stream(
            stream_id=1, promised_stream_id=2, request_headers=headers
        )
        assert c.data_to_send() == pp_frame.serialize()
    @pytest.mark.parametrize('headers', strippable_header_blocks)
    def test_strippable_headers(self, frame_factory, headers):
        """
        Test connection related headers are removed before sending.
        """
        c = h2.connection.H2Connection()
        c.initiate_connection()
        # Clear the data, then try to send headers.
        c.clear_outbound_data_buffer()
        c.send_headers(1, headers)
        f = frame_factory.build_headers_frame(self.base_request_headers)
        assert c.data_to_send() == f.serialize()
class TestFilter(object):
    """
    Test the filter function directly.
    These tests exists to confirm the behaviour of the filter function in a
    wide range of scenarios. Many of these scenarios may not be legal for
    HTTP/2 and so may never hit the function, but it's worth validating that it
    behaves as expected anyway.
    """
    validation_functions = [
        h2.utilities.validate_headers,
        h2.utilities.validate_outbound_headers
    ]
    # Every combination of the four HeaderValidationFlags booleans (2**4 = 16).
    hdr_validation_combos = [
        h2.utilities.HeaderValidationFlags(
            is_client, is_trailer, is_response_header, is_push_promise
        )
        for is_client, is_trailer, is_response_header, is_push_promise in (
            itertools.product([True, False], repeat=4)
        )
    ]
    hdr_validation_response_headers = [
        flags for flags in hdr_validation_combos
        if flags.is_response_header
    ]
    hdr_validation_request_headers_no_trailer = [
        flags for flags in hdr_validation_combos
        if not (flags.is_trailer or flags.is_response_header)
    ]
    invalid_request_header_blocks_bytes = (
        # First, missing :method
        (
            (b':authority', b'google.com'),
            (b':path', b'/'),
            (b':scheme', b'https'),
        ),
        # Next, missing :path
        (
            (b':authority', b'google.com'),
            (b':method', b'GET'),
            (b':scheme', b'https'),
        ),
        # Next, missing :scheme
        (
            (b':authority', b'google.com'),
            (b':method', b'GET'),
            (b':path', b'/'),
        ),
        # Finally, path present but empty.
        (
            (b':authority', b'google.com'),
            (b':method', b'GET'),
            (b':scheme', b'https'),
            (b':path', b''),
        ),
    )
    invalid_request_header_blocks_unicode = (
        # First, missing :method
        (
            (':authority', 'google.com'),
            (':path', '/'),
            (':scheme', 'https'),
        ),
        # Next, missing :path
        (
            (':authority', 'google.com'),
            (':method', 'GET'),
            (':scheme', 'https'),
        ),
        # Next, missing :scheme
        (
            (':authority', 'google.com'),
            (':method', 'GET'),
            (':path', '/'),
        ),
        # Finally, path present but empty.
        (
            (':authority', 'google.com'),
            (':method', 'GET'),
            (':scheme', 'https'),
            (':path', ''),
        ),
    )
    # All headers that are forbidden from either request or response blocks.
    forbidden_request_headers_bytes = (b':status',)
    forbidden_request_headers_unicode = (':status',)
    forbidden_response_headers_bytes = (
        b':path', b':scheme', b':authority', b':method'
    )
    forbidden_response_headers_unicode = (
        ':path', ':scheme', ':authority', ':method'
    )
    @pytest.mark.parametrize('validation_function', validation_functions)
    @pytest.mark.parametrize('hdr_validation_flags', hdr_validation_combos)
    @given(headers=HEADERS_STRATEGY)
    def test_range_of_acceptable_outputs(self,
                                         headers,
                                         validation_function,
                                         hdr_validation_flags):
        """
        The header validation functions either return the data unchanged
        or throw a ProtocolError.
        """
        try:
            assert headers == list(validation_function(
                headers, hdr_validation_flags))
        except h2.exceptions.ProtocolError:
            # Rejecting the block is also an acceptable outcome.
            assert True
    @pytest.mark.parametrize('hdr_validation_flags', hdr_validation_combos)
    def test_invalid_pseudo_headers(self, hdr_validation_flags):
        headers = [(b':custom', b'value')]
        with pytest.raises(h2.exceptions.ProtocolError):
            list(h2.utilities.validate_headers(headers, hdr_validation_flags))
    @pytest.mark.parametrize('validation_function', validation_functions)
    @pytest.mark.parametrize(
        'hdr_validation_flags', hdr_validation_request_headers_no_trailer
    )
    def test_matching_authority_host_headers(self,
                                             validation_function,
                                             hdr_validation_flags):
        """
        If a header block has :authority and Host headers and they match,
        the headers should pass through unchanged.
        """
        headers = [
            (b':authority', b'example.com'),
            (b':path', b'/'),
            (b':scheme', b'https'),
            (b':method', b'GET'),
            (b'host', b'example.com'),
        ]
        assert headers == list(h2.utilities.validate_headers(
            headers, hdr_validation_flags
        ))
    @pytest.mark.parametrize(
        'hdr_validation_flags', hdr_validation_response_headers
    )
    def test_response_header_without_status(self, hdr_validation_flags):
        headers = [(b'content-length', b'42')]
        with pytest.raises(h2.exceptions.ProtocolError):
            list(h2.utilities.validate_headers(headers, hdr_validation_flags))
    @pytest.mark.parametrize(
        'hdr_validation_flags', hdr_validation_request_headers_no_trailer
    )
    @pytest.mark.parametrize(
        'header_block',
        (
            invalid_request_header_blocks_bytes +
            invalid_request_header_blocks_unicode
        )
    )
    def test_outbound_req_header_missing_pseudo_headers(self,
                                                        hdr_validation_flags,
                                                        header_block):
        with pytest.raises(h2.exceptions.ProtocolError):
            list(
                h2.utilities.validate_outbound_headers(
                    header_block, hdr_validation_flags
                )
            )
    @pytest.mark.parametrize(
        'hdr_validation_flags', hdr_validation_request_headers_no_trailer
    )
    @pytest.mark.parametrize(
        'header_block', invalid_request_header_blocks_bytes
    )
    def test_inbound_req_header_missing_pseudo_headers(self,
                                                       hdr_validation_flags,
                                                       header_block):
        with pytest.raises(h2.exceptions.ProtocolError):
            list(
                h2.utilities.validate_headers(
                    header_block, hdr_validation_flags
                )
            )
    @pytest.mark.parametrize(
        'hdr_validation_flags', hdr_validation_request_headers_no_trailer
    )
    @pytest.mark.parametrize(
        'invalid_header',
        forbidden_request_headers_bytes + forbidden_request_headers_unicode
    )
    def test_outbound_req_header_extra_pseudo_headers(self,
                                                      hdr_validation_flags,
                                                      invalid_header):
        """
        Outbound request header blocks containing the forbidden request headers
        fail validation.
        """
        headers = [
            (b':path', b'/'),
            (b':scheme', b'https'),
            (b':authority', b'google.com'),
            (b':method', b'GET'),
        ]
        headers.append((invalid_header, b'some value'))
        with pytest.raises(h2.exceptions.ProtocolError):
            list(
                h2.utilities.validate_outbound_headers(
                    headers, hdr_validation_flags
                )
            )
    @pytest.mark.parametrize(
        'hdr_validation_flags', hdr_validation_request_headers_no_trailer
    )
    @pytest.mark.parametrize(
        'invalid_header',
        forbidden_request_headers_bytes
    )
    def test_inbound_req_header_extra_pseudo_headers(self,
                                                     hdr_validation_flags,
                                                     invalid_header):
        """
        Inbound request header blocks containing the forbidden request headers
        fail validation.
        """
        headers = [
            (b':path', b'/'),
            (b':scheme', b'https'),
            (b':authority', b'google.com'),
            (b':method', b'GET'),
        ]
        headers.append((invalid_header, b'some value'))
        with pytest.raises(h2.exceptions.ProtocolError):
            list(h2.utilities.validate_headers(headers, hdr_validation_flags))
    @pytest.mark.parametrize(
        'hdr_validation_flags', hdr_validation_response_headers
    )
    @pytest.mark.parametrize(
        'invalid_header',
        forbidden_response_headers_bytes + forbidden_response_headers_unicode
    )
    def test_outbound_resp_header_extra_pseudo_headers(self,
                                                       hdr_validation_flags,
                                                       invalid_header):
        """
        Outbound response header blocks containing the forbidden response
        headers fail validation.
        """
        headers = [(b':status', b'200')]
        headers.append((invalid_header, b'some value'))
        with pytest.raises(h2.exceptions.ProtocolError):
            list(
                h2.utilities.validate_outbound_headers(
                    headers, hdr_validation_flags
                )
            )
    @pytest.mark.parametrize(
        'hdr_validation_flags', hdr_validation_response_headers
    )
    @pytest.mark.parametrize(
        'invalid_header',
        forbidden_response_headers_bytes
    )
    def test_inbound_resp_header_extra_pseudo_headers(self,
                                                      hdr_validation_flags,
                                                      invalid_header):
        """
        Inbound response header blocks containing the forbidden response
        headers fail validation.
        """
        headers = [(b':status', b'200')]
        headers.append((invalid_header, b'some value'))
        with pytest.raises(h2.exceptions.ProtocolError):
            list(h2.utilities.validate_headers(headers, hdr_validation_flags))
    @pytest.mark.parametrize('hdr_validation_flags', hdr_validation_combos)
    def test_inbound_header_name_length(self, hdr_validation_flags):
        # Zero-length header names are never acceptable.
        with pytest.raises(h2.exceptions.ProtocolError):
            list(h2.utilities.validate_headers([(b'', b'foobar')], hdr_validation_flags))
    def test_inbound_header_name_length_full_frame_decode(self, frame_factory):
        # Hand-crafted HPACK payload containing a zero-length header name.
        f = frame_factory.build_headers_frame([])
        f.data = b"\x00\x00\x05\x00\x00\x00\x00\x04"
        data = f.serialize()
        c = h2.connection.H2Connection(config=h2.config.H2Configuration(client_side=False))
        c.initiate_connection()
        c.receive_data(frame_factory.preamble())
        c.clear_outbound_data_buffer()
        with pytest.raises(h2.exceptions.ProtocolError, match="Received header name with zero length."):
            c.receive_data(data)
class TestOversizedHeaders(object):
    """
    Tests that oversized header blocks are correctly rejected. This replicates
    the "HPACK Bomb" attack, and confirms that we're resistant against it.
    """
    request_header_block = [
        (b':method', b'GET'),
        (b':authority', b'example.com'),
        (b':scheme', b'https'),
        (b':path', b'/'),
    ]
    response_header_block = [
        (b':status', b'200'),
    ]
    # The first header block contains a single header that fills the header
    # table. To do that, we'll give it a single-character header name and a
    # 4063 byte header value. This will make it exactly the size of the header
    # table. It must come last, so that it evicts all other headers.
    # This block must be appended to either a request or response block.
    first_header_block = [
        (b'a', b'a' * 4063),
    ]
    # The second header "block" is actually a custom HEADERS frame body that
    # simply repeatedly refers to the first entry for 16kB. Each byte has the
    # high bit set (0x80), and then uses the remaining 7 bits to encode the
    # number 62 (0x3e), leading to a repeat of the byte 0xbe.
    second_header_block = b'\xbe' * 2**14
    server_config = h2.config.H2Configuration(client_side=False)
    def test_hpack_bomb_request(self, frame_factory):
        """
        A HPACK bomb request causes the connection to be torn down with the
        error code ENHANCE_YOUR_CALM.
        """
        c = h2.connection.H2Connection(config=self.server_config)
        c.receive_data(frame_factory.preamble())
        c.clear_outbound_data_buffer()
        f = frame_factory.build_headers_frame(
            self.request_header_block + self.first_header_block
        )
        data = f.serialize()
        c.receive_data(data)
        # Build the attack payload.
        attack_frame = hyperframe.frame.HeadersFrame(stream_id=3)
        attack_frame.data = self.second_header_block
        attack_frame.flags.add('END_HEADERS')
        data = attack_frame.serialize()
        with pytest.raises(h2.exceptions.DenialOfServiceError):
            c.receive_data(data)
        expected_frame = frame_factory.build_goaway_frame(
            last_stream_id=1, error_code=h2.errors.ErrorCodes.ENHANCE_YOUR_CALM
        )
        assert c.data_to_send() == expected_frame.serialize()
    def test_hpack_bomb_response(self, frame_factory):
        """
        A HPACK bomb response causes the connection to be torn down with the
        error code ENHANCE_YOUR_CALM.
        """
        c = h2.connection.H2Connection()
        c.initiate_connection()
        c.send_headers(
            stream_id=1, headers=self.request_header_block
        )
        c.send_headers(
            stream_id=3, headers=self.request_header_block
        )
        c.clear_outbound_data_buffer()
        f = frame_factory.build_headers_frame(
            self.response_header_block + self.first_header_block
        )
        data = f.serialize()
        c.receive_data(data)
        # Build the attack payload.
        attack_frame = hyperframe.frame.HeadersFrame(stream_id=3)
        attack_frame.data = self.second_header_block
        attack_frame.flags.add('END_HEADERS')
        data = attack_frame.serialize()
        with pytest.raises(h2.exceptions.DenialOfServiceError):
            c.receive_data(data)
        expected_frame = frame_factory.build_goaway_frame(
            last_stream_id=0, error_code=h2.errors.ErrorCodes.ENHANCE_YOUR_CALM
        )
        assert c.data_to_send() == expected_frame.serialize()
    def test_hpack_bomb_push(self, frame_factory):
        """
        A HPACK bomb push causes the connection to be torn down with the
        error code ENHANCE_YOUR_CALM.
        """
        c = h2.connection.H2Connection()
        c.initiate_connection()
        c.send_headers(
            stream_id=1, headers=self.request_header_block
        )
        c.clear_outbound_data_buffer()
        f = frame_factory.build_headers_frame(
            self.response_header_block + self.first_header_block
        )
        data = f.serialize()
        c.receive_data(data)
        # Build the attack payload. We need to shrink it by four bytes because
        # the promised_stream_id consumes four bytes of body.
        attack_frame = hyperframe.frame.PushPromiseFrame(stream_id=3)
        attack_frame.promised_stream_id = 2
        attack_frame.data = self.second_header_block[:-4]
        attack_frame.flags.add('END_HEADERS')
        data = attack_frame.serialize()
        with pytest.raises(h2.exceptions.DenialOfServiceError):
            c.receive_data(data)
        expected_frame = frame_factory.build_goaway_frame(
            last_stream_id=0, error_code=h2.errors.ErrorCodes.ENHANCE_YOUR_CALM
        )
        assert c.data_to_send() == expected_frame.serialize()
    def test_reject_headers_when_list_size_shrunk(self, frame_factory):
        """
        When we've shrunk the header list size, we reject new header blocks
        that violate the new size.
        """
        c = h2.connection.H2Connection(config=self.server_config)
        c.receive_data(frame_factory.preamble())
        c.clear_outbound_data_buffer()
        # Receive the first request, which causes no problem.
        f = frame_factory.build_headers_frame(
            stream_id=1,
            headers=self.request_header_block
        )
        data = f.serialize()
        c.receive_data(data)
        # Now, send a settings change. It's un-ACKed at this time. A new
        # request arrives, also without incident.
        c.update_settings({h2.settings.SettingCodes.MAX_HEADER_LIST_SIZE: 50})
        c.clear_outbound_data_buffer()
        f = frame_factory.build_headers_frame(
            stream_id=3,
            headers=self.request_header_block
        )
        data = f.serialize()
        c.receive_data(data)
        # We get a SETTINGS ACK.
        f = frame_factory.build_settings_frame({}, ack=True)
        data = f.serialize()
        c.receive_data(data)
        # Now a third request comes in. This explodes.
        f = frame_factory.build_headers_frame(
            stream_id=5,
            headers=self.request_header_block
        )
        data = f.serialize()
        with pytest.raises(h2.exceptions.DenialOfServiceError):
            c.receive_data(data)
        expected_frame = frame_factory.build_goaway_frame(
            last_stream_id=3, error_code=h2.errors.ErrorCodes.ENHANCE_YOUR_CALM
        )
        assert c.data_to_send() == expected_frame.serialize()
    def test_reject_headers_when_table_size_shrunk(self, frame_factory):
        """
        When we've shrunk the header table size, we reject header blocks that
        do not respect the change.
        """
        c = h2.connection.H2Connection(config=self.server_config)
        c.receive_data(frame_factory.preamble())
        c.clear_outbound_data_buffer()
        # Receive the first request, which causes no problem.
        f = frame_factory.build_headers_frame(
            stream_id=1,
            headers=self.request_header_block
        )
        data = f.serialize()
        c.receive_data(data)
        # Now, send a settings change. It's un-ACKed at this time. A new
        # request arrives, also without incident.
        c.update_settings({h2.settings.SettingCodes.HEADER_TABLE_SIZE: 128})
        c.clear_outbound_data_buffer()
        f = frame_factory.build_headers_frame(
            stream_id=3,
            headers=self.request_header_block
        )
        data = f.serialize()
        c.receive_data(data)
        # We get a SETTINGS ACK.
        f = frame_factory.build_settings_frame({}, ack=True)
        data = f.serialize()
        c.receive_data(data)
        # Now a third request comes in. This explodes, as it does not contain
        # a dynamic table size update.
        f = frame_factory.build_headers_frame(
            stream_id=5,
            headers=self.request_header_block
        )
        data = f.serialize()
        with pytest.raises(h2.exceptions.ProtocolError):
            c.receive_data(data)
        expected_frame = frame_factory.build_goaway_frame(
            last_stream_id=3, error_code=h2.errors.ErrorCodes.PROTOCOL_ERROR
        )
        assert c.data_to_send() == expected_frame.serialize()
    def test_reject_headers_exceeding_table_size(self, frame_factory):
        """
        When the remote peer sends a dynamic table size update that exceeds our
        setting, we reject it.
        """
        c = h2.connection.H2Connection(config=self.server_config)
        c.receive_data(frame_factory.preamble())
        c.clear_outbound_data_buffer()
        # Receive the first request, which causes no problem.
        f = frame_factory.build_headers_frame(
            stream_id=1,
            headers=self.request_header_block
        )
        data = f.serialize()
        c.receive_data(data)
        # Now a second request comes in that sets the table size too high.
        # This explodes.
        frame_factory.change_table_size(c.local_settings.header_table_size + 1)
        f = frame_factory.build_headers_frame(
            stream_id=5,
            headers=self.request_header_block
        )
        data = f.serialize()
        with pytest.raises(h2.exceptions.ProtocolError):
            c.receive_data(data)
        expected_frame = frame_factory.build_goaway_frame(
            last_stream_id=1, error_code=h2.errors.ErrorCodes.PROTOCOL_ERROR
        )
        assert c.data_to_send() == expected_frame.serialize()
| |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import os
import sys
import urllib2
from timmy import tools
from timmy import conf
from timmy.nodes import NodeManager as BaseNodeManager
from timmy.nodes import Node as BaseNode
# Probe for a usable python-fuelclient. Only fuel <= 9.0.1 exposes the
# Client class we can drive directly; newer releases (which expose
# ``fuelclient.connect``) and any probing failure fall back to the Fuel
# API / CLI paths, signalled by ``FuelClient = None``.
try:
    import fuelclient
    if hasattr(fuelclient, 'connect'):
        # fuel > 9.0.1 - drop support, use API and CLI instead
        FuelClient = None
    else:
        import fuelclient.client
        if type(fuelclient.client.APIClient) is fuelclient.client.Client:
            # fuel 9.0.1 and below
            from fuelclient.client import Client as FuelClient
        else:
            FuelClient = None
except Exception:
    # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit are
    # not swallowed; covers ImportError and any fuelclient-internal errors.
    FuelClient = None
try:
    # Silence fuelclient's own logger; best-effort, ignore if unavailable.
    from fuelclient.client import logger
    logger.handlers = []
except Exception:
    pass
def add_args(parser):
    """Register the Fuel plugin's command line options on *parser*.

    Returns the same parser so calls can be chained.
    """
    # The first four options are plain string flags; declare them as data.
    string_flags = (
        ('--fuel-ip', 'fuel ip address'),
        ('--fuel-user', 'fuel username'),
        ('--fuel-pass', 'fuel password'),
        ('--fuel-token', 'fuel auth token'),
    )
    for flag, description in string_flags:
        parser.add_argument(flag, help=description)
    parser.add_argument('--fuel-logs-no-remote', action='store_true',
                        help='Do not collect remote logs from Fuel.')
    parser.add_argument('--fuel-proxy',
                        help='use os system proxy variables for fuelclient',
                        action='store_true')
    parser.add_argument('-j', '--nodes-json',
                        help=('Path to a json file retrieved via'
                              ' "fuel node --json". Useful to speed up'
                              ' initialization, skips "fuel node" call.'))
    return parser
def prepare_args():
    """Build a standalone ArgumentParser carrying the Fuel plugin options."""
    import argparse
    return add_args(argparse.ArgumentParser())
def check_args(args, conf):
    """Copy Fuel-related CLI overrides from *args* into the *conf* dict.

    Only truthy values override; ``--fuel-token`` additionally disables the
    fuelclient library path, and ``--fuel-proxy`` re-enables proxy variables.
    """
    # Straight attribute-to-key copies, applied only when set.
    for attr in ('fuel_ip', 'fuel_user', 'fuel_pass'):
        value = getattr(args, attr)
        if value:
            conf[attr] = value
    if args.fuel_proxy:
        conf['fuel_skip_proxy'] = False
    if args.fuel_token:
        conf['fuel_api_token'] = args.fuel_token
        # A raw token cannot be fed to fuelclient; force the API path.
        conf['fuelclient'] = False
    if args.fuel_logs_no_remote:
        conf['fuel_logs_no_remote'] = True
def add_conf(conf):
    """Install the Fuel plugin's default settings into *conf* and return it."""
    conf.update({
        'fuel_ip': '127.0.0.1',
        'fuel_api_user': 'admin',
        'fuel_api_pass': 'admin',
        'fuel_api_token': None,
        'fuel_api_tenant': 'admin',
        'fuel_api_port': '8000',
        'fuel_api_keystone_port': '5000',
        # The three parameters below are used to override FuelClient, API,
        # CLI auth
        'fuel_user': None,
        'fuel_pass': None,
        'fuel_tenant': None,
        'fuelclient': True,  # use fuelclient library by default
        'fuel_skip_proxy': True,
        'fuel_logs_remote_dir': ['/var/log/docker-logs/remote',
                                 '/var/log/remote'],
        'fuel_logs_no_remote': False,  # do not collect /var/log/remote
        # Do not collect from /var/log/remote/<node> if node is in the array
        # of nodes filtered out by soft filter
        'fuel_logs_exclude_filtered': True,
    })
    return conf
class Node(BaseNode):
    """timmy Node extended with Fuel/MOS-specific discovery helpers."""

    def get_release(self):
        """Return the MOS release string for this node, or 'n/a'.

        The Fuel master (id 0) stores the release in
        /etc/nailgun/version.yaml; other nodes expose it as ``fuel_version``
        in /etc/astute.yaml.
        """
        if self.id == 0:
            cmd = ("awk -F ':' '/release/ {print $2}' "
                   "/etc/nailgun/version.yaml")
        else:
            cmd = ("awk -F ':' '/fuel_version/ {print $2}' "
                   "/etc/astute.yaml")
        release, err, code = tools.ssh_node(ip=self.ip,
                                            command=cmd,
                                            ssh_opts=self.ssh_opts,
                                            timeout=self.timeout,
                                            prefix=self.prefix)
        if code != 0:
            self.logger.warning('%s: could not determine'
                                ' MOS release' % self.repr)
            release = 'n/a'
        else:
            release = release.strip('\n "\'')
        self.logger.info('%s, MOS release: %s' %
                         (self.repr, release))
        return release

    def get_roles_hiera(self):
        """Ask hiera on the node for its roles.

        Returns the parsed (primary- prefix trimmed) role list on success,
        otherwise falls back to the roles already known on the object.
        """
        def trim_primary(roles):
            # 'primary-controller' and 'controller' should be treated alike.
            trim_roles = [r for r in roles if not r.startswith('primary-')]
            trim_roles += [r[8:] for r in roles if r.startswith('primary-')]
            return trim_roles

        self.logger.debug('%s: roles not defined, trying hiera' % self.repr)
        cmd = 'hiera roles'
        outs, errs, code = tools.ssh_node(ip=self.ip,
                                          command=cmd,
                                          ssh_opts=self.ssh_opts,
                                          env_vars=self.env_vars,
                                          timeout=self.timeout,
                                          prefix=self.prefix)
        self.check_code(code, 'get_roles_hiera', cmd, errs, [0])
        if code == 0:
            try:
                roles = trim_primary(json.loads(outs))
            except ValueError:
                # Narrowed from a bare ``except:``; json.loads signals bad
                # input with ValueError (JSONDecodeError is a subclass).
                self.logger.warning("%s: failed to parse '%s' output as JSON" %
                                    (self.repr, cmd))
                return self.roles
            self.logger.debug('%s: got roles: %s' % (self.repr, roles))
            if roles is not None:
                return roles
            else:
                return self.roles
        else:
            self.logger.warning("%s: failed to load roles via hiera" %
                                self.repr)
            # BUG FIX: this branch previously evaluated ``self.roles``
            # without returning it, so callers got None on hiera failure.
            return self.roles

    def get_cluster_id(self):
        """Read the cluster id from /etc/astute.yaml; None when unreadable."""
        self.logger.debug('%s: cluster id not defined, trying to determine' %
                          self.repr)
        astute_file = '/etc/astute.yaml'
        cmd = ("python -c 'import yaml; a = yaml.load(open(\"%s\")"
               ".read()); print a[\"cluster\"][\"id\"]'" % astute_file)
        outs, errs, code = tools.ssh_node(ip=self.ip,
                                          command=cmd,
                                          ssh_opts=self.ssh_opts,
                                          env_vars=self.env_vars,
                                          timeout=self.timeout,
                                          prefix=self.prefix)
        return int(outs.rstrip('\n')) if code == 0 else None

    def log_item_manipulate(self, item):
        """Extend a log-collection *item* with Fuel-specific exclusions.

        On the Fuel master this skips remote log dirs entirely when
        ``fuel_logs_no_remote`` is set, and always skips remote logs of
        soft-filtered nodes.
        """
        if self.fuel_logs_no_remote and 'fuel' in self.roles:
            self.logger.debug('adding Fuel remote logs to exclude list')
            if 'exclude' not in item:
                item['exclude'] = []
            for remote_dir in self.fuel_logs_remote_dir:
                item['exclude'].append(remote_dir)
        if 'fuel' in self.roles:
            for n in self.logs_excluded_nodes:
                self.logger.debug('removing remote logs for node:%s' % n)
                if 'exclude' not in item:
                    item['exclude'] = []
                for remote_dir in self.fuel_logs_remote_dir:
                    ipd = os.path.join(remote_dir, n)
                    item['exclude'].append(ipd)
class NodeManager(BaseNodeManager):
    """Discovers Fuel environment nodes and drives per-node data collection.

    Node inventory is obtained from, in order of preference: a user-supplied
    JSON file, python-fuelclient, the Nailgun REST API, or the Fuel CLI on
    the master node.
    """

    @staticmethod
    def load_conf(filename):
        """Build the effective config: defaults, tool additions, file overrides."""
        config = conf.init_default_conf()
        config = add_conf(config)
        config = conf.update_conf(config, filename)
        return config

    def __init__(self, conf, nodes_json=None, logger=None):
        self.base_init(conf, logger)
        self.token = self.conf['fuel_api_token']
        fuelnode = self.fuel_init()
        self.logs_excluded_nodes = []
        if FuelClient and conf['fuelclient']:
            # save os environment variables
            # FIX: take a snapshot copy; binding 'environ = os.environ' and
            # assigning it back later restores nothing, because os.environ is
            # mutated in place below.
            environ = dict(os.environ)
            try:
                if self.conf['fuel_skip_proxy']:
                    os.environ['HTTPS_PROXY'] = ''
                    os.environ['HTTP_PROXY'] = ''
                    os.environ['https_proxy'] = ''
                    os.environ['http_proxy'] = ''
                self.logger.info('Setup fuelclient instance')
                self.fuelclient = FuelClient()
                if self.conf['fuel_user']:
                    self.fuelclient.username = self.conf['fuel_user']
                if self.conf['fuel_pass']:
                    self.fuelclient.password = self.conf['fuel_pass']
                if self.conf['fuel_tenant']:
                    self.fuelclient.tenant_name = self.conf['fuel_tenant']
                # self.fuelclient.debug_mode(True)
            except Exception as e:
                self.logger.info('Failed to setup fuelclient instance:%s' % e,
                                 exc_info=True)
                self.fuelclient = None
            # Restore the environment exactly as it was before proxy tweaks.
            os.environ.clear()
            os.environ.update(environ)
        else:
            self.logger.info('Skipping setup fuelclient instance')
            self.fuelclient = None
        if nodes_json:
            self.nodes_json = tools.load_json_file(nodes_json)
        else:
            # Try each discovery backend in turn; give up if none works.
            if (not self.get_nodes_fuelclient() and
                    not self.get_nodes_api() and
                    not self.get_nodes_cli()):
                sys.exit(105)
        self.nodes_init(Node)
        # get release information for all nodes
        if fuelnode.accessible:
            self.get_release()
        self.post_init()
        fuelnode.logs_excluded_nodes = self.logs_excluded_nodes

    def fuel_init(self):
        """Create the pseudo-node representing the Fuel master and register it."""
        if not self.conf['fuel_ip']:
            self.logger.critical('NodeManager: fuel_ip not set')
            sys.exit(106)
        fuelnode = Node(id=0,
                        cluster=0,
                        name='fuel',
                        fqdn='n/a',
                        mac='n/a',
                        os_platform='centos',
                        roles=['fuel'],
                        status='ready',
                        online=True,
                        ip=self.conf['fuel_ip'],
                        conf=self.conf)
        fuelnode.cluster_repr = ""
        fuelnode.repr = "fuel"
        # soft-skip Fuel if it is hard-filtered
        if not self.filter(fuelnode, self.conf['hard_filter']):
            fuelnode.skipped = True
        self.nodes[self.conf['fuel_ip']] = fuelnode
        return fuelnode

    def apply_soft_filter(self):
        """Mark nodes not matching the soft filter as skipped (not removed)."""
        # apply soft-filter on all nodes
        for node in self.nodes.values():
            if not self.filter(node, self.conf['soft_filter']):
                node.skipped = True
                if self.conf['fuel_logs_exclude_filtered']:
                    if node.fqdn:
                        self.logs_excluded_nodes.append(node.fqdn)
                    self.logs_excluded_nodes.append(node.ip)

    def get_release(self):
        """Populate node release info via fuelclient, API, or CLI fallback."""
        if (not self.get_release_fuel_client() and
                not self.get_release_api() and
                not self.get_release_cli()):
            self.logger.warning('could not get Fuel and MOS versions')

    def get_nodes_fuelclient(self):
        """Fetch the node list via python-fuelclient. Returns True on success."""
        if not self.fuelclient:
            return False
        try:
            self.logger.info('using fuelclient to get nodes json')
            self.nodes_json = self.fuelclient.get_request('nodes')
            return True
        except Exception as e:
            self.logger.warning(("NodeManager: can't "
                                 "get node list from fuel client:\n%s" % (e)),
                                exc_info=True)
            return False

    def get_release_api(self):
        """Fetch Fuel and cluster release info via the Nailgun REST API."""
        self.logger.info('getting release via API')
        version_json = self.get_api_request('version')
        if version_json:
            version = json.loads(version_json)
            fuel = self.nodes[self.conf['fuel_ip']]
            fuel.release = version['release']
        else:
            return False
        clusters_json = self.get_api_request('clusters')
        if clusters_json:
            clusters = json.loads(clusters_json)
            self.set_nodes_release(clusters)
            return True
        else:
            return False

    def get_release_fuel_client(self):
        """Fetch Fuel and cluster release info via python-fuelclient."""
        if not self.fuelclient:
            return False
        self.logger.info('getting release via fuelclient')
        try:
            v = self.fuelclient.get_request('version')
            fuel_version = v['release']
            self.logger.debug('version response:%s' % v)
            clusters = self.fuelclient.get_request('clusters')
            self.logger.debug('clusters response:%s' % clusters)
        except Exception:
            # Narrowed from a bare except so Ctrl-C / SystemExit propagate.
            self.logger.warning(("Can't get fuel version or "
                                 "clusters information"))
            return False
        self.nodes[self.conf['fuel_ip']].release = fuel_version
        self.set_nodes_release(clusters)
        return True

    def auth_token(self):
        '''Get keystone token to access Nailgun API. Requires Fuel 5+'''
        if self.token:
            return True
        self.logger.info('getting token for Nailgun')
        v2_body = ('{"auth": {"tenantName": "%s", "passwordCredentials": {'
                   '"username": "%s", "password": "%s"}}}')
        # v3 not fully implemented yet
        # v3_body = ('{ "auth": {'
        #            '  "scope": {'
        #            '    "project": {'
        #            '      "name": "%s",'
        #            '      "domain": { "id": "default" }'
        #            '    }'
        #            '  },'
        #            '  "identity": {'
        #            '    "methods": ["password"],'
        #            '    "password": {'
        #            '      "user": {'
        #            '        "name": "%s",'
        #            '        "domain": { "id": "default" },'
        #            '        "password": "%s"'
        #            '      }'
        #            '    }'
        #            '  }'
        #            '}}')
        # Sticking to v2 API for now because Fuel 9.1 has a custom
        # domain_id defined in keystone.conf which we do not know.
        # Prefer explicit fuel_* credentials, fall back to fuel_api_*.
        args = {'user': None, 'pass': None, 'tenant': None}
        for a in args:
            if self.conf['fuel_%s' % a]:
                args[a] = self.conf['fuel_%s' % a]
            else:
                args[a] = self.conf['fuel_api_%s' % a]
        req_data = v2_body % (args['tenant'], args['user'], args['pass'])
        req = urllib2.Request("http://%s:%s/v2.0/tokens" %
                              (self.conf['fuel_ip'],
                               self.conf['fuel_api_keystone_port']), req_data,
                              {'Content-Type': 'application/json'})
        try:
            # Disabling v3 token retrieval for now
            # token = urllib2.urlopen(req).info().getheader('X-Subject-Token')
            result = urllib2.urlopen(req)
            resp_body = result.read()
            resp_json = json.loads(resp_body)
            token = resp_json['access']['token']['id']
            self.token = token
            return True
        except Exception:
            # Narrowed from a bare except; auth failure is reported as False.
            return False

    def get_api_request(self, request):
        """GET an authenticated Nailgun API resource.

        Returns the response body on HTTP 200, otherwise None (best-effort).
        """
        if self.auth_token():
            url = "http://%s:%s/api/%s" % (self.conf['fuel_ip'],
                                           self.conf['fuel_api_port'],
                                           request)
            req = urllib2.Request(url, None, {'X-Auth-Token': self.token})
            try:
                result = urllib2.urlopen(req)
                code = result.getcode()
                if code == 200:
                    return result.read()
                else:
                    self.logger.error('NodeManager: cannot get API response'
                                      ' from %s, code %s' % (url, code))
            except Exception:
                # Best-effort by design (callers fall back); log for debugging
                # instead of silently swallowing everything.
                self.logger.debug('NodeManager: API request to %s failed' % url,
                                  exc_info=True)

    def get_nodes_api(self):
        """Fetch the node list via the Nailgun REST API. True on success."""
        self.logger.info('using API to get nodes json')
        nodes_json = self.get_api_request('nodes')
        if nodes_json:
            self.nodes_json = json.loads(nodes_json)
            return True
        else:
            return False

    def get_nodes_cli(self):
        """Fetch the node list by running the fuel CLI on the master node."""
        self.logger.info('using CLI to get nodes json')
        fuelnode = self.nodes[self.conf['fuel_ip']]
        o_auth = n_auth = ''
        entropy = bool(self.conf['fuel_user']) + bool(self.conf['fuel_pass'])
        if entropy == 2:
            # auth for Fuel up to 8.0
            o_auth = '--user %s --password %s' % (self.conf['fuel_user'],
                                                  self.conf['fuel_pass'])
            # Fuel 9.0+
            n_auth = 'OS_USERNAME=%s OS_PASSWORD=%s' % (self.conf['fuel_user'],
                                                        self.conf['fuel_pass'])
        elif entropy == 1:
            self.logger.warning('Must specify both fuel_user and fuel_pass')
        # Try the new-style (env var) invocation first, then the old-style.
        cmd = 'bash -c "%s fuel node --json"' % n_auth
        nodes_json, err, code = tools.ssh_node(ip=fuelnode.ip,
                                               command=cmd,
                                               ssh_opts=fuelnode.ssh_opts,
                                               timeout=fuelnode.timeout,
                                               prefix=fuelnode.prefix)
        if code != 0:
            self.logger.warning(('NodeManager: cannot get fuel node list from'
                                 ' CLI, will fallback. Error: %s') % err)
            cmd = 'bash -c "fuel %s node --json"' % o_auth
            nodes_json, err, code = tools.ssh_node(ip=fuelnode.ip,
                                                   command=cmd,
                                                   ssh_opts=fuelnode.ssh_opts,
                                                   timeout=fuelnode.timeout,
                                                   prefix=fuelnode.prefix)
            if code != 0:
                self.logger.warning(('NodeManager: cannot get '
                                     'fuel node list from CLI: %s') % err)
                self.nodes_json = None
                return False
        self.nodes_json = json.loads(nodes_json)
        return True

    def get_release_cli(self):
        """Ask each selected node for its release in parallel. True on success."""
        run_items = []
        for key, node in self.selected_nodes.items():
            run_items.append(tools.RunItem(target=node.get_release,
                                           key=key))
        result = tools.run_batch(run_items, 100, dict_result=True)
        if result:
            for key in result:
                self.nodes[key].release = result[key]
            return True
        else:
            return False

    def nodes_init_fallbacks(self):
        """Fill in roles, OS and cluster ids that inventory did not provide."""
        self.nodes_get_roles_hiera()
        self.nodes_get_os()
        self.nodes_get_cluster_ids()

    def nodes_get_roles_hiera(self, maxthreads=100):
        """Query hiera (in parallel) for roles of nodes that have none yet."""
        run_items = []
        for key, node in self.selected_nodes.items():
            # 'discover' nodes are not provisioned yet; hiera won't help there.
            if node.status != 'discover' and not node.roles:
                run_items.append(tools.RunItem(target=node.get_roles_hiera,
                                               key=key))
        result = tools.run_batch(run_items, maxthreads, dict_result=True)
        for key in result:
            if result[key]:
                self.nodes[key].roles = result[key]

    def nodes_get_cluster_ids(self, maxthreads=100):
        """Query astute.yaml (in parallel) for missing cluster ids."""
        self.logger.debug('getting cluster ids from nodes')
        run_items = []
        for key, node in self.selected_nodes.items():
            if not node.cluster:
                run_items.append(tools.RunItem(target=node.get_cluster_id,
                                               key=key))
        result = tools.run_batch(run_items, maxthreads, dict_result=True)
        for key in result:
            if result[key] is not None:
                self.nodes[key].cluster = result[key]

    def set_nodes_release(self, clusters):
        """Assign each node the fuel_version of its cluster from clusters data."""
        cldict = {}
        for cluster in clusters:
            cldict[cluster['id']] = cluster
        if cldict:
            for node in self.nodes.values():
                if node.cluster:
                    node.release = cldict[node.cluster]['fuel_version']
                else:
                    # set to n/a or may be fuel_version
                    if node.id != 0:
                        node.release = 'n/a'
                self.logger.info('%s: release: %s' % (node.repr, node.release))
| |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Neural Network Gaussian Process (nngp) kernel computation.
Implementation based on
"Deep Neural Networks as Gaussian Processes" by
Jaehoon Lee, Yasaman Bahri, Roman Novak, Samuel S. Schoenholz,
Jeffrey Pennington, Jascha Sohl-Dickstein
arXiv:1711.00165 (https://arxiv.org/abs/1711.00165).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import multiprocessing
import os
import numpy as np
import tensorflow as tf
import interp
# Module-level command-line flags (TF1 tf.app.flags style).
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_boolean("use_precomputed_grid", True,
                     "Option to save/load pre-computed grid")
# Large covariance computations are split so each batch holds fewer than
# int32.max / fraction_of_int32 elements (see NNGPKernel.k_full).
flags.DEFINE_integer(
    "fraction_of_int32", 32,
    "allow batches at most of size int32.max / fraction_of_int32")
class NNGPKernel(object):
    """The iterative covariance Kernel for Neural Network Gaussian Process.

    Args:
      depth: int, number of hidden layers in corresponding NN.
      nonlin_fn: tf ops corresponding to point-wise non-linearity in
        corresponding NN. e.g.) tf.nn.relu, tf.nn.sigmoid,
        lambda x: x * tf.nn.sigmoid(x), ...
      weight_var: initial value for the weight_variances parameter.
      bias_var: initial value for the bias_variance parameter.
      n_gauss: Number of gaussian integration grid. Choose odd integer, so
        that there is a gridpoint at 0.
      n_var: Number of variance grid points.
      n_corr: Number of correlation grid points.
      use_fixed_point_norm: bool, normalize input to variance fixed point.
        Defaults to False, normalizing input to unit norm over input
        dimension.
    """

    def __init__(self,
                 depth=1,
                 nonlin_fn=tf.tanh,
                 weight_var=1.,
                 bias_var=1.,
                 n_gauss=101,
                 n_var=151,
                 n_corr=131,
                 max_var=100,
                 max_gauss=100,
                 use_fixed_point_norm=False,
                 grid_path=None,
                 sess=None):
        self.depth = depth
        self.weight_var = weight_var
        self.bias_var = bias_var
        self.use_fixed_point_norm = use_fixed_point_norm
        self.sess = sess
        if FLAGS.use_precomputed_grid and (grid_path is None):
            raise ValueError("grid_path must be specified to use precomputed grid.")
        self.grid_path = grid_path
        self.nonlin_fn = nonlin_fn
        # Interpolation tables mapping pre-activation (variance, correlation)
        # to post-activation (qaa, qab); see _compute_qmap_grid.
        (self.var_aa_grid, self.corr_ab_grid, self.qaa_grid,
         self.qab_grid) = self.get_grid(n_gauss, n_var, n_corr, max_var, max_gauss)
        if self.use_fixed_point_norm:
            self.var_fixed_point_np, self.var_fixed_point = self.get_var_fixed_point()

    def get_grid(self, n_gauss, n_var, n_corr, max_var, max_gauss):
        """Get covariance grid by loading or computing a new one.
        """
        # File configuration for precomputed grid
        if FLAGS.use_precomputed_grid:
            grid_path = self.grid_path
            # TODO(jaehlee) np.save have broadcasting error when n_var==n_corr.
            if n_var == n_corr:
                n_var += 1
            grid_file_name = "grid_{0:s}_ng{1:d}_ns{2:d}_nc{3:d}".format(
                self.nonlin_fn.__name__, n_gauss, n_var, n_corr)
            grid_file_name += "_mv{0:d}_mg{1:d}".format(max_var, max_gauss)
        # Load grid file if it exists already
        if (FLAGS.use_precomputed_grid and
                tf.gfile.Exists(os.path.join(grid_path, grid_file_name))):
            with tf.gfile.Open(os.path.join(grid_path, grid_file_name), "rb") as f:
                grid_data_np = np.load(f)
            tf.logging.info("Loaded interpolation grid from %s" %
                            os.path.join(grid_path, grid_file_name))
            grid_data = (tf.convert_to_tensor(grid_data_np[0], dtype=tf.float64),
                         tf.convert_to_tensor(grid_data_np[1], dtype=tf.float64),
                         tf.convert_to_tensor(grid_data_np[2], dtype=tf.float64),
                         tf.convert_to_tensor(grid_data_np[3], dtype=tf.float64))
        else:
            tf.logging.info("Generating interpolation grid...")
            grid_data = _compute_qmap_grid(self.nonlin_fn, n_gauss, n_var, n_corr,
                                           max_var=max_var, max_gauss=max_gauss)
            if FLAGS.use_precomputed_grid:
                # Materialize, persist, then reload so the cached file is the
                # single source of truth for later runs.
                with tf.Session() as sess:
                    grid_data_np = sess.run(grid_data)
                tf.gfile.MakeDirs(grid_path)
                with tf.gfile.Open(os.path.join(grid_path, grid_file_name), "wb") as f:
                    np.save(f, grid_data_np)
                with tf.gfile.Open(os.path.join(grid_path, grid_file_name), "rb") as f:
                    grid_data_np = np.load(f)
                tf.logging.info("Loaded interpolation grid from %s" %
                                os.path.join(grid_path, grid_file_name))
                grid_data = (tf.convert_to_tensor(grid_data_np[0], dtype=tf.float64),
                             tf.convert_to_tensor(grid_data_np[1], dtype=tf.float64),
                             tf.convert_to_tensor(grid_data_np[2], dtype=tf.float64),
                             tf.convert_to_tensor(grid_data_np[3], dtype=tf.float64))
        return grid_data

    def get_var_fixed_point(self):
        """Iterate the variance map to (approximate) convergence.

        Returns:
          (fixed point as numpy value, fixed point as tf tensor).
        """
        with tf.name_scope("get_var_fixed_point"):
            # If normalized input length starts at 1.
            current_qaa = self.weight_var * tf.constant(
                [1.], dtype=tf.float64) + self.bias_var
            diff = 1.
            prev_qaa_np = 1.
            it = 0
            while diff > 1e-6 and it < 300:
                samp_qaa = interp.interp_lin(
                    self.var_aa_grid, self.qaa_grid, current_qaa)
                samp_qaa = self.weight_var * samp_qaa + self.bias_var
                current_qaa = samp_qaa
                with tf.Session() as sess:
                    current_qaa_np = sess.run(current_qaa)
                diff = np.abs(current_qaa_np - prev_qaa_np)
                it += 1
                prev_qaa_np = current_qaa_np
            return current_qaa_np, current_qaa

    def k_diag(self, input_x, return_full=True):
        """Iteratively building the diagonal part (variance) of the NNGP kernel.

        Args:
          input_x: tensor of input of size [num_data, input_dim].
          return_full: boolean for output to be [num_data] sized or a scalar
            value for normalized inputs

        Sets self.layer_qaa_dict of {layer #: qaa at the layer}

        Returns:
          qaa: variance at the output.
        """
        with tf.name_scope("Kdiag"):
            # If normalized input length starts at 1.
            if self.use_fixed_point_norm:
                current_qaa = self.var_fixed_point
            else:
                current_qaa = self.weight_var * tf.convert_to_tensor(
                    [1.], dtype=tf.float64) + self.bias_var
            self.layer_qaa_dict = {0: current_qaa}
            # FIX: range instead of Python-2-only xrange (same iteration).
            for l in range(self.depth):
                with tf.name_scope("layer_%d" % l):
                    samp_qaa = interp.interp_lin(
                        self.var_aa_grid, self.qaa_grid, current_qaa)
                    samp_qaa = self.weight_var * samp_qaa + self.bias_var
                    self.layer_qaa_dict[l + 1] = samp_qaa
                    current_qaa = samp_qaa
            if return_full:
                qaa = tf.tile(current_qaa[:1], ([input_x.shape[0].value]))
            else:
                qaa = current_qaa[0]
            return qaa

    def k_full(self, input1, input2=None):
        """Iteratively building the full NNGP kernel.
        """
        input1 = self._input_layer_normalization(input1)
        if input2 is None:
            input2 = input1
        else:
            input2 = self._input_layer_normalization(input2)
        with tf.name_scope("k_full"):
            cov_init = tf.matmul(
                input1, input2, transpose_b=True) / input1.shape[1].value
            self.k_diag(input1)
            q_aa_init = self.layer_qaa_dict[0]
            q_ab = cov_init
            q_ab = self.weight_var * q_ab + self.bias_var
            corr = q_ab / q_aa_init[0]
            if FLAGS.fraction_of_int32 > 1:
                # Process the correlation matrix in row batches to keep each
                # flattened batch under the int32 size limit.
                batch_size, batch_count = self._get_batch_size_and_count(input1, input2)
                with tf.name_scope("q_ab"):
                    q_ab_all = []
                    for b_x in range(batch_count):
                        with tf.name_scope("batch_%d" % b_x):
                            corr_flat_batch = corr[
                                batch_size * b_x : batch_size * (b_x + 1), :]
                            corr_flat_batch = tf.reshape(corr_flat_batch, [-1])
                            for l in range(self.depth):
                                with tf.name_scope("layer_%d" % l):
                                    q_aa = self.layer_qaa_dict[l]
                                    q_ab = interp.interp_lin_2d(x=self.var_aa_grid,
                                                                y=self.corr_ab_grid,
                                                                z=self.qab_grid,
                                                                xp=q_aa,
                                                                yp=corr_flat_batch)
                                    q_ab = self.weight_var * q_ab + self.bias_var
                                    corr_flat_batch = q_ab / self.layer_qaa_dict[l + 1][0]
                            q_ab_all.append(q_ab)
                    q_ab_all = tf.parallel_stack(q_ab_all)
            else:
                with tf.name_scope("q_ab"):
                    corr_flat = tf.reshape(corr, [-1])
                    for l in range(self.depth):
                        with tf.name_scope("layer_%d" % l):
                            q_aa = self.layer_qaa_dict[l]
                            q_ab = interp.interp_lin_2d(x=self.var_aa_grid,
                                                        y=self.corr_ab_grid,
                                                        z=self.qab_grid,
                                                        xp=q_aa,
                                                        yp=corr_flat)
                            q_ab = self.weight_var * q_ab + self.bias_var
                            corr_flat = q_ab / self.layer_qaa_dict[l+1][0]
                    q_ab_all = q_ab
            return tf.reshape(q_ab_all, cov_init.shape, "qab")

    def _input_layer_normalization(self, x):
        """Input normalization to unit variance or fixed point variance.
        """
        with tf.name_scope("input_layer_normalization"):
            # Layer norm, fix to unit variance
            eps = 1e-15
            mean, var = tf.nn.moments(x, axes=[1], keep_dims=True)
            x_normalized = (x - mean) / tf.sqrt(var + eps)
            if self.use_fixed_point_norm:
                x_normalized *= tf.sqrt(
                    (self.var_fixed_point[0] - self.bias_var) / self.weight_var)
            return x_normalized

    def _get_batch_size_and_count(self, input1, input2):
        """Compute batch size and number to split when input size is large.

        Args:
          input1: tensor, input tensor to covariance matrix
          input2: tensor, second input tensor to covariance matrix

        Returns:
          batch_size: int, size of each batch
          batch_count: int, number of batches
        """
        input1_size = input1.shape[0].value
        input2_size = input2.shape[0].value
        batch_size = min(np.iinfo(np.int32).max //
                         (FLAGS.fraction_of_int32 * input2_size), input1_size)
        # Shrink until batch_size evenly divides input1_size.
        while input1_size % batch_size != 0:
            batch_size -= 1
        batch_count = input1_size // batch_size
        return batch_size, batch_count
def _fill_qab_slice(idx, z1, z2, var_aa, corr_ab, nonlin_fn):
    """Helper method used for parallel computation for full qab."""
    # Unnormalized log-weights of (z1, z2) under a Gaussian with variance
    # var_aa[idx] and covariance var_aa[idx]*corr_ab (see the comment above
    # the map_fn call in _compute_qmap_grid).
    log_weights_ab_unnorm = -(z1**2 + z2**2 - 2 * z1 * z2 * corr_ab) / (
        2 * var_aa[idx] * (1 - corr_ab**2))
    # Normalize over the integration grid axes in log space for stability.
    log_weights_ab = log_weights_ab_unnorm - tf.reduce_logsumexp(
        log_weights_ab_unnorm, axis=[0, 1], keep_dims=True)
    weights_ab = tf.exp(log_weights_ab)
    # Weighted Gaussian integration of nonlin(z1) * nonlin(z2) over the grid.
    qab_slice = tf.reduce_sum(
        nonlin_fn(z1) * nonlin_fn(z2) * weights_ab, axis=[0, 1])
    qab_slice = tf.Print(qab_slice, [idx], "Generating slice: ")
    return qab_slice
def _compute_qmap_grid(nonlin_fn,
                       n_gauss,
                       n_var,
                       n_corr,
                       log_spacing=False,
                       min_var=1e-8,
                       max_var=100.,
                       max_corr=0.99999,
                       max_gauss=10.):
    """Construct graph for covariance grid to use for kernel computation.

    Given variance and correlation (or covariance) of pre-activation, perform
    Gaussian integration to get covariance of post-activation.

    Raises:
      ValueError: if n_gauss is even integer.

    Args:
      nonlin_fn: tf ops corresponding to point-wise non-linearity in
        corresponding NN. e.g.) tf.nn.relu, tf.nn.sigmoid,
        lambda x: x * tf.nn.sigmoid(x), ...
      n_gauss: int, number of Gaussian integration points with equal spacing
        between (-max_gauss, max_gauss). Choose odd integer, so that there is
        a gridpoint at 0.
      n_var: int, number of variance grid points.
      n_corr: int, number of correlation grid points.
      log_spacing: bool, whether to use log-linear instead of linear variance
        grid.
      min_var: float, smallest variance value to generate grid.
      max_var: float, largest variance value to generate grid.
      max_corr: float, largest correlation value to generate grid. Should be
        slightly smaller than 1.
      max_gauss: float, range (-max_gauss, max_gauss) for Gaussian
        integration.

    Returns:
      var_grid_pts: tensor of size [n_var], grid points where variance are
        evaluated at.
      corr_grid_pts: tensor of size [n_corr], grid points where correlation
        are evaluated at.
      qaa: tensor of size [n_var], variance of post-activation at given
        pre-activation variance.
      qab: tensor of size [n_var, n_corr], covariance of post-activation at
        given pre-activation variance and correlation.
    """
    if n_gauss % 2 != 1:
        raise ValueError("n_gauss=%d should be an odd integer" % n_gauss)
    with tf.name_scope("compute_qmap_grid"):
        min_var = tf.convert_to_tensor(min_var, dtype=tf.float64)
        max_var = tf.convert_to_tensor(max_var, dtype=tf.float64)
        max_corr = tf.convert_to_tensor(max_corr, dtype=tf.float64)
        max_gauss = tf.convert_to_tensor(max_gauss, dtype=tf.float64)
        # Evaluation points for numerical integration over a Gaussian.
        # z1 has shape [n_gauss, 1, 1]; z2 is its transpose [1, n_gauss, 1].
        z1 = tf.reshape(tf.linspace(-max_gauss, max_gauss, n_gauss), (-1, 1, 1))
        z2 = tf.transpose(z1, perm=[1, 0, 2])
        if log_spacing:
            var_aa = tf.exp(tf.linspace(tf.log(min_var), tf.log(max_var), n_var))
        else:
            # Evaluation points for pre-activations variance and correlation
            var_aa = tf.linspace(min_var, max_var, n_var)
        corr_ab = tf.reshape(tf.linspace(-max_corr, max_corr, n_corr), (1, 1, -1))
        # compute q_aa
        log_weights_aa_unnorm = -0.5 * (z1**2 / tf.reshape(var_aa, [1, 1, -1]))
        log_weights_aa = log_weights_aa_unnorm - tf.reduce_logsumexp(
            log_weights_aa_unnorm, axis=[0, 1], keep_dims=True)
        weights_aa = tf.exp(log_weights_aa)
        qaa = tf.reduce_sum(nonlin_fn(z1)**2 * weights_aa, axis=[0, 1])
        # compute q_ab
        # weights to reweight uniform samples by, for q_ab.
        # (weights are probability of z1, z2 under Gaussian
        #  w/ variance var_aa and covariance var_aa*corr_ab)
        # weights_ab will have shape [n_g, n_g, n_v, n_c]
        def fill_qab_slice(idx):
            return _fill_qab_slice(idx, z1, z2, var_aa, corr_ab, nonlin_fn)
        qab = tf.map_fn(
            fill_qab_slice,
            tf.range(n_var),
            dtype=tf.float64,
            parallel_iterations=multiprocessing.cpu_count())
        var_grid_pts = tf.reshape(var_aa, [-1])
        corr_grid_pts = tf.reshape(corr_ab, [-1])
        return var_grid_pts, corr_grid_pts, qaa, qab
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.