gt stringclasses 1 value | context stringlengths 2.49k 119k |
|---|---|
# encoding: utf-8
"""
Test suite for the docx.styles.style module
"""
from __future__ import (
absolute_import, division, print_function, unicode_literals
)
import pytest
from docx.enum.style import WD_STYLE_TYPE
from docx.styles.style import (
BaseStyle, _CharacterStyle, _ParagraphStyle, _NumberingStyle,
StyleFactory, _TableStyle
)
from docx.text.font import Font
from docx.text.parfmt import ParagraphFormat
from ..unitutil.cxml import element, xml
from ..unitutil.mock import call, class_mock, function_mock, instance_mock
class DescribeStyleFactory(object):
    """Unit tests for `docx.styles.style.StyleFactory`.

    StyleFactory must map the ``w:type`` attribute of a ``w:style`` element
    to the matching proxy class (_ParagraphStyle, _CharacterStyle,
    _TableStyle, or _NumberingStyle).
    """

    def it_constructs_the_right_type_of_style(self, factory_fixture):
        style_elm, StyleCls_, style_ = factory_fixture
        style = StyleFactory(style_elm)
        # exactly one proxy class is chosen, receives the element
        # unchanged, and its instance is returned as-is
        StyleCls_.assert_called_once_with(style_elm)
        assert style is style_

    # fixtures -------------------------------------------------------

    @pytest.fixture(params=['paragraph', 'character', 'table', 'numbering'])
    def factory_fixture(
            self, request, paragraph_style_, _ParagraphStyle_,
            character_style_, _CharacterStyle_, table_style_, _TableStyle_,
            numbering_style_, _NumberingStyle_):
        # one run per w:type value; pair it with the mock class expected to
        # be called and the instance mock that class returns
        type_attr_val = request.param
        StyleCls_, style_mock = {
            'paragraph': (_ParagraphStyle_, paragraph_style_),
            'character': (_CharacterStyle_, character_style_),
            'table': (_TableStyle_, table_style_),
            'numbering': (_NumberingStyle_, numbering_style_),
        }[request.param]
        style_cxml = 'w:style{w:type=%s}' % type_attr_val
        style_elm = element(style_cxml)
        return style_elm, StyleCls_, style_mock

    # fixture components -----------------------------------

    @pytest.fixture
    def _ParagraphStyle_(self, request, paragraph_style_):
        # class mock patched into docx.styles.style, returning the
        # corresponding instance mock below
        return class_mock(
            request, 'docx.styles.style._ParagraphStyle',
            return_value=paragraph_style_
        )

    @pytest.fixture
    def paragraph_style_(self, request):
        return instance_mock(request, _ParagraphStyle)

    @pytest.fixture
    def _CharacterStyle_(self, request, character_style_):
        return class_mock(
            request, 'docx.styles.style._CharacterStyle',
            return_value=character_style_
        )

    @pytest.fixture
    def character_style_(self, request):
        return instance_mock(request, _CharacterStyle)

    @pytest.fixture
    def _TableStyle_(self, request, table_style_):
        return class_mock(
            request, 'docx.styles.style._TableStyle',
            return_value=table_style_
        )

    @pytest.fixture
    def table_style_(self, request):
        return instance_mock(request, _TableStyle)

    @pytest.fixture
    def _NumberingStyle_(self, request, numbering_style_):
        return class_mock(
            request, 'docx.styles.style._NumberingStyle',
            return_value=numbering_style_
        )

    @pytest.fixture
    def numbering_style_(self, request):
        return instance_mock(request, _NumberingStyle)
class DescribeBaseStyle(object):
    """Unit tests for `docx.styles.style.BaseStyle`.

    Covers the properties shared by all style types (style_id, type, name,
    builtin, hidden, priority, unhide_when_used, quick_style, locked) and
    the delete() operation.  Each property has a *_get_fixture mapping a
    cxml starting state to an expected value, and (for writable
    properties) a *_set_fixture mapping (start state, new value) to the
    expected resulting XML.
    """

    def it_knows_its_style_id(self, id_get_fixture):
        style, expected_value = id_get_fixture
        assert style.style_id == expected_value

    def it_can_change_its_style_id(self, id_set_fixture):
        style, new_value, expected_xml = id_set_fixture
        style.style_id = new_value
        assert style._element.xml == expected_xml

    def it_knows_its_type(self, type_get_fixture):
        style, expected_value = type_get_fixture
        assert style.type == expected_value

    def it_knows_its_name(self, name_get_fixture):
        style, expected_value = name_get_fixture
        assert style.name == expected_value

    def it_can_change_its_name(self, name_set_fixture):
        style, new_value, expected_xml = name_set_fixture
        style.name = new_value
        assert style._element.xml == expected_xml

    def it_knows_whether_its_a_builtin_style(self, builtin_get_fixture):
        style, expected_value = builtin_get_fixture
        assert style.builtin is expected_value

    def it_knows_whether_its_hidden(self, hidden_get_fixture):
        style, expected_value = hidden_get_fixture
        assert style.hidden == expected_value

    def it_can_change_whether_its_hidden(self, hidden_set_fixture):
        style, value, expected_xml = hidden_set_fixture
        style.hidden = value
        assert style._element.xml == expected_xml

    def it_knows_its_sort_order(self, priority_get_fixture):
        style, expected_value = priority_get_fixture
        assert style.priority == expected_value

    def it_can_change_its_sort_order(self, priority_set_fixture):
        style, value, expected_xml = priority_set_fixture
        style.priority = value
        assert style._element.xml == expected_xml

    def it_knows_whether_its_unhide_when_used(self, unhide_get_fixture):
        style, expected_value = unhide_get_fixture
        assert style.unhide_when_used == expected_value

    def it_can_change_its_unhide_when_used_value(self, unhide_set_fixture):
        style, value, expected_xml = unhide_set_fixture
        style.unhide_when_used = value
        assert style._element.xml == expected_xml

    def it_knows_its_quick_style_setting(self, quick_get_fixture):
        style, expected_value = quick_get_fixture
        assert style.quick_style == expected_value

    def it_can_change_its_quick_style_setting(self, quick_set_fixture):
        style, new_value, expected_xml = quick_set_fixture
        style.quick_style = new_value
        assert style._element.xml == expected_xml

    def it_knows_whether_its_locked(self, locked_get_fixture):
        style, expected_value = locked_get_fixture
        assert style.locked == expected_value

    def it_can_change_whether_its_locked(self, locked_set_fixture):
        style, value, expected_xml = locked_set_fixture
        style.locked = value
        assert style._element.xml == expected_xml

    def it_can_delete_itself_from_the_document(self, delete_fixture):
        style, styles, expected_xml = delete_fixture
        style.delete()
        # the w:style element is removed from its parent and the proxy is
        # disconnected from the tree
        assert styles.xml == expected_xml
        assert style._element is None

    # fixture --------------------------------------------------------

    @pytest.fixture(params=[
        # absent w:customStyle, or w:customStyle=0, means built-in
        ('w:style', True),
        ('w:style{w:customStyle=0}', True),
        ('w:style{w:customStyle=1}', False),
    ])
    def builtin_get_fixture(self, request):
        style_cxml, expected_value = request.param
        style = BaseStyle(element(style_cxml))
        return style, expected_value

    @pytest.fixture
    def delete_fixture(self):
        styles = element('w:styles/w:style')
        style = BaseStyle(styles[0])
        expected_xml = xml('w:styles')
        return style, styles, expected_xml

    @pytest.fixture(params=[
        # w:semiHidden present (without val, or val truthy) means hidden
        ('w:style', False),
        ('w:style/w:semiHidden', True),
        ('w:style/w:semiHidden{w:val=0}', False),
        ('w:style/w:semiHidden{w:val=1}', True),
    ])
    def hidden_get_fixture(self, request):
        style_cxml, expected_value = request.param
        style = BaseStyle(element(style_cxml))
        return style, expected_value

    @pytest.fixture(params=[
        # assigning True normalizes to a bare w:semiHidden child;
        # assigning False removes the child entirely
        ('w:style', True, 'w:style/w:semiHidden'),
        ('w:style/w:semiHidden{w:val=0}', True, 'w:style/w:semiHidden'),
        ('w:style/w:semiHidden{w:val=1}', True, 'w:style/w:semiHidden'),
        ('w:style', False, 'w:style'),
        ('w:style/w:semiHidden', False, 'w:style'),
        ('w:style/w:semiHidden{w:val=1}', False, 'w:style'),
    ])
    def hidden_set_fixture(self, request):
        style_cxml, value, expected_cxml = request.param
        style = BaseStyle(element(style_cxml))
        expected_xml = xml(expected_cxml)
        return style, value, expected_xml

    @pytest.fixture(params=[
        ('w:style', None),
        ('w:style{w:styleId=Foobar}', 'Foobar'),
    ])
    def id_get_fixture(self, request):
        style_cxml, expected_value = request.param
        style = BaseStyle(element(style_cxml))
        return style, expected_value

    @pytest.fixture(params=[
        # None removes the w:styleId attribute
        ('w:style', 'Foo', 'w:style{w:styleId=Foo}'),
        ('w:style{w:styleId=Foo}', 'Bar', 'w:style{w:styleId=Bar}'),
        ('w:style{w:styleId=Bar}', None, 'w:style'),
        ('w:style', None, 'w:style'),
    ])
    def id_set_fixture(self, request):
        style_cxml, new_value, expected_style_cxml = request.param
        style = BaseStyle(element(style_cxml))
        expected_xml = xml(expected_style_cxml)
        return style, new_value, expected_xml

    @pytest.fixture(params=[
        ('w:style', False),
        ('w:style/w:locked', True),
        ('w:style/w:locked{w:val=0}', False),
        ('w:style/w:locked{w:val=1}', True),
    ])
    def locked_get_fixture(self, request):
        style_cxml, expected_value = request.param
        style = BaseStyle(element(style_cxml))
        return style, expected_value

    @pytest.fixture(params=[
        ('w:style', True, 'w:style/w:locked'),
        ('w:style/w:locked{w:val=0}', True, 'w:style/w:locked'),
        ('w:style/w:locked{w:val=1}', True, 'w:style/w:locked'),
        ('w:style', False, 'w:style'),
        ('w:style/w:locked', False, 'w:style'),
        ('w:style/w:locked{w:val=1}', False, 'w:style'),
    ])
    def locked_set_fixture(self, request):
        style_cxml, value, expected_cxml = request.param
        style = BaseStyle(element(style_cxml))
        expected_xml = xml(expected_cxml)
        return style, value, expected_xml

    @pytest.fixture(params=[
        # built-in names like 'heading 1' are translated to their
        # localized/UI form ('Heading 1') on read
        ('w:style{w:type=table}', None),
        ('w:style{w:type=table}/w:name{w:val=Boofar}', 'Boofar'),
        ('w:style{w:type=table}/w:name{w:val=heading 1}', 'Heading 1'),
    ])
    def name_get_fixture(self, request):
        style_cxml, expected_value = request.param
        style = BaseStyle(element(style_cxml))
        return style, expected_value

    @pytest.fixture(params=[
        ('w:style', 'Foo', 'w:style/w:name{w:val=Foo}'),
        ('w:style/w:name{w:val=Foo}', 'Bar', 'w:style/w:name{w:val=Bar}'),
        ('w:style/w:name{w:val=Bar}', None, 'w:style'),
    ])
    def name_set_fixture(self, request):
        style_cxml, new_value, expected_style_cxml = request.param
        style = BaseStyle(element(style_cxml))
        expected_xml = xml(expected_style_cxml)
        return style, new_value, expected_xml

    @pytest.fixture(params=[
        ('w:style', None),
        ('w:style/w:uiPriority{w:val=42}', 42),
    ])
    def priority_get_fixture(self, request):
        style_cxml, expected_value = request.param
        style = BaseStyle(element(style_cxml))
        return style, expected_value

    @pytest.fixture(params=[
        ('w:style', 42,
         'w:style/w:uiPriority{w:val=42}'),
        ('w:style/w:uiPriority{w:val=42}', 24,
         'w:style/w:uiPriority{w:val=24}'),
        ('w:style/w:uiPriority{w:val=24}', None,
         'w:style'),
    ])
    def priority_set_fixture(self, request):
        style_cxml, value, expected_cxml = request.param
        style = BaseStyle(element(style_cxml))
        expected_xml = xml(expected_cxml)
        return style, value, expected_xml

    @pytest.fixture(params=[
        ('w:style', False),
        ('w:style/w:qFormat', True),
        ('w:style/w:qFormat{w:val=0}', False),
        ('w:style/w:qFormat{w:val=on}', True),
    ])
    def quick_get_fixture(self, request):
        style_cxml, expected_value = request.param
        style = BaseStyle(element(style_cxml))
        return style, expected_value

    @pytest.fixture(params=[
        ('w:style', True, 'w:style/w:qFormat'),
        ('w:style/w:qFormat', False, 'w:style'),
        ('w:style/w:qFormat', True, 'w:style/w:qFormat'),
        ('w:style/w:qFormat{w:val=0}', False, 'w:style'),
        ('w:style/w:qFormat{w:val=on}', True, 'w:style/w:qFormat'),
    ])
    def quick_set_fixture(self, request):
        style_cxml, new_value, expected_style_cxml = request.param
        style = BaseStyle(element(style_cxml))
        expected_xml = xml(expected_style_cxml)
        return style, new_value, expected_xml

    @pytest.fixture(params=[
        # a missing w:type defaults to paragraph; 'numbering' maps to the
        # WD_STYLE_TYPE.LIST member
        ('w:style', WD_STYLE_TYPE.PARAGRAPH),
        ('w:style{w:type=paragraph}', WD_STYLE_TYPE.PARAGRAPH),
        ('w:style{w:type=character}', WD_STYLE_TYPE.CHARACTER),
        ('w:style{w:type=numbering}', WD_STYLE_TYPE.LIST),
    ])
    def type_get_fixture(self, request):
        style_cxml, expected_value = request.param
        style = BaseStyle(element(style_cxml))
        return style, expected_value

    @pytest.fixture(params=[
        ('w:style', False),
        ('w:style/w:unhideWhenUsed', True),
        ('w:style/w:unhideWhenUsed{w:val=0}', False),
        ('w:style/w:unhideWhenUsed{w:val=1}', True),
    ])
    def unhide_get_fixture(self, request):
        style_cxml, expected_value = request.param
        style = BaseStyle(element(style_cxml))
        return style, expected_value

    @pytest.fixture(params=[
        ('w:style', True,
         'w:style/w:unhideWhenUsed'),
        ('w:style/w:unhideWhenUsed', False,
         'w:style'),
        ('w:style/w:unhideWhenUsed{w:val=0}', True,
         'w:style/w:unhideWhenUsed'),
        ('w:style/w:unhideWhenUsed{w:val=1}', True,
         'w:style/w:unhideWhenUsed'),
        ('w:style/w:unhideWhenUsed{w:val=1}', False,
         'w:style'),
        ('w:style', False,
         'w:style'),
    ])
    def unhide_set_fixture(self, request):
        style_cxml, value, expected_cxml = request.param
        style = BaseStyle(element(style_cxml))
        expected_xml = xml(expected_cxml)
        return style, value, expected_xml
class Describe_CharacterStyle(object):
    """Unit tests for `docx.styles.style._CharacterStyle`.

    A character style adds base-style resolution (via w:basedOn) and font
    access on top of BaseStyle.
    """

    def it_knows_which_style_it_is_based_on(self, base_get_fixture):
        style, StyleFactory_, StyleFactory_calls, base_style_ = (
            base_get_fixture
        )
        base_style = style.base_style
        # StyleFactory is called exactly once with the resolved w:style
        # element, or not at all when no base style can be resolved
        assert StyleFactory_.call_args_list == StyleFactory_calls
        assert base_style == base_style_

    def it_can_change_its_base_style(self, base_set_fixture):
        style, value, expected_xml = base_set_fixture
        style.base_style = value
        assert style._element.xml == expected_xml

    def it_provides_access_to_its_font(self, font_fixture):
        style, Font_, font_ = font_fixture
        font = style.font
        # Font proxies the style element itself (rPr lives inside it)
        Font_.assert_called_once_with(style._element)
        assert font is font_

    # fixture --------------------------------------------------------

    @pytest.fixture(params=[
        # (styles cxml, index of style under test, index of its base style
        # or -1 when the w:basedOn target is dangling/absent)
        ('w:styles/(w:style{w:styleId=Foo},w:style/w:basedOn{w:val=Foo})',
         1, 0),
        ('w:styles/(w:style{w:styleId=Foo},w:style/w:basedOn{w:val=Bar})',
         1, -1),
        ('w:styles/w:style',
         0, -1),
    ])
    def base_get_fixture(self, request, StyleFactory_):
        styles_cxml, style_idx, base_style_idx = request.param
        styles = element(styles_cxml)
        style = _CharacterStyle(styles[style_idx])
        if base_style_idx >= 0:
            base_style = styles[base_style_idx]
            StyleFactory_calls = [call(base_style)]
            expected_value = StyleFactory_.return_value
        else:
            StyleFactory_calls = []
            expected_value = None
        return style, StyleFactory_, StyleFactory_calls, expected_value

    @pytest.fixture(params=[
        # assigning a style writes its id to w:basedOn; assigning None
        # removes the w:basedOn child
        ('w:style', 'Foo',
         'w:style/w:basedOn{w:val=Foo}'),
        ('w:style/w:basedOn{w:val=Foo}', 'Bar',
         'w:style/w:basedOn{w:val=Bar}'),
        ('w:style/w:basedOn{w:val=Bar}', None,
         'w:style'),
    ])
    def base_set_fixture(self, request, style_):
        style_cxml, base_style_id, expected_style_cxml = request.param
        style = _CharacterStyle(element(style_cxml))
        style_.style_id = base_style_id
        base_style = style_ if base_style_id is not None else None
        expected_xml = xml(expected_style_cxml)
        return style, base_style, expected_xml

    @pytest.fixture
    def font_fixture(self, Font_, font_):
        style = _CharacterStyle(element('w:style'))
        return style, Font_, font_

    # fixture components ---------------------------------------------

    @pytest.fixture
    def Font_(self, request, font_):
        return class_mock(
            request, 'docx.styles.style.Font', return_value=font_
        )

    @pytest.fixture
    def font_(self, request):
        return instance_mock(request, Font)

    @pytest.fixture
    def style_(self, request):
        return instance_mock(request, BaseStyle)

    @pytest.fixture
    def StyleFactory_(self, request):
        return function_mock(request, 'docx.styles.style.StyleFactory')
class Describe_ParagraphStyle(object):
    """Unit tests for `docx.styles.style._ParagraphStyle`.

    A paragraph style adds next_paragraph_style resolution (via w:next)
    and paragraph_format access on top of _CharacterStyle.
    """

    def it_knows_its_next_paragraph_style(self, next_get_fixture):
        style, expected_value = next_get_fixture
        assert style.next_paragraph_style == expected_value

    def it_can_change_its_next_paragraph_style(self, next_set_fixture):
        style, next_style, expected_xml = next_set_fixture
        style.next_paragraph_style = next_style
        assert style.element.xml == expected_xml

    def it_provides_access_to_its_paragraph_format(self, parfmt_fixture):
        style, ParagraphFormat_, paragraph_format_ = parfmt_fixture
        paragraph_format = style.paragraph_format
        # ParagraphFormat proxies the style element itself (pPr lives
        # inside it)
        ParagraphFormat_.assert_called_once_with(style._element)
        assert paragraph_format is paragraph_format_

    # fixtures -------------------------------------------------------

    @pytest.fixture(params=[
        # (style under test, expected next style).  Only H1 resolves to a
        # different style; H2's w:next targets a character style, Body has
        # no w:next, and Foo's w:next is dangling -- all of those fall
        # back to the style itself.
        ('H1', 'Body'),
        ('H2', 'H2'),
        ('Body', 'Body'),
        ('Foo', 'Foo'),
    ])
    def next_get_fixture(self, request):
        style_name, next_style_name = request.param
        styles = element(
            'w:styles/('
            'w:style{w:type=paragraph,w:styleId=H1}/w:next{w:val=Body},'
            'w:style{w:type=paragraph,w:styleId=H2}/w:next{w:val=Char},'
            'w:style{w:type=paragraph,w:styleId=Body},'
            'w:style{w:type=paragraph,w:styleId=Foo}/w:next{w:val=Bar},'
            'w:style{w:type=character,w:styleId=Char})'
        )
        style_names = ['H1', 'H2', 'Body', 'Foo', 'Char']
        style_elm = styles[style_names.index(style_name)]
        next_style_elm = styles[style_names.index(next_style_name)]
        style = _ParagraphStyle(style_elm)
        if style_name == 'H1':
            next_style = _ParagraphStyle(next_style_elm)
        else:
            # fallback cases compare equal to the style under test itself
            next_style = style
        return style, next_style

    @pytest.fixture(params=[
        # assigning None or the style itself removes/omits w:next
        ('H', 'B', 'w:style{w:type=paragraph,w:styleId=H}/w:next{w:val=B}'),
        ('H', None, 'w:style{w:type=paragraph,w:styleId=H}'),
        ('H', 'H', 'w:style{w:type=paragraph,w:styleId=H}'),
    ])
    def next_set_fixture(self, request):
        style_name, next_style_name, style_cxml = request.param
        styles = element(
            'w:styles/('
            'w:style{w:type=paragraph,w:styleId=H},'
            'w:style{w:type=paragraph,w:styleId=B})'
        )
        style_elms = {'H': styles[0], 'B': styles[1]}
        style = _ParagraphStyle(style_elms[style_name])
        next_style = (
            None if next_style_name is None else
            _ParagraphStyle(style_elms[next_style_name])
        )
        expected_xml = xml(style_cxml)
        return style, next_style, expected_xml

    @pytest.fixture
    def parfmt_fixture(self, ParagraphFormat_, paragraph_format_):
        style = _ParagraphStyle(element('w:style'))
        return style, ParagraphFormat_, paragraph_format_

    # fixture components ---------------------------------------------

    @pytest.fixture
    def ParagraphFormat_(self, request, paragraph_format_):
        return class_mock(
            request, 'docx.styles.style.ParagraphFormat',
            return_value=paragraph_format_
        )

    @pytest.fixture
    def paragraph_format_(self, request):
        return instance_mock(request, ParagraphFormat)
| |
# coding=UTF-8
from collections import deque
import datetime
import time
import re
import json
import MySQLdb
from django.db import models, IntegrityError
from snh.models.common import *
from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned
import snhlogger
# Module-level logger; writes to facebook_model.log via the project helper.
logger = snhlogger.init_logger(__name__, "facebook_model.log")
from settings import DEBUGCONTROL, dLogger, DEFAULT_API_APPS
# Per-module verbose-debug switch taken from the project-wide dict.
debugging = DEBUGCONTROL['facebookmodel']
if debugging: print "DEBBUGING ENABLED IN %s"%__name__
class FacebookHarvester(AbstractHaverster):
    """Django model configuring one Facebook harvest job.

    Persists which FBUser rows to harvest and which Facebook app id to
    use.  The Graph API client itself is runtime-only state, attached via
    `set_client` and never stored in the database.  `AbstractHaverster`
    (sic, project base class) provides the shared harvest bookkeeping.
    """

    class Meta:
        app_label = "snh"

    # Facebook application id used for API authentication.
    # TODO (translated from French): allow each harvester to have its own
    # app, rather than the shared default from settings.
    app_id = models.CharField(max_length=255, blank=True,default=DEFAULT_API_APPS['facebook']['app_id'])

    # Runtime-only Graph API client (class-level default; set per instance
    # by set_client).
    client = None

    # Users whose walls/posts this harvester is responsible for.
    fbusers_to_harvest = models.ManyToManyField('FBUser', related_name='harvester_in_charge')
    # Whether this harvest should also fetch post "likes".
    update_likes = models.BooleanField()

    def set_client(self, client):
        # Attach the runtime Graph API client used by api_call().
        self.client = client

    def get_client(self):
        # A client is mandatory before any API access.
        if self.client is None:
            raise Exception("you need to set the client!!")
        return self.client

    def end_current_harvest(self):
        # No Facebook-specific teardown beyond base-class bookkeeping.
        super(FacebookHarvester, self).end_current_harvest()

    @dLogger.debug
    def api_call(self, method, params):
        """Invoke `method` (an attribute name on the client) with keyword
        arguments `params`, returning the client's result.

        Notifies the base class first, presumably for call-count /
        throttling bookkeeping (defined outside this file -- confirm).
        """
        if debugging:
            dLogger.log("<FacebookHarvester>::api_call()")
            dLogger.log(" method: %s"%(method))
            dLogger.log(" params: %s"%(params))
        super(FacebookHarvester, self).api_call(method, params)
        c = self.get_client()
        metp = getattr(c, method)
        ret = metp(**params)
        #if debugging: dLogger.log(" ret: %s"%(ret))
        return ret

    # The per-user progress hooks of the base class are not used for
    # Facebook harvests; all three intentionally report nothing.
    def get_last_harvested_user(self):
        return None

    def get_current_harvested_user(self):
        return None

    def get_next_user_to_harvest(self):
        return None

    @dLogger.debug
    def get_stats(self):
        # No Facebook-specific stats; base-class stats pass through.
        parent_stats = super(FacebookHarvester, self).get_stats()
        return parent_stats
class FacebookSessionKey(models.Model):
    ''' Stores a key returned by the authentification of a user through the Facebook javascript sdk.
    Normally, there should be only one instance of FacebookSessionKey per server.
    '''

    # The OAuth user access token string, and when it was last refreshed.
    user_access_token = models.CharField(max_length=255, null=True)
    updated_time = models.DateTimeField(null=True)

    class Meta:
        app_label = "snh"

    def get_access_token(self):
        return self.user_access_token

    @dLogger.debug
    def set_access_token(self, accessToken):
        # Persist a fresh access token together with its refresh time.
        if debugging: dLogger.log("<FacebookSessionKey>::set_access_token()")
        self.user_access_token = accessToken
        # NOTE(review): `datetime.utcnow()` requires the datetime *class*
        # in scope; the top of this file imports the *module*
        # (`import datetime`), so this presumably relies on the
        # `snh.models.common` star import rebinding the name -- confirm.
        self.updated_time = datetime.utcnow()
        self.save()

    def get_last_update_time(self):
        return self.updated_time
class FBResult(models.Model):
    ''' FBResult is used to temporarily store the raw data obtained from Facebook graph API batch methods.
    Can either contain a FBPost, FBComment or FBPost.likes raw page to be analysed later.
    '''

    class Meta:
        app_label = "snh"

    def __unicode__(self):
        return self.fid

    # Harvest run that produced this raw page.
    harvester = models.ForeignKey("FacebookHarvester")
    # Raw JSON payload as returned by the batch call.
    result = models.TextField(null=True)
    # Facebook id of the object the payload belongs to.
    fid = models.TextField(null=True)
    # Payload kind discriminator (e.g. post / comment / likes page).
    ftype = models.TextField(null=True)
    # Facebook id of the parent object, when the payload is a sub-resource.
    parent = models.TextField(null=True)
class FBUser(models.Model):
    """Django model mirroring a Facebook Graph API User/Page object.

    Fields suffixed `_raw` hold JSON fragments that are stored verbatim
    but not otherwise interpreted.  `update_from_facebook` syncs this row
    from a Graph API response dict.
    """

    class Meta:
        app_label = "snh"

    def __unicode__(self):
        # Prefer username, then name, then the raw Facebook id; escape to
        # avoid console/DB encoding errors on exotic characters.
        if self.username:
            return self.username.encode('unicode-escape')
        elif self.name:
            return self.name.encode('unicode-escape')
        else:
            return unicode(self.fid)

    def related_label(self):
        # Human-readable label used by admin/autocomplete widgets.
        return u"%s (%s)" % (self.username if self.username else self.name, self.pmk_id)

    pmk_id = models.AutoField(primary_key=True)
    fid = models.CharField(max_length=255, null=True, unique=True)
    name = models.CharField(max_length=255, null=True)
    username = models.CharField(max_length=255, null=True, blank=True)
    website = models.ForeignKey('URL', related_name="fbuser.website", null=True)
    link = models.ForeignKey('URL', related_name="fbuser.link", null=True)
    first_name = models.CharField(max_length=255, null=True)
    last_name = models.CharField(max_length=255, null=True)
    gender = models.CharField(max_length=255, null=True)
    locale = models.CharField(max_length=255, null=True)
    languages_raw = models.TextField(null=True) #not supported but saved
    third_party_id = models.CharField(max_length=255, null=True)
    installed_raw = models.TextField(null=True) #not supported but saved
    timezone_raw = models.TextField(null=True) #not supported but saved
    updated_time = models.DateTimeField(null=True)
    verified = models.BooleanField()
    bio = models.TextField(null=True)
    birthday = models.DateTimeField(null=True)
    education_raw = models.TextField(null=True) #not supported but saved
    email = models.CharField(max_length=255, null=True)
    hometown = models.CharField(max_length=255, null=True)
    interested_in_raw = models.TextField(null=True) #not supported but saved
    location_raw = models.TextField(null=True) #not supported but saved
    political = models.TextField(null=True)
    favorite_athletes_raw = models.TextField(null=True) #not supported but saved
    favorite_teams_raw = models.TextField(null=True) #not supported but saved
    quotes = models.TextField(max_length=255, null=True)
    relationship_status = models.TextField(null=True)
    religion = models.TextField(null=True)
    significant_other_raw = models.TextField(null=True) #not supported but saved
    video_upload_limits_raw = models.TextField(null=True) #not supported but saved
    work_raw = models.TextField(null=True) #not supported but saved
    category = models.TextField(null=True)
    likes = models.IntegerField(null=True)
    about = models.TextField(null=True)
    phone = models.CharField(max_length=255, null=True)
    checkins = models.IntegerField(null=True)
    picture = models.ForeignKey('URL', related_name="fbpagedesc.picture", null=True)
    talking_about_count = models.IntegerField(null=True)
    error_triggered = models.BooleanField()
    error_on_update = models.BooleanField()

    @dLogger.debug
    def update_url_fk(self, self_prop, face_prop, facebook_model):
        """Sync a URL foreign-key value from a harvested Facebook dict.

        self_prop: current URL instance (or None) of the field.
        face_prop: key in `facebook_model` holding the url string.
        Returns (model_changed, value); the caller assigns `value` back.
        """
        #if debugging: dLogger.log("<FBUser: %s>::update_url_fk(%s)"%(self.name, face_prop))
        model_changed = False
        if face_prop in facebook_model:
            prop_val = facebook_model[face_prop]
            if self_prop is None or self_prop.original_url != prop_val:
                try:
                    # Reuse the first existing URL row for this address.
                    url = URL.objects.filter(original_url=prop_val)[0]
                except IndexError:
                    # No such row yet -- create one.  (Was a bare except;
                    # narrowed so genuine DB errors are no longer hidden.)
                    url = URL(original_url=prop_val)
                    url.save()
                self_prop = url
                model_changed = True
        #if debugging: dLogger.log(" has changed: %s"%model_changed)
        return model_changed, self_prop

    @dLogger.debug
    def update_from_facebook(self, fb_user):
        """Sync this row from a Graph API user dict; save when changed.

        Accepts either a plain user dict or a batch-API envelope whose
        'body' entry is a JSON string.  Raises when Facebook returned an
        error payload.  Returns True when any field changed.
        """
        if debugging:
            dLogger.log("<FBUser: %s>::update_from_facebook()"%self.name)
            #dLogger.pretty(fb_user)
        # Batch API responses wrap the payload in a JSON 'body' string.
        if 'body' in fb_user:
            fb_user = json.loads(fb_user['body'])
        if 'error' in fb_user:
            raise(BaseException(fb_user['error']))
        model_changed = False
        # model field -> Graph API field.  Fixed vs. original: the
        # 'education' key was misspelled 'educaction' (never matched), and
        # 'favorite_teams' wrote to a non-field attribute instead of
        # favorite_teams_raw; a duplicate 'location_raw' entry was dropped.
        props_to_check = {
            u"fid":u"id",
            u"name":u"name",
            u"username":u"username",
            u"first_name":u"first_name",
            u"last_name":u"last_name",
            u"gender":u"gender",
            u"locale":u"locale",
            u"languages_raw":u"languages",
            u"third_party_id":u"third_party_id",
            u"installed_raw":u"installed",
            u"timezone_raw":u"timezone",
            u"verified":u"verified",
            u"bio":u"bio",
            u"education_raw":u"education",
            u"email":u"email",
            u"hometown":u"hometown",
            u"interested_in_raw":u"interested_in",
            u"location_raw":u"location",
            u"political":u"political",
            u"favorite_athletes_raw":u"favorite_athletes",
            u"favorite_teams_raw":u"favorite_teams",
            u"quotes":u"quotes",
            u"relationship_status":u"relationship_status",
            u"religion":u"religion",
            u"significant_other_raw":u"significant_other",
            u"video_upload_limits_raw":u"video_upload_limits",
            u"work_raw":u"work",
            u"category":u"category",
            u"likes":u"likes",
            u"phone":u"phone",
            u"checkins":u"checkins",
            u"about":u"about",
            u"talking_about_count":u"talking_about_count",
        }
        #date_to_check = {"birthday":"birthday"}
        date_to_check = {}
        for prop in props_to_check:
            if props_to_check[prop] in fb_user and unicode(self.__dict__[prop]) != unicode(fb_user[props_to_check[prop]]):
                self.__dict__[prop] = fb_user[props_to_check[prop]]
                model_changed = True
                if debugging: dLogger.log(" %s has changed: %s"%(prop, self.__dict__[prop]))
        # Currently dead (date_to_check is empty); kept for the disabled
        # 'birthday' mapping above.
        for prop in date_to_check:
            if date_to_check[prop] in fb_user and self.__dict__[prop] != fb_user[date_to_check[prop]]:
                date_val = datetime.strptime(fb_user[prop],'%m/%d/%Y')
                if self.__dict__[prop] != date_val:
                    self.__dict__[prop] = date_val
                    model_changed = True
        (changed, self_prop) = self.update_url_fk(self.website, "website", fb_user)
        if changed:
            self.website = self_prop
            model_changed = True
        (changed, self_prop) = self.update_url_fk(self.link, "link", fb_user)
        if changed:
            self.link = self_prop
            model_changed = True
        (changed, self_prop) = self.update_url_fk(self.picture, "picture", fb_user)
        if changed:
            self.picture = self_prop
            model_changed = True
        if model_changed:
            # NOTE(review): no `model_update_date` field is declared on
            # this model in this file -- confirm it exists upstream,
            # otherwise the value is silently discarded on save.
            self.model_update_date = datetime.utcnow()
            self.error_on_update = False
            try:
                self.save()
            except Exception:
                # MySQL may reject some unicode; escape the offending text
                # fields and retry.  (The original escaped but never
                # re-saved, silently losing the whole update.)
                if self.name:
                    self.name = self.name.encode('unicode-escape')
                if self.about:
                    self.about = self.about.encode('unicode-escape')
                self.save()
        if debugging: dLogger.log(" updated user data: %s"%self)
        return model_changed
class FBPost(models.Model):
    """Django model mirroring a Facebook Graph API Post object.

    Fields suffixed `_raw` store JSON fragments verbatim without further
    interpretation.
    """

    class Meta:
        app_label = "snh"

    def __unicode__(self):
        return "%s - %s"%(self.user, self.ftype)

    pmk_id = models.AutoField(primary_key=True)
    user = models.ForeignKey('FBUser', related_name='postsOnWall') #person on which wall the post apears =/= ffrom
    fid = models.CharField(max_length=255, null=True, unique=True)
    ffrom = models.ForeignKey('FBUser', related_name='postedStatuses', null=True) #person who posted this =/= user
    message = models.TextField(null=True)
    message_tags_raw = models.TextField(null=True) #not supported but saved
    picture = models.ForeignKey('URL', related_name="fbpost.picture", null=True)
    link = models.ForeignKey('URL', related_name="fbpost.link", null=True)
    name = models.CharField(max_length=255, null=True)
    caption = models.TextField(null=True)
    description = models.TextField(null=True)
    source = models.ForeignKey('URL', related_name="fbpost.source", null=True)
    properties_raw = models.TextField(null=True) #not supported but saved
    icon = models.ForeignKey('URL', related_name="fbpost.icon", null=True)
    #actions = array of objects containing the name and link #will not be supported
    privacy_raw = models.TextField(null=True) #not supported but saved
    ftype = models.CharField(max_length=255, null=True)
    # Users who liked the post, plus the aggregate counters reported by
    # the API (likes/comments/shares).
    likes_from = models.ManyToManyField('FBUser', related_name='fbpost.likes_from', null=True)
    likes_count = models.IntegerField(null=True)
    comments_count = models.IntegerField(null=True)
    shares_count = models.IntegerField(null=True)
    place_raw = models.TextField(null=True) #not supported but saved
    story = models.TextField(null=True)
    story_tags_raw = models.TextField(null=True) #not supported but saved
    object_id = models.BigIntegerField(null=True)
    application_raw = models.TextField(null=True) #not supported but saved
    created_time = models.DateTimeField(null=True)
    updated_time = models.DateTimeField(null=True)
    error_on_update = models.BooleanField()
#@dLogger.debug
def get_existing_user(self, param):
#if debugging: dLogger.log("<FBPost: %s>::get_existing_user()"%self.fid)
user = None
try:
user = FBUser.objects.get(**param)
except MultipleObjectsReturned:
logger.debug(u">>>>MULTIPLE USER")
user = FBUser.objects.filter(**param)[0]
except ObjectDoesNotExist:
logger.debug(u">>>>DOES NOT EXISTS")
pass
#if debugging: dLogger.log(" user returned: %s"%user)
return user
#@dLogger.debug
def update_url_fk(self, self_prop, face_prop, facebook_model):
#if debugging: dLogger.log("<FBPost: %s>::update_url_fk()"%self.fid)
model_changed = False
if face_prop in facebook_model:
prop_val = facebook_model[face_prop]
if self_prop is None or self_prop.original_url != prop_val:
url = None
try:
url = URL.objects.filter(original_url=prop_val)[0]
except:
pass
if url is None:
url = URL(original_url=prop_val)
url.save()
self_prop = url
model_changed = True
return model_changed, self_prop
    @dLogger.debug
    def update_user_fk(self, self_prop, face_prop, facebook_model):
        """Sync a FBUser foreign-key value (e.g. the post's 'from' user).

        self_prop: current FBUser instance (or None) of the field.
        face_prop: key in `facebook_model` holding an embedded user dict.
        Returns (model_changed, value); the caller assigns `value` back.
        Creates-or-updates the referenced FBUser; a concurrent insert of
        the same fid is recovered via the IntegrityError branch.
        """
        #if debugging: dLogger.log("<FBPost: %s>::update_user_fk()"%self.fid)
        model_changed = False
        if face_prop in facebook_model:
            prop_val = facebook_model[face_prop]
            # Only touch the FK when the embedded user differs from the
            # one currently referenced.
            if prop_val and (self_prop is None or self_prop.fid != prop_val["id"]):
                user = None
                user = self.get_existing_user({"fid__exact":prop_val["id"]})
                if not user:
                    try:
                        user = FBUser()
                        user.update_from_facebook(prop_val)
                        if debugging: dLogger.log(" new user created: %s"%user)
                    except IntegrityError:
                        # Lost a race with another insert of the same fid;
                        # fall back to updating the row that won.
                        user = self.get_existing_user({"fid__exact":prop_val["id"]})
                        if user:
                            user.update_from_facebook(prop_val)
                        else:
                            logger.debug(u">>>>CRITICAL CANT UPDATED DUPLICATED USER %s" % prop_val["id"])
                self_prop = user
                #print self_prop, user.name, prop_val
                model_changed = True
        return model_changed, self_prop
    @dLogger.debug
    def update_likes_from_facebook(self, likes):
        """Add any not-yet-recorded likers from a Graph API likes page.

        `likes` is an iterable of user dicts (each with an "id").  Each
        unseen liker is created-or-fetched, refreshed from the dict, and
        added to `likes_from`.  Returns True when the M2M changed.
        """
        if debugging:
            dLogger.log("<FBPost: %s>::update_likes_from_facebook()"%self.fid)
            #dLogger.log(' likes: %s'%likes)
        model_changed = False
        for fbuser in likes:
            # Skip likers already linked to this post.
            if self.likes_from.filter(fid__exact=fbuser["id"]).count() == 0:
                user_like = None
                user_like = self.get_existing_user({"fid__exact":fbuser["id"]})
                if not user_like:
                    try:
                        user_like = FBUser(fid=fbuser["id"])
                        user_like.save()
                    except IntegrityError:
                        # Lost a race with a concurrent insert of the same
                        # fid; reuse the winning row.
                        user_like = self.get_existing_user({"fid__exact":fbuser["id"]})
                        if user_like:
                            # NOTE(review): this update is repeated again
                            # just below for the same user -- presumably
                            # redundant; confirm before removing.
                            user_like.update_from_facebook(fbuser)
                        else:
                            logger.debug(u">>>>CRITICAL CANT UPDATED DUPLICATED USER %s" % fbuser["id"])
                if user_like:
                    user_like.update_from_facebook(fbuser)
                    if debugging: dLogger.log(" new user created from like: %s"%user_like)
                if user_like not in self.likes_from.all():
                    self.likes_from.add(user_like)
                    model_changed = True
        if model_changed:
            # NOTE(review): `model_update_date` is not a declared field on
            # FBPost in this file -- confirm it exists upstream.  Also
            # assumes `datetime` names the datetime class here (rebound by
            # the `snh.models.common` star import) -- confirm.
            self.model_update_date = datetime.utcnow()
            self.error_on_update = False
            self.save()
            if debugging: dLogger.log(" updated data!")
        return model_changed
@dLogger.debug
def update_from_facebook(self, facebook_model, user):
if debugging:
dLogger.log("<FBPost: %s>::update_from_facebook()"%self.fid)
#dLogger.log(" facebook_model: %s"%facebook_model)
model_changed = False
props_to_check = {
u"fid":u"id",
u"message":u"message",
u"message_tags_raw":u"message_tags",
u"name":u"name",
u"caption":u"caption",
u"description":u"description",
#u"description":u"subject",
u"properties_raw":u"properties",
u"privacy_raw":u"privacy",
u"ftype":u"type",
u"place_raw":u"place",
u"story":u"story",
u"story_tags_raw":u"story_tags",
u"object_id":u"object_id",
u"application_raw":u"application",
}
date_to_check = [u"created_time", u"updated_time"]
self.user = user
for prop in props_to_check:
#if debugging: dLogger.log(" prop: %s"%prop)
if props_to_check[prop] in facebook_model and self.__dict__[prop] != facebook_model[props_to_check[prop]]:
#if debugging: dLogger.log(" facebook_model[%s]: %s"%(props_to_check[prop], facebook_model[props_to_check[prop]]))
self.__dict__[prop] = facebook_model[props_to_check[prop]]
model_changed = True
if debugging:
if prop == 'message':
dLogger.log(" %s has changed: %s"%(prop, self.__dict__[prop][:20]))
else:
dLogger.log(" %s has changed: %s"%(prop, self.__dict__[prop]))
if 'shares' in facebook_model:
#dLogger.log(' shares in FBModel')
if self.__dict__['shares_count'] != facebook_model['shares']['count']:
self.__dict__['shares_count'] = facebook_model['shares']['count']
#dLogger.log(' share_count has changed: %s'%self.__dict__['shares_count'])
model_changed = True
if 'likes' in facebook_model:
#dLogger.log(' likes in FBModel')
if self.__dict__['likes_count'] != facebook_model['likes']['summary']['total_count']:
self.__dict__['likes_count'] = facebook_model['likes']['summary']['total_count']
#dLogger.log(' share_count has changed: %s'%self.__dict__['likes_count'])
model_changed = True
if 'comments' in facebook_model:
#dLogger.log(' comments in FBModel')
if self.__dict__['comments_count'] != facebook_model['comments']['summary']['total_count']:
self.__dict__['comments_count'] = facebook_model['comments']['summary']['total_count']
#dLogger.log(' share_count has changed: %s'%self.__dict__['comments_count'])
model_changed = True
for prop in date_to_check:
if prop in facebook_model:
fb_val = facebook_model[prop]
try:
date_val = datetime.strptime(fb_val,'%Y-%m-%dT%H:%M:%S+0000')
except:
if debugging: dLogger.log(' THAT WEIRD ERROR AGAIN!')
date_val = None
if date_val and self.__dict__[prop] != date_val:
self.__dict__[prop] = date_val
model_changed = True
(changed, self_prop) = self.update_url_fk(self.picture, "picture", facebook_model)
if changed:
self.picture = self_prop
model_changed = True
(changed, self_prop) = self.update_url_fk(self.link, "link", facebook_model)
if changed:
self.link = self_prop
model_changed = True
(changed, self_prop) = self.update_url_fk(self.source, "source", facebook_model)
if changed:
self.source = self_prop
model_changed = True
(changed, self_prop) = self.update_url_fk(self.icon, "icon", facebook_model)
if changed:
self.icon = self_prop
model_changed = True
(changed, self_prop) = self.update_user_fk(self.ffrom, "from", facebook_model)
if changed:
self.ffrom = self_prop
model_changed = True
if model_changed:
self.model_update_date = datetime.utcnow()
self.error_on_update = False
try:
self.save()
except:
if self.message: self.message = self.message.encode('unicode-escape')
if self.name: self.name = self.name.encode('unicode-escape')
if self.description: self.description = self.description.encode('unicode-escape')
self.save()
if debugging: dLogger.log(" Message updated: %s"%self)
return model_changed
class FBComment(models.Model):
    """One Facebook comment, linked to the FBPost it was made on."""

    class Meta:
        app_label = "snh"

    def __unicode__(self):
        if self.message:
            return self.message[:50]
        else:
            return '-- No message --'

    pmk_id = models.AutoField(primary_key=True)
    fid = models.CharField(max_length=255, null=True, unique=True)
    ffrom = models.ForeignKey('FBUser', related_name="postedComments", null=True)
    message = models.TextField(max_length=255, null=True)
    created_time = models.DateTimeField(null=True)
    likes = models.IntegerField(null=True)
    user_likes = models.BooleanField(default=False)
    ftype = models.CharField(max_length=255, null=True)
    post = models.ForeignKey('FBPost', null=True)
    error_on_update = models.BooleanField()

    #@dLogger.debug
    def get_existing_user(self, param):
        """Look up an FBUser by **param; first match on duplicates, None when absent."""
        #if debugging: dLogger.log("<FBComment: %s>::get_existing_user()"%self.fid)
        user = None
        try:
            user = FBUser.objects.get(**param)
        except MultipleObjectsReturned:
            user = FBUser.objects.filter(**param)[0]
        except ObjectDoesNotExist:
            pass
        return user

    @dLogger.debug
    def update_user_fk(self, self_prop, face_prop, facebook_model):
        """Resolve the user referenced by facebook_model[face_prop].

        Returns (changed, user): changed is True when self_prop should be
        replaced by the returned user object.
        """
        #if debugging: dLogger.log("<FBComment: %s>::update_user_fk()"%self.fid)
        model_changed = False
        if face_prop in facebook_model:
            prop_val = facebook_model[face_prop]
            if prop_val and (self_prop is None or self_prop.fid != prop_val["id"]):
                # (Removed dead `user = None` that was immediately overwritten.)
                user = self.get_existing_user({"fid__exact":prop_val["id"]})
                if not user:
                    try:
                        user = FBUser()
                        user.update_from_facebook(prop_val)
                        if debugging: dLogger.log(" new user created: %s"%user)
                    except IntegrityError:
                        # Lost a race: another worker inserted the same fid.
                        user = self.get_existing_user({"fid__exact":prop_val["id"]})
                        if user:
                            user.update_from_facebook(prop_val)
                        else:
                            logger.debug(u">>>>CRITICAL CANT UPDATED DUPLICATED USER %s" % prop_val["id"])
                self_prop = user
                model_changed = True
        return model_changed, self_prop

    # Sample of the Graph API payload consumed by update_from_facebook():
    '''
    {u'from':
        { u'id': u'711962332264123',
          u'name': u'Christian Desch\xeanes'
        },
        u'like_count': 0,
        u'can_remove': False,
        u'created_time': u'2015-06-13T03:30:36+0000',
        u'message': u'',
        u'id': u'10153103006244620_10153103887959620',
        u'user_likes': False
    }
    '''
    @dLogger.debug
    def update_from_facebook(self, facebook_model, status):
        """Copy one Graph API comment payload into this row.

        facebook_model -- dict for a single comment (see sample above)
        status -- FBPost the comment belongs to
        Returns True when at least one field changed (the row is saved then).
        """
        if debugging: dLogger.log("<FBComment: %s>::update_from_facebook()"%self.fid)
        model_changed = False
        # Model field name -> key in the Graph API payload.
        props_to_check = {
            u"fid":u"id",
            u"message":u"message",
            u"likes":u"like_count",
            u"user_likes":u"user_likes",
            u"ftype":u"type",
        }
        date_to_check = [u"created_time"]
        self.post = status
        if self.fid is None and "id" in facebook_model:
            self.fid = facebook_model["id"]
            model_changed = True
        for prop in props_to_check:
            if props_to_check[prop] in facebook_model and self.__dict__[prop] != facebook_model[props_to_check[prop]]:
                self.__dict__[prop] = facebook_model[props_to_check[prop]]
                model_changed = True
                if debugging: dLogger.log(" %s has been updated"%prop)
        for prop in date_to_check:
            # Guard added: payload may omit the timestamp (previously a
            # KeyError; FBPost.update_from_facebook already guards this way).
            if prop not in facebook_model:
                continue
            fb_val = facebook_model[prop]
            date_val = datetime.strptime(fb_val,'%Y-%m-%dT%H:%M:%S+0000')
            if self.__dict__[prop] != date_val:
                self.__dict__[prop] = date_val
                model_changed = True
        (changed, self_prop) = self.update_user_fk(self.ffrom, "from", facebook_model)
        if changed:
            self.ffrom = self_prop
            model_changed = True
        if model_changed:
            self.model_update_date = datetime.utcnow()
            self.error_on_update = False
            #logger.debug(u"FBComment exist and changed! %s" % (self.fid))
            try:
                self.save()
            except Exception:
                # Narrowed from a bare `except:`. Escape unicode the DB
                # refused and retry once.
                self.message = self.message.encode('unicode-escape')
                if debugging: dLogger.log(" Message needed unicode-escaping: '%s' (user: %s)"%(self.message, self.ffrom))
                self.save()
            if debugging: dLogger.log(" updated comment %s"%self)
        #else:
        #    logger.debug(u">>>>>>>>>>>>>>>>>>FBComment exist and unchanged! %s" % (self.fid))
        return model_changed
| |
import serial
import time
from ev import ev
import os
import ev3.ev3dev as ev3dev
import ev3dev as evCORE
import sys
#+ = close
# LCD used to echo received commands to the brick's screen.
screen = evCORE.Lcd()
# Best-effort sensor setup: keep running if a sensor is unplugged
# (later code that touches `color`/`button` will then raise NameError).
try:
    color = ev3dev.LegoSensor(port=4)
    color.mode = 'COL-COLOR'
except BaseException:
    pass
try:
    button = ev3dev.LegoSensor(port=3)
except BaseException: pass
def insideinit():
    """(Re)create the four motor objects A-D and zero their positions.

    Rebinds the module-level names A, B, C, D; `allm` is declared global
    here but assigned at module level after the first call.
    """
    global A, B, C, D, allm
    # (Removed unused local `letters = 'ABCD'`.)
    A = ev(port='A')
    B = ev(port='B')
    C = ev(port='C')
    D = ev(port='D')
    for motor in (A, B, C, D):
        motor.absolute = True
        motor.position = 0
insideinit()
# Earlier experiment: creating motors by name via eval (kept for reference).
#for a in letters:
#    eval('MOTORS_'+a+" = ev(port='"+a+"')")
#    eval('\tMOTORS_'+a+'.absolute = True')
#    eval('\tMOTORS_'+a+'.position = 0')
#MOTORS_A = ev(port='D')
#MOTORS_A.absolute = True
#MOTORS_A.position = 0
#AB = [MOTORS_A, MOTORS_B]
#CD = [MOTORS_C, MOTORS_D]
allm = [B, C, D, A]  # all motors, in the order used for bulk commands
#42-35
USE_SP = 3  # sample-window size used by direct() for stall detection
LUCK = D  # motor wiggled by push()
#color.mode = 'COL-COLOR'
N_COLORS = 6  # sliding-window length for color readings in navigate()
N_COLORS_STOP = 0.7  # fraction of the window that must match to stop
CLAW_MOTOR = C  # motor driving the claw
# Marker letter -> color-sensor code (presumably COL-COLOR values; verify).
m_mark = {"N":1, "M":5, "G":2, "P":4, "S":3}
def run(speed, motors):
    """Clear estop, reset, and start every motor in `motors` at `speed`."""
    for m in motors:
        m.write_value('estop', '0')
        m.write_value('reset', '1')
        m.run_forever(speed_sp=speed)
def push():
    """Wiggle the LUCK motor four times, alternating -90 / +90 degrees."""
    for step in range(4):
        # Even steps swing to -90, odd steps back to +90.
        angle = -90 if step % 2 == 0 else 90
        direct(angle, [LUCK], NNIUD=0.4)
        time.sleep(0.5)
def navigate(x, motors, speed):
    """Drive `motors` until the color sensor settles on marker `x`,
    operate the claw, push, then reverse until the button is pressed."""
    x = m_mark[x]  # translate marker letter to its color-sensor code
    run(speed, motors)
    colorarray = []  # sliding window of recent color readings
    while True:
        pp = color.value0
        colorarray.append(pp)
        if len(colorarray) < N_COLORS: continue
        del(colorarray[0])  # keep the window bounded
        # Stop once enough of the window matches the target color.
        if colorarray.count(x)/N_COLORS >= N_COLORS_STOP:
#            print('COLOR DONE')
            for motor in motors:
                motor.write_value('stop_mode', 'hold')
                motor.stop()
            # Grab with the claw, release drive, wiggle, then back up.
            direct(-80, [CLAW_MOTOR])
            run(0, [CLAW_MOTOR])
            push()
            run(-speed, motors)
            while not button.value0: pass  # busy-wait for the touch button
            for motor in motors: motor.stop()
            return
#        print('NO', colorarray)
def deviation_list(lst):
    """Return each element's absolute deviation from the mean of *lst*."""
    mean = sum(lst) / len(lst)
    return [abs(value - mean) for value in lst]
def direct(speed, motors, NNIUD=1):
    """Run `motors` at `speed` until each one stalls (position stops moving).

    NNIUD -- mean-deviation threshold below which a motor counts as stopped.
    NOTE: mutates `motors` in place (stalled entries are set to None).
    """
    hh = [[] for i in range(len(motors))]  # recent position samples per motor
    stopn = 0  # how many motors have been detected as stalled
#    print("DIRECT MOTORS", motors)
    for motor in motors:
#        print(motor.port)
        motor.run_forever(speed_sp=0)
        motor.write_value('estop', '0')
        motor.stop()
        motor.write_value('reset', '1')
        # NOTE(review): positional arg here vs speed_sp= above -- confirm
        # the ev motor API accepts both forms.
        motor.run_forever(speed)
    while True:
#        try:
#            if button.value0:
#                for motor in motors:
#                    if motor is not None: motor.write_value('stop_mode', 'hold')
#
#                return
#        except BaseException: pass
        for ind, motor in enumerate(motors):
            if motor is None: continue  # already stalled and held
            while True:
                # Retry transient sysfs read errors on .position.
                try:
                    hh[ind].append(motor.position)
                    break
                except BaseException as e: print('YO', e)
            if len(hh[ind]) > USE_SP:
                del(hh[ind][0])  # keep only the last USE_SP samples
                ans = deviation_list(hh[ind])
                # Position barely moving -> treat this motor as done.
                if sum(ans) / len(ans) < NNIUD:
                    motor.write_value('stop_mode', 'hold')
                    motors[ind] = None
                    stopn += 1
                    if stopn == len(motors): return
                    break
def readlineCR(port):
    """Read one byte at a time from `port` until a CR or an empty
    (timeout) read; return everything read, terminator included."""
    chars = []
    while True:
        ch = port.read().decode(encoding='ascii')
        chars.append(ch)
        if ch == '\r' or ch == '':
            return ''.join(chars)
#navigate("N", [A, B], 50)
# UART link to the controlling device on input port 1.
port = serial.Serial("/dev/tty_in1", baudrate=115200, timeout=3.0)
print('START LISTEN')
# Main dispatch loop: one CR-terminated, whitespace-normalised, upper-cased
# command per iteration. The green LED mirrors "message being processed".
while True:
    msg = " ".join(readlineCR(port).strip().upper().split())
    fl =open('/sys/class/leds/ev3:green:left/brightness', 'w+')# echo "0" > ./brightness
    fl.write('0')
    fl.close()
    if len(msg) == 0: continue
    fl =open('/sys/class/leds/ev3:green:left/brightness', 'w+')# echo "0" > ./brightness
    fl.write('255')
    fl.close()
    # Echo the command on the brick's LCD.
    screen.reset()
    screen.draw.text((0,0), msg)
    screen.update()
    print('RCV', msg)
    if msg == 'PING':
        port.write(bytes('OK\r', encoding='ascii'))
    # NOTE(review): eval() below executes text received over the serial
    # line -- only safe because the link is assumed trusted.
    if msg.startswith('FREE'):
#        print('FREE', msg)
#        os.system('sudo bash /home/mstop.sh')
        motor = eval(msg.split('_')[1])
        motor.write_value('estop', '1')
        #motor.stop()
        continue
    if msg.startswith('NAVIGATE'):
#        print('NAVIG', msg)
        # NAVIGATE_<marker>_<motor letters>_<speed>
        cmd = msg.split('_')
        if cmd[1] == 'NONE':
            push()
            continue
        q = []
        for i in cmd[2]: q.append(eval(i))
        navigate(cmd[1], q, int(cmd[3]) )
        port.write(bytes('OK\r', encoding='ascii'))
    if msg == 'INIT':
        insideinit()
        direct(0, allm.copy())
#        for motor in allm: motor.write_value('estop', '0')
#        print('INIT DONE')
        continue
    rcv = msg.split('_')
#    print(rcv)
    # MR<motor letters>_<speed>: run the listed motors until they stall.
    if rcv[0].startswith('MR'):
        XY = []
        for i in rcv[0][2:]:
            XY.append(eval(i))
        try:
            speed = -int(rcv[1])
        except BaseException as e:
            port.write(bytes('ER\r', encoding='ascii'))
            print('err 0', e)
            continue
        direct(speed, XY)
        port.write(bytes('OK\r', encoding='ascii'))
| |
"""pIDLy 0.2.7: IDL within Python.
Control ITT's IDL (Interactive Data Language) from within Python.
https://github.com/anthonyjsmith/pIDLy
http://pypi.python.org/pypi/pIDLy/
Requirements:
* Pexpect
* NumPy
Usage:
>>> import pidly
>>> idl = pidly.IDL()
>> print(idl.__doc__)
Consult the docstrings or README.txt in the source distribution for
further information.
Copyright (c) 2008-2017, Anthony Smith
anthonysmith80@gmail.com
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from __future__ import print_function
import sys
import re
import weakref
import atexit
import unittest
import tempfile
import os
import errno
from datetime import datetime
import numpy
import pexpect
from scipy.io import idl as idlio
now = datetime.now
__version__ = '0.2.6'
STR_DELIMITER = '!@#' # To distinguish items in an array of strings
try:
__IPYTHON__
_ipython = True
except NameError:
_ipython = False
# List of weak references to IDL sessions (primarily for IPython)
try:
weakrefs_to_pidly_sessions # Allow IDL to continue when module is reloaded
except NameError:
weakrefs_to_pidly_sessions = []
def close_all():
    """Close every still-alive IDL session tracked by this module."""
    for session_ref in weakrefs_to_pidly_sessions:
        session = session_ref()
        if session is not None:
            session.close()

# Clean exit in IPython
atexit.register(close_all)
class IDLInputOverflowError(Exception):
    """Raised when an expression is too long for IDL to receive."""
class IDL(pexpect.spawn):
"""pidly.IDL() : Launch IDL session within Python.
The IDL class inherits from pexpect.spawn. Consult pexpect
documentation for details of further methods.
Usage:
Initiate:
>>> import pidly
>>> idl = pidly.IDL()
Or:
idl = pidly.IDL('/path/to/idl')
Execute commands:
>>> idl('x = total([1, 1], /int)')
Retrieve values:
>>> print(idl.ev('x'))
2
Or (slightly slower):
>>> print(idl.x)
2
Evaluate expressions:
>>> print(idl.ev('x ^ 2'))
4
Use cache (IDL save) to handle large arrays:
>>> idl('x=[1,2,3,4,5,6]')
>>> print(idl.ev('x', use_cache=True))
[1 2 3 4 5 6]
Transfer a list of IDL variables, using cache:
>>> idl('y=[1,2,3,4,5,6]')
>>> xy = idl.ev_list(['x','y'], use_cache=True)
>>> print(sorted(xy.keys()))
['x', 'y']
>>> print(xy['x'])
[1 2 3 4 5 6]
Assign value from Python expression:
>>> idl.x = 2 + 2
>>> print(idl.x)
4
Or:
>>> idl('x', 2 + 2)
>>> print(idl.x)
4
Perform IDL function on Python expression(s):
>>> idl.func('reform', range(4), 2, 2)
array([[0, 1],
[2, 3]])
Or (slightly slower):
>>> idl.reform(range(4), 2, 2)
array([[0, 1],
[2, 3]])
With keywords (/L64 -> L64=True or L64=1)
>>> x = idl.histogram(range(4), binsize=3, L64=True)
>>> print(x)
[3 1]
>>> print(x.dtype)
int64
IDL procedure with Python argument(s):
>>> idl.pro('plot', range(10), range(10), xstyle=True, ystyle=True)
Interactive mode:
>> idl.interact()
IDL> print, x
4
IDL> ^D
>>>
Close:
>>> idl.close()
pIDLy supports the transfer of:
* ints, longs, ...
* floats, doubles, ...
* strings
* arrays of the above types, with arbitrary size and shape
* dictionaries <-> structures & lists of dicts <-> arrays of structures
but with certain limitations on transfer from Python to IDL
[NB if getting Syntax Errors when passing large arrays to IDL, try using
>> idl = pidly.IDL(long_delay=0.05)
default is 0.02.]
"""
    def __init__(self, *arguments, **kwargs):
        """Spawn an IDL session.

        pIDLy-specific keyword arguments (all optional) tune transfer
        limits and delays; everything else is passed straight to
        pexpect.spawn (e.g. the path to the idl executable).
        """
        # NB set .logfile_send = sys.stdout to monitor commands sent
        # The speed Py -> IDL is limited by the IDL input buffer,
        # which can overload. self.delaybeforesend is increased
        # before sending long arrays.
        self.short_delay = kwargs.pop('short_delay', 0)
        # As small as possible! (try 0.014)
        self.long_delay = kwargs.pop('long_delay', 0.02)
        # Temp dir for get_from_save
        self._cache_dir = kwargs.pop("cache_dir", None)
        self.use_cache = kwargs.pop("use_cache", False)
        # There are limits to how data may be passed to IDL:
        # max_sendline is the number of bytes that may be sent in one line
        # (Final, additional, byte is the end-of-line)
        self.max_sendline = kwargs.pop('max_sendline', 1023)
        # max_idl_code_area is the maximum number of bytes that may
        # be input as an IDL command. This limit may be reached by
        # splitting the line and sending as more than one send()
        # command
        # (Must be 1244 or greater, otherwise command in ev() fails)
        self.max_idl_code_area = kwargs.pop('max_idl_code_area', 2046) # 2048?
        # Number of array elements in IDL code area limited to 249 (don't ask)
        # (Was 331 on IDL 6.x and 7[?], but 249 on 7.1.1)
        self.max_n_elements_code_area = kwargs.pop('max_n_elements_code_area',
                                                   249)
        # Custom IDL prompt
        self.idl_prompt = kwargs.pop('idl_prompt', 'IDL> ')
        # Begin
        if len(arguments) == 0:
            arguments = ('idl',)
        if 'timeout' not in kwargs:
            kwargs['timeout'] = None
        pexpect.spawn.__init__(self, *arguments, **kwargs)
        self.delaybeforesend = self.short_delay  # Changed for long commands
        self._wait_for_prompt()
        # For clean exit in IPython and for close_all()
        weakrefs_to_pidly_sessions.append(weakref.ref(self))
        self.ready = True  # For __setattr__
    def close(self):
        """
        Close IDL session.

        Try to call IDL exit function - this way you may still be able
        to terminate IDL if it is called indirectly through a script.
        """
        if self.isalive():
            try:
                self.ex('exit', print_output=False, ret=True)
            except OSError:
                # The process may already have died; fall through to close().
                pass
        super(IDL, self).close()
    def ex(self, expression, assignment_value=None,
           print_output=True, ret=False):
        """Execute a command in IDL.

        If assignment_value is set (to a Python expression), this value is
        assigned to the IDL variable named in expression.

        print_output -- echo IDL's output live
        ret -- return IDL's output as a string
        """
        # Assign value to expression?
        if assignment_value is not None:
            expression = self._python_to_idl_input(assignment_value,
                                                   expression)
        # List of commands to execute?
        if hasattr(expression, '__iter__') and not isinstance(expression, (str, bytes, bytearray)):
            # Long assignments are broken down into lists: iterate then return
            # Or can receive a list of commands directly
            output = []
            print("Sending", len(expression), "commands to IDL:", end=' ')
            sys.stdout.flush()
            for exp in expression:
                sys.stdout.write(".")
                sys.stdout.flush()
                out = self.ex(exp, print_output=print_output, ret=ret)
                if out:
                    output.append(out)
                # NOTE(review): long delay after the first command, reset
                # once the batch finishes -- confirm this toggling is the
                # intended buffer-overrun protection.
                self.delaybeforesend = self.long_delay
            self.delaybeforesend = self.short_delay
            print("done.")
            if ret:
                return ''.join(output)
            else:
                return
        # Send expression to IDL
        if self._send_expression_to_idl(expression):  # Any bytes sent?
            # Wait for command to be completed, and optionally print output
            self.readline()  # First line of output = IDL command repeated
            idl_output = self._wait_for_prompt(print_output=print_output)
            # Return IDL output
            if ret and idl_output:
                return idl_output
def ev(self, expression, print_output=True, use_cache=None):
"""Return the value of an IDL expression as a numpy.ndarray."""
# Evaluate expression and store as an IDL variable
if expression != 'pidly_tmp':
self.ex('pidly_tmp=' + expression, print_output=print_output)
if use_cache is None:
use_cache = self.use_cache
if use_cache:
return self._get_from_save(['pidly_tmp'])['pidly_tmp']
else:
return self._get_from_print()
def ev_list(self, names, print_output=True, use_cache=None):
"""Return a dictionary containing values of IDL variables in list names."""
if not isinstance(names, (list, tuple)):
raise ValueError("input should be a list or tuple, not ", type(names))
if use_cache is None:
use_cache = self.use_cache
if use_cache:
return self._get_from_save(names)
else:
result = {}
for eachname in names:
result[eachname] = self.ev(eachname, print_output=print_output)
return result
    def interact(self, show_prompt=True, **kwargs):
        """Interactive IDL shell. Press ^D to return to Python."""
        if show_prompt:
            print(self.idl_prompt, end=' ')
            sys.stdout.flush()
        # Default escape character is Ctrl-D.
        if 'escape_character' not in kwargs:
            kwargs['escape_character'] = '\x04'
        pexpect.spawn.interact(self, **kwargs)
        if not _ipython:
            print("")
    # Append pexpect's own interact() docs (runs once, at class creation).
    interact.__doc__ += "\n\n " + pexpect.spawn.interact.__doc__
def variables(self):
"""Return list of names of defined IDL variables."""
# Retrieve output from IDL help command ('help' without 'output=...'
# prints one screen at a time, waiting for spacebar...)
self.ex('help, output=pidly_tmp')
help_output = self.ev('pidly_tmp', use_cache=False).tolist() # String array
variable_list = []
for line in help_output[1:]: # 1st line = "%..."
if line.startswith('Compiled'): # End of variable list
break
elif line and not line.startswith('%'):
variable_list.append(line.split()[0])
return variable_list
def func(self, name, *args, **kwargs):
"""Evaluate IDL function."""
try:
string_bits = []
for i, arg in enumerate(args):
string_bits.append(self._python_to_idl_input(arg))
for j, kwarg in enumerate(kwargs):
string_bits.append(
kwarg + '=' + self._python_to_idl_input(kwargs[kwarg]))
return self.ev(name + '(' + ','.join(string_bits) + ')', use_cache=False)
except IDLInputOverflowError:
string_bits = []
## arg_vals = args
## kwarg_vals = kwargs
## for arg in args:
## arg_vals.append(arg)
## for kwarg in kwargs:
## kwarg_vals.append(kwarg)
for i, arg in enumerate(args):
self.ex('pidly_tmp' + str(i), arg)
string_bits.append('pidly_tmp' + str(i))
for j, kwarg in enumerate(kwargs):
self.ex('pidly_tmp' + str(len(args) + j), kwargs[kwarg])
string_bits.append(
kwarg + '=' + 'pidly_tmp' + str(len(args) + j))
return self.ev(name + '(' + ','.join(string_bits) + ')', use_cache=False)
def pro(self, name, *args, **kwargs):
"""Execute IDL procedure."""
try:
string_bits = []
for i, arg in enumerate(args):
string_bits.append(self._python_to_idl_input(arg))
for j, kwarg in enumerate(kwargs):
string_bits.append(
kwarg + '=' + self._python_to_idl_input(kwargs[kwarg]))
return self.ex(name + ',' + ','.join(string_bits))
except IDLInputOverflowError:
string_bits = []
for i, arg in enumerate(args):
self.ex('pidly_tmp' + str(i), arg)
string_bits.append('pidly_tmp' + str(i))
for j, kwarg in enumerate(kwargs):
self.ex('pidly_tmp' + str(len(args) + j), kwargs[kwarg])
string_bits.append(
kwarg + '=' + 'pidly_tmp' + str(len(args) + j))
return self.ex(name + ',' + ','.join(string_bits))
# Special methods
# Calling the instance is the same as executing an IDL command.
__call__ = ex
    def __getattr__(self, name):
        """Get IDL attribute.

        idl.x: return the value of 'x' from IDL.
        idl.f(x,y,...): return the value of IDL f() of Python variables x,y,...
        """
        # idl.x
        if name.upper() in self.variables():  # Takes time!
            return self.ev(name)
        # idl.f(x,y,...)
        elif (not name.startswith('_')
              and name not in ['trait_names', 'pidly_tmp']):  # for IPython
            def idl_function(*args, **kwargs):
                return self.func(name, *args, **kwargs)
            return idl_function
        # NOTE(review): excluded names fall through and return None instead
        # of raising AttributeError -- this silences IPython/protocol probes
        # but also hides typos; confirm it is intentional before changing.
def __setattr__(self, name, value):
"""Set IDL attribute: idl.x = ..."""
if 'ready' in self.__dict__:
# __init__ has finished
if name in self.__dict__:
pexpect.spawn.__setattr__(self, name, value)
else:
self.ex(name, value)
else:
# __init__ in progress
pexpect.spawn.__setattr__(self, name, value)
# "PRIVATE" METHODS
# Sending to IDL
def _send_expression_to_idl(self, expression):
"""Send a string to IDL and return the no. of bytes sent (or False)."""
# Only method that sends anything to IDL
if len(expression) > self.max_sendline:
if len(expression) <= self.max_idl_code_area:
# Long line: need to send it in chunks
expression += '\n'
for i in range((len(expression) - 1)
// (self.max_sendline + 1) + 1):
self.send(expression[(self.max_sendline + 1) * i
: (self.max_sendline + 1) * (i + 1)])
self.delaybeforesend = self.long_delay
self.delaybeforesend = self.short_delay
return True
else:
raise IDLInputOverflowError("Expression too long for IDL to receive: cannot execute")
else:
if not self.isalive():
raise OSError("IDL session is not alive.")
return self.sendline(expression)
    def _python_to_idl_input(self, python_input, assign_to=None):
        """Take Python value and return string suitable for IDL assignment.

        For long input, returns a list of executable strings.
        assign_to -- optional IDL variable name to assign the value to.
        """
        if assign_to is not None:
            assign_to_str = assign_to + "="
        else:
            assign_to_str = ''
        if isinstance(python_input, str):
            # Strings need additional quotes
            idl_input = assign_to_str + "\'" + python_input + "\'"
        else:
            # Convert to numpy array
            py_in = numpy.array(python_input)
            # Display warning if conversion has changed the array values
            if ((not isinstance(python_input, numpy.ndarray))
                    and py_in.tolist() != python_input
                    and py_in.dtype.name[0:3] == 'str'):
                print("(!) Conversion to numpy.array has changed input from:", file=sys.stderr)
                print(python_input, file=sys.stderr)
                print("to:", file=sys.stderr)
                print(py_in.tolist(), file=sys.stderr)
            # String format (must have commas between elements)
            if py_in.dtype.name == 'float64':
                # Full double precision; IDL wants 'd' exponents, not 'e'.
                str_py_in = ''.join([
                    '[',
                    ','.join(str('%.17e' % s)
                             for s in py_in.flatten().tolist()),
                    ']']).replace(' ', '').replace('e', 'd')
            elif py_in.dtype.name[0:3] == 'str':
                str_py_in = str(py_in.flatten().tolist()).replace("', ", "',")
            else:
                str_py_in = str(py_in.flatten().tolist()).replace(" ", "")
            if len(py_in.shape) > 1:
                # IDL can't handle list concatenations with > 3 dimensions
                str_py_in_shape = ("reform(" + str_py_in + ","
                                   + str(py_in.shape[::-1])[1:-1] + ")")
            elif len(py_in.shape) > 0:
                str_py_in_shape = str_py_in
            else:
                str_py_in_shape = str_py_in[1:-1]  # Remove '[' and ']'
            # Dictionary? Convert to IDL structure
            if ((not hasattr(py_in.tolist(), 'keys')
                 and hasattr(py_in.tolist(), '__iter__')
                 and hasattr(py_in.tolist()[0], 'keys'))
                    or hasattr(py_in.tolist(), 'keys')):
                return self._python_to_idl_structure(python_input, assign_to)
            else:
                # Cast as appropriate type
                idl_input = self._idl_cast_from_dtype(py_in.dtype,
                                                      str_py_in_shape)
            idl_input = ''.join([assign_to_str, idl_input])
            if (len(idl_input) > self.max_idl_code_area
                    or len(py_in.flatten()) > self.max_n_elements_code_area):
                # String too long! Need to create list of shorter commands
                if assign_to is None:
                    raise IDLInputOverflowError("Expression too long for IDL to receive")
                else:
                    idl_input = self._split_idl_assignment(py_in, str_py_in,
                                                           assign_to)
        return idl_input
    def _split_idl_assignment(self, py_in, str_py_in, assign_to):
        """Take a very long numpy array and return a list of commands
        to execute in order to assign this value to an IDL variable.

        py_in -- the numpy array (for dtype/shape)
        str_py_in -- its bracketed, comma-separated string form
        assign_to -- IDL variable name to accumulate into
        """
        if assign_to is None:
            print("(!) No assign_to set.", file=sys.stderr)
        idl_input = []
        extend_string = ''
        # Each assignment string must be no longer than max_idl_code_area:
        # assign_to=[assign_to,<max_length>
        max_length = self.max_idl_code_area - 2 * len(assign_to) - 3
        # In addition, code area limited by number of elements of array
        max_n_elements = self.max_n_elements_code_area
        # Loop until string has been split up into manageable chunks
        array_string_remaining = str_py_in[1:]  # Remove '['
        while len(array_string_remaining) > 1:
            # Take the first max_n_elements elements (separated by
            # commas) of the first max_length characters of the string
            max_string = re.match('([^,]*[,\]]){,' + str(max_n_elements) + '}',
                                  array_string_remaining[:max_length]).group()
            # Remove final character (',' or ']') from max_string
            idl_input.append(assign_to + "=[" + extend_string +
                             max_string[:-1] + "]")
            # Not for the first time round
            extend_string = ''.join([assign_to, ","])
            # What's left?
            array_string_remaining = array_string_remaining[len(max_string):]
        if len(py_in.shape) > 1:
            # Convert data type and shape
            idl_input.append(assign_to + "=" + "reform(" +
                             self._idl_cast_from_dtype(py_in.dtype, assign_to)
                             + ", " + str(py_in.shape[::-1])[1:-1] + ")")
        else:
            # Convert data type
            idl_input.append(assign_to + "=" +
                             self._idl_cast_from_dtype(py_in.dtype, assign_to))
        return idl_input
    def _idl_cast_from_dtype(self, dtype, idl_str):
        """Take a NumPy dtype and return an expression to cast an IDL
        expression as appropriate type.

        Returns None (after a warning) for dtypes with no IDL mapping.
        """
        if dtype.name[0:3] == 'str':
            return idl_str
        # NaN and Inf
        idl_str = idl_str.replace('inf', '!values.f_infinity')
        idl_str = idl_str.replace('nan', '!values.f_nan')
        if dtype.name == 'bool':
            # eval() here only sees the internally generated literal above.
            return "byte(" + str(int(eval(idl_str))) + ")"
        elif dtype.name == 'uint8':
            return "byte(" + idl_str + ")"
        elif dtype.name == 'int16':
            return "fix(" + idl_str + ")"
        elif dtype.name == 'uint16':
            return "uint(" + idl_str + ")"
        elif dtype.name == 'int32':
            return "long(" + idl_str + ")"
        elif dtype.name == 'uint32':
            return "ulong(" + idl_str + ")"
        elif dtype.name == 'int64':
            # IDL 64-bit literals use an LL suffix.
            return "long64(" + idl_str.replace('L', 'LL') + ")"
        elif dtype.name == 'uint64':
            return "ulong64(" + idl_str.replace('L', 'LL') + ")"
        elif dtype.name == 'float8':  # Not a NumPy type?
            print("Warning: converting 8-bit to 32-bit float.", file=sys.stderr)
            return "float(" + idl_str + ")"
        elif dtype.name == 'float16':  # Not a NumPy type?
            print("Warning: converting 16-bit to 32-bit float.", file=sys.stderr)
            return "float(" + idl_str + ")"
        elif dtype.name == 'float32':
            return "float(" + idl_str + ")"
        elif dtype.name == 'float64':
            return "double(" + idl_str + ")"
        else:
            print("(!) Could not convert NumPy dtype", \
                  dtype.name, "to IDL.", file=sys.stderr)
            return
def _python_to_idl_structure(self, python_input, assign_to):
"""Given a Python dictionary, or a (simple, 1D) list of dictionaries,
return list of command(s) to assign IDL structure to assign_to."""
# Convert from numpy array if necessary
py_in = numpy.array(python_input).tolist()
try:
return self._python_to_idl_structure_short(py_in, assign_to)
except IDLInputOverflowError:
if assign_to is not None:
return self._python_to_idl_structure_long(py_in, assign_to)
else:
raise
def _python_to_idl_structure_short(self, py_in, assign_to):
"""Given a Python dictionary, or a (simple, 1D) list of dictionaries,
return single command to assign IDL structure to assign_to."""
if hasattr(py_in, 'keys'):
py_in_list = [py_in]
else:
py_in_list = py_in
idl_input_list = []
for row in py_in_list:
struct_fields = []
for key in row:
struct_fields.append(''.join(
[key, ':', self._python_to_idl_input(row[key])]))
idl_input_list.append('{' + ','.join(struct_fields) + '}')
if hasattr(py_in, 'keys'):
idl_input = idl_input_list[0]
else:
idl_input = '[' + ','.join(idl_input_list) + ']'
if assign_to is not None:
idl_input = assign_to + '=' + idl_input
if len(idl_input) > self.max_idl_code_area:
# String too long! Need to create list of shorter commands
raise IDLInputOverflowError("Expression too long for IDL to receive")
else:
return idl_input
    def _python_to_idl_structure_long(self, py_in, assign_to):
        """Given a Python dictionary, or a (simple, 1D) list of dictionaries,
        return a list of commands to assign IDL structure to assign_to.

        One command per row: the first creates the structure, later ones
        concatenate onto assign_to.
        """
        if hasattr(py_in, 'keys'):
            n_rows = 1
            py_in_row = py_in
        else:  # List of dictionaries
            n_rows = len(py_in)
            py_in_row = py_in[0]
        # Make one row
        struct_fields = []
        for key in py_in_row:
            struct_fields.append(''.join(
                [key, ':', self._python_to_idl_input(py_in_row[key])]))
        # Commands to execute
        idl_input = [assign_to + '={' + ','.join(struct_fields) + '}']
        if n_rows > 1:
            # Fill rows with data
            for row in py_in[1:]:
                struct_fields = []
                for key in row:
                    struct_fields.append(''.join(
                        [key, ':', self._python_to_idl_input(row[key])]))
                idl_input.append(assign_to + '=[' + assign_to + ',{'
                                 + ','.join(struct_fields) + '}]')
        return idl_input
    # Receiving from IDL

    def _get_from_save(self, names):
        """Get values for a list of names.

        Use save command in IDL to save arrays/structure into file and use
        readsav to read it into python objects. This will save a lot of time
        when handling large arrays.

        Raises NameError when a requested name is not defined in IDL.
        """
        # Ensure the cache directory exists (temp dir when unset).
        if self._cache_dir is None:
            self._cache_dir=tempfile.mkdtemp()
        else:
            if sys.version_info.major < 3:
                # Python 2 has no exist_ok; ignore only "already exists".
                try:
                    os.makedirs(self._cache_dir)
                except OSError as e:
                    if e.errno != errno.EEXIST:
                        raise
            else:
                os.makedirs(self._cache_dir, exist_ok=True)
        # Validate the requested names (pidly_tmp is always allowed).
        if names != ['pidly_tmp']:
            valuelist = self.variables()
            for eachname in names:
                if eachname.upper() not in valuelist:
                    raise NameError("name {} not in idl variable list".format(eachname))
        savePath = os.path.join(self._cache_dir, 'pidly.sav')
        todoStr = "save," + ",".join(names) + ",file='{}'".format(savePath)
        self.ex(todoStr)
        return idlio.readsav(savePath)
    def _get_from_print(self):
        """Get IDL's string representation of expression.

        Prints pidly_tmp from IDL with type-dependent formatting, then
        parses the captured text into a Python value.
        """
        # Special treatment of arrays of type string (type == 7)
        # Floats/doubles printed with appropriate (or too much) precision
        idl_output = self.ex(
            # Arrays of strings
            'if size(pidly_tmp,/type) eq 7 and n_elements(pidly_tmp) gt 1 '
            + 'then print,strjoin(reform(pidly_tmp,n_elements(pidly_tmp)),'
            + '\'' + STR_DELIMITER + '\') '
            # Structures
            + 'else if n_elements(pidly_tmp) gt 0 '
            + 'and size(pidly_tmp,/type) eq 8 then '
            + 'for i = 0, n_elements(pidly_tmp) - 1 do begin'
            + ' print, "{" & '
            + ' for j = 0, n_tags(pidly_tmp) - 1 do '
            + ' if size(pidly_tmp[i].(j),/type) eq 5 then '
            + ' print,reform(pidly_tmp[i].(j), '
            + ' n_elements(pidly_tmp[i].(j))), format="(E26.18)" '
            + ' else if size(pidly_tmp[i].(j),/type) eq 4 then '
            + ' print,reform(pidly_tmp[i].(j),'
            + ' n_elements(pidly_tmp[i].(j))), format="(E19.11)" '
            + ' else if size(pidly_tmp[i].(j),/type) eq 7 '
            + ' and n_elements(pidly_tmp[i].(j)) gt 1 then '
            + ' print,strjoin(reform(pidly_tmp[i].(j),'
            + ' n_elements(pidly_tmp[i].(j))),'
            + ' \'' + STR_DELIMITER + '\') '
            + ' else print, reform(pidly_tmp[i].(j),'
            + ' n_elements(pidly_tmp[i].(j))) & '
            + ' print, "}" &'
            + ' endfor '
            # Doubles
            + 'else if n_elements(pidly_tmp) gt 0 '
            + 'and size(pidly_tmp,/type) eq 5 then print,reform(pidly_tmp,'
            + 'n_elements(pidly_tmp)), format="(E26.18)" '
            # Floats
            + 'else if n_elements(pidly_tmp) gt 0 '
            + 'and size(pidly_tmp,/type) eq 4 then print,reform(pidly_tmp,'
            + 'n_elements(pidly_tmp)), format="(E19.11)" '
            # Other
            + 'else if n_elements(pidly_tmp) gt 0 then print,reform(pidly_tmp,'
            + 'n_elements(pidly_tmp))',
            print_output=False, ret=True)
        # Parse this string into a python variable
        if idl_output:
            # Ask IDL for the type code and dimensions of pidly_tmp.
            idl_type_dims = self.ex(
                'print,size(pidly_tmp,/type) & '
                + 'print,size(pidly_tmp,/dimensions)',
                print_output=False, ret=True).splitlines()
            idl_type = int(idl_type_dims[0])
            idl_dims = numpy.array(
                ''.join(idl_type_dims[1:]).split()).astype(int)
            if idl_type == 8:  # Structure
                # Field names/types come from IDL's help output.
                self.ex('help,pidly_tmp,/struct,output=pidly_tmp_2',
                        print_output=False)
                idl_struct = self.ev('pidly_tmp_2', use_cache=False).tolist()
                #idl_output = ''.join(['{', idl_output, '}'])
                return self._idl_struct_to_python(idl_output, idl_struct)
            else:
                return self._idl_output_to_python(idl_output, idl_type,
                                                  idl_dims)
    def _wait_for_prompt(self, print_output=False):
        """Read IDL output until IDL prompt displayed and return.

        Accumulates IDL's output line by line until the IDL prompt is
        seen, optionally echoing each line live.  If IDL reports a stop
        or an execution halt, the collected output is printed and an
        interactive session is opened so the user can inspect IDL.

        Parameters
        ----------
        print_output : bool
            If True, print each line of IDL output as it arrives.

        Returns
        -------
        str
            All output lines received before the prompt, joined by '\\n'.
        """
        index = 1
        output_lines = []
        stop = False   # IDL hit a breakpoint / STOP statement
        halt = False   # IDL aborted execution with an error
        while index == 1:  # keep reading while we are still getting output lines
            try:
                # 0: waiting for input, 1: output received, 2: IDL exited
                index = self.expect([self.idl_prompt, '\n', pexpect.EOF])
            except KeyboardInterrupt:
                # Hand control to the user rather than killing IDL.
                print("\npIDLy: KeyboardInterrupt")
                if not _ipython:
                    # IPython echoes pending output itself; otherwise show it.
                    print(self.before)
                sys.stdout.flush()
                self.interact(show_prompt=False)
                break
            # pexpect yields bytes on Python 3, str on Python 2.
            if sys.version_info.major < 3:
                new_line = self.before.replace('\r', '')
            else:
                new_line = self.before.decode().replace('\r', '')
            # Detect IDL's own error/stop banners in the stream.
            if new_line.startswith('% Stop encountered:'):
                stop = True
            if new_line.startswith('% Execution halted at:'):
                halt = True
            if new_line:
                output_lines.append(new_line)
                if print_output:
                    print(new_line)  # Live output while waiting for prompt
        if halt or stop:
            if not print_output:  # print output anyway
                print('\n'.join(output_lines))
            # Drop into the IDL prompt so the user can debug the stop/halt.
            self.interact()
        return '\n'.join(output_lines)
def _idl_output_to_python(self, idl_output, idl_type, idl_dims):
"""Take output from IDL print statement and return value."""
# Find Python dtype and shape
dtype = self._dtype_from_idl_type(idl_type) # = None for string
shape = self._shape_from_idl_dims(idl_dims)
# Split the output into separate items
if idl_type == 7:
value = idl_output.split(STR_DELIMITER)
else:
value = idl_output.split()
if value:
if idl_type == 7: # String
if shape == ():
# Concatenate string
value = ' '.join(value)
# Convert to numpy.array of appropriate type
if dtype is None:
value = numpy.array(value)
else:
value = numpy.array(value).astype(dtype)
# Reshape array
if numpy.product(shape) != value.size:
print("(!) Could not reshape array.", file=sys.stderr)
else:
value = value.reshape(shape)
return value
def _dtype_from_idl_type(self, idl_type):
"""Convert IDL type to numpy dtype."""
if idl_type is not None:
python_idl_types = [
None, 'uint8', 'int16', 'int32', 'float32', 'float64',
None, None, None, None, None,
None, 'uint16', 'uint32', 'int64', 'uint64']
dtype = python_idl_types[idl_type]
if dtype is None and idl_type != 7:
print("(!) Could not convert IDL type " \
+ str(idl_type) + " to Python.", file=sys.stderr)
else:
dtype = None
return dtype
def _shape_from_idl_dims(self, idl_dims):
"""Convert IDL dimensions to numpy shape."""
# Dimensions run the opposite way, Python vs IDL
shape = numpy.array(idl_dims[::-1]).tolist()
if shape == [0]:
shape = []
return tuple(shape)
def _idl_struct_to_python(self, idl_output, idl_struct):
"""Take print output of IDL structure and return Python dictionary.
No spaces allowed in field names.
"""
# Create meta-dictionary
dict_def = []
j = 1 # Omit first line
for i in range(1, int(idl_struct[0].split()[3]) + 1): # Number of tags
# For each field, store (name, dtype, shape) in the meta-dictionary
idl_struct_split = idl_struct[j].replace(', ',',').split()
name = idl_struct_split[0]
try:
idl_type = self._idl_type_from_name(idl_struct_split[1])
idl_dims = self._dims_from_struct_help(idl_struct_split[2])
except IndexError: # Some descriptions span two lines
j += 1
idl_type = self._idl_type_from_name(idl_struct_split[0])
idl_dims = self._dims_from_struct_help(idl_struct_split[1])
dict_def.append((name, idl_type, idl_dims))
j += 1
dict_list = []
idl_output = ''.join(idl_output)
rows = idl_output[2:-1].split('\n}\n{\n') # Remove {\n and } at ends
for row in rows: # Each structure in array of structures
# Create a dictionary for each row
items = row.splitlines()
dict_row = {}
for name, idl_type, idl_dims in dict_def:
idl_out_list = []
if idl_type == 7: # String
idl_out = items.pop(0)
else:
line_items = items.pop(0).split()
for i in range(max(numpy.product(idl_dims), 1)): # No. values
try:
idl_out_list.append(line_items.pop(0))
except IndexError:
line_items = items.pop(0).split()
idl_out_list.append(line_items.pop(0))
idl_out = ' '.join(idl_out_list)
dict_row[name.lower()] = self._idl_output_to_python(
idl_out, idl_type, idl_dims)
dict_list.append(dict_row)
if len(dict_list) == 1:
return dict_list[0]
else:
return dict_list
def _idl_type_from_name(self, type):
"""Return IDL type code, given type name."""
if type == 'BYTE':
return 1
elif type == 'INT':
return 2
elif type == 'LONG':
return 3
elif type == 'FLOAT':
return 4
elif type == 'DOUBLE':
return 5
elif type == 'COMPLEX':
return 6
elif type == 'STRING':
return 7
elif type == 'STRUCT':
return 8
elif type == 'DCOMPLEX':
return 9
elif type == 'POINTER':
return 10
elif type == 'OBJREF':
return 11
elif type == 'UINT':
return 12
elif type == 'ULONG':
return 13
elif type == 'LONG64':
return 14
elif type == 'ULONG64':
return 15
def _dims_from_struct_help(self, struct_help):
"""Return IDL dims given description from structure."""
try:
dims = re.search('(?<=Array\[).*\]', struct_help).group()[:-1]
idl_dims = numpy.array(dims.split(',')).astype(int)
return idl_dims
except AttributeError:
return [0]
class TestPidly(unittest.TestCase):
    """Unit tests for pIDLy.

    Every test round-trips data through a live IDL subprocess, so these
    tests require a working IDL installation.  Each test records
    start/mid/end timestamps; tearDown prints the resulting
    (Python -> IDL) / (IDL -> Python) timings.
    """

    def setUp(self):
        # Allow an alternative IDL executable on the command line when run
        # directly as test_pidly.py.
        if len(sys.argv) > 1 and sys.argv[0].endswith('test_pidly.py'):
            self.idl = IDL(sys.argv[1])
        else:
            self.idl = IDL()
        # Timing markers filled in by each test and reported by tearDown.
        self.start_time = None
        self.mid_time = None
        self.end_time = None

    def tearDown(self):
        def t(time_delta):
            # timedelta -> seconds as a float
            return time_delta.seconds + time_delta.microseconds / 1000000.
        self.idl.close()
        try:
            # Both intervals timed: show send-time / receive-time.
            print("%0.5ss/%0.5ss " % (t(self.mid_time - self.start_time),
                                      t(self.end_time - self.mid_time)), end=' ')
            sys.stdout.flush()
        except TypeError:
            try:
                # Only a single interval was timed.
                print("%0.5ss " % t(self.end_time - self.start_time), end=' ')
                sys.stdout.flush()
            except TypeError:
                # Test recorded no timings at all.
                pass

    def sendAndReceive(self, x):
        # Round-trip helper: assign x to IDL variable 'x', read it back.
        self.start_time = now()
        self.idl.x = x
        self.mid_time = now()
        y = self.idl.ev('x')
        #y = self.idl.x  # Twice as slow!
        self.end_time = now()
        return y

    def test_idl_dead(self):
        # Using a closed session must raise OSError.
        self.idl.close()
        with self.assertRaises(OSError):
            self.idl('print, 1')

    def test_longest_line(self):
        # Fill a sendline exactly to the maximum length IDL accepts.
        s = ["x='"]
        for i in range(self.idl.max_sendline - 4):
            s.append('a')
        s.append("'")
        x = ''.join(s)
        self.start_time = now()
        self.idl.sendline(x)
        self.idl.expect('IDL> ')
        self.mid_time = now()
        y = self.idl.x
        self.end_time = now()
        self.assertEqual(y, x[3:-1])

    def test_longest_string(self):
        # Largest string assignment that fits in the IDL code area.
        n = self.idl.max_idl_code_area - 10
        x = ''.join(["x='"] + ["a" for i in range(n)] + ["'"])
        self.start_time = now()
        self.idl(x)
        self.mid_time = now()
        y = self.idl.x
        self.end_time = now()
        self.assertEqual(y, x[3:-1])

    def test_longest_string_overflow(self):
        # One character over the code-area limit must raise.
        s = ["x='"]
        for i in range(self.idl.max_idl_code_area - 3):
            s.append('a')
        s.append("'")
        x = ''.join(s)
        self.assertRaises(IDLInputOverflowError, self.idl, x)

    def test_20_function_calls(self):
        # Attribute-style function calls: idl.sin(...)
        x = numpy.random.random(20)
        y = numpy.zeros(20)
        self.start_time = now()
        for i in range(20):
            y[i] = self.idl.sin(x[i])
        self.end_time = now()
        self.assertEqual(y.tolist(), numpy.sin(x).tolist())

    def test_20_function_calls_explicit(self):
        # Explicit func() interface.
        x = numpy.random.random(20)
        y = numpy.zeros(20)
        self.start_time = now()
        for i in range(20):
            y[i] = self.idl.func('sin', x[i])
        self.end_time = now()
        self.assertEqual(y.tolist(), numpy.sin(x).tolist())

    def test_20_function_calls_really_explicit(self):
        # Raw ev() of an IDL expression.
        x = numpy.random.random()
        self.idl.x = x
        self.start_time = now()
        for i in range(20):
            y = self.idl.ev('sin(x)')
        self.end_time = now()
        self.assertEqual(y, numpy.sin(x))

    def test_long_function_call(self):
        x = numpy.random.random(1000)
        self.start_time = now()
        y = self.idl.total(x)
        self.end_time = now()
        self.assertEqual(y, sum(x))

    def test_long_function_dicts(self):
        x = [{'a':numpy.random.random()} for i in range(100)]
        self.start_time = now()
        y = self.idl.n_elements(x)
        self.end_time = now()
        self.assertEqual(y, len(x))

    def test_longish_function_call(self):
        # Total length raises IDLInputOverflowError, but each arg is short
        x = numpy.random.random(84)
        self.start_time = now()
        y = self.idl.total(x, double=True)
        self.end_time = now()
        self.assertEqual(y, sum(x))

    # --- scalar round-trips ------------------------------------------------

    def test_single_integer(self):
        x = 2
        y = self.sendAndReceive(x)
        self.assertEqual(y, x)
        self.assertEqual(numpy.array(x).dtype, y.dtype)
        self.assertEqual(numpy.array(x).shape, y.shape)

    def test_single_element_list_int(self):
        x = [2]
        y = self.sendAndReceive(x)
        self.assertEqual(y.tolist(), x)
        self.assertEqual(numpy.array(x).dtype, y.dtype)
        self.assertEqual(numpy.array(x).shape, y.shape)

    def test_single_float(self):
        x = 2.123
        y = self.sendAndReceive(x)
        self.assertEqual(y, x)
        self.assertEqual(numpy.array(x).dtype, y.dtype)
        self.assertEqual(numpy.array(x).shape, y.shape)

    def test_single_float32(self):
        x = numpy.array(2.123, dtype='float32')
        y = self.sendAndReceive(x)
        self.assertEqual(y, x)
        self.assertEqual(numpy.array(x).shape, y.shape)
        self.assertEqual(numpy.array(x).dtype, y.dtype)

    def test_huge_double(self):
        x = -1e100
        y = self.sendAndReceive(x)
        self.assertEqual(y, x)
        self.assertEqual(numpy.array(x).shape, y.shape)
        self.assertEqual(numpy.array(x).dtype, y.dtype)

    # --- special float values ----------------------------------------------

    def test_infinity(self):
        x = numpy.inf
        y = self.sendAndReceive(x)
        self.assertEqual(y, x)
        self.assertTrue(numpy.isinf(x))
        self.assertTrue(numpy.isinf(y))
        self.assertEqual(numpy.array(x).shape, y.shape)
        self.assertEqual(numpy.array(x).dtype, y.dtype)

    def test_infinity_neg(self):
        x = -numpy.inf
        y = self.sendAndReceive(x)
        self.assertEqual(y, x)
        self.assertTrue(numpy.isneginf(x))
        self.assertTrue(numpy.isneginf(y))
        self.assertEqual(numpy.array(x).shape, y.shape)
        self.assertEqual(numpy.array(x).dtype, y.dtype)

    def test_nan(self):
        x = numpy.nan
        y = self.sendAndReceive(x)
        # NB NaN != NaN
        self.assertTrue(numpy.isnan(x))
        self.assertTrue(numpy.isnan(y))
        self.assertEqual(numpy.array(x).shape, y.shape)
        self.assertEqual(numpy.array(x).dtype, y.dtype)

    def test_inf_nan_array(self):
        x = [1.2, numpy.nan, numpy.inf, -numpy.inf, 3, numpy.nan, 4]
        y = self.sendAndReceive(x)
        # NB NaN != NaN
        self.assertTrue(all(numpy.isnan([x[1], x[5]])))
        self.assertTrue(all(numpy.isnan([y[1], y[5]])))
        self.assertEqual(x[0::2], y.tolist()[0::2])
        self.assertEqual(x[3], y[3])
        self.assertEqual(numpy.array(x).shape, y.shape)
        self.assertEqual(numpy.array(x).dtype, y.dtype)

    # --- strings -----------------------------------------------------------

    def test_single_string(self):
        x = 'hello'
        y = self.sendAndReceive(x)
        self.assertEqual(y, x)
        self.assertEqual(numpy.array(x).dtype, y.dtype)
        self.assertEqual(numpy.array(x).shape, y.shape)

    def test_multi_word_string(self):
        x = 'hello there'
        y = self.sendAndReceive(x)
        self.assertEqual(y, x)
        self.assertEqual(numpy.array(x).dtype, y.dtype)
        self.assertEqual(numpy.array(x).shape, y.shape)

    def test_list_of_strings(self):
        # Strings with embedded/leading/trailing spaces must survive.
        x = [' 5 2 5k 2', '4 33 55 1 ', ' 4 ', '2', ' 3 2']
        y = self.sendAndReceive(x)
        self.assertEqual(y.tolist(), x)
        self.assertEqual(numpy.array(x).dtype, y.dtype)
        self.assertEqual(numpy.array(x).shape, y.shape)

    def test_3d_list_of_strings(self):
        x = [[['b', ' d s '], ['ff ', 's d ewew']],
             [['', 'f'], ['gs', 'a']]]
        y = self.sendAndReceive(x)
        self.assertEqual(y.tolist(), x)
        self.assertEqual(numpy.array(x).dtype, y.dtype)
        self.assertEqual(numpy.array(x).shape, y.shape)

    # --- integer arrays ----------------------------------------------------

    def test_long_integer(self):
        x = 25525252525525
        y = self.sendAndReceive(x)
        self.assertEqual(y, x)
        self.assertEqual(numpy.array(x).dtype, y.dtype)
        self.assertEqual(numpy.array(x).shape, y.shape)

    def test_int_array(self):
        x = [1,2,4,2555]
        y = self.sendAndReceive(x)
        self.assertEqual(y.tolist(), x)
        self.assertEqual(numpy.array(x).dtype, y.dtype)
        self.assertEqual(numpy.array(x).shape, y.shape)

    def test_long_int_array(self):
        x = [1,2,3,25525252525525,23, 5, 6, 5, 2, 5, 7, 8, 3, 5]
        y = self.sendAndReceive(x)
        self.assertEqual(y.tolist(), x)
        self.assertEqual(numpy.array(x).dtype, y.dtype)
        self.assertEqual(numpy.array(x).shape, y.shape)

    def test_2d_int_array(self):
        x = [[1,2,3],[3,4,5]]
        y = self.sendAndReceive(x)
        self.assertEqual(y.tolist(), x)
        self.assertEqual(numpy.array(x).dtype, y.dtype)
        self.assertEqual(numpy.array(x).shape, y.shape)

    def test_3d_int_array(self):
        x = [[[ 1, 2, 3],[ 4, 5, 6]],[[ 7, 8, 9],[10,11,12]],
             [[13,14,15],[16,17,18]],[[19,20,21],[22,23,24]]]
        y = self.sendAndReceive(x)
        self.assertEqual(y.tolist(), x)
        self.assertEqual(numpy.array(x).dtype, y.dtype)
        self.assertEqual(numpy.array(x).shape, y.shape)

    def test_mixed_array_warning(self):
        # Mixed types coerce to a common dtype (numpy semantics).
        x = [22,23.,'24']
        y = self.sendAndReceive(x)
        self.assertEqual(y.tolist(), numpy.array(x).tolist())
        self.assertEqual(numpy.array(x).shape, y.shape)

    # --- dictionaries / IDL structures --------------------------------------

    def test_simple_dictionary(self):
        x = dict(a=2)
        y = self.sendAndReceive(x)
        self.assertEqual(y, x)
        self.assertEqual(numpy.array(x['a']).dtype, y['a'].dtype)
        self.assertEqual(numpy.array(x).shape, numpy.array(y).shape)

    def test_3_element_dict(self):
        x = {'a':'a h ', 'b':1e7, 'c':0.7}
        y = self.sendAndReceive(x)
        self.assertEqual(y, x)
        for key in x:
            self.assertEqual(numpy.array(x[key]).dtype, y[key].dtype)
        self.assertEqual(numpy.array(x).shape, numpy.array(y).shape)

    def test_dict_string_arrays(self):
        x = {'a': numpy.arange(2*5*1).reshape(2,5,1),
             'b': [[['b', ' d s '], ['ff ', 's d ewew']],
                   [['', 'f'], ['gs', 'a']]],
             'c': 5, 'd': [1, 5, 2.3]}
        y = self.sendAndReceive(x)
        self.assertEqual(sorted(x.keys()), sorted(y.keys()))
        self.assertEqual([y[key].tolist() for key in y],
                         [numpy.array(x[key]).tolist() for key in y])
        self.assertEqual([y[key].dtype for key in y],
                         [numpy.array(x[key]).dtype for key in y])
        self.assertEqual([y[key].shape for key in y],
                         [numpy.array(x[key]).shape for key in y])

    def test_3_3_element_dicts(self):
        # Array of structures round-trips as a list of dicts.
        x = [{'a':'ah', 'b':1000, 'c':0.7}, {'a':'be', 'b':8, 'c':-6.3},
             {'a':'x', 'b':0, 'c':81.}]
        y = self.sendAndReceive(x)
        self.assertEqual(y, x)
        for i, d in enumerate(x):
            for key in d:
                self.assertEqual(numpy.array(d[key]).dtype,
                                 y[i][key].dtype)
        self.assertEqual(numpy.array(x).shape, numpy.array(y).shape)

    def test_100_dicts_float32_double_string(self):
        x = [{'a':numpy.random.random(2).astype('float32'),
              'b':numpy.random.random(1),
              'c':(numpy.random.random(1)*100).astype('str')}
             for i in range(100)]
        y = self.sendAndReceive(x)
        for i, d in enumerate(x):
            self.assertEqual([y[i][key].tolist() for key in y[i]],
                             [d[key].tolist() for key in d])
        for i, d in enumerate(x):
            for key in d:
                self.assertEqual(d[key][0].dtype,
                                 y[i][key][0].dtype)
        self.assertEqual(numpy.array(x).shape, numpy.array(y).shape)

    # --- high-dimensional and large arrays ----------------------------------

    def test_4d_int_array(self):
        x = numpy.arange(2*3*4*5).reshape(2,3,4,5)
        y = self.sendAndReceive(x)
        self.assertEqual(y.tolist(), x.tolist())
        self.assertEqual(numpy.array(x).dtype, y.dtype)
        self.assertEqual(numpy.array(x).shape, y.shape)

    def test_6d_int_array_tests_max_n_elements_code_area(self):
        x = numpy.arange(2*3*4*5*6*7).reshape(2,3,4,5,6,7)
        y = self.sendAndReceive(x)
        self.assertEqual(y.tolist(), x.tolist())
        self.assertEqual(numpy.array(x).dtype, y.dtype)
        self.assertEqual(numpy.array(x).shape, y.shape)

    def test_8d_int_array(self):
        x = numpy.arange(2*3*1*1*4*5*6*7).reshape(2,3,1,1,4,5,6,7)
        y = self.sendAndReceive(x)
        self.assertEqual(y.tolist(), x.tolist())
        self.assertEqual(numpy.array(x).dtype, y.dtype)
        self.assertEqual(numpy.array(x).shape, y.shape)

    def test_50_doubles(self):
        x = numpy.random.random(50)
        y = self.sendAndReceive(x)
        self.assertEqual(y.tolist(), x.tolist())
        self.assertEqual(numpy.array(x).dtype, y.dtype)
        self.assertEqual(numpy.array(x).shape, y.shape)

    def test_50_float32(self):
        x = numpy.random.random(50).astype('float32')
        y = self.sendAndReceive(x)
        self.assertEqual(y.tolist(), x.tolist())
        self.assertEqual(numpy.array(x).dtype, y.dtype)
        self.assertEqual(numpy.array(x).shape, y.shape)

    def test_50_doubles_1e30(self):
        # Large magnitudes exercise the E-format precision used for doubles.
        x = numpy.random.random(50) * 1e30
        y = self.sendAndReceive(x)
        self.assertEqual(y.tolist(), x.tolist())
        self.assertEqual(numpy.array(x).dtype, y.dtype)
        self.assertEqual(numpy.array(x).shape, y.shape)

    def test_50_float32_1e30(self):
        x = numpy.random.random(50).astype('float32') * 1e30
        y = self.sendAndReceive(x)
        self.assertEqual(y.tolist(), x.tolist())
        self.assertEqual(numpy.array(x).dtype, y.dtype)
        self.assertEqual(numpy.array(x).shape, y.shape)

    # --- performance / caching ----------------------------------------------

    def test_speed_20000_doubles(self):
        x = numpy.random.random(20000)
        y = self.sendAndReceive(x)
        # Don't print all 20000 floats if fails!
        self.assertTrue(y.tolist() == x.tolist())
        self.assertEqual(x.dtype, y.dtype)
        self.assertEqual(y.shape, x.shape)

    def test_speed_5000_doubles_one_by_one(self):
        n = 5000
        x = numpy.random.random(n)
        self.start_time = now()
        self.idl('x = dblarr(' + str(n) + ')')
        for i in range(n):
            self.idl('x[' + str(i) + ']', x[i])
            if (i + 1) % 100 == 0:
                # Progress indicator every 100 assignments.
                print((i + 1), end=' ')
                sys.stdout.flush()
        self.mid_time = now()
        y = self.idl.x
        self.end_time = now()
        self.assertTrue(y.tolist() == x.tolist())
        self.assertEqual(y.dtype, x.dtype)
        self.assertEqual(y.shape, x.shape)

    def test_ev_with_cache(self):
        n = 5000
        seed = 1
        self.idl('y = randomu({}, {})'.format(seed, n))
        self.start_time = now()
        y_from_ev = self.idl.y
        self.mid_time = now()
        y_from_ev_cache = self.idl.ev('y', use_cache=True)
        self.end_time = now()
        self.assertEqual(y_from_ev.shape, (n,))
        self.assertEqual(y_from_ev_cache.shape, (n,))

    def test_ev_list(self):
        n = 5000
        seed = 1
        self.idl('x = randomu({}, {})'.format(seed, n))
        self.idl('y = randomu({}, {})'.format(seed, n))
        self.start_time = now()
        x_from_ev = self.idl.x
        y_from_ev = self.idl.y
        self.mid_time = now()
        xy_from_ev_list = self.idl.ev_list(['x', 'y'])
        self.end_time = now()
        # Don't print all values if fails!
        self.assertTrue(x_from_ev.tolist() == xy_from_ev_list['x'].tolist())
        self.assertTrue(y_from_ev.tolist() == xy_from_ev_list['y'].tolist())
        self.assertEqual(xy_from_ev_list['x'].shape, (n,))
        self.assertEqual(xy_from_ev_list['y'].shape, (n,))

    def test_ev_list_with_cache(self):
        n = 5000
        seed = 1
        self.idl('x = randomu({}, {})'.format(seed, n))
        self.idl('y = randomu({}, {})'.format(seed, n))
        self.start_time = now()
        xy_from_ev_list = self.idl.ev_list(['x','y'])
        self.mid_time = now()
        self.idl.use_cache = True
        xy_from_ev_list_cache = self.idl.ev_list(['x','y'])
        self.end_time = now()
        # Don't print all values if fails!
        self.assertTrue(xy_from_ev_list_cache['x'].tolist() == xy_from_ev_list['x'].tolist())
        self.assertTrue(xy_from_ev_list_cache['y'].tolist() == xy_from_ev_list['y'].tolist())
        self.assertEqual(xy_from_ev_list['x'].shape, (n,))
        self.assertEqual(xy_from_ev_list['y'].shape, (n,))
def test():
    """Run full tests on pIDLy.

    Starts an IDL session (optionally using the executable named as the
    first command-line argument), prints version information, then runs
    the TestPidly suite plus the module's doctests.
    """
    # Use python pidly.py or python3 pidly.py.
    if len(sys.argv) > 1 and sys.argv[0].endswith('pidly.py'):
        idl = IDL(sys.argv[1])
    else:
        idl = IDL()
    print("pIDLy", __version__ + ": running full tests.")
    print("IDL", end=' ')
    idl('print,!VERSION')
    print("pexpect", pexpect.__version__)
    print("Showing (Python -> IDL time) / (IDL -> Python time).\n")
    # Combine the unit tests with the module's doctest examples.
    import doctest
    import pidly
    suite = unittest.TestLoader().loadTestsFromTestCase(TestPidly)
    suite.addTest(doctest.DocTestSuite(pidly))
    unittest.TextTestRunner(verbosity=2).run(suite)
# Run the full pIDLy test suite when executed as a script.
if __name__ == "__main__":
    test()
| |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2018-11-06 05:24
from __future__ import unicode_literals
import api.models
import custom_storages
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial auto-generated migration for the api app.

    Creates the core launch-tracking tables (Agency, Events, Launch,
    Launcher, LauncherConfig, Location, Mission, Orbiter, Pad, Payload,
    Rocket, stages, landings and URL tables) and wires up their foreign
    keys.  Generated by Django 1.11.15 — do not hand-edit field
    definitions; create a new migration instead.
    """

    initial = True

    dependencies = [
        ('configurations', '0004_firststagetype'),
    ]

    operations = [
        migrations.CreateModel(
            name='Agency',
            fields=[
                ('id', models.IntegerField(primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=200)),
                ('featured', models.BooleanField(default=False)),
                ('country_code', models.CharField(blank=True, default='', max_length=255)),
                ('abbrev', models.CharField(blank=True, default='', max_length=255)),
                ('type', models.CharField(blank=True, max_length=255, null=True)),
                ('info_url', models.URLField(blank=True, null=True)),
                ('wiki_url', models.URLField(blank=True, null=True)),
                ('description', models.CharField(blank=True, default=None, max_length=2048, null=True)),
                ('launchers', models.CharField(blank=True, default='', max_length=500)),
                ('orbiters', models.CharField(blank=True, default='', max_length=500)),
                ('administrator', models.CharField(blank=True, default=None, max_length=200, null=True)),
                ('founding_year', models.CharField(blank=True, default=None, max_length=20, null=True)),
                ('legacy_image_url', models.URLField(blank=True, default=None, null=True)),
                ('legacy_nation_url', models.URLField(blank=True, default=None, null=True)),
                ('image_url', models.FileField(blank=True, default=None, null=True, storage=custom_storages.AgencyImageStorage(), upload_to=api.models.image_path)),
                ('logo_url', models.FileField(blank=True, default=None, null=True, storage=custom_storages.LogoStorage(), upload_to=api.models.logo_path)),
                ('nation_url', models.FileField(blank=True, default=None, null=True, storage=custom_storages.AgencyNationStorage(), upload_to=api.models.nation_path)),
                ('agency_type', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='agency', to='configurations.AgencyType')),
                ('parent', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='related_agencies', to='api.Agency')),
            ],
            options={
                'verbose_name': 'Agency',
                'verbose_name_plural': 'Agencies',
                'ordering': ['name', 'featured'],
            },
        ),
        migrations.CreateModel(
            name='Events',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=200)),
                ('description', models.CharField(blank=True, default='', max_length=2048)),
                ('location', models.CharField(blank=True, default='', max_length=100)),
                ('feature_image', models.FileField(blank=True, default=None, null=True, storage=custom_storages.EventImageStorage(), upload_to=api.models.image_path)),
                ('date', models.DateTimeField(blank=True, null=True)),
            ],
            options={
                'verbose_name': 'Event',
                'verbose_name_plural': 'Events',
                'ordering': ['name'],
            },
        ),
        migrations.CreateModel(
            name='FirstStage',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('reused', models.NullBooleanField()),
            ],
        ),
        migrations.CreateModel(
            name='InfoURLs',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('info_url', models.URLField()),
            ],
            options={
                'verbose_name': 'Info URL',
                'verbose_name_plural': 'Info URLs',
            },
        ),
        migrations.CreateModel(
            name='Landing',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('attempt', models.NullBooleanField(default=False)),
                ('success', models.NullBooleanField()),
                ('description', models.CharField(blank=True, default='', max_length=2048)),
                ('landing_location', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='landing', to='configurations.LandingLocation')),
                ('landing_type', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='landing', to='configurations.LandingType')),
            ],
        ),
        migrations.CreateModel(
            name='Launch',
            fields=[
                ('id', models.IntegerField(primary_key=True, serialize=False)),
                ('launch_library', models.NullBooleanField(default=True)),
                ('name', models.CharField(blank=True, max_length=255)),
                ('img_url', models.CharField(blank=True, max_length=255, null=True)),
                ('net', models.DateTimeField(max_length=255, null=True)),
                ('window_end', models.DateTimeField(max_length=255, null=True)),
                ('window_start', models.DateTimeField(max_length=255, null=True)),
                ('inhold', models.NullBooleanField(default=False)),
                ('tbdtime', models.NullBooleanField(default=False)),
                ('tbddate', models.NullBooleanField(default=False)),
                ('probability', models.IntegerField(blank=True, null=True)),
                ('holdreason', models.CharField(blank=True, max_length=255, null=True)),
                ('failreason', models.CharField(blank=True, max_length=255, null=True)),
                ('hashtag', models.CharField(blank=True, max_length=255, null=True)),
                ('slug', models.SlugField(max_length=100, unique=True)),
                ('created_date', models.DateTimeField(auto_now_add=True)),
                ('last_updated', models.DateTimeField(auto_now=True)),
            ],
            options={
                'verbose_name': 'Launch',
                'verbose_name_plural': 'Launches',
            },
        ),
        migrations.CreateModel(
            name='Launcher',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('serial_number', models.CharField(blank=True, max_length=10, null=True)),
                ('flight_proven', models.BooleanField(default=False)),
                ('status', models.CharField(blank=True, default='', max_length=255)),
                ('details', models.CharField(blank=True, default='', max_length=2048)),
            ],
            options={
                'verbose_name': 'Launch Vehicle',
                'verbose_name_plural': 'Launch Vehicles',
                'ordering': ['serial_number'],
            },
        ),
        migrations.CreateModel(
            name='LauncherConfig',
            fields=[
                ('id', models.IntegerField(primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=200)),
                ('active', models.BooleanField(default=True)),
                ('reusable', models.BooleanField(default=False)),
                ('audited', models.BooleanField(default=False)),
                ('librarian_notes', models.CharField(blank=True, default='', max_length=2048)),
                ('description', models.CharField(blank=True, default='', max_length=2048)),
                ('family', models.CharField(blank=True, default='', max_length=200)),
                ('full_name', models.CharField(blank=True, default='', max_length=200)),
                ('variant', models.CharField(blank=True, default='', max_length=200)),
                ('alias', models.CharField(blank=True, default='', max_length=200)),
                ('launch_cost', models.CharField(blank=True, max_length=200, null=True, verbose_name='Launch Cost ($)')),
                ('maiden_flight', models.DateField(blank=True, max_length=255, null=True, verbose_name='Maiden Flight Date')),
                ('min_stage', models.IntegerField(blank=True, null=True)),
                ('max_stage', models.IntegerField(blank=True, null=True)),
                ('length', models.FloatField(blank=True, null=True, verbose_name='Length (m)')),
                ('diameter', models.FloatField(blank=True, null=True, verbose_name='Max Diameter (m)')),
                ('fairing_diameter', models.FloatField(blank=True, null=True, verbose_name='Max Fairing Diameter (m)')),
                ('launch_mass', models.IntegerField(blank=True, null=True, verbose_name='Mass at Launch (T)')),
                ('leo_capacity', models.IntegerField(blank=True, null=True, verbose_name='LEO Capacity (kg)')),
                ('gto_capacity', models.IntegerField(blank=True, null=True, verbose_name='GTO Capacity (kg)')),
                ('geo_capacity', models.IntegerField(blank=True, null=True, verbose_name='GEO Capacity (kg)')),
                ('sso_capacity', models.IntegerField(blank=True, null=True, verbose_name='SSO Capacity (kg)')),
                ('to_thrust', models.IntegerField(blank=True, null=True, verbose_name='Thrust at Liftoff (kN)')),
                ('apogee', models.IntegerField(blank=True, null=True, verbose_name='Apogee - Sub-Orbital Only (km)')),
                ('vehicle_range', models.IntegerField(blank=True, null=True, verbose_name='Vehicle Range - Legacy')),
                ('info_url', models.CharField(blank=True, default='', max_length=200, null=True)),
                ('wiki_url', models.CharField(blank=True, default='', max_length=200, null=True)),
                ('legacy_image_url', models.CharField(blank=True, default='', max_length=200)),
                ('image_url', models.FileField(blank=True, default=None, null=True, storage=custom_storages.LauncherImageStorage(), upload_to=api.models.image_path)),
                ('created_date', models.DateTimeField(auto_now_add=True)),
                ('last_updated', models.DateTimeField(auto_now=True)),
                ('launch_agency', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='launcher_list', to='api.Agency')),
            ],
            options={
                'verbose_name': 'Launcher Configurations',
                'verbose_name_plural': 'Launcher Configurations',
                'ordering': ['name'],
            },
        ),
        migrations.CreateModel(
            name='Location',
            fields=[
                ('id', models.IntegerField(primary_key=True, serialize=False)),
                ('name', models.CharField(blank=True, default='', max_length=255)),
                ('country_code', models.CharField(blank=True, default='', max_length=255)),
            ],
            options={
                'verbose_name': 'Location',
                'verbose_name_plural': 'Locations',
            },
        ),
        migrations.CreateModel(
            name='Mission',
            fields=[
                ('id', models.IntegerField(primary_key=True, serialize=False)),
                ('name', models.CharField(blank=True, default='', max_length=255)),
                ('description', models.CharField(blank=True, default='', max_length=2048)),
                ('type', models.IntegerField(blank=True, null=True)),
                ('type_name', models.CharField(blank=True, default='', max_length=255)),
                ('mission_type', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='mission', to='configurations.MissionType')),
                ('orbit', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='mission', to='configurations.Orbit')),
            ],
            options={
                'verbose_name': 'Mission',
                'verbose_name_plural': 'Missions',
            },
        ),
        migrations.CreateModel(
            name='Orbiter',
            fields=[
                ('id', models.IntegerField(primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=200)),
                ('agency', models.CharField(default='Unknown', max_length=200)),
                ('history', models.CharField(default='', max_length=1000)),
                ('details', models.CharField(default='', max_length=1000)),
                ('in_use', models.BooleanField(default=True)),
                ('capability', models.CharField(default='', max_length=2048)),
                ('maiden_flight', models.DateField(max_length=255, null=True)),
                ('height', models.FloatField(blank=True, null=True, verbose_name='Length (m)')),
                ('diameter', models.FloatField(blank=True, null=True, verbose_name='Diameter (m)')),
                ('human_rated', models.BooleanField(default=False)),
                ('crew_capacity', models.IntegerField(blank=True, null=True, verbose_name='Crew Capacity')),
                ('payload_capacity', models.IntegerField(blank=True, null=True, verbose_name='Payload Capacity (kg)')),
                ('flight_life', models.CharField(blank=True, max_length=2048, null=True)),
                ('wiki_link', models.URLField(blank=True)),
                ('info_link', models.URLField(blank=True)),
                ('image_url', models.FileField(blank=True, default=None, null=True, storage=custom_storages.OrbiterImageStorage(), upload_to=api.models.image_path)),
                ('nation_url', models.FileField(blank=True, default=None, null=True, storage=custom_storages.AgencyNationStorage(), upload_to=api.models.image_path)),
                ('launch_agency', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='orbiter_list', to='api.Agency')),
            ],
            options={
                'verbose_name': 'Spacecraft',
                'verbose_name_plural': 'Spacecraft',
                'ordering': ['name'],
            },
        ),
        migrations.CreateModel(
            name='Pad',
            fields=[
                ('id', models.IntegerField(primary_key=True, serialize=False)),
                ('agency_id', models.IntegerField(blank=True, null=True)),
                ('name', models.CharField(blank=True, default='', max_length=255)),
                ('info_url', models.URLField(blank=True, null=True)),
                ('wiki_url', models.URLField(blank=True, null=True)),
                ('map_url', models.URLField(blank=True, null=True)),
                ('latitude', models.CharField(blank=True, max_length=30, null=True)),
                ('longitude', models.CharField(blank=True, max_length=30, null=True)),
                ('location', models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, related_name='pad', to='api.Location')),
            ],
            options={
                'verbose_name': 'Pad',
                'verbose_name_plural': 'Pads',
            },
        ),
        migrations.CreateModel(
            name='Payload',
            fields=[
                ('id', models.IntegerField(primary_key=True, serialize=False)),
                ('name', models.CharField(blank=True, default='', max_length=255)),
                ('description', models.CharField(blank=True, default='', max_length=2048)),
                ('weight', models.CharField(blank=True, max_length=255, null=True)),
                ('dimensions', models.CharField(blank=True, max_length=255, null=True)),
                ('type', models.IntegerField(blank=True, null=True)),
                ('total', models.IntegerField(blank=True, null=True)),
                ('type_name', models.CharField(blank=True, default='', max_length=255)),
                ('mission', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='payloads', to='api.Mission')),
            ],
        ),
        migrations.CreateModel(
            name='Rocket',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('configuration', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='rocket', to='api.LauncherConfig')),
            ],
        ),
        migrations.CreateModel(
            name='SecondStage',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('landing', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='secondstage', to='api.Landing')),
                ('launcher', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='secondstage', to='api.Launcher')),
                ('rocket', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='secondstage', to='api.Rocket')),
            ],
        ),
        migrations.CreateModel(
            name='VidURLs',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('vid_url', models.URLField()),
                ('launch', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='vid_urls', to='api.Launch')),
            ],
            options={
                'verbose_name': 'Video URL',
                'verbose_name_plural': 'Video URLs',
            },
        ),
        # Foreign keys added after table creation to resolve ordering.
        migrations.AddField(
            model_name='launcher',
            name='launcher_config',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='launcher', to='api.LauncherConfig'),
        ),
        migrations.AddField(
            model_name='launch',
            name='mission',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='launch', to='api.Mission'),
        ),
        migrations.AddField(
            model_name='launch',
            name='pad',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='launch', to='api.Pad'),
        ),
        migrations.AddField(
            model_name='launch',
            name='rocket',
            field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='launch', to='api.Rocket'),
        ),
        migrations.AddField(
            model_name='launch',
            name='status',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='launch', to='configurations.LaunchStatus'),
        ),
        migrations.AddField(
            model_name='infourls',
            name='launch',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='info_urls', to='api.Launch'),
        ),
        migrations.AddField(
            model_name='firststage',
            name='landing',
            field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='firststage', to='api.Landing'),
        ),
        migrations.AddField(
            model_name='firststage',
            name='launcher',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='firststage', to='api.Launcher'),
        ),
        migrations.AddField(
            model_name='firststage',
            name='rocket',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='firststage', to='api.Rocket'),
        ),
        migrations.AddField(
            model_name='firststage',
            name='type',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='firststage', to='configurations.FirstStageType'),
        ),
    ]
| |
#!/usr/bin/env python
"""
Copyright 2014 Novartis Institutes for Biomedical Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
import os
import glob
import re
from subprocess import *
from yap_file_io import *
import time
"""
Generates summary file which is a cumulation of the log file and all reports
and graphs generated in the postprocess. Generates special summary tables for
htseq, fastqc, fastqscreen
"""
# Mapping: sample directory path -> list of its barcode sub-directories.
sample_dict = {}
#Checks the number of system arguments.
# NOTE(review): argv[2] and argv[4] are used positionally, assuming the exact
# invocation "yap_summary -i <input> -o <output>" -- confirm with callers.
if len(sys.argv) < 5:
    print "command line : ", "yap_summary -i input_directory_name[full qualified path to yap output directory] \
-o summary_output_path[where you want to create yap_summary output] "
    exit()
# Removes trailing '\n'. Checks for the validity of the file paths provided.
# Removes trailing '/' of file path
if os.path.exists(sys.argv[2]):
    outputdir = sys.argv[2].strip("\n")
    if outputdir[-1] == '/':
        outputdir = outputdir[0:-1]
    # dir_name is the workflow name; it prefixes every summary file below.
    path, dir_name = os.path.split(outputdir)
else:
    print "ERROR: Input path specified doesn't exist."
    exit()
print "Beginning YAP Summary", time.strftime("%Y/%m/%d %H:%M:%S", time.localtime())
summary_dir = sys.argv[4] + "/" + dir_name + "_summary"
# Remove, previous summary_dir, make a new summary directory and a temp
# directory in it only if the path exists
if os.path.exists(summary_dir):
    prm = Popen("rm -r " + summary_dir, shell='True').wait()
pmkdir = Popen("mkdir " + summary_dir, shell='True').wait()
# print glob.glob(sys.argv[4]+'/*')
# temp_dir holds intermediate txt/pdf pages; it is deleted at the end of
# output_summary().
temp_dir = summary_dir + "/" + "temp"
ptemp = Popen("mkdir " + temp_dir, shell='True').wait()
summary_dir = glob.glob(summary_dir)[0]
# Searches output directory at one level
sample_list = glob.glob(outputdir + "/*")
regroup_list = []
# Regrouped samples live one level deeper; fold them into the same list.
for i in sample_list:
    if i.split('/')[-1] == "regroup_output":
        regroup_list = glob.glob(i + "/*")
sample_list = sample_list + regroup_list
# Delete all sample directories with no output.
for i in sample_list:
    if os.path.isdir(i) and os.path.split(i)[-1] != 'consolidated_output' and \
            os.path.split(i)[-1] != 'yap_log_files' and \
            os.path.split(i)[-1] != dir_name + '_summary':
        for j in glob.glob(i + '/*'):
            if os.path.isdir(j):
                if os.path.exists(j + "/preprocess_output") and \
                        os.path.exists(j + "/aligner_output") and \
                        os.path.exists(j + "/postprocess_output"):
                    if glob.glob(j + "/preprocess_output/*") == [] and \
                            glob.glob(j + "/aligner_output/*") == [] and \
                            glob.glob(j + "/postprocess_output/*") == []:
                        os.system('rm -r ' + i)
# If all sample directories are empty, script exits out
if len(sample_list) == 0:
    print "Error : Opening the directory,", outputdir, " No such input directory "
    print "please provide the YAP output directory path as input"
    print "command line : ", "yap_summary output_directory_name"
    exit()
print "Creating YAP Summary output...."
# Declares a list for each tool/software, for which,
# summary files are going to be generated
fastqc_list = []
fastq_screen_list = []
summary_file = ''
htseq_list = []
rna_bias_list = []
gc_bias_list = []
insert_size_list = []
qs_cycle_list = []
qs_distribution_list = []
mark_dup_list = []
align_sum_list = []
exon_count_list = []
junc_count_list = []
uk_junc_list = []
cufflinks_list = []
hs_list = []
target_pcr_list = []
#define variables for eqp ( specific to in-house function, neglect otherwise)
eqp_gene=[]
eqp_junc=[]
eqp_exon=[]
# Fetches list of inputs, barcodes, htseq, fastqc and fastq screen files
# with full path and appends them to their respective lists.
# Else, seeks out the workflow_summary.txt to get the provenance.
# Walk every sample directory once and bucket each known report type into its
# per-tool list (each entry is the glob result list for one sample).
for i in range(len(sample_list)):
    # Samples with postprocess output: collect every postprocess report.
    if glob.glob(sample_list[i] + '/*/postprocess_output/'):
        barcode_list = glob.glob(sample_list[i] + "/*")
        sample_dict[str(sample_list[i])] = barcode_list
        if glob.glob(sample_list[i] + '/*/postprocess_output/*htseq-count.out'):
            htseq_list.append(
                glob.glob(
                    sample_list[i] +
                    '/*/postprocess_output/*htseq-count.out'))
        if glob.glob(sample_list[i] + '/*/postprocess_output/*CollectRnaSeqMetrics.txt'):
            rna_bias_list.append(
                glob.glob(
                    sample_list[i] +
                    '/*/postprocess_output/*CollectRnaSeqMetrics.txt'))
        if glob.glob(sample_list[i] + '/*/postprocess_output/*CollectInsertSizeMetrics.txt'):
            insert_size_list.append(
                glob.glob(
                    sample_list[i] +
                    '/*/postprocess_output/*CollectInsertSizeMetrics.txt'))
        if glob.glob(sample_list[i] + '/*/postprocess_output/*GcBiasMetrics_summary.txt'):
            gc_bias_list.append(
                glob.glob(
                    sample_list[i] +
                    '/*/postprocess_output/*GcBiasMetrics_summary.txt'))
        if glob.glob(sample_list[i] + '/*/postprocess_output/*MeanQualityByCycle.txt'):
            qs_cycle_list.append(
                glob.glob(
                    sample_list[i] +
                    '/*/postprocess_output/*MeanQualityByCycle.txt'))
        if glob.glob(sample_list[i] + '/*/postprocess_output/*CollectTargetedPcrMetrics.txt'):
            target_pcr_list.append(
                glob.glob(
                    sample_list[i] +
                    '/*/postprocess_output/*CollectTargetedPcrMetrics.txt'))
        if glob.glob(sample_list[i] + '/*/postprocess_output/*CalculateHsMetrics.txt'):
            hs_list.append(
                glob.glob(
                    sample_list[i] +
                    '/*/postprocess_output/*CalculateHsMetrics.txt'))
        if glob.glob(sample_list[i] + '/*/postprocess_output/*QualityScoreDistribution.txt'):
            qs_distribution_list.append(
                glob.glob(
                    sample_list[i] +
                    '/*/postprocess_output/*QualityScoreDistribution.txt'))
        if glob.glob(sample_list[i] + '/*/postprocess_output/*MarkDuplicates.txt'):
            mark_dup_list.append(
                glob.glob(
                    sample_list[i] +
                    '/*/postprocess_output/*MarkDuplicates.txt'))
        if glob.glob(sample_list[i] + '/*/postprocess_output/*CollectAlignmentSummaryMetrics.txt'):
            align_sum_list.append(
                glob.glob(
                    sample_list[i] +
                    '/*/postprocess_output/*CollectAlignmentSummaryMetrics.txt'))
        if glob.glob(sample_list[i] + '/*/postprocess_output/*exoncount_summary.txt'):
            exon_count_list.append(
                glob.glob(
                    sample_list[i] +
                    '/*/postprocess_output/*exoncount_summary.txt'))
        if glob.glob(sample_list[i] + '/*/postprocess_output/*junctioncount_summary.txt'):
            junc_count_list.append(
                glob.glob(
                    sample_list[i] +
                    '/*/postprocess_output/*junctioncount_summary.txt'))
        if glob.glob(sample_list[i] + '/*/postprocess_output/*unknown_junction.txt'):
            uk_junc_list.append(
                glob.glob(
                    sample_list[i] +
                    '/*/postprocess_output/*unknown_junction.txt'))
        if glob.glob(sample_list[i] + '/*/postprocess_output/genes.fpkm_tracking'):
            cufflinks_list.append(
                glob.glob(
                    sample_list[i] +
                    '/*/postprocess_output/genes.fpkm_tracking'))
        #fetch files specific to eqp ( in-house functionality, neglect otherwise)
        if glob.glob(sample_list[i]+'/*/postprocess_output/*merge_eqp_counts-exon.cnt'):
            eqp_exon.append(glob.glob(sample_list[i]+'/*/postprocess_output/*merge_eqp_counts-exon.cnt'))
        if glob.glob(sample_list[i]+'/*/postprocess_output/*merge_eqp_counts-gene.cnt'):
            eqp_gene.append(glob.glob(sample_list[i]+'/*/postprocess_output/*merge_eqp_counts-gene.cnt'))
        if glob.glob(sample_list[i]+'/*/postprocess_output/*merge_eqp_counts-junction.cnt'):
            eqp_junc.append(glob.glob(sample_list[i]+'/*/postprocess_output/*merge_eqp_counts-junction.cnt'))
    # Preprocess reports (FastQC / fastq_screen).
    if glob.glob(sample_list[i] + '/*/preprocess_output/'):
        barcode_list = glob.glob(sample_list[i] + "/*")
        sample_dict[str(sample_list[i])] = barcode_list
        if glob.glob(sample_list[i] + '/*/preprocess_output/*_fastqc/summary.txt'):
            fastqc_list.append(
                glob.glob(
                    sample_list[i] +
                    '/*/preprocess_output/*_fastqc/summary.txt'))
        if glob.glob(sample_list[i] + '/*/preprocess_output/*_screen.txt'):
            fastq_screen_list.append(
                glob.glob(
                    sample_list[i] +
                    '/*/preprocess_output/*_screen.txt'))
    else:
        # Not a sample directory: remember the workflow log for the pdf cover.
        log_file = re.match(r'(.*)_summary.txt', sample_list[i], re.M | re.I)
        if log_file:
            summary_file = sample_list[i]
def output_summary(sample_dict):
    """ Produces a pdf file containing the txt, png result files across all samples run.

    Accepts sample_dict {sample dir: [barcode dirs]}, renders a front page
    plus one title page per sample/barcode, converts every postprocess .txt
    report to pdf with the external text2pdf.py tool, and merges everything
    (together with the pre-existing .pdf reports) into a single
    <workflow>_summary.pdf with the external mergepdf.py tool.

    Relies on the module-level temp_dir, dir_name, outputdir, summary_dir and
    summary_file variables set by the surrounding script.
    """
    all_pdfs = []
    # Front page: workflow name and the paths used for this run.
    main_page = temp_dir + "/" + dir_name + "_" + "front"
    fw_main_page = open(main_page + ".txt", "wb")
    fw_main_page.write("\n\n\n")
    fw_main_page.write("\t\t\t\t\tYAP Postprocess Summary Results" + "\n\n")
    fw_main_page.write("Workflow Name= " + dir_name + "\n")
    fw_main_page.write("Input Directory Path Provided= " + outputdir + "\n")
    fw_main_page.write("Summary Ouput Directory Path= " + summary_dir + "\n")
    fw_main_page.close()
    # NOTE: the external commands are single shell strings, so they need
    # shell=True.  The original passed shell='False', a truthy string that
    # already behaved as shell=True; this makes the intent explicit.
    sample_keys = sorted(sample_dict.iterkeys())
    for k in range(len(sample_keys)):
        filename_key = sample_keys[k]
        path, file_name = os.path.split(filename_key)
        for j in range(len(sample_dict[filename_key])):
            barcode_path = sample_dict[filename_key][j]
            temp_pdfs = []
            path, barcode_name = os.path.split(barcode_path)
            postprocess_dir_path = barcode_path + "/" + "postprocess_output"
            pdf_files = glob.glob(postprocess_dir_path + "/*.pdf")
            text_files = glob.glob(postprocess_dir_path + "/*.txt")
            # Per-sample/barcode title page.
            sample_page = temp_dir + "/" + file_name + \
                "_" + barcode_name + "_" + "main"
            fw_sample_page = open(sample_page + ".txt", "a+")
            fw_sample_page.write("\n\n\n\n\n")
            fw_sample_page.write("\t\t\t\t\tSample : " + file_name + "\n")
            fw_sample_page.write("\t\t\t\t\tBarcode : " + barcode_name)
            fw_sample_page.close()
            for i in range(0, len(text_files)):
                # BUG FIX: the original tested "not text_files[i].find(name)",
                # which is True only when the name sits at position 0 of the
                # full path -- so in practice no text report was ever
                # converted.  The intent is to convert every .txt report
                # EXCEPT the count/junction files, which get dedicated
                # summary tables elsewhere in this script.
                if ('exoncount.txt' not in text_files[i] and
                        'junctioncount.txt' not in text_files[i] and
                        'unknown_junction.txt' not in text_files[i]):
                    pdf_target = (temp_dir + "/" + str(k) + "_" + str(j) +
                                  "_" + str(i) + ".pdf")
                    txt_pdf = Popen(
                        "text2pdf.py " + text_files[i] + " -o " + pdf_target,
                        shell=True).wait()
                    temp_pdfs.append(pdf_target)
            txt_pdf = Popen(
                "text2pdf.py " + sample_page + ".txt -o " +
                sample_page + ".pdf", shell=True).wait()
            all_pdfs.append(sample_page + ".pdf")
            all_pdfs.extend(temp_pdfs)
            all_pdfs.extend(pdf_files)
    final_pdf_files = []
    txt_pdf = Popen("text2pdf.py " + main_page + ".txt -o " +
                    main_page + ".pdf", shell=True).wait()
    # The workflow log (<name>_summary.txt), when present, becomes the second
    # page of the merged document.
    if summary_file != '':
        path, sufile = os.path.split(summary_file)
        su_file, ext = os.path.splitext(sufile)
        su_file = temp_dir + '/' + su_file
        txt_pdf = Popen(
            "text2pdf.py " + summary_file + " -o " + su_file + ".pdf",
            shell=True).wait()
        final_pdf_files.append(su_file + ".pdf")
    final_pdf_files.insert(0, main_page + ".pdf")
    final_pdf_files.extend(all_pdfs)
    final_input_pdfs = ''
    summary_final_pdf = summary_dir + "/" + dir_name + "_summary" + ".pdf"
    for i in range(0, len(final_pdf_files)):
        # Shell command lines have a limited length; once the accumulated
        # argument string nears the limit, merge what we have into an
        # intermediate pdf and continue with that as the sole carried input.
        if len(final_input_pdfs) + len(summary_final_pdf) > 8170:
            summary_temp_pdf = summary_dir + "/temp/" + \
                dir_name + "_temp_summary_" + str(i) + ".pdf"
            txt_pdf = Popen("mergepdf.py -i " + final_input_pdfs +
                            " -o " + summary_temp_pdf, shell=True).wait()
            final_input_pdfs = summary_temp_pdf + " "
        final_input_pdfs += final_pdf_files[i] + " "
    txt_pdf = Popen("mergepdf.py -i " + final_input_pdfs +
                    " -o " + summary_final_pdf, shell=True).wait()
    ptemp = Popen("rm -r " + temp_dir, shell=True).wait()
#
#
#
def count_summary(count_list, file_ext):
    """ Summarizes count files, namely - HTSeq and Exon counts
    Takes the list of file paths and the desired output file extension.
    Writes out a flat file listing results across all samples. """
    count_arr = []
    count_dict = {}          # ((sample, barcode), feature id) -> count string
    count_gene_set = set()   # union of feature ids seen in any file
    sample_set = set()       # sample directories that contributed a file
    for i in range(len(count_list)):
        for j in count_list[i]:
            # Skip missing or empty count files.
            if j and os.path.getsize(j) > 0:
                # Path layout: .../sample/barcode/postprocess_output/<file>,
                # so path[-4]/path[-3] are sample/barcode and path[:-3] is
                # the sample directory.
                path = j.split('/')
                sample_set.update(['/'.join(path[:-3])])
                with open(j, 'rb') as foo:
                    count_arr = foo.readlines()
                foo.close()
                for k in count_arr:
                    # Each line is "<feature id>\t<count>".
                    temp = k.strip('\n').split('\t')
                    count_gene_set.update([temp[0]])
                    count_dict[((path[-4], path[-3]), temp[0])] = temp[1]
    # Build the two header rows: sample names and barcode names.
    bar_str = ''
    file_str = '\t'
    for k in sorted(sample_set):
        temp1 = k.split('/')
        file_str = file_str + temp1[-1]
        for v in sample_dict[k]:
            temp = v.split('/')
            bar_str = bar_str + '\t' + temp[-1]
            file_str = file_str + '\t'
    bar_str = "BARCODE" + bar_str.rstrip('\t') + '\n'
    file_str = "SAMPLE" + file_str.rstrip('\t') + '\n'
    with open(summary_dir + '/' + dir_name + file_ext + '.txt', 'a+') as ct:
        ct.write(file_str + bar_str)
        # NOTE(review): the header columns follow sorted(sample_set) while the
        # value columns below follow sorted(sample_dict.values()); these line
        # up only when every sample in sample_dict contributed a count file --
        # verify against callers.
        for g in sorted(count_gene_set):
            ct.write(g)
            for v in sorted(sample_dict.values()):
                for l in v:
                    path = l.split('/')
                    try:
                        ct.write('\t' + count_dict[((path[-2], path[-1]), g)])
                    except KeyError:
                        # Missing (sample, barcode) entries are skipped, not
                        # written as placeholders.
                        pass
            ct.write('\n')
    ct.close()
def unknown_junc_summary(uk_list, file_ext):
    """ Summarizes unknown junctions across all samples.

    Takes the nested list of unknown-junction file paths and the desired
    output file name suffix.  Writes one section per input file (sample name,
    barcode name, then the file's lines) into
    <summary_dir>/<dir_name><file_ext>.
    """
    with open(summary_dir + '/' + dir_name + file_ext, 'w') as uk:
        for i in range(len(uk_list)):
            for j in uk_list[i]:
                if j:
                    # Path layout: .../sample/barcode/postprocess_output/<file>
                    path = j.split('/')
                    # BUG FIX: the original did "uk = foo.readlines()" here,
                    # clobbering the output handle "uk" with a list (the
                    # subsequent uk.write() would raise AttributeError) while
                    # uk_file_arr stayed permanently empty.  Read into
                    # uk_file_arr instead.
                    with open(j, 'rb') as foo:
                        uk_file_arr = foo.readlines()
                    file_str = path[-4] + '\n' + path[-3] + '\n'
                    uk.write(file_str)
                    for k in uk_file_arr:
                        uk.write(k)
                    uk.write('\n')
def junc_count_summary(file_list, file_ext):
    """ Summarizes count files, namely - Junction counts
    Takes the list of file paths and the desired output file extension. """
    junc = []
    file_dict = {}        # ((sample, barcode), key) -> accumulated count
    gene_set = set()      # union of junction keys across all files
    sample_set = set()    # sample directories that contributed a file
    junc_sample_dict = {}
    for j in file_list:
        for i in j:
            file_con = []
            # read_file() comes from yap_file_io (imported at module level).
            file_con = read_file(i)
            path = []
            # Path layout: .../sample/barcode/postprocess_output/<file>.
            path = i.split('/')
            sample_set.update(['/'.join(path[:-3])])
            sample = path[-4]
            barcode = path[-3]
            for line in file_con:
                full_key = tuple()
                # Each line is "<junction>\t<count>\t<key>".
                junction, count, key = line.strip().rstrip('\n').split('\t')
                full_key = ((sample, barcode), key)
                gene_set.update([key])
                # Accumulate counts for junctions sharing the same key.
                try:
                    if full_key in file_dict:
                        file_dict[full_key] += int(count)
                    else:
                        file_dict[full_key] = int(count)
                except KeyError as e:
                    print e
    # Build the two header rows: sample names and barcode names.
    bar_str = '\t'
    file_str = '\t'
    for k in sorted(sample_set):
        temp1 = k.split('/')
        file_str = file_str + temp1[-1]
        for v in sample_dict[k]:
            temp = v.split('/')
            bar_str = bar_str + '\t' + temp[-1]
            file_str = file_str + '\t'
            junc_sample_dict[k] = v
    bar_str = 'BARCODE' + bar_str.rstrip('\t') + '\n'
    file_str = 'SAMPLE' + file_str.rstrip('\t') + '\n'
    with open(summary_dir + '/' + dir_name + file_ext + '.txt', 'w') as hq:
        hq.write(file_str + bar_str)
        # One row per key, one column per (sample, barcode); N/A when absent.
        for g in sorted(gene_set):
            hq.write(g)
            for k in sorted(sample_set):
                for v in sample_dict[k]:
                    path = v.split('/')
                    try:
                        hq.write(
                            '\t' + str(file_dict[((path[-2], path[-1]), g)]))
                    except KeyError:
                        hq.write('\tN/A')
            hq.write('\n')
    hq.close()
# Summarizes cufflinks generated files across samples
# Takes the list of file paths and the desired output file extension.
# Writes out a flat file listing results across all samples.
def cufflinks_summary(cuff_list, file_ext):
    """ Summarizes cufflinks generated files across samples.
    Takes the list of file paths and the desired output file extension. """
    cuff = []
    cuff_dict = {}          # ((sample, barcode), gene string) -> "FPKM\tstatus"
    cuff_gene_set = set()   # union of gene identifier strings
    sample_set = set()      # sample directories that contributed a file
    cuff_sample_dict = {}
    for i in range(len(cuff_list)):
        for j in cuff_list[i]:
            if j:
                # Path layout: .../sample/barcode/postprocess_output/<file>.
                path = j.split('/')
                sample_set.update(['/'.join(path[:-3])])
                with open(j, 'rb') as foo:
                    htseq = foo.readlines()
                foo.close()
                # Row 0 is the cufflinks header line; data rows follow.
                for k in range(1, len(htseq)):
                    temp = []
                    temp = htseq[k].strip('\n').split('\t')
                    # Row key built from columns 0 and 6 of the tracking file
                    # (presumably tracking_id and locus -- verify against the
                    # cufflinks genes.fpkm_tracking format).
                    gene_string = temp[0] + '|' + temp[6] + '|'
                    cuff_gene_set.update([gene_string])
                    # Stores the last and fourth-from-last columns, written
                    # under the FPKM / FPKM_Status captions below.
                    cuff_dict[
                        ((path[-4], path[-3]), gene_string)] = temp[-4] + "\t" + temp[-1]
    # Build the three header rows: samples, barcodes, per-column captions.
    bar_str = ''
    file_str = '\t'
    fpkm_str = ''
    for k in sorted(sample_set):
        temp1 = k.split('/')
        file_str = file_str + temp1[-1]
        for v in sample_dict[k]:
            temp = v.split('/')
            bar_str = bar_str + '\t\t' + temp[-1]
            file_str = file_str + '\t\t'
            fpkm_str = fpkm_str + '\tFPKM\tFPKM_Status'
            cuff_sample_dict[k] = v
    bar_str = 'BARCODE\t' + bar_str.rstrip('\t').lstrip('\t') + '\n'
    file_str = 'SAMPLE' + file_str.rstrip('\t') + '\n'
    fpkm_str = 'TRACKING_ID' + fpkm_str + '\n'
    with open(summary_dir + '/' + dir_name + file_ext, 'w') as hq:
        hq.write(file_str + bar_str + fpkm_str)
        # One row per gene, two columns per (sample, barcode); N/A when absent.
        for g in sorted(cuff_gene_set):
            hq.write(g)
            for k in sorted(sample_set):
                for v in sample_dict[k]:
                    path = v.split('/')
                    try:
                        hq.write('\t' + cuff_dict[((path[-2], path[-1]), g)])
                    except KeyError:
                        hq.write('\tN/A')
            hq.write('\n')
    hq.close()
# Transposes the fastqc_summ list from the fastqc_summary function. fout is the file handle of the output file.
# if Flag == True then it is treated as a title string.
def fastqc_transpose(fin, fout, flag):
    """ Transposes one FastQC summary.txt into a row of the combined report.

    fin  -- path to a FastQC summary.txt
            (.../sample/barcode/preprocess_output/<reads>_fastqc/summary.txt)
    fout -- path of the combined report file (opened here in append mode)
    flag -- the string 'True' writes the header line first; anything else
            appends a data row only

    Each summary.txt line is "STATUS<TAB>test name<TAB>file"; the transpose
    writes one column per test name and one row per input file.
    """
    suffix = '.fq_fastqc'
    fastqc_dir = fin.split('/')[-2]
    # BUG FIX: the original used rstrip('.fq_fastqc'), which strips any run of
    # the characters ".fq_astc" from the end of the name (e.g. "reads" ->
    # "read"), not the literal suffix.  Remove the suffix explicitly.
    if fastqc_dir.endswith(suffix):
        fastqc_dir = fastqc_dir[:-len(suffix)]
    file_name = fin.split('/')[-5] + "-" + fastqc_dir
    with open(fin) as foo:
        ls = foo.readlines()
    entry = dict()
    sample = set()
    vals = set()
    for i in range(len(ls)):
        ls[i] = ls[i].rstrip('\n').split('\t')
        sample.update([file_name])
        vals.update([ls[i][1]])
        entry[(file_name, ls[i][1])] = ls[i][0]
    with open(fout, 'a+') as f:
        # BUG FIX: iterate test names in sorted order.  Set iteration order is
        # arbitrary and may differ between calls, so the header row (written
        # on the first call) and rows appended by later calls could end up
        # with misaligned columns.
        if flag == 'True':
            outstring = 'Sample_ID\t'
            for v in sorted(vals):
                outstring = outstring + v + '\t'
            f.write(outstring.rstrip('\t'))
            f.write('\n')
        for i in sample:
            out_string = i + '\t'
            for j in sorted(vals):
                out_string = out_string + entry[(i, j)] + '\t'
            f.write(out_string.rstrip('\t'))
            f.write('\n')
def fastqc_summary(sample_dict):
""" Generates the summary files across all samples for results produced by the FastQC package. """
count = 0
print "Generating Fastqc summary.."
for j in sample_dict:
fastqc_summ = j
for k in range(len(fastqc_summ)):
fout = summary_dir + "/" + dir_name + "_FastQC_summary_report.txt"
if count == 0:
fastqc_transpose(fastqc_summ[k], fout, 'True')
count += 1
else:
fastqc_transpose(fastqc_summ[k], fout, 'False')
def summary_contaminants(fin, fout, flag):
    """ Transposes one fastq_screen report into a row of the combined report.

    fin  -- path to a *_screen.txt produced by fastq_screen
            (.../sample/barcode/preprocess_output/<reads>_screen.txt)
    fout -- path of the combined report (opened here in append mode)
    flag -- the string 'True' writes the header line first; anything else
            appends a data row only
    """
    with open(fin) as foo:
        line = foo.readlines()
    # Sample identifier, optionally "<sample>,<barcode>".  Renamed from the
    # original local "file", which shadowed the builtin.
    sample_id = fin.split('/')[-4]
    # BUG FIX: the original appended os.path.split(fin)[-3]; os.path.split()
    # returns a 2-tuple, so indexing [-3] always raised IndexError whenever a
    # real barcode was present.  The wanted component is fin.split('/')[-3].
    if fin.split('/')[-3] != 'no_barcode_specified':
        sample_id = sample_id + ',' + fin.split('/')[-3]
    with open(fout, 'a+') as f:
        out_string = ''
        # header counts the report rows, excluding the trailing '%' line and
        # any '#' comment lines; header - 1 is used as the repeat factor for
        # the column captions below.  TODO(review): confirm against a real
        # fastq_screen report.
        header = len(line)
        if line[-1].startswith('%'):
            header -= 2
        for i in range(len(line)):
            if line[i].startswith('#'):
                header -= 1
            if not line[i].startswith('#'):
                # Fold multi-column rows onto one output line.
                if re.search('.+\t.+\t.+', line[i]):
                    line[i] = line[i].replace('\n', '\t')
                if line[i].startswith('Library') and flag == 'True':
                    out_string = out_string + 'Sample_ID\t'
                    out_string = out_string + \
                        line[i] * (header - 1) + '\n' + sample_id + '\t'
                elif line[i].startswith('Library') and flag == 'False':
                    out_string = out_string + sample_id + '\t'
                else:
                    out_string = out_string + line[i]
        f.write(out_string.rstrip('\t'))
        f.write('\n')
def fastq_screen_summary(sample_dict):
    """ Generates the summary files across all samples for results produced by the Fastqscreen package. """
    report_path = summary_dir + "/" + dir_name + \
        "_FastqScreen_summary_report.txt"
    group_no = 0
    for screen_group in sample_dict:
        group_no += 1
        # Every file of the FIRST group writes the header line; files from
        # later groups append data rows only (matches the original counter
        # behaviour, which incremented once per group, not per file).
        first_group = (group_no == 1)
        for screen_path in screen_group:
            if first_group:
                summary_contaminants(screen_path, report_path, 'True')
            else:
                summary_contaminants(screen_path, report_path, 'False')
def collect_metrics_summary(mark_dup_list, file_string):
    """ Summarizes Picard *Metrics reports across all samples (everything
    except the QualityScoreDistribution / MeanQualityByCycle histograms).

    mark_dup_list -- nested list of metrics file paths, one inner list per
                     sample (.../sample/barcode/postprocess_output/<file>)
    file_string   -- tool name used in the output file name:
                     <summary_dir>/<dir_name>_<file_string>_summary.txt

    Parses the "## METRICS CLASS" section of each report and writes one row
    per sample/barcode, with N/A for missing values.  Relies on the
    module-level sample_dict, summary_dir and dir_name variables.
    """
    dict_md = {}
    title_md = ''
    sample = ''
    barcode = ''
    mark_dup_summ = ''
    list_md = []
    # Predefined so an empty mark_dup_list no longer raises NameError below.
    mark_title = []
    mark_value = []
    for i in range(len(mark_dup_list)):
        for k in range(len(mark_dup_list[i])):
            with open(mark_dup_list[i][k]) as mark_fh:
                file_md = mark_fh.read()
            # Picard separates report sections with '#' markers.
            list_md = file_md.split('#')
            for l in range(len(list_md)):
                match_md = re.match(
                    r'\s*METRICS CLASS\t.*\n(.*)\n', list_md[l], re.I | re.M)
                if match_md:
                    row_summ = list_md[l][1:].rstrip(
                        '\n').strip('\t ').split('\n')
                    mark_title = []
                    mark_value = []
                    # Row 1 is the column header; later rows are values.
                    for m in range(1, len(row_summ)):
                        if m == 1:
                            mark_title = row_summ[m].split('\t')
                        else:
                            mark_value.append(
                                row_summ[m].rstrip('\n').rstrip('\t').split('\t'))
                    for n in range(len(mark_value)):
                        for j in range(len(mark_value[n])):
                            dict_md[mark_dup_list[i][k].split(
                                '/')[-4], mark_dup_list[i][k].split('/')[-3], n, mark_title[j]] = mark_value[n][j]
    mark_dup_summ = summary_dir + "/" + dir_name + \
        '_' + file_string + '_summary.txt'
    title_md = '\t'.join(mark_title)
    with open(mark_dup_summ, 'a+') as mark_fout:
        mark_fout.write("SAMPLE\tBARCODE\t" + title_md + '\n')
        for i in sorted(sample_dict.keys()):
            path, sample = os.path.split(i)
            for j in sorted(sample_dict[i]):
                path2, barcode = os.path.split(j)
                out_string = ''
                for l in range(len(mark_value)):
                    out_string = out_string + sample + '\t' + barcode + '\t'
                    for k in mark_title:
                        try:
                            out_string = out_string + \
                                dict_md[sample, barcode, l, k]
                        except KeyError:
                            out_string = out_string + 'N/A'
                        out_string = out_string + '\t'
                    # BUG FIX: the original called out_string.rstrip('\t')
                    # without assigning the result (strings are immutable),
                    # so every row kept a trailing tab before its newline.
                    out_string = out_string.rstrip('\t')
                    out_string = out_string + '\n'
                mark_fout.write(out_string.rstrip('\t'))
                mark_fout.write('\n')
def quality_score_summary(qs_list, phrase):
    """ Generates the summary files across all samples for results produced by the Picardtools - QS Distribution and QS cycle. """
    match_qs = {}  # (sample, barcode) -> histogram lines
    for i in range(len(qs_list)):
        for j in range(len(qs_list[i])):
            barcode = ''
            sample = ''
            # Path layout: .../sample/barcode/postprocess_output/<file>.
            barcode = qs_list[i][j].split('/')[-3]
            sample = qs_list[i][j].split('/')[-4]
            with open(qs_list[i][j]) as qs:
                qs_file = qs.read()
            qs.close()
            # Capture everything after the HISTOGRAM header line.
            match_summary_qs = re.search(
                'HISTOGRAM\t[\w\.]+\n(.*)', qs_file, re.DOTALL)
            if match_summary_qs:
                match_qs[sample, barcode] = (
                    match_summary_qs.group(1).split('\n'))
    qs_file_string = summary_dir + '/' + dir_name + '_' + phrase + '.txt'
    with open(qs_file_string, 'a+') as qs_out:
        qs_array = []
        out_string = ''
        # Header row: sample/barcode captions, in sample_dict iteration order
        # (the same order in which the histograms are appended below).
        for k in sample_dict:
            path, temp_sample = os.path.split(k)
            for v in sample_dict[k]:
                path1, temp_bar = os.path.split(v)
                out_string = out_string + temp_sample + '\t' + temp_bar + '\t'
                if (temp_sample, temp_bar) in match_qs:
                    qs_array.append(match_qs[temp_sample, temp_bar])
        qs_out.write(out_string.rstrip('\t'))
        qs_out.write('\n')
        # Write the histograms side by side, one output row per histogram row.
        # NOTE(review): qs_array[0] and i[count] are indexed without bounds
        # checks -- this assumes at least one histogram matched and that all
        # histograms have the same number of rows; confirm with real data.
        count = 0
        while count != len(qs_array[0]):
            out_string = ''
            for i in qs_array:
                out_string = out_string + i[count] + '\t'
            qs_out.write(out_string.rstrip('\t'))
            qs_out.write('\n')
            count = count + 1
    qs_out.close()
# ---------------------------------------------------------------------------
# Driver section: build the merged pdf first, then emit one summary table per
# tool for which result files were collected above.
# ---------------------------------------------------------------------------
output_summary(sample_dict)
if len(htseq_list) > 0:
    print "Generating HTSeq summary..."
    count_summary(htseq_list, '_htseq_summary.raw')
if len(exon_count_list) > 0:
    print "Generating YAP Exon counts summary..."
    count_summary(exon_count_list, '_exon_count_summary')
if len(junc_count_list) > 0:
    print "Generating YAP Junction counts summary..."
    junc_count_summary(junc_count_list, '_junction_count_summary')
if len(uk_junc_list) > 0:
    print "Generating Unknown Junction summary..."
    unknown_junc_summary(uk_junc_list, '_unknown_junction_summary')
if len(fastqc_list) > 0:
    print "Generating FastQC summary..."
    fastqc_summary(fastqc_list)
if len(fastq_screen_list) > 0:
    print "Generating Fastq Screen summary..."
    fastq_screen_summary(fastq_screen_list)
if len(rna_bias_list) > 0:
    print "Generating RNA Bias summary..."
    collect_metrics_summary(rna_bias_list, 'RnaSeqMetrics')
if len(gc_bias_list) > 0:
    print "Generating GC Bias summary..."
    collect_metrics_summary(gc_bias_list, 'GcBiasMetrics')
if len(mark_dup_list) > 0:
    print "Generating Mark Duplicates summary..."
    collect_metrics_summary(mark_dup_list, 'MarkDuplicates')
if len(insert_size_list) > 0:
    print "Generating Insert Size summary..."
    collect_metrics_summary(insert_size_list, 'InsertSizeMetrics')
if len(target_pcr_list) > 0:
    print "Generating Targeted Pcr Metrics summary..."
    collect_metrics_summary(target_pcr_list, 'TargetedPcrMetrics')
if len(hs_list) > 0:
    print "Generating Calculated HS Metrics summary..."
    collect_metrics_summary(hs_list, 'CalculateHsMetrics')
if len(align_sum_list) > 0:
    print "Generating Aligner Metrics summary..."
    collect_metrics_summary(align_sum_list, 'AlignmentSummaryMetrics')
if len(qs_distribution_list) > 0:
    print "Generating Picard-Quality Score Distribution summary..."
    quality_score_summary(qs_distribution_list, 'QSdistribution')
if len(qs_cycle_list) > 0:
    print "Generating Picard-QS_Cycle summary..."
    quality_score_summary(qs_cycle_list, 'QScycle')
if len(cufflinks_list) > 0:
    print "Generating Cufflinks summary..."
    cufflinks_summary(cufflinks_list, '_cufflinks_summary.fpkm')
#Generating summary for EQP results (meant for in-house use only, neglect otherwise)
def mapped_read_count(input, output):
    """ Appends one mapped/total read-count row per barcode of one sample.

    Greps the aligner *_count.log files for the 'reads are mapped' lines and
    sums the mapped/total pairs; a tab-separated row is written to the open
    file handle 'output' for every barcode directory of the sample.
    """
    sample_name = input.split("/")[-1]
    for barcode_dir in sample_dict[input]:
        mapped_total = 0
        reads_total = 0
        barcode_name = barcode_dir.split("/")[-1]
        cmd_string = 'cat '+barcode_dir+'/aligner_output/*_count.log | grep -A 3 combined.sam.gz |grep reads\ are\ mapped'
        cmd_out = Popen(cmd_string, stdout=PIPE, shell=True).communicate()[0]
        for mapped, reads in re.findall(r"(\d+) of (\d+) reads are mapped \([\d\.]+\%\)\.", cmd_out):
            mapped_total += int(mapped)
            reads_total += int(reads)
        output.write(sample_name+"\t"+barcode_name+"\t"+str(mapped_total)+"\t"+str(reads_total)+"\n")
# EQP driver (in-house functionality).
# NOTE(review): the count report is reopened in append mode and the header
# line rewritten for every matching sample directory -- confirm that the
# repeated headers are intended.
for i in sample_list:
    if glob.glob(i+"/*/aligner_output/*_count.log"):
        print "Generating EQP Total Mapped Read counts summary..."
        eqp_map=open(summary_dir+"/eqp_mapped_read_count_summary.txt","a")
        eqp_map.write("SAMPLE\tBARCODE\tMAPPED_READS\tTOTAL_READS\n")
        mapped_read_count(i,eqp_map)
        eqp_map.close()
if len(eqp_gene)>0:
    print "Generating EQP Gene summary..."
    count_summary(eqp_gene,'_merge_eqp_counts-gene_summary')
if len(eqp_junc)>0:
    print "Generating EQP Junction summary..."
    count_summary(eqp_junc,'_merge_eqp_counts-junction_summary')
if len(eqp_exon)>0:
    print "Generating EQP Exon summary..."
    count_summary(eqp_exon,'_merge_eqp_counts-exon_summary')
print "YAP summary finished!", time.strftime("%Y/%m/%d %H:%M:%S", time.localtime())
| |
import pygame
from pygame.locals import *
import copy
class HUDElement:
    """
    Base class for a single part of a heads-up display.

    Concrete elements override draw(); this class only holds the common
    label / colour / visibility state.
    @author: James Heslin (PROGRAM_IX)
    """

    def __init__(self, label, colour, visible=True):
        """
        Stores the shared state of a HUD element.

        @type label: string
        @param label: Identifier of the element
        @type colour: pygame.Colour
        @param colour: Colour of the element
        @type visible: boolean
        @param visible: Whether the element is visible
        @author: James Heslin (PROGRAM_IX)
        """
        self.visible = visible
        self.colour = colour
        self.label = label

    def draw(self, screen):
        """
        Renders the element onto the given surface; this base implementation
        intentionally draws nothing and is meant to be overridden.

        @type screen: pygame.Surface
        @param screen: The surface onto which the game will be rendered
        @author: James Heslin (PROGRAM_IX)
        """
        pass
class HUDText(HUDElement):
    """
    An element of a heads-up display consisting of text, rendered as line
    segments taken from the class-level `letters` stroke table.
    @author: James Heslin (PROGRAM_IX)
    """
    # Stroke table: each character maps to a polyline of (x, y) offsets that
    # are scaled by `size` and shifted by the current pen position when drawn.
    letters = {
        'a': ((-5, -10), (-5, 15), (-5, 0), (5, 0), (5, 15),
              (5, -10), (-5, -10)),
        'b': ((-5, -10), (-5, 15), (5, 15), (5, 0), (-5, 0),
              (0, 0), (0, -10), (-5, -10)),
        'c': ((5, -10), (-5, -10), (-5, 15), (5, 15)),
        'd': ((0, -10), (-5, -10), (-5, 15), (0, 15), (5, 10),
              (5, -5), (0, -10)),
        'e': ((5, -10), (-5, -10), (-5, 0), (0, 0), (-5, 0),
              (-5, 15), (5, 15)),
        'f': ((5, -10), (-5, -10), (-5, 0), (0, 0), (-5, 0),
              (-5, 15)),
        'g': ((5, -10), (-5, -10), (-5, 15), (5, 15), (5, 0),
              (0, 0)),
        'h': ((-5, -10), (-5, 15), (-5, 0), (5, 0), (5, -10),
              (5, 15)),
        'i': ((-5, -10), (5, -10), (0, -10), (0, 15), (-5, 15),
              (5, 15)),
        'j': ((-5, -10), (5, -10), (0, -10), (0, 15), (-5, 15),
              (-5, 10)),
        'k': ((-5, -10), (-5, 0), (5, -10), (-5, 0), (5, 15),
              (-5, 0), (-5, 15)),
        'l': ((-5, -10), (-5, 15), (5, 15)),
        'm': ((-5, 15), (-5, -10), (0, -10), (0, 0), (0, -10),
              (5, -10), (5, 15)),
        'n': ((-5, 15), (-5, -10), (5, 15), (5, -10)),
        'o': ((-5, -10), (-5, 15), (5, 15), (5, -10), (-5, -10)),
        'p': ((-5, 15), (-5, -10), (5, -10), (5, 0), (-5, 0)),
        'q': ((-5, -10), (-5, 10), (0, 10), (0, 15), (5, 15),
              (0, 15), (0, 10), (5, 10), (5, -10), (-5, -10)),
        'r': ((-5, 15), (-5, -10), (5, -10), (5, 0), (-5, 0),
              (5, 15)),
        's': ((5, -10), (-5, -10), (-5, 0), (5, 0), (5, 15),
              (-5, 15)),
        't': ((-5, -10), (5, -10), (0, -10), (0, 15)),
        'u': ((-5, -10), (-5, 15), (5, 15), (5, -10)),
        'v': ((-5, -10), (0, 15), (5, -10)),
        'w': ((-5, -10), (-5, 15), (0, 15), (0, 0), (0, 15),
              (5, 15), (5, -10)),
        'x': ((-5, -10), (5, 15), (0, 0), (-5, 15), (5, -10)),
        'y': ((-5, -10), (0, 0), (-5, 15), (5, -10)),
        'z': ((-5, -10), (5, -10), (-5, 15), (5, 15)),
        '1': ((-5, -5), (0, -10), (0, 15), (-5, 15), (5, 15)),
        '2': ((-5, -5), (-5, -10), (5, -10), (5, -5), (-5, 15), (5, 15)),
        '3': ((-5, -10), (5, -10), (0, 0), (5, 5), (0, 15), (-5, 15)),
        '4': ((0, 15), (0, -10), (-5, 0), (5, 0)),
        '5': ((5, -10), (-5, -10), (-5, 0), (0, 0), (5, 5), (5, 10),
              (0, 15), (-5, 15)),
        '6': ((5, -10), (-5, 0), (-5, 15), (5, 15), (5, 0), (-5, 0)),
        '7': ((-5, -10), (5, -10), (-5, 15)),
        '8': ((-5, -10), (5, -10), (5, -5), (0, 0), (-5, 5),
              (-5, 15), (5, 15), (5, 5), (0, 0), (-5, -5), (-5, -10)),
        '9': ((5, 15), (5, -10), (-5, -10), (-5, 0), (5, 0)),
        '0': ((5, 15), (-5, -10), (-5, 15), (5, 15), (5, -10), (-5, -10))
    }

    def __init__(self, label, colour, text, pos, size, width, visible=True):
        """
        @type label: string
        @param label: Identifier of the text
        @type colour: pygame.Color
        @param colour: Colour of the text
        @type text: string
        @param text: Text to display
        @type pos: list/tuple containing two ints
        @param pos: Coordinates of text start point
        @type size: int
        @param size: Scale factor applied to the letter strokes
        @type width: int
        @param width: Stroke width passed to pygame.draw.line
        @type visible: boolean
        @param visible: Whether the text is visible
        @author: James Heslin (PROGRAM_IX)
        """
        HUDElement.__init__(self, label, colour, visible)
        self.text = text
        self.pos = pos
        self.size = size
        self.width = width

    def draw(self, screen):
        """
        Render the text to the screen, one polyline per known character;
        characters without a stroke entry still advance the pen position.

        @type screen: pygame.Surface
        @param screen: The screen onto which the text should be rendered
        @author: James Heslin (PROGRAM_IX)
        """
        c_pos = self.pos
        # PORTABILITY FIX: the original iterated
        # "for letter in xrange(len(self.text))", which is Python-2-only;
        # iterating the characters directly is equivalent and version-safe.
        for ch in self.text:
            if ch in self.letters:
                strokes = self.letters[ch]
                last = strokes[0]
                for pt in strokes:
                    pygame.draw.line(screen, self.colour,
                                     (last[0] * self.size + c_pos[0],
                                      last[1] * self.size + c_pos[1]),
                                     (pt[0] * self.size + c_pos[0],
                                      pt[1] * self.size + c_pos[1]),
                                     self.width)
                    last = pt
            # Advance the pen one character cell to the right.
            c_pos = (c_pos[0] + self.size * 15, c_pos[1])
class HUDLine(HUDElement):
    """
    A heads-up display element that renders a single straight line.
    @author: James Heslin (PROGRAM_IX)
    """

    def __init__(self, label, colour, line, visible=True):
        """
        Constructs a new HUDLine.

        @type label: string
        @param label: Identifier of the line
        @type colour: pygame.Color
        @param colour: Colour of the line
        @type line: list/tuple containing start position tuple (int, int),
            end position tuple (int, int), and width (int)
        @param line: Line arguments
        @type visible: boolean
        @param visible: Whether the line is visible
        @author: James Heslin (PROGRAM_IX)
        """
        HUDElement.__init__(self, label, colour, visible)
        self.line = line

    def draw(self, screen):
        """
        Draws the stored line: element 0 is the start point, element 1 the
        end point, and the final element the stroke width.

        @type screen: pygame.Surface
        @param screen: The screen onto which the line should be rendered
        @author: James Heslin (PROGRAM_IX)
        """
        start_pt = self.line[0]
        end_pt = self.line[1]
        stroke = self.line[-1]
        pygame.draw.line(screen, self.colour, start_pt, end_pt, stroke)
class HUDPolygon(HUDElement):
    """
    A heads-up display element that renders a closed polygon.
    @author: James Heslin (PROGRAM_IX)
    """

    def __init__(self, label, colour, lines, visible=True):
        """
        @type label: string
        @param label: Identifier of the polygon
        @type colour: pygame.Colour
        @param colour: Colour of the polygon
        @type lines: list/tuple containing a tuple of points (each (int, int))
            and an int
        @param lines: Lines portion of the element
        @type visible: boolean
        @param visible: Whether the element is visible
        @author: James Heslin (PROGRAM_IX)
        """
        HUDElement.__init__(self, label, colour, visible)
        self.lines = lines

    def draw(self, screen):
        """
        Draws the polygon: every element but the last is a vertex, the final
        element is the stroke width.

        @type screen: pygame.Surface
        @param screen: The screen onto which the polygon is to be rendered
        @author: James Heslin (PROGRAM_IX)
        """
        vertices = self.lines[:-1]
        stroke = self.lines[-1]
        pygame.draw.polygon(screen, self.colour, vertices, stroke)
class HUD:
    """
    A heads-up display: an ordered collection of HUDElements that are drawn
    together onto a screen to give information to a player.
    @author: James Heslin (PROGRAM_IX)
    """

    def __init__(self):
        """
        Constructs an empty HUD.
        @author: James Heslin (PROGRAM_IX)
        """
        self.elements = []

    def add(self, hud_el):
        """
        Appends a new element to the HUD.
        @author: James Heslin (PROGRAM_IX)
        """
        self.elements.append(hud_el)

    def remove(self, hud_el):
        """
        Removes an element from the HUD.
        @author: James Heslin (PROGRAM_IX)
        """
        self.elements.remove(hud_el)

    def draw(self, screen):
        """
        Draws every visible element onto the screen, in insertion order.

        @type screen: pygame.Surface
        @param screen: The screen onto which the HUD is to be rendered
        @author: James Heslin (PROGRAM_IX)
        """
        for element in self.elements:
            if not element.visible:
                continue
            element.draw(screen)

    def get(self, label):
        """
        Looks up an element by label.

        @type label: string
        @param label: The label of the HUDElement to retrieve
        @rtype: HUDElement or None
        @return: The first element whose label matches, or None
        @author: James Heslin (PROGRAM_IX)
        """
        for element in self.elements:
            if element.label == label:
                return element
        return None
| |
import math
from myhdl import always, Signal, intbv, concat, always_seq, instances, block, modbv
from hdmi.models.constants import CONTROL_TOKEN
class EncoderModel(object):
    """
    A non convertible model to simulate the behaviour of
    a TMDS and TERC4 encoder.

    Args:
        clock: pixel clock as input
        reset: asynchronous reset input (active high)
        video_in: video input of a single channel
        audio_in: audio input
        c0: used to determine preamble
        c1: used to determine preamble
        vde: video data enable
        ade: audio data enable
        data_out: 10 bit parallel output
        channel: Indicates 'RED', 'GREEN' or 'BLUE' channel

    Example:
        .. code-block:: python

        encoder_model = EncoderModel(*params)
        process_inst = encoder_model.process()
        process_inst.run_sim()
    """

    def __init__(self, clock, reset, video_in, audio_in, c0, c1, vde, ade,
                 data_out, channel='BLUE'):
        self.channel = channel
        self.clock = clock
        self.reset = reset
        self.video_in = video_in
        self.audio_in = audio_in
        self.c0 = c0
        self.c1 = c1
        self.vde = vde
        self.ade = ade
        self.data_out = data_out
        # Bit width of the video input, derived from its value range
        # (e.g. video_in.max == 256 -> 8 bits per component).
        self.color_depth = int(math.log(video_in.max, 2))

    @block
    def process(self):
        """
        It simulates the encoding process of the TMDS encoder.

        Example:
            .. code-block:: python

            process_inst = encoder_model.process()
            process_inst.run_sim()
        """
        # 10-bit guard-band word emitted while self.vde is asserted but
        # __vde (delayed vde) is not yet; pattern depends on the channel.
        video_guard_band = {
            'BLUE': int('1011001100', 2),
            'GREEN': int('0100110011', 2)
        }.get(self.channel, int('1011001100', 2))
        # Guard-band word for data-island periods (GREEN/RED channels only).
        data_island_guard_band = {
            'GREEN': int('0100110011', 2),
            'RED': int('0100110011', 2)
        }.get(self.channel, 0)
        # Population count of the current video input word.
        no_of_ones_video_in = Signal(intbv(0)[math.log(self.color_depth, 2) + 1:])
        # decision1: selects XNOR vs XOR chain in encoding stage 1.
        # decision2/decision3: control inversion in the DC-balance stage.
        decision1 = Signal(bool(0))
        decision2 = Signal(bool(0))
        decision3 = Signal(bool(0))
        # input video delayed by a clock cycle
        _video_in = Signal(intbv(0, min=self.video_in.min,
                                 max=self.video_in.max))
        # 1 bit more than the input (Signal after first stage of encoding the input)
        q_m = Signal(intbv(0, min=self.video_in.min,
                           max=self.video_in.max * 2))
        no_of_ones_q_m = Signal(intbv(0)[math.log(self.color_depth, 2)+1:])
        no_of_zeros_q_m = Signal(intbv(0)[math.log(self.color_depth, 2)+1:])
        # Running disparity counter used for DC balancing (5-bit, wrapping).
        count = Signal(modbv(0)[5:0])
        # delayed versions of vde signal
        _vde, __vde = [Signal(bool(0)) for _ in range(2)]
        # delayed versions of ade signal
        _ade, __ade, ___ade, ____ade = [Signal(bool(0)) for _ in range(4)]
        # delayed versions of c0 signal
        _c0, __c0 = [Signal(bool(0)) for _ in range(2)]
        # delayed versions of c1 signal
        _c1, __c1 = [Signal(bool(0)) for _ in range(2)]
        # delayed versions of audio_in signal
        _audio_in, __audio_in = [Signal(intbv(0, min=self.audio_in.min,
                                              max=self.audio_in.max)) for _ in range(2)]
        # q_m delayed by one clock so output_logic sees a registered value.
        _q_m = Signal(intbv(0, min=self.video_in.min,
                            max=self.video_in.max * 2))
        # Digital island guard band period
        digb_period = Signal(bool(0))
        ade_vld = Signal(bool(0))
        audio_in_vld = Signal(intbv(0, min=self.audio_in.min,
                                    max=self.audio_in.max))

        @always(self.clock.posedge)
        def sequential_logic():
            # Pipeline registers: precompute popcounts and delay control and
            # data inputs so the stages below see time-aligned values.
            no_of_ones_video_in.next = bin(self.video_in).count("1")
            _video_in.next = self.video_in
            no_of_ones_q_m.next = bin(q_m[8:0]).count("1")
            no_of_zeros_q_m.next = 8 - bin(q_m[8:0]).count("1")
            _vde.next = self.vde
            __vde.next = _vde
            _ade.next = self.ade
            __ade.next = _ade
            ___ade.next = __ade
            ____ade.next = ___ade
            _c0.next = self.c0
            __c0.next = _c0
            _c1.next = self.c1
            __c1.next = _c1
            _audio_in.next = self.audio_in
            __audio_in.next = _audio_in
            _q_m.next = q_m

        @always(____ade, self.ade, __ade, no_of_ones_video_in, _video_in, count,
                no_of_ones_q_m, no_of_zeros_q_m, q_m, digb_period, __c1, __c0,
                __audio_in, decision1)
        def continuous_assignment():
            digb_period.next = (not __ade) and (____ade or self.ade)
            # Stage 1 decision: use the XNOR chain when the input has a
            # majority of ones (or exactly half with LSB 0), else XOR.
            decision1.next = (no_of_ones_video_in > 4) or \
                             (no_of_ones_video_in == 4 and not _video_in[0])
            decision2.next = (count == 0) | (no_of_zeros_q_m == no_of_ones_q_m)
            # count[4] is the sign bit of the running disparity.
            decision3.next = (not count[4]) & (no_of_ones_q_m > no_of_zeros_q_m) | \
                             (count[4]) & (no_of_ones_q_m < no_of_zeros_q_m)
            if self.channel == "BLUE":
                # The BLUE channel carries c0/c1 (sync signals) in the low
                # TERC4 bits during data islands.
                ade_vld.next = self.ade | __ade | ____ade
                if digb_period:
                    audio_in_vld.next = concat(bool(1), bool(1), __c1, __c0)
                else:
                    audio_in_vld.next = concat(__audio_in[3], __audio_in[2], __c1, __c0)
            else:
                ade_vld.next = __ade
                audio_in_vld.next = __audio_in
            # Stage 1 of TMDS encoding: XOR/XNOR chain over the input bits;
            # the top bit records which chain was used (0 -> XNOR, 1 -> XOR).
            q_m.next[0] = _video_in[0]
            temp = _video_in[0]
            for i in range(1, self.color_depth):
                temp = (temp ^ (not _video_in[i] if decision1 else _video_in[i]))
                q_m.next[i] = 1 if temp else 0
            q_m.next[self.color_depth] = 0 if decision1 else 1

        @always_seq(self.clock.posedge, reset=self.reset)
        def output_logic():
            if __vde:
                # Active video: stage 2 conditionally inverts q_m to keep the
                # running disparity near zero; bit 9 flags the inversion.
                if decision2:
                    self.data_out.next[9] = not _q_m[8]
                    self.data_out.next[8] = _q_m[8]
                    if _q_m[8]:
                        self.data_out.next[8:0] = _q_m[8:0]
                        count.next = count + no_of_ones_q_m - no_of_zeros_q_m
                    else:
                        self.data_out.next[8:0] = ~_q_m[8:0]
                        count.next = count + no_of_zeros_q_m - no_of_ones_q_m
                elif decision3:
                    self.data_out.next[9] = True
                    self.data_out.next[8] = _q_m[8]
                    self.data_out.next[8:0] = ~_q_m[8:0]
                    count.next = count - concat(_q_m[8], bool(0)) + no_of_zeros_q_m - no_of_ones_q_m
                else:
                    self.data_out.next[9] = False
                    self.data_out.next[8] = _q_m[8]
                    self.data_out.next[8:0] = _q_m[8:0]
                    count.next = count - concat(not _q_m[8], bool(0)) + no_of_ones_q_m - no_of_zeros_q_m
            else:
                if self.vde:
                    # Video leading guard band.
                    self.data_out.next = video_guard_band
                elif ade_vld:
                    # TERC4 table: 4-bit audio/control word -> 10-bit code.
                    terc4_encoding = ['1010011100',
                                      '1001100011',
                                      '1011100100',
                                      '1011100010',
                                      '0101110001',
                                      '0100011110',
                                      '0110001110',
                                      '0100111100',
                                      '1011001100',
                                      '0100111001',
                                      '0110011100',
                                      '1011000110',
                                      '1010001110',
                                      '1001110001',
                                      '0101100011',
                                      '1011000011']
                    self.data_out.next = int(terc4_encoding[audio_in_vld], 2)
                elif (self.ade | ____ade) and (self.channel != "BLUE"):
                    self.data_out.next = data_island_guard_band
                else:
                    # Control period: emit the control token for (c1, c0).
                    concat_c = concat(__c1, __c0)
                    self.data_out.next = CONTROL_TOKEN[concat_c]
                # Running disparity resets outside active video.
                count.next = 0

        return instances()
| |
# Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import functools
import logging
import math
import os
import random
import socket
import stat
import string
import threading
from collections import defaultdict
from botocore.exceptions import IncompleteReadError, ReadTimeoutError
from s3transfer.compat import SOCKET_ERROR, fallocate, rename_file
# An S3 multipart upload may consist of at most this many parts.
MAX_PARTS = 10000
# The maximum file size you can upload via S3 per request.
# See: http://docs.aws.amazon.com/AmazonS3/latest/dev/UploadingObjects.html
# and: http://docs.aws.amazon.com/AmazonS3/latest/dev/qfacts.html
MAX_SINGLE_UPLOAD_SIZE = 5 * (1024 ** 3)  # 5 GiB
MIN_UPLOAD_CHUNKSIZE = 5 * (1024 ** 2)  # 5 MiB
logger = logging.getLogger(__name__)
# Errors raised mid-download that indicate a transient network problem;
# callers treat these as retryable (per the name).
S3_RETRYABLE_DOWNLOAD_ERRORS = (
    socket.timeout,
    SOCKET_ERROR,
    ReadTimeoutError,
    IncompleteReadError,
)
def random_file_extension(num_digits=8):
    """Return a random string of ``num_digits`` hexadecimal characters."""
    picks = (random.choice(string.hexdigits) for _ in range(num_digits))
    return ''.join(picks)
def signal_not_transferring(request, operation_name, **kwargs):
    """Tell the request body it is not transferring, if it supports that.

    Only applies to upload operations whose body may be a progress-aware
    file wrapper; other operations and plain bodies are ignored.
    """
    if operation_name not in ('PutObject', 'UploadPart'):
        return
    body = request.body
    if hasattr(body, 'signal_not_transferring'):
        body.signal_not_transferring()
def signal_transferring(request, operation_name, **kwargs):
    """Tell the request body it is transferring, if it supports that.

    Mirror of ``signal_not_transferring``: applies only to upload
    operations with a progress-aware body.
    """
    if operation_name not in ('PutObject', 'UploadPart'):
        return
    body = request.body
    if hasattr(body, 'signal_transferring'):
        body.signal_transferring()
def calculate_num_parts(size, part_size):
    """Return how many parts of ``part_size`` bytes are needed to cover
    ``size`` bytes (i.e. the ceiling of the division)."""
    return math.ceil(size / part_size)
def calculate_range_parameter(
    part_size, part_index, num_parts, total_size=None
):
    """Calculate the range parameter for multipart downloads/copies

    :type part_size: int
    :param part_size: The size of the part

    :type part_index: int
    :param part_index: The index for which this parts starts. This index starts
        at zero

    :type num_parts: int
    :param num_parts: The total number of parts in the transfer

    :type total_size: int or None
    :param total_size: The total size of the object being transferred. When
        provided, the last part gets an explicit inclusive end byte rather
        than an open-ended range.

    :rtype: str
    :returns: The value to use for Range parameter on downloads or
        the CopySourceRange parameter for copies
    """
    start_range = part_index * part_size
    if part_index == num_parts - 1:
        # Last part: it may be shorter than part_size, so leave the end
        # open unless the exact object size is known.
        end_range = ''
        if total_size is not None:
            end_range = str(total_size - 1)
    else:
        end_range = start_range + part_size - 1
    range_param = f'bytes={start_range}-{end_range}'
    return range_param
def get_callbacks(transfer_future, callback_type):
    """Retrieves callbacks from a subscriber

    :type transfer_future: s3transfer.futures.TransferFuture
    :param transfer_future: The transfer future the subscriber is associated
        to.

    :type callback_type: str
    :param callback_type: The type of callback to retrieve from the subscriber.
        Valid types include:
            * 'queued'
            * 'progress'
            * 'done'

    :returns: A list of callbacks for the type specified. All callbacks are
        preinjected with the transfer future.
    """
    method_name = 'on_' + callback_type
    bound_callbacks = []
    for subscriber in transfer_future.meta.call_args.subscribers:
        handler = getattr(subscriber, method_name, None)
        if handler is None:
            continue
        # Pre-bind the future so callers only supply callback-specific args.
        bound_callbacks.append(
            functools.partial(handler, future=transfer_future)
        )
    return bound_callbacks
def invoke_progress_callbacks(callbacks, bytes_transferred):
    """Calls all progress callbacks

    :param callbacks: A list of progress callbacks to invoke
    :param bytes_transferred: The number of bytes transferred. This is passed
        to the callbacks. If no bytes were transferred the callbacks will not
        be invoked because no progress was achieved. It is also possible
        to receive a negative amount which comes from retrying a transfer
        request.
    """
    # Zero bytes means no progress, so skip the callbacks entirely.
    if not bytes_transferred:
        return
    for callback in callbacks:
        callback(bytes_transferred=bytes_transferred)
def get_filtered_dict(original_dict, whitelisted_keys):
    """Gets a dictionary filtered by whitelisted keys

    :param original_dict: The original dictionary of arguments to source keys
        and values.
    :param whitelisted_key: A list of keys to include in the filtered
        dictionary.

    :returns: A dictionary containing key/values from the original dictionary
        whose key was included in the whitelist
    """
    return {
        key: value
        for key, value in original_dict.items()
        if key in whitelisted_keys
    }
class CallArgs:
    def __init__(self, **kwargs):
        """A class that records call arguments

        Every keyword argument passed to the constructor becomes an
        attribute of the instance, bound to its given value.
        """
        self.__dict__.update(kwargs)
class FunctionContainer:
    """Bundles a callable with the args and kwargs to later invoke it with.

    Calling the container calls the stored function with the stored
    positional and keyword arguments.
    """

    def __init__(self, func, *args, **kwargs):
        self._func = func
        self._args = args
        self._kwargs = kwargs

    def __repr__(self):
        return (
            f'Function: {self._func} with args {self._args} '
            f'and kwargs {self._kwargs}'
        )

    def __call__(self):
        return self._func(*self._args, **self._kwargs)
class CountCallbackInvoker:
    """Invokes a callback once a shared count has been finalized and drained
    to zero.

    :param callback: Callback invoke when finalized count reaches zero
    """

    def __init__(self, callback):
        self._lock = threading.Lock()
        self._callback = callback
        self._count = 0
        self._is_finalized = False

    @property
    def current_count(self):
        with self._lock:
            return self._count

    def _invoke_if_done(self):
        # Caller must hold self._lock. Fires the callback exactly when the
        # counter has been finalized and no references remain.
        if self._is_finalized and self._count == 0:
            self._callback()

    def increment(self):
        """Increment the count by one"""
        with self._lock:
            if self._is_finalized:
                raise RuntimeError(
                    'Counter has been finalized it can no longer be '
                    'incremented.'
                )
            self._count += 1

    def decrement(self):
        """Decrement the count by one"""
        with self._lock:
            if self._count == 0:
                raise RuntimeError(
                    'Counter is at zero. It cannot dip below zero'
                )
            self._count -= 1
            self._invoke_if_done()

    def finalize(self):
        """Finalize the counter

        Once finalized, the counter never be incremented and the callback
        can be invoked once the count reaches zero
        """
        with self._lock:
            self._is_finalized = True
            self._invoke_if_done()
class OSUtils:
    """Thin wrapper around os/file operations used by the transfer code.

    Centralizing these calls in one object makes them easy to stub out in
    tests.
    """

    # Most filesystems limit a single path component to 255 characters.
    _MAX_FILENAME_LEN = 255

    def get_file_size(self, filename):
        """Return the size of ``filename`` in bytes."""
        return os.path.getsize(filename)

    def open_file_chunk_reader(self, filename, start_byte, size, callbacks):
        """Open ``filename`` and return a ReadFileChunk over ``size`` bytes
        starting at ``start_byte``; callbacks start disabled."""
        return ReadFileChunk.from_filename(
            filename, start_byte, size, callbacks, enable_callbacks=False
        )

    def open_file_chunk_reader_from_fileobj(
        self,
        fileobj,
        chunk_size,
        full_file_size,
        callbacks,
        close_callbacks=None,
    ):
        """Wrap an already-open file object in a ReadFileChunk; callbacks
        start disabled."""
        return ReadFileChunk(
            fileobj,
            chunk_size,
            full_file_size,
            callbacks=callbacks,
            enable_callbacks=False,
            close_callbacks=close_callbacks,
        )

    def open(self, filename, mode):
        """Open ``filename`` with the given mode (indirection for tests)."""
        return open(filename, mode)

    def remove_file(self, filename):
        """Remove a file, noop if file does not exist."""
        # Unlike os.remove, if the file does not exist,
        # then this method does nothing.
        try:
            os.remove(filename)
        except OSError:
            pass

    def rename_file(self, current_filename, new_filename):
        """Rename a file using the platform-aware compat helper."""
        rename_file(current_filename, new_filename)

    def is_special_file(self, filename):
        """Checks to see if a file is a special UNIX file.

        It checks if the file is a character special device, block special
        device, FIFO, or socket.

        :param filename: Name of the file

        :returns: True if the file is a special file. False, if is not.
        """
        # FIX: the first parameter was previously named ``cls`` even though
        # this is a plain instance method (no @classmethod); renamed to
        # ``self``. Callers pass ``filename`` positionally, so behavior is
        # unchanged.
        # If it does not exist, it must be a new file so it cannot be
        # a special file.
        if not os.path.exists(filename):
            return False
        mode = os.stat(filename).st_mode
        # Character special device.
        if stat.S_ISCHR(mode):
            return True
        # Block special device
        if stat.S_ISBLK(mode):
            return True
        # Named pipe / FIFO
        if stat.S_ISFIFO(mode):
            return True
        # Socket.
        if stat.S_ISSOCK(mode):
            return True
        return False

    def get_temp_filename(self, filename):
        """Return a sibling temporary filename derived from ``filename``."""
        suffix = os.extsep + random_file_extension()
        path = os.path.dirname(filename)
        name = os.path.basename(filename)
        # Truncate the base name so name + suffix stays within the limit.
        temp_filename = name[: self._MAX_FILENAME_LEN - len(suffix)] + suffix
        return os.path.join(path, temp_filename)

    def allocate(self, filename, size):
        """Preallocate ``size`` bytes for ``filename``; remove the file and
        re-raise on failure."""
        try:
            with self.open(filename, 'wb') as f:
                fallocate(f, size)
        except OSError:
            self.remove_file(filename)
            raise
class DeferredOpenFile:
    def __init__(self, filename, start_byte=0, mode='rb', open_function=open):
        """A file-like object that postpones opening until first use.

        Deferring the open is useful when many instances exist across
        threads, since operating systems limit how many files a process may
        hold open. The file is actually opened by ``read()``, ``write()``,
        ``seek()``, and ``__enter__()``.

        :type filename: str
        :param filename: The name of the file to open

        :type start_byte: int
        :param start_byte: The byte to seek to when the file is opened.

        :type mode: str
        :param mode: The mode to use to open the file

        :type open_function: function
        :param open_function: The function to use to open the file
        """
        self._filename = filename
        self._fileobj = None
        self._start_byte = start_byte
        self._mode = mode
        self._open_function = open_function

    def _open_if_needed(self):
        # Lazily create the underlying file object on first access and
        # position it at the requested starting byte.
        if self._fileobj is not None:
            return
        self._fileobj = self._open_function(self._filename, self._mode)
        if self._start_byte != 0:
            self._fileobj.seek(self._start_byte)

    @property
    def name(self):
        return self._filename

    def read(self, amount=None):
        self._open_if_needed()
        return self._fileobj.read(amount)

    def write(self, data):
        self._open_if_needed()
        self._fileobj.write(data)

    def seek(self, where, whence=0):
        self._open_if_needed()
        self._fileobj.seek(where, whence)

    def tell(self):
        # Before the file is opened, report the position it will open at.
        if self._fileobj is None:
            return self._start_byte
        return self._fileobj.tell()

    def close(self):
        if self._fileobj:
            self._fileobj.close()

    def __enter__(self):
        self._open_if_needed()
        return self

    def __exit__(self, *args, **kwargs):
        self.close()
class ReadFileChunk:
    def __init__(
        self,
        fileobj,
        chunk_size,
        full_file_size,
        callbacks=None,
        enable_callbacks=True,
        close_callbacks=None,
    ):
        """
        Given a file object shown below::

            |___________________________________________________|
            0          |                 |                full_file_size
                       |----chunk_size---|
                    f.tell()

        :type fileobj: file
        :param fileobj: File like object

        :type chunk_size: int
        :param chunk_size: The max chunk size to read.  Trying to read
            pass the end of the chunk size will behave like you've
            reached the end of the file.

        :type full_file_size: int
        :param full_file_size: The entire content length associated
            with ``fileobj``.

        :type callbacks: A list of function(amount_read)
        :param callbacks: Called whenever data is read from this object in the
            order provided.

        :type enable_callbacks: boolean
        :param enable_callbacks: True if to run callbacks. Otherwise, do not
            run callbacks

        :type close_callbacks: A list of function()
        :param close_callbacks: Called when close is called. The function
            should take no arguments.
        """
        self._fileobj = fileobj
        # The chunk is anchored at wherever the file object currently is.
        self._start_byte = self._fileobj.tell()
        self._size = self._calculate_file_size(
            self._fileobj,
            requested_size=chunk_size,
            start_byte=self._start_byte,
            actual_file_size=full_file_size,
        )
        # _amount_read represents the position in the chunk and may exceed
        # the chunk size, but won't allow reads out of bounds.
        self._amount_read = 0
        self._callbacks = callbacks
        if callbacks is None:
            self._callbacks = []
        self._callbacks_enabled = enable_callbacks
        self._close_callbacks = close_callbacks
        if close_callbacks is None:
            # FIX: this branch previously re-assigned ``close_callbacks``
            # (still None) -- a no-op. Normalize to an empty list, matching
            # the ``callbacks`` handling above; close() iterates it, so an
            # empty list preserves behavior.
            self._close_callbacks = []

    @classmethod
    def from_filename(
        cls,
        filename,
        start_byte,
        chunk_size,
        callbacks=None,
        enable_callbacks=True,
    ):
        """Convenience factory function to create from a filename.

        :type start_byte: int
        :param start_byte: The first byte from which to start reading.

        :type chunk_size: int
        :param chunk_size: The max chunk size to read.  Trying to read
            pass the end of the chunk size will behave like you've
            reached the end of the file.

        :type callbacks: function(amount_read)
        :param callbacks: Called whenever data is read from this object.

        :type enable_callbacks: bool
        :param enable_callbacks: Indicate whether to invoke callback
            during read() calls.

        :rtype: ``ReadFileChunk``
        :return: A new instance of ``ReadFileChunk``
        """
        f = open(filename, 'rb')
        f.seek(start_byte)
        file_size = os.fstat(f.fileno()).st_size
        return cls(f, chunk_size, file_size, callbacks, enable_callbacks)

    def _calculate_file_size(
        self, fileobj, requested_size, start_byte, actual_file_size
    ):
        # Never let the chunk extend past the end of the file.
        max_chunk_size = actual_file_size - start_byte
        return min(max_chunk_size, requested_size)

    def read(self, amount=None):
        """Read up to ``amount`` bytes, bounded by the chunk's end."""
        amount_left = max(self._size - self._amount_read, 0)
        if amount is None:
            amount_to_read = amount_left
        else:
            amount_to_read = min(amount_left, amount)
        data = self._fileobj.read(amount_to_read)
        self._amount_read += len(data)
        if self._callbacks is not None and self._callbacks_enabled:
            invoke_progress_callbacks(self._callbacks, len(data))
        return data

    def signal_transferring(self):
        """Re-enable progress callbacks and propagate to the file object."""
        self.enable_callback()
        if hasattr(self._fileobj, 'signal_transferring'):
            self._fileobj.signal_transferring()

    def signal_not_transferring(self):
        """Disable progress callbacks and propagate to the file object."""
        self.disable_callback()
        if hasattr(self._fileobj, 'signal_not_transferring'):
            self._fileobj.signal_not_transferring()

    def enable_callback(self):
        self._callbacks_enabled = True

    def disable_callback(self):
        self._callbacks_enabled = False

    def seek(self, where, whence=0):
        """Seek within the chunk; positions are relative to the chunk."""
        if whence not in (0, 1, 2):
            # Mimic io's error for invalid whence values
            raise ValueError(f"invalid whence ({whence}, should be 0, 1 or 2)")

        # Recalculate where based on chunk attributes so seek from file
        # start (whence=0) is always used
        where += self._start_byte
        if whence == 1:
            where += self._amount_read
        elif whence == 2:
            where += self._size

        self._fileobj.seek(max(where, self._start_byte))
        if self._callbacks is not None and self._callbacks_enabled:
            # To also rewind the callback() for an accurate progress report
            bounded_where = max(min(where - self._start_byte, self._size), 0)
            bounded_amount_read = min(self._amount_read, self._size)
            amount = bounded_where - bounded_amount_read
            invoke_progress_callbacks(
                self._callbacks, bytes_transferred=amount
            )
        self._amount_read = max(where - self._start_byte, 0)

    def close(self):
        """Invoke close callbacks (when enabled) and close the file."""
        if self._close_callbacks is not None and self._callbacks_enabled:
            for callback in self._close_callbacks:
                callback()
        self._fileobj.close()

    def tell(self):
        # Position relative to the start of the chunk, not the file.
        return self._amount_read

    def __len__(self):
        # __len__ is defined because requests will try to determine the length
        # of the stream to set a content length. In the normal case
        # of the file it will just stat the file, but we need to change that
        # behavior. By providing a __len__, requests will use that instead
        # of stat'ing the file.
        return self._size

    def __enter__(self):
        return self

    def __exit__(self, *args, **kwargs):
        self.close()

    def __iter__(self):
        # This is a workaround for http://bugs.python.org/issue17575
        # Basically httplib will try to iterate over the contents, even
        # if its a file like object. This wasn't noticed because we've
        # already exhausted the stream so iterating over the file immediately
        # stops, which is what we're simulating here.
        return iter([])
class StreamReaderProgress:
    """Wrapper for a read only stream that adds progress callbacks."""

    def __init__(self, stream, callbacks=None):
        self._stream = stream
        self._callbacks = [] if callbacks is None else callbacks

    def read(self, *args, **kwargs):
        # Delegate to the wrapped stream, then report how much was read.
        chunk = self._stream.read(*args, **kwargs)
        invoke_progress_callbacks(self._callbacks, len(chunk))
        return chunk
class NoResourcesAvailable(Exception):
    """Raised when a semaphore cannot be acquired in non-blocking mode."""
    pass
class TaskSemaphore:
    def __init__(self, count):
        """A semaphore for the purpose of limiting the number of tasks

        :param count: The size of semaphore
        """
        self._semaphore = threading.Semaphore(count)

    def acquire(self, tag, blocking=True):
        """Acquire the semaphore

        :param tag: A tag identifying what is acquiring the semaphore. Note
            that this is not really needed to directly use this class but is
            needed for API compatibility with the SlidingWindowSemaphore
            implementation.
        :param block: If True, block until it can be acquired. If False,
            do not block and raise an exception if cannot be acquired.

        :returns: A token (can be None) to use when releasing the semaphore
        """
        logger.debug("Acquiring %s", tag)
        acquired = self._semaphore.acquire(blocking)
        if not acquired:
            raise NoResourcesAvailable("Cannot acquire tag '%s'" % tag)

    def release(self, tag, acquire_token):
        """Release the semaphore

        :param tag: A tag identifying what is releasing the semaphore
        :param acquire_token: The token returned from when the semaphore was
            acquired. Note that this is not really needed to directly use this
            class but is needed for API compatibility with the
            SlidingWindowSemaphore implementation.
        """
        logger.debug(f"Releasing acquire {tag}/{acquire_token}")
        self._semaphore.release()
class SlidingWindowSemaphore(TaskSemaphore):
    """A semaphore used to coordinate sequential resource access.

    This class is similar to the stdlib BoundedSemaphore:

    * It's initialized with a count.
    * Each call to ``acquire()`` decrements the counter.
    * If the count is at zero, then ``acquire()`` will either block until the
      count increases, or if ``blocking=False``, then it will raise
      a NoResourcesAvailable exception indicating that it failed to acquire the
      semaphore.

    The main difference is that this semaphore is used to limit
    access to a resource that requires sequential access.  For example,
    if I want to access resource R that has 20 subresources R_0 - R_19,
    this semaphore can also enforce that you only have a max range of
    10 at any given point in time.  You must also specify a tag name
    when you acquire the semaphore.  The sliding window semantics apply
    on a per tag basis.  The internal count will only be incremented
    when the minimum sequence number for a tag is released.
    """

    def __init__(self, count):
        self._count = count
        # Dict[tag, next_sequence_number].
        self._tag_sequences = defaultdict(int)
        # Dict[tag, lowest sequence number not yet released] for each tag.
        self._lowest_sequence = {}
        self._lock = threading.Lock()
        self._condition = threading.Condition(self._lock)
        # Dict[tag, List[sequence_number]]
        self._pending_release = {}

    def current_count(self):
        # NOTE: unlike TaskSemaphore this is a method, not a property.
        with self._lock:
            return self._count

    def acquire(self, tag, blocking=True):
        logger.debug("Acquiring %s", tag)
        self._condition.acquire()
        try:
            if self._count == 0:
                if not blocking:
                    raise NoResourcesAvailable("Cannot acquire tag '%s'" % tag)
                else:
                    while self._count == 0:
                        self._condition.wait()
            # self._count is no longer zero.
            # First, check if this is the first time we're seeing this tag.
            sequence_number = self._tag_sequences[tag]
            if sequence_number == 0:
                # First time seeing the tag, so record we're at 0.
                self._lowest_sequence[tag] = sequence_number
            self._tag_sequences[tag] += 1
            self._count -= 1
            # The sequence number doubles as the token handed back to
            # release().
            return sequence_number
        finally:
            self._condition.release()

    def release(self, tag, acquire_token):
        sequence_number = acquire_token
        logger.debug("Releasing acquire %s/%s", tag, sequence_number)
        self._condition.acquire()
        try:
            if tag not in self._tag_sequences:
                raise ValueError("Attempted to release unknown tag: %s" % tag)
            max_sequence = self._tag_sequences[tag]
            if self._lowest_sequence[tag] == sequence_number:
                # We can immediately process this request and free up
                # resources.
                self._lowest_sequence[tag] += 1
                self._count += 1
                self._condition.notify()
                queued = self._pending_release.get(tag, [])
                # Drain any previously queued out-of-order releases that are
                # now contiguous with the new lowest sequence number. The
                # queue is sorted in reverse, so the smallest is at the end.
                while queued:
                    if self._lowest_sequence[tag] == queued[-1]:
                        queued.pop()
                        self._lowest_sequence[tag] += 1
                        self._count += 1
                    else:
                        break
            elif self._lowest_sequence[tag] < sequence_number < max_sequence:
                # We can't do anything right now because we're still waiting
                # for the min sequence for the tag to be released.  We have
                # to queue this for pending release.
                self._pending_release.setdefault(tag, []).append(
                    sequence_number
                )
                self._pending_release[tag].sort(reverse=True)
            else:
                raise ValueError(
                    "Attempted to release unknown sequence number "
                    "%s for tag: %s" % (sequence_number, tag)
                )
        finally:
            self._condition.release()
class ChunksizeAdjuster:
    """Adjusts a configured chunksize so it fits within transfer limits.

    :param max_size: maximum allowed size of a single chunk
    :param min_size: minimum allowed size of a single chunk
    :param max_parts: maximum number of parts allowed in a transfer
    """

    def __init__(
        self,
        max_size=MAX_SINGLE_UPLOAD_SIZE,
        min_size=MIN_UPLOAD_CHUNKSIZE,
        max_parts=MAX_PARTS,
    ):
        self.max_size = max_size
        self.min_size = min_size
        self.max_parts = max_parts

    def adjust_chunksize(self, current_chunksize, file_size=None):
        """Get a chunksize close to current that fits within all S3 limits.

        :type current_chunksize: int
        :param current_chunksize: The currently configured chunksize.

        :type file_size: int or None
        :param file_size: The size of the file to upload. This might be None
            if the object being transferred has an unknown size.

        :returns: A valid chunksize that fits within configured limits.
        """
        chunksize = current_chunksize
        if file_size is not None:
            chunksize = self._adjust_for_max_parts(chunksize, file_size)
        return self._adjust_for_chunksize_limits(chunksize)

    def _adjust_for_chunksize_limits(self, current_chunksize):
        # Clamp the chunksize into [min_size, max_size].
        # FIX: pass format args lazily to logger.debug instead of
        # pre-formatting with %, so formatting only happens when debug
        # logging is enabled.
        if current_chunksize > self.max_size:
            logger.debug(
                "Chunksize greater than maximum chunksize. "
                "Setting to %s from %s.",
                self.max_size,
                current_chunksize,
            )
            return self.max_size
        elif current_chunksize < self.min_size:
            logger.debug(
                "Chunksize less than minimum chunksize. "
                "Setting to %s from %s.",
                self.min_size,
                current_chunksize,
            )
            return self.min_size
        else:
            return current_chunksize

    def _adjust_for_max_parts(self, current_chunksize, file_size):
        # Double the chunksize until the resulting part count is allowed.
        chunksize = current_chunksize
        num_parts = int(math.ceil(file_size / float(chunksize)))

        while num_parts > self.max_parts:
            chunksize *= 2
            num_parts = int(math.ceil(file_size / float(chunksize)))

        if chunksize != current_chunksize:
            logger.debug(
                "Chunksize would result in the number of parts exceeding the "
                "maximum. Setting to %s from %s.",
                chunksize,
                current_chunksize,
            )
        return chunksize
| |
# -*- coding: utf-8 -*-
# Copyright 2015 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tests for accounts_lib."""
from __future__ import print_function
import json
import sys
import mock
from chromite.lib import accounts_lib
from chromite.lib import cros_test_lib
from chromite.lib import osutils
from chromite.lib import user_db
assert sys.version_info >= (3, 6), 'This module requires Python 3.6+'
EMPTY_ACCOUNTS_DB_WITH_COMMENTS = """
{
# This accounts spec is empty.
"users": [
],
"groups": [
]
}
"""
MINIMAL_DB_USER = accounts_lib.User(
name='minimal', password='!', uid=1000, group_name='minimal',
description='', home='/dev/null', shell='/bin/false',
is_fixed_id=False, is_defunct=False)
MINIMAL_DB_GROUP = accounts_lib.Group(
name='minimal', password='!', gid=1000, users=['minimal'],
is_fixed_id=False, is_defunct=False)
MINIMAL_ACCOUNTS_DB = """
{
"users": [
{
# Minimal user.
"user": "minimal",
"uid": 1000,
"group_name": "minimal"
}
],
"groups": [
{
# Minimal group.
"group": "minimal",
"gid": 1000,
"users": [ "minimal" ]
}
]
}
"""
EXTRA_USER_SPEC_FIELD_DB = """
{
"users": [
{
"user": "minimal",
"uid": 1000,
"group_name": "minimal",
"gecos": "minimal user spec",
"extra": "This field is not expected."
}
]
}
"""
class AccountDatabaseTest(cros_test_lib.MockTestCase):
"""Tests for chromite.lib.accounts_lib.AccountDatabase."""
def _ParseSpec(self, contents, db=None):
"""Return a AccountDatabase that has read a file with |contents|.
Args:
contents: desired contents of accounts database to parse.
db: existing account db to override with new definitions.
Returns:
an instance of AccountDatabase.
"""
if db is None:
db = accounts_lib.AccountDatabase()
with self.PatchObject(osutils, 'ReadFile', return_value=contents):
db.AddAccountsFromDatabase('ignored')
return db
def _ParseSpecs(self, specs):
"""Return a AccountDatabase based on the account database stack in |specs|.
Args:
specs: list of json fragments (encoded as strings) to compose into a
consistent account database. This list is assumed to be in
increasing priority order so that later entries override earlier
entries.
Returns:
an instance of AccountDatabase.
"""
db = accounts_lib.AccountDatabase()
for spec in specs:
self._ParseSpec(spec, db=db)
return db
  def testParsesEmptyDb(self):
    """Test that we can parse an empty database."""
    # json.dumps({}) yields '{}': no "users" or "groups" keys at all.
    self._ParseSpec(json.dumps({}))
  def testParsesDbWithComments(self):
    """Test that we handle comments properly."""
    # The fixture contains '#' comments (invalid in strict JSON); the
    # parser is expected to tolerate them.
    self._ParseSpec(EMPTY_ACCOUNTS_DB_WITH_COMMENTS)
  def testRejectsUnkownDbKeys(self):
    """Test that we check the set of keys specified in the account database."""
    # NOTE(review): method name has a typo ('Unkown'); renaming would
    # change the test ID, so it is left as-is.
    self.assertRaises(ValueError,
                      self._ParseSpec,
                      json.dumps({'foo': 'This is not a valid field.'}))
def testRejectsBadKeyValues(self):
"""Check that typecheck user/group specs."""
self.assertRaises(ValueError,
self._ParseSpec,
json.dumps({'users': 'This should be a list'}))
self.assertRaises(ValueError,
self._ParseSpec,
json.dumps({'groups': 'This should be a list'}))
  def testRejectsExtraUserSpecFields(self):
    """Test that we check for extra user spec fields."""
    # EXTRA_USER_SPEC_FIELD_DB includes an unrecognized "extra" key in a
    # user spec.
    self.assertRaises(ValueError, self._ParseSpec, EXTRA_USER_SPEC_FIELD_DB)
def testParsesMinimalDb(self):
"""Test that we can parse a basic database."""
db = self._ParseSpec(MINIMAL_ACCOUNTS_DB)
self.assertEqual(1, len(list(db.users)))
self.assertEqual(1, len(list(db.groups)))
self.assertIn(MINIMAL_DB_USER.name, db.users)
self.assertIn(MINIMAL_DB_GROUP.name, db.groups)
self.assertEqual(db.users[MINIMAL_DB_USER.name], MINIMAL_DB_USER)
self.assertEqual(db.groups[MINIMAL_DB_GROUP.name], MINIMAL_DB_GROUP)
def testComposesDbs(self):
    """Test that we can compose databases from multiple overlays."""
    # The override layer redefines the 'override' user/group with new ids
    # and adds one extra user; the 'base' entries must survive untouched.
    BASE_ID = 1000
    OVERRIDE_ID = 2000
    BASE_NAME = 'base'
    OVERRIDE_NAME = 'override'
    EXTRA_USER = 'extra.user'
    base_db = json.dumps({
        'users': [
            {'user': BASE_NAME,
             'uid': BASE_ID,
             'group_name': 'base.group',
            },
            {'user': OVERRIDE_NAME,
             'uid': OVERRIDE_ID - 1,  # stale uid the overlay must replace
             'group_name': 'override.group',
            },
        ],
        'groups': [
            {'group': BASE_NAME,
             'gid': BASE_ID,
             'users': ['base.user']
            },
            {'group': OVERRIDE_NAME,
             'gid': OVERRIDE_ID - 1,  # stale gid the overlay must replace
             'users': ['override.user']
            },
        ],
    })
    override_db = json.dumps({
        'users': [
            {'user': OVERRIDE_NAME,
             'uid': OVERRIDE_ID,
             'group_name': 'override.group',
            },
            {'user': EXTRA_USER,
             'uid': 3000,
             'group_name': OVERRIDE_NAME,
            },
        ],
        'groups': [
            {'group': OVERRIDE_NAME,
             'gid': OVERRIDE_ID,
             'users': [OVERRIDE_NAME, EXTRA_USER],
            },
        ],
    })
    # Later specs take priority (see _ParseSpecs).
    db = self._ParseSpecs([base_db, override_db])
    self.assertEqual(3, len(db.users))
    self.assertEqual(2, len(db.groups))
    # Base entries untouched; override entries replaced by the overlay.
    self.assertEqual(BASE_ID, db.users[BASE_NAME].uid)
    self.assertEqual(BASE_ID, db.groups[BASE_NAME].gid)
    self.assertEqual(OVERRIDE_ID, db.users[OVERRIDE_NAME].uid)
    self.assertEqual(OVERRIDE_ID, db.groups[OVERRIDE_NAME].gid)
    self.assertEqual(sorted([OVERRIDE_NAME, EXTRA_USER]),
                     sorted(db.groups[OVERRIDE_NAME].users))
def testInstallUser(self):
    """InstallUser must add exactly the expected user to the user db."""
    accounts = self._ParseSpec(MINIMAL_ACCOUNTS_DB)
    sysroot_db = mock.MagicMock()
    accounts.InstallUser(MINIMAL_DB_USER.name, sysroot_db)
    expected_user = user_db.User(
        user=MINIMAL_DB_USER.name, password=MINIMAL_DB_USER.password,
        uid=MINIMAL_DB_USER.uid, gid=MINIMAL_DB_GROUP.gid,
        gecos=MINIMAL_DB_USER.description, home=MINIMAL_DB_USER.home,
        shell=MINIMAL_DB_USER.shell)
    # AddUser must be the one and only call made on the target db.
    self.assertEqual([mock.call.AddUser(expected_user)],
                     sysroot_db.mock_calls)
def testInstallGroup(self):
    """InstallGroup must add exactly the expected group to the user db."""
    accounts = self._ParseSpec(MINIMAL_ACCOUNTS_DB)
    sysroot_db = mock.MagicMock()
    accounts.InstallGroup(MINIMAL_DB_GROUP.name, sysroot_db)
    expected_group = user_db.Group(
        group=MINIMAL_DB_GROUP.name, password=MINIMAL_DB_GROUP.password,
        gid=MINIMAL_DB_GROUP.gid, users=MINIMAL_DB_GROUP.users)
    # AddGroup must be the one and only call made on the target db.
    self.assertEqual([mock.call.AddGroup(expected_group)],
                     sysroot_db.mock_calls)
| |
import sys
sys.path.append('/opt/local/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7')
sys.path.append('/opt/local/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages')
import socket
import sqlite3
import struct
import pcapy
import datetime
import time
import os
import optparse
import glob
from exp_description import *
import matplotlib.pyplot as plt
import scipy as stats
import statsmodels.api as sm
#import numpy as np
import sys,getopt
def get_discovery_retries(startstep, deadline_ms=None):
    """Return the exponential-backoff retry schedule for service discovery.

    Args:
        startstep: first retry interval (seconds).
        deadline_ms: overall deadline in milliseconds.  Defaults to the
            module-global |dead| for backward compatibility with existing
            callers.

    Returns:
        List [startstep, 2, 4, 8, ...] of retry intervals (seconds); every
        entry after the first is a power of two strictly below the deadline
        expressed in seconds.
    """
    if deadline_ms is None:
        # Legacy behavior: fall back to the global experiment deadline.
        deadline_ms = dead
    limit = deadline_ms / 1000
    retries = [startstep]
    while retries[-1] < limit:
        # Each step doubles: 2, 4, 8, ... (independent of startstep).
        retrystep = 2 ** len(retries)
        if retrystep < limit:
            retries.append(retrystep)
        else:
            break
    return retries
def experiment_analyzer(sdratio):
    '''Analyze every run of the current experiment and add a delay CDF curve.

    Walks all runs recorded in the global sqlite connection |conn| using the
    experiment description |exp|, computes the discovery delay seen by the
    requester node for each valid run, and plots an ECDF of those delays on
    matplotlib figure 1.

    Args:
        sdratio: percentage (0-100) of the available responders that must
            have answered before a search counts as complete.
    '''
    global conn
    global exp
    count = 0  # NOTE(review): never used below
    c = conn.cursor()
    x = []          # all measured delays (input to the ECDF)
    y = []          # run ids matching the entries of x
    delay = []      # per-factor-level delay lists, preloaded with |dead|
    pkt_delay = []  # per-factor-level packet delays (preloaded, not refilled here)
    time_offset = []  # NOTE(review): never used below
    for n in range(4):
        # print n
        delay.append([])
        pkt_delay.append([])
        # print dead
        for i in range(exp.get_run_count()):
            # Pre-fill with the deadline so runs without a matched response
            # count as timed out.
            #delay[n].append(30000)
            delay[n].append(dead)
            #pkt_delay[n].append(30000)
            pkt_delay[n].append(dead)
    #loop over run sequence
    fail = 0
    succeeded = 0
    timedout = 0
    valid_results = [0, 0, 0, 0]  # number of valid runs per factor level
    num_responders = len(exp.get_responders())
    # Round up the number of responders required by |sdratio|.
    needed = min(num_responders, int(round(sdratio * num_responders) / 100 + 0.5))
    for run_number in range(exp.get_run_count()):
        ###### SETUP LEVELS OF ALL FACTORS FOR THIS RUN ########
        exp.set_run_number(run_number)
        run_definition = exp.get_run_identifier()
        #print "exporting run number %d with combination %s" %(run_number,run_definition)
        #print "run_definition", run_definition
        c.execute("SELECT RunID FROM run_ids WHERE run_identifier=?", [run_definition])
        run_id = c.fetchone()[0]
        fact = exp.get_current_factor_level_by_id('fact_pairs')
        #print fact
        run_fact = exp.get_current_factor_level_by_id('fact_replication_id')
        all_nodes = exp.get_all_spec_nodes()
        for node in all_nodes:
            # Only the requester node's view of the run is analyzed.
            if node['real_id'] == exp.get_requester()["real_id"]:
                #events=run_analyzer(run_id,node,c)
                #for deadline in range(30):
                #    resp[deadline] = resp[deadline] + get_responsiveness(run_id, node, deadline*1000)
                #delay[run_number] = get_delay(run_id, node)
                #res=check_packets(run_id, node, 30000)
                res = check_packets(run_id, node, dead)
                # checks for invalid runs and excludes the response times
                actornodes = [node]
                actornodes = actornodes + exp.get_responders()
                fails = check_routes(run_id, actornodes)
                #print res
                if res == -1 or fails > 0:
                    # Lost/unmatched packets or broken routes: exclude run.
                    fail = fail + 1
                else:
                    index = valid_results[fact]
                    delse = get_delay(run_id, node, needed)
                    delay[fact][index] = delse
                    x.append(delse)
                    y.append(run_id)
                    valid_results[fact] = valid_results[fact] + 1
                    if delse < dead:
                        succeeded = succeeded + 1
                    else:
                        timedout = timedout + 1
                # Requester handled; the remaining nodes are irrelevant.
                break
        sys.stdout.write("\rAnalyzed Runs: %d, Valid: %d, Succeeded: %d, Timed out: %d, Failed: %d" % (run_number+1, valid_results[0], succeeded, timedout, fail))
        sys.stdout.flush()
    sys.stdout.write("\n")
    ######## RESPONSIVENESS 1zu1 ############
    #for i in range(250):
    #    print delay[3][i]
    symbols = ['k-','k--','k-.','k:']  # NOTE(review): unused leftover from older plotting code
    #for fact in range(4):
    fact = 0
    res = []
    #z=stats.norm.cdf(x)
    #print z
    # Hand-rolled CDF over pkt_delay; superseded by the ECDF below but kept.
    for i in range(30000):
        ok = 0
        #print pkt_delay
        for n in range(valid_results[fact]):
            if pkt_delay[fact][n] < i:
                ok = ok + 1
        #print ok
        res.append((ok*100/valid_results[fact])*0.01)
    ecdf = sm.distributions.ECDF(x)
    ### Plotting starts here ###
    # The database filename encodes metadata, e.g.
    # <client>_<provider>_<x>_<voip streams>... — TODO confirm field layout.
    fn_split = fn.split("_")
    # print "Client: %s" % fn_split[0]
    # print "Provider: %s" % fn_split[1]
    # print "%d VoIP Streams Load" % int(fn_split[3])
    if int(fn_split[3]) > 0:
        legend_string = "%d VoIP Streams Load" % int(fn_split[3])
    else:
        legend_string = "No Load"
    plt.figure(1)
    plt.plot(ecdf.x, ecdf.y, linestyle='-', drawstyle='steps', label=legend_string)
#Checks for validity of the routes from the ExtraRunMeasurements
def check_route(run_id, node):
    """Check one node's route status in ExtraRunMeasurements.

    Uses the module-global cursor |c|.

    Args:
        run_id: database RunID of the run to inspect.
        node: node spec dict; its 'real_id' is the NodeID in the database.

    Returns:
        1 if the node recorded a route failure for this run, 0 otherwise.
    """
    c.execute("SELECT Content FROM ExtraRunMeasurements WHERE runID=? and nodeID=?", [run_id, node['real_id']])
    # str() also covers a missing row: str(None) contains no 'fail'.
    routes = str(c.fetchone())
    if 'fail' in routes:
        # print run_id,node['real_id']
        # print " invalid run"
        return 1
    # BUGFIX: the original fell off the end and returned an implicit None;
    # make the "route OK" result explicit (0 is equally falsy for callers).
    return 0
def check_routes(run_id, actornodes):
    """Return how many of |actornodes| logged a failed route in run |run_id|.

    Uses the module-global cursor |c|.
    """
    placeholders = ', '.join('?' * len(actornodes))
    query_string = "SELECT count(NodeID) FROM ExtraRunMeasurements WHERE runID=? and Content like 'fail%%' and nodeID in (%s)" % placeholders
    query_args = [run_id] + [actor['real_id'] for actor in actornodes]
    c.execute(query_string, query_args)
    # if fails > 0:
    #     print "Run %d, Failed routes %s" % (run_id, fails)
    return c.fetchone()[0]
def check_packets(run_id, node, deadline_ms):
    '''Validate one run's packet trace for |node| and measure query->response delay.

    Steps: First select packets of this run, "sent" and "received". "sent" is to be handled with care, as previously sent packets
    can be received again and have to be filtered out. This unfortunately can also be packets from other runs.
    When a packet from another run is detected as sent, it can be removed from the list, when a response arrives
    within the search window, then this search is false positive and must FAIL.

    Returns:
        -1 when the run is invalid (no search event, or a response with no
        matching in-window query); otherwise the first matched
        query->response delay in milliseconds; or deadline_ms when no
        response matched at all.
    '''
    # first get start/stop times
    c.execute("SELECT CommonTime FROM Events WHERE runID=? and nodeID=? and EventType='sd_start_search'", [run_id,node['real_id']] )
    #print "start_search"
    rows = c.fetchone()
    #print rows
    if rows==None:
        #print "Error, no search found in run ", run_id, node['real_id']
        return -1
    start_search_time = rows[0]
    start = db_timestamp_to_datetime(start_search_time)
    c.execute("SELECT CommonTime FROM Events WHERE runID=? and nodeID=? and EventType='sd_service_add'", [run_id,node['real_id']] )
    #print "sd_service_add"
    find_result = c.fetchone()
    #print find_result
    if find_result==None:
        #print "0"
        # No service was ever found: the observation window ends at the deadline.
        stop = start+datetime.timedelta(milliseconds=deadline_ms)
    else:
        stop = db_timestamp_to_datetime(find_result[0])
        # Cap the window at the deadline even when a late add event exists.
        if stop > start+datetime.timedelta(milliseconds=deadline_ms):
            stop = start+datetime.timedelta(milliseconds=deadline_ms)
    # Packets originated by the node itself (the queries it sent)...
    c.execute("SELECT * FROM Packets WHERE RunID=? and NodeID=? and SrcNodeID=? ORDER BY CommonTime ASC", [run_id, node['real_id'],node['real_id']])
    rows_send = c.fetchall()
    #print rows_send
    # ...and packets captured from other nodes (candidate responses).
    c.execute("SELECT * FROM Packets WHERE RunID=? and NodeID=? and SrcNodeID!=? ORDER BY CommonTime ASC", [run_id, node['real_id'],node['real_id']])
    rows_recv = c.fetchall()
    #print rows_recv
    # consider only packets within the search/timedout interval
    for sent in list(rows_send):
        sent_time = db_timestamp_to_datetime(sent[2])
        if sent_time < start or sent_time>stop:
            rows_send.remove(sent)
    pkt_analyzer = packet_analyzer()
    #print start
    #print stop
    # also, consider only responses
    for received in list(rows_recv):
        received_time = db_timestamp_to_datetime(received[2])
        pkt = received[4]
        ip=pkt_analyzer.decode_ip_packet(pkt)
        udp=pkt_analyzer.decode_udp_packet(ip['data'])
        mdns=pkt_analyzer.decode_mdns_packet(udp['data'])
        # mdns flag say, if response or not, must be response
        # (& binds tighter than != in Python, so this is (flags & 128) != 128)
        if received_time < start or received_time > stop or mdns['flags'] & 128!=128:
            #print "removing", received
            rows_recv.remove(received)
    # list packets by their transaction ID
    # remove duplicates and out of order packets from the sent
    sent_by_id = {}
    for i,sent in enumerate(list(rows_send)):
        pkt = sent[4]
        # DNS transaction id: bytes 28-29 of the raw IP packet
        # (20-byte IPv4 header + 8-byte UDP header — assumes no IP options).
        id = socket.ntohs(struct.unpack('H',pkt[28:30])[0])
        if i==0:
            # Seed so the first packet always counts as "in order".
            last_id=id-1
        #print "Out of order %d %d" %( run_id, id), sent
        if id==last_id+1:
            last_id = id
            #print "correct oder", sent
            sent_by_id[id] = sent
    # find responses and BAD RESPONSES
    for received in rows_recv:
        pkt = received[4]
        id = socket.ntohs(struct.unpack('H',pkt[28:30])[0])
        #print "ResponseID: %d" %id
        found = 0
        for id_sent,sent in sent_by_id.items():
            if id==id_sent:
                #print "found same IDs", id
                found = 1
                t_requ = db_timestamp_to_datetime(sent[2])
                t_resp = db_timestamp_to_datetime(received[2])
                delay = t_resp - t_requ
                # Return the delay of the FIRST matched response, in ms.
                return delay.seconds*1000 + delay.microseconds / 1000
        if found==0:
            # A response with no matching in-window query: invalid run.
            #print "Fail Runid=%s" %run_id, received
            return -1
    # No responses at all within the window: report the deadline (timeout).
    return deadline_ms
class packet_analyzer():
    """Decoders and trackers for raw mDNS traffic (IP/UDP/DNS headers).

    All decode_* methods take raw packet bytes (Python 2 str) starting at
    the respective header and return a dict of header fields; multi-byte
    fields are converted from network byte order with socket.ntohs.
    """

    def __init__(self):
        pass

    def get_dnssd_query_response_rtt(self):
        '''
        Unimplemented placeholder.
        '''
        pass

    def decode_mdns_packet(self, p):
        # DNS header (12 bytes): id, flags, and the four section counts.
        d={}
        d['transaction_ID']=socket.ntohs(struct.unpack('H',p[0:2])[0])
        # NOTE(review): unlike the other fields, 'flags' is NOT passed
        # through ntohs; the `& 128` response check in check_packets relies
        # on this byte order — confirm before changing.
        d['flags']=struct.unpack('H',p[2:4])[0]
        d['n_questions']=socket.ntohs(struct.unpack('H',p[4:6])[0])
        d['n_answerRRs']=socket.ntohs(struct.unpack('H',p[6:8])[0])
        d['n_authRRs']=socket.ntohs(struct.unpack('H',p[8:10])[0])
        d['n_addRRs']=socket.ntohs(struct.unpack('H',p[10:12])[0])
        return d

    def decode_udp_packet(self, p):
        # 8-byte UDP header; payload is returned in d['data'].
        d={}
        d['src_port']=socket.ntohs(struct.unpack('H',p[0:2])[0])
        d['dst_port']=socket.ntohs(struct.unpack('H',p[2:4])[0])
        d['length']=socket.ntohs(struct.unpack('H',p[4:6])[0])
        d['checksum']=socket.ntohs(struct.unpack('H',p[6:8])[0])
        d['data']=p[8:]
        return d

    def decode_ip_packet(self, s):
        # IPv4 header; header_len is in 32-bit words.
        d={}
        d['version']=(ord(s[0]) & 0xf0) >> 4
        d['header_len']=ord(s[0]) & 0x0f
        d['tos']=ord(s[1])
        d['total_len']=socket.ntohs(struct.unpack('H',s[2:4])[0])
        d['id']=socket.ntohs(struct.unpack('H',s[4:6])[0])
        d['flags']=(ord(s[6]) & 0xe0) >> 5
        d['fragment_offset']=socket.ntohs(struct.unpack('H',s[6:8])[0] & 0x1f)
        d['ttl']=ord(s[8])
        d['protocol']=ord(s[9])
        d['checksum']=socket.ntohs(struct.unpack('H',s[10:12])[0])
        d['source_address']=struct.unpack('i',s[12:16])[0]
        d['destination_address']=struct.unpack('i',s[16:20])[0]
        if d['header_len']>5:
            # NOTE(review): slice end looks off — options extend to offset
            # 4*header_len, so this truncates them; confirm before reuse.
            d['options']=s[20:4*(d['header_len']-5)]
        else:
            d['options']=None
        d['data']=s[4*d['header_len']:]
        return d

    def decode_eth_packet(self, p):
        # Ethernet II frame; MAC decoding is stubbed out (set to 0).
        d={}
        d['dst_mac']=0#struct.unpack('H',p[0:6])[0]
        d['src_mac']=0#struct.unpack('H',p[6:12])[0]
        d['type']=socket.ntohs(struct.unpack('H',p[12:14])[0])
        d['data']=p[14:]
        return d

    def packet_tracker(self, hdr, data):
        '''
        scans packets for pairs with same queryID and for the first return rtt

        pcapy loop callback: decodes |data| (assumed to start at the IP
        header) and appends mDNS queries/responses with their capture
        timestamps to self.queries / self.responses.
        '''
        # NOTE(review): none of these globals are assigned in this method;
        # `max`/`min` also shadow builtins — likely leftovers.
        global query
        global avg
        global count
        global max
        global min
        curr_hdr={}
        curr_hdr['ts_s'],curr_hdr['ts_us'] = hdr.getts()
        curr_hdr['len']=hdr.getlen()
        # Combine the pcap seconds/microseconds into one datetime.
        ts = datetime.datetime.fromtimestamp(curr_hdr['ts_s'])
        ts = ts + datetime.timedelta(microseconds=curr_hdr['ts_us'])
        d3 = None
        #d = self.decode_eth_packet(data)
        #print d
        #if d['type']==2048: #IP
        d = self.decode_ip_packet(data)
        if d['protocol']==17: # UDP
            d2 = self.decode_udp_packet(d['data'])
            if d2['dst_port']==5353:  # mDNS port
                d3 = self.decode_mdns_packet(d2['data'])
        if d3==None:
            print "not a mdns packet", d3
            return
        # if this is a query, save the id and time
        if d3['flags']==0: #Query
            self.queries.append({'id':d3['transaction_ID'], 'ts':ts})
        else: #response
            #if query[d3['transaction_ID']]==None:
            #    print "Invalid response, ignoring this packet"
            #    return
            self.responses.append({'id':d3['transaction_ID'], 'ts':ts})

    def load_packet_into_list(self, filename):
        """Reset the query/response lists and replay a pcap file through
        packet_tracker."""
        self.responses = []
        self.queries = []
        #print ("Parsing file %s" % (filename))
        p = pcapy.open_offline(filename)
        p.loop(0, self.packet_tracker)
        #print self.queries
        #print ""
        #print self.responses

    def find_first_rtt_between(self, start_ts, end_ts):
        """Count (and print) query/response transaction-id matches whose
        timestamps fall inside (start_ts, end_ts)."""
        match = 0
        for (id,query) in enumerate(self.queries):
            if query['ts']>start_ts and query['ts']<end_ts:
                #print query
                for (id2,response) in enumerate(self.responses):
                    if response['ts']>start_ts and response['ts']<end_ts:
                        if response['id']==query['id']:
                            diff = response['ts']-query['ts']
                            #print "Found match, diff ",diff
                            match = match + 1
        print match
# Module-level bookkeeping for the CSV event-merging helpers below.
counter = 0  # global counter (not used by the analysis entry point)
events_merge_file_name = "merged_events.csv"  # output name for merged event logs
event_log_file_name = "event_log_"  # per-node event log filename prefix
def _get_subdirs(dir):
return [name for name in os.listdir(dir)
if os.path.isdir(os.path.join(dir,name))]
def _get_files(dir, mask):
return
def runcapture_dir_analyzer(dir):
    """Stub analyzer for a single run-capture directory (not implemented)."""
    if dir == "capture":
        # The aggregate "capture" folder is not a run directory; skip it.
        return None
    # TODO: per-run analysis was never implemented; currently a no-op.
    return None
def parse_line(line, run, owner):
    """Parse one "timestamp,type,param" CSV line into an event dict.

    The timestamp ("YYYY-mm-dd HH:MM:SS.ffffff[Z]") is converted to a
    datetime and shifted by the run's clock offset (run.timediff_ms).

    Returns:
        dict with keys 'ts', 'type', 'param' and 'origin' (set to |owner|).
    """
    fields = line.split(',')
    stamp, _, micros = fields[0].partition(".")
    stamp = datetime.datetime.strptime(stamp, "%Y-%m-%d %H:%M:%S")
    micros = int(micros.rstrip("Z"), 10)
    ts = (stamp
          + datetime.timedelta(microseconds=micros)
          + datetime.timedelta(milliseconds=run.timediff_ms))
    return {'ts': ts, 'type': fields[1], 'param': fields[2], 'origin': owner}
#gt("2008-08-12T12:20:30.656234Z")
#datetime.datetime(2008, 8, 12, 12, 20, 30, 656234)
def db_timestamp_to_datetime(db_timestamp):
    """Convert a DB timestamp "YYYY-mm-dd HH:MM:SS.ffffff[Z]" to datetime."""
    whole, _, frac = db_timestamp.partition(".")
    parsed = datetime.datetime.strptime(whole, "%Y-%m-%d %H:%M:%S")
    microseconds = int(frac.rstrip("Z"), 10)
    return parsed + datetime.timedelta(microseconds=microseconds)
def get_delay(run_id, node, needed):
    """Return the search -> Nth service-add delay (ms) for one run.

    Reads the event table through the module-global cursor |c| and uses
    the module-global |dead| (ms) as the timeout value.

    Args:
        run_id: database RunID.
        node: node spec dict ('real_id' is the NodeID in the database).
        needed: number of 'sd_service_add' events required; the delay is
            measured up to the |needed|-th one.

    Returns:
        Delay in whole milliseconds, or |dead| when fewer than |needed|
        responses were recorded.
    """
    c.execute("SELECT CommonTime FROM Events WHERE runID=? and nodeID=? and EventType='sd_start_search'", [run_id, node['real_id']])
    start_search_time = c.fetchone()[0]
    c.execute("SELECT CommonTime FROM Events WHERE runID=? and nodeID=? and EventType='sd_service_add'", [run_id, node['real_id']])
    find_result = c.fetchall()
    # BUGFIX: fetchall() returns a (possibly empty) list, never None, so the
    # old ``== None`` branch was dead; a truthiness test covers both cases.
    if not find_result or len(find_result) < needed:
        return dead
    stop_search_time = find_result[needed - 1][0]
    start = db_timestamp_to_datetime(start_search_time)
    stop = db_timestamp_to_datetime(stop_search_time)
    delay = stop - start
    # ``//`` keeps the original Python 2 integer-division result under py3.
    return delay.seconds * 1000 + delay.microseconds // 1000
def print_plot():
    """Finalize the responsiveness CDF figure and save it as plot.pdf."""
    plt.xlabel('Deadline in ms',{'fontsize':'x-large'})
    plt.ylabel('Responsiveness',{'fontsize':'x-large'})
    #plt.legend(('no load', '26 VoIP', '53 VoIP', '80 VoIP'),
    #           'right', shadow=True)
    plt.grid(True)
    plt.legend(loc = "lower right")
    # Clip the x axis at the experiment deadline (module-global, ms).
    plt.xlim([0,dead])
    # NOTE(review): plt.hold() was removed in matplotlib 3.0 — confirm the
    # target matplotlib version before running on a modern stack.
    plt.hold(True)
    # plt.set_xticklabels(plt.get_xticks()/1000)
    # savename = fn + "_" + str(sdratio) + ".pdf"
    savename = "plot.pdf"
    plt.savefig(savename, dpi=600)
if __name__ == '__main__':
    # NOTE(review): ``global`` at module level is a no-op; kept as-is.
    global conn
    global cfg_search_fail_value
    global csv_file
    global dead
    global fn
    cfg_search_fail_value = -1000  # sentinel for failed searches (not used here)
    # Option parser
    parser = optparse.OptionParser(
        description='Analyzer for done Service Discovery Experiments.',
        prog=os.path.basename(sys.argv[0]),
        version='%s 0.0.1' % os.path.basename(sys.argv[0]),
    )
    # multi = False
    csv_file = "/tmp/results.csv"
    parser.add_option('-d', '--database', action='append', default = [], dest='database', help='the database file')
    parser.add_option('-l', metavar='deadline', type='int', dest='deadline', help='the deadline')
    parser.add_option('-x', metavar='exp_file', dest='exp_file', help='the abstract experiment description')
    parser.add_option('-o', metavar='csv_file', dest='csv_file', help='the file to which the results are written')
    # parser.add_option('-m', action='store_true', dest='multi', help='analyze a multiple instances experiment')
    parser.add_option('-r' ,metavar='ratio', type='int', dest='sdratio', help='percentage of service instances needed to be found', default = 100)
    options, arguments = parser.parse_args()
    if options.csv_file!=None:
        csv_file = options.csv_file
    # -d may be given several times; at least one database is required.
    if options.database == []:
        print "Database file is needed"
        exit()
    # else:
    #     for db in options.database:
    #         print db
    #     exit()
    if options.deadline == None:
        print "Deadline is needed"
        exit()
    else:
        dead = options.deadline
    # print "dead",dead
    #print "Manu"
    # Analyze each database file; each adds one ECDF curve to figure 1.
    for database in options.database:
        # e.g. "results/clientA_provB_x_26.db" -> "clientA_provB_x_26"
        fn = str(database.split('.')[0].split('/')[-1])
        print "Database %s" % fn
        conn = sqlite3.connect(database)
        c = conn.cursor()
        # The experiment description is embedded in the database as XML;
        # dump it to a temp file for experiment_description().
        c.execute("SELECT expXML FROM ExperimentInfo")
        row=c.fetchone()
        if row==None:
            print "no XML description in database file"
        # NOTE(review): falls through and writes row[0] even when row is
        # None, which would crash — confirm intended behavior.
        fd = open('/tmp/exp_xml','w')
        fd.write(row[0])
        fd.close()
        # An explicit -x description overrides the embedded one.
        if options.exp_file != None:
            exp = experiment_description(options.exp_file)
        else:
            exp = experiment_description('/tmp/exp_xml')
        # print exp.platform_specs.actor_map
        #print exp.get_requester()
        experiment_analyzer(options.sdratio)
    # One combined plot over all analyzed databases.
    print_plot()
| |
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.template.defaultfilters import title
from django.urls import reverse
from django.utils.http import urlencode
from django.utils.text import format_lazy
from django.utils.translation import pgettext_lazy
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy
from horizon import exceptions
from horizon import tables
from manila_ui.api import manila
# Snapshot states in which deletion is permitted by the UI (see
# DeleteShareSnapshot.allowed).
DELETABLE_STATES = ("available", "error")
class UpdateShareSnapshotRow(tables.Row):
    """Ajax row that refreshes a single share snapshot's data."""
    ajax = True

    def get_data(self, request, snapshot_id):
        """Fetch the snapshot, falling back to its id as the display name."""
        snap = manila.share_snapshot_get(request, snapshot_id)
        if not snap.name:
            snap.name = snapshot_id
        return snap
def get_size(snapshot):
    """Render a snapshot's size as a translated "<n>GiB" string."""
    size_gib = snapshot.size
    return _("%sGiB") % size_gib
class CreateShareSnapshot(tables.LinkAction):
    """Row action opening the snapshot-creation modal for a share."""
    name = "create_share_snapshot"
    verbose_name = _("Create Share Snapshot")
    url = "horizon:project:share_snapshots:share_snapshot_create"
    classes = ("ajax-modal", "btn-camera")
    policy_rules = (("share", "share:create_snapshot"),)

    def get_policy_target(self, request, datum=None):
        # Scope the policy check to the share's owning project.
        project_id = None
        if datum:
            project_id = getattr(datum, "project_id", None)
        return {"project_id": project_id}

    def allowed(self, request, share=None):
        """Enable the action only when snapshot quota remains and the share
        supports snapshots; otherwise grey the button out with a hint."""
        usages = manila.tenant_absolute_limits(request)
        # Both the snapshot-count and gigabyte quotas must have headroom.
        snapshots_allowed = (usages['maxTotalShareSnapshots'] >
                             usages['totalShareSnapshotsUsed'] and
                             usages['maxTotalSnapshotGigabytes'] >
                             usages['totalSnapshotGigabytesUsed'])
        if not snapshots_allowed:
            # Disable the button and annotate the label.  This mutates the
            # action instance used for rendering; the guard keeps the
            # "(Quota exceeded)" suffix from being appended repeatedly.
            if "disabled" not in self.classes:
                self.classes = [c for c in self.classes] + ['disabled']
                self.verbose_name = format_lazy(
                    '{verbose_name} {quota_exceeded}',
                    verbose_name=self.verbose_name,
                    quota_exceeded=_("(Quota exceeded)"))
        else:
            # Re-enable: restore the plain label and strip "disabled".
            self.verbose_name = _("Create Share Snapshot")
            classes = [c for c in self.classes if c != "disabled"]
            self.classes = classes
        # NOTE(vponomaryov): Disable form with creation of a snapshot for
        # shares that has attr 'snapshot_support' equal to False.
        if hasattr(share, 'snapshot_support'):
            snapshot_support = share.snapshot_support
        else:
            # NOTE(vponomaryov): Allow creation of a snapshot for shares that
            # do not have such attr for backward compatibility.
            snapshot_support = True
        return share.status in ("available", "in-use") and snapshot_support
class DeleteShareSnapshot(tables.DeleteAction):
    """Table/row action that deletes one or more share snapshots."""
    policy_rules = (("share", "share:delete_snapshot"),)

    @staticmethod
    def action_present(count):
        # Button label, pluralized by the number of selected rows.
        return ungettext_lazy(
            u"Delete Share Snapshot",
            u"Delete Share Snapshots",
            count
        )

    @staticmethod
    def action_past(count):
        # Past-tense label used in success messages.
        return ungettext_lazy(
            u"Deleted Share Snapshot",
            u"Deleted Share Snapshots",
            count
        )

    def get_policy_target(self, request, datum=None):
        # Scope the policy check to the snapshot's owning project.
        project_id = None
        if datum:
            project_id = getattr(datum, "project_id", None)
        return {"project_id": project_id}

    def delete(self, request, obj_id):
        """Delete snapshot |obj_id|, surfacing a friendly error on failure.

        Re-raises on failure so the table framework records the deletion
        as unsuccessful.
        """
        obj = self.table.get_object_by_id(obj_id)
        name = self.table.get_object_display(obj)
        try:
            manila.share_snapshot_delete(request, obj_id)
        except Exception:
            msg = _('Unable to delete snapshot "%s". One or more shares '
                    'depend on it.')
            # BUGFIX: was ``self.request`` — table actions are not reliably
            # given a ``request`` attribute; use the request passed in, as
            # DeleteShareSnapshotRule.delete already does.
            exceptions.handle(request, msg % name)
            raise

    def allowed(self, request, snapshot=None):
        # Only snapshots in a deletable state (or bulk/no-datum calls).
        if snapshot:
            return snapshot.status in DELETABLE_STATES
        return True
class CreateShareFromShareSnapshot(tables.LinkAction):
    """Row action linking to share creation, pre-filled with this snapshot
    as the source."""
    name = "create_share_from_share_snapshot"
    verbose_name = _("Create Share")
    url = "horizon:project:shares:create"
    classes = ("ajax-modal", "btn-camera")
    policy_rules = (("share", "share:create"),)

    def get_link_url(self, datum):
        """Append the snapshot id as a query parameter to the base URL."""
        query = urlencode({"snapshot_id": self.table.get_object_id(datum)})
        return "?".join([reverse(self.url), query])

    def allowed(self, request, share=None):
        """Only snapshots in 'available' state can be used as a source."""
        return share.status == "available"
class EditShareSnapshot(tables.LinkAction):
    """Row action opening the snapshot-edit modal."""
    name = "edit_share_snapshot"
    verbose_name = _("Edit Share Snapshot")
    url = "horizon:project:share_snapshots:share_snapshot_edit"
    classes = ("ajax-modal", "btn-camera")
class ShareSnapshotShareNameColumn(tables.Column):
    """Column whose link points at the snapshot's parent share."""

    def get_link_url(self, snapshot):
        parent_share_id = snapshot.share_id
        return reverse(self.link, args=(parent_share_id,))
class ManageShareSnapshotRules(tables.LinkAction):
    """Row action linking to the snapshot's access-rule management page."""
    name = "share_snapshot_manage_rules"
    verbose_name = _("Manage Share Snapshot Rules")
    url = "horizon:project:share_snapshots:share_snapshot_manage_rules"
    classes = ("btn-edit",)
    policy_rules = (("share", "share:access_get_all"),)

    def allowed(self, request, snapshot=None):
        # Rules only apply when the parent share allows mounting snapshots.
        # NOTE(review): this performs one share_get API call per rendered
        # row — confirm this is acceptable for large tables.
        share = manila.share_get(request, snapshot.share_id)
        return share.mount_snapshot_support
class AddShareSnapshotRule(tables.LinkAction):
    """Table action opening the add-access-rule form for a snapshot."""
    name = "share_snapshot_rule_add"
    verbose_name = _("Add Share Snapshot Rule")
    url = 'horizon:project:share_snapshots:share_snapshot_rule_add'
    classes = ("ajax-modal", "btn-create")
    icon = "plus"
    policy_rules = (("share", "share:allow_access"),)

    def allowed(self, request, snapshot=None):
        # NOTE(review): the |snapshot| argument is discarded in favor of a
        # fresh lookup via the table kwargs — confirm the extra API call is
        # actually needed here.
        snapshot = manila.share_snapshot_get(
            request, self.table.kwargs['snapshot_id'])
        return snapshot.status in ("available", "in-use")

    def get_link_url(self):
        # The snapshot id comes from the URL kwargs of the rules view.
        return reverse(self.url, args=[self.table.kwargs['snapshot_id']])
class DeleteShareSnapshotRule(tables.DeleteAction):
    """Table/row action revoking an access rule from a snapshot."""
    policy_rules = (("share", "share:deny_access"),)

    @staticmethod
    def action_present(count):
        # Button label, pluralized by the number of selected rules.
        return ungettext_lazy(
            u"Delete Share Snapshot Rule",
            u"Delete Share Snapshot Rules",
            count
        )

    @staticmethod
    def action_past(count):
        # Past-tense label used in success messages.
        return ungettext_lazy(
            u"Deleted Share Snapshot Rule",
            u"Deleted Share Snapshot Rules",
            count
        )

    def delete(self, request, obj_id):
        """Deny access rule |obj_id|; errors are shown but not re-raised."""
        try:
            manila.share_snapshot_deny(
                request, self.table.kwargs['snapshot_id'], obj_id)
        except Exception:
            msg = _('Unable to delete snapshot rule "%s".') % obj_id
            exceptions.handle(request, msg)
class UpdateShareSnapshotRuleRow(tables.Row):
    """Ajax row that refreshes a single snapshot access rule."""
    ajax = True

    def get_data(self, request, rule_id):
        """Return the access rule with id |rule_id| for this snapshot.

        Raises:
            horizon.exceptions.NotFound: when the rule no longer exists.
        """
        rules = manila.share_snapshot_rules_list(
            request, self.table.kwargs['snapshot_id'])
        for rule in rules or []:
            if rule.id == rule_id:
                return rule
        # BUGFIX: when the rule list came back empty the original returned
        # None instead of raising; always report NotFound so the ajax row
        # machinery stops polling a deleted rule correctly.
        raise exceptions.NotFound
class ShareSnapshotRulesTable(tables.DataTable):
    """Table listing the access rules of a single share snapshot."""
    access_type = tables.Column("access_type", verbose_name=_("Access Type"))
    access_to = tables.Column("access_to", verbose_name=_("Access to"))
    # The API field is "state"; the column is displayed as "Status".
    status = tables.Column("state", verbose_name=_("Status"))

    def get_object_display(self, obj):
        # Rules have no name; identify them by id in user-facing messages.
        return obj.id

    class Meta(object):
        name = "rules"
        verbose_name = _("Share Snapshot Rules")
        status_columns = ["status"]
        row_class = UpdateShareSnapshotRuleRow
        table_actions = (
            AddShareSnapshotRule,
            DeleteShareSnapshotRule,
        )
        row_actions = (
            DeleteShareSnapshotRule,
        )
class ShareSnapshotsTable(tables.DataTable):
    """Main project-view table of share snapshots."""
    # (status value, completed?) pairs consumed by horizon's ajax
    # row-status machinery (None means "still in progress").
    STATUS_CHOICES = (
        ("in-use", True),
        ("available", True),
        ("creating", None),
        ("error", False),
    )
    # Translated display labels for the raw status values.
    STATUS_DISPLAY_CHOICES = (
        ("in-use", pgettext_lazy("Current status of snapshot", u"In-use")),
        ("available",
         pgettext_lazy("Current status of snapshot", u"Available")),
        ("creating", pgettext_lazy("Current status of snapshot", u"Creating")),
        ("error", pgettext_lazy("Current status of snapshot", u"Error")),
    )
    name = tables.Column(
        "name",
        verbose_name=_("Name"),
        link="horizon:project:share_snapshots:share_snapshot_detail")
    description = tables.Column(
        "description",
        verbose_name=_("Description"),
        truncate=40)
    size = tables.Column(
        get_size,  # rendered via the module-level helper ("<n>GiB")
        verbose_name=_("Size"),
        attrs={'data-type': 'size'})
    status = tables.Column(
        "status",
        filters=(title,),
        verbose_name=_("Status"),
        status=True,
        status_choices=STATUS_CHOICES,
        display_choices=STATUS_DISPLAY_CHOICES)
    # Links back to the parent share's detail page.
    source = ShareSnapshotShareNameColumn(
        "share",
        verbose_name=_("Source"),
        link="horizon:project:shares:detail")

    def get_object_display(self, obj):
        return obj.name

    class Meta(object):
        name = "share_snapshots"
        verbose_name = _("Share Snapshots")
        status_columns = ["status"]
        row_class = UpdateShareSnapshotRow
        table_actions = (
            tables.NameFilterAction,
            DeleteShareSnapshot,
        )
        row_actions = (
            EditShareSnapshot,
            CreateShareFromShareSnapshot,
            ManageShareSnapshotRules,
            DeleteShareSnapshot,
        )
| |
from django import http
from django.db.models import Q
from django.db.transaction import non_atomic_requests
from django.utils.encoding import force_bytes
from django.utils.translation import ugettext
from django.views.decorators.vary import vary_on_headers
import olympia.core.logger
from olympia import amo
from olympia.bandwagon.views import get_filter as get_filter_view
from olympia.browse.views import personas_listing as personas_listing_view
from olympia.addons.models import Addon, Category
from olympia.amo.decorators import json_view
from olympia.amo.templatetags.jinja_helpers import locale_url, urlparams
from olympia.amo.utils import sorted_groupby, render
from olympia.bandwagon.models import Collection
from olympia.versions.compare import dict_from_int, version_dict, version_int
from .forms import ESSearchForm, SecondarySearchForm
DEFAULT_NUM_COLLECTIONS = 20
DEFAULT_NUM_PERSONAS = 21 # Results appear in a grid of 3 personas x 7 rows.
log = olympia.core.logger.getLogger('z.search')
def _personas(request):
    """Handle the request for persona searches.

    Runs an elasticsearch query restricted to personas and renders the
    paginated results with the personas listing filters.
    """
    initial = dict(request.GET.items())
    # Ignore these filters since return the same results for Firefox
    # as for Thunderbird, etc.
    initial.update(appver=None, platform=None)
    form = ESSearchForm(initial, type=amo.ADDON_PERSONA)
    # Populate cleaned_data; validation errors are deliberately ignored.
    form.is_valid()
    qs = Addon.search_public()
    filters = ['sort']
    # Form sort key -> elasticsearch sort field.
    mapping = {
        'downloads': '-weekly_downloads',
        'users': '-average_daily_users',
        'rating': '-bayesian_rating',
        'created': '-created',
        'name': 'name_sort',
        'updated': '-last_updated',
        'hotness': '-hotness'}
    results = _filter_search(request, qs, form.cleaned_data, filters,
                             sorting=mapping,
                             sorting_default='-average_daily_users',
                             types=[amo.ADDON_PERSONA])
    form_data = form.cleaned_data.get('q', '')
    # Pagination window derived from the cleaned form values.
    search_opts = {}
    search_opts['limit'] = form.cleaned_data.get('pp', DEFAULT_NUM_PERSONAS)
    page = form.cleaned_data.get('page') or 1
    search_opts['offset'] = (page - 1) * search_opts['limit']
    pager = amo.utils.paginate(request, results, per_page=search_opts['limit'])
    # NOTE(review): ``filter`` shadows the builtin here; kept for the template.
    categories, filter, base, category = personas_listing_view(request)
    context = {
        'pager': pager,
        'form': form,
        'categories': categories,
        'query': form_data,
        'filter': filter,
        'search_placeholder': 'themes'}
    return render(request, 'search/personas.html', context)
def _collections(request):
    """Handle the request for collections.

    Runs an elasticsearch query over listed collections for the current
    app and renders the paginated results.
    """
    # Sorting by relevance isn't an option. Instead the default is `weekly`.
    initial = {'sort': 'weekly'}
    # Update with GET variables.
    initial.update(request.GET.items())
    # Ignore appver/platform and set default number of collections per page.
    initial.update(appver=None, platform=None, pp=DEFAULT_NUM_COLLECTIONS)
    form = SecondarySearchForm(initial)
    # Populate cleaned_data; validation errors are deliberately ignored.
    form.is_valid()
    qs = Collection.search().filter(listed=True, app=request.APP.id)
    filters = ['sort']
    # Form sort key -> elasticsearch sort field.
    mapping = {
        'weekly': '-weekly_subscribers',
        'monthly': '-monthly_subscribers',
        'all': '-subscribers',
        'rating': '-rating',
        'created': '-created',
        'name': 'name_sort',
        'updated': '-modified'}
    results = _filter_search(request, qs, form.cleaned_data, filters,
                             sorting=mapping,
                             sorting_default='-weekly_subscribers',
                             types=amo.COLLECTION_SEARCH_CHOICES)
    form_data = form.cleaned_data.get('q', '')
    # Pagination window derived from the cleaned form values.
    search_opts = {}
    search_opts['limit'] = form.cleaned_data.get('pp', DEFAULT_NUM_COLLECTIONS)
    page = form.cleaned_data.get('page') or 1
    search_opts['offset'] = (page - 1) * search_opts['limit']
    search_opts['sort'] = form.cleaned_data.get('sort')
    pager = amo.utils.paginate(request, results, per_page=search_opts['limit'])
    context = {
        'pager': pager,
        'form': form,
        'query': form_data,
        'opts': search_opts,
        'filter': get_filter_view(request),
        'search_placeholder': 'collections'}
    return render(request, 'search/collections.html', context)
class BaseAjaxSearch(object):
    """Generates a list of dictionaries of add-on objects based on
    ID or name matches. Safe to be served to a JSON-friendly view.

    Sample output:
    [
        {
            "id": 1865,
            "name": "Adblock Plus",
            "url": "http://path/to/details/page",
            "icons": {
                "32": "http://path/to/icon-32",
                "64": "http://path/to/icon-64"
            }
        },
        ...
    ]
    """

    def __init__(self, request, excluded_ids=(), ratings=False):
        # excluded_ids: add-on ids to drop from the results.
        # ratings: when True, include 'rating' (average_rating) per item.
        self.request = request
        self.excluded_ids = excluded_ids
        # Subclasses may pre-set 'src'/'types'/'fields' as class attributes;
        # getattr keeps those overrides while providing defaults here.
        self.src = getattr(self, 'src', None)
        self.types = getattr(self, 'types', amo.ADDON_TYPES.keys())
        self.limit = 10
        self.key = 'q'  # Name of search field.
        self.ratings = ratings
        # Mapping of JSON key => add-on property.
        default_fields = {
            'id': 'id',
            'name': 'name',
            'url': 'get_url_path',
            'icons': {
                '32': ('get_icon_url', 32),
                '64': ('get_icon_url', 64)
            }
        }
        self.fields = getattr(self, 'fields', default_fields)
        if self.ratings:
            self.fields['rating'] = 'average_rating'

    def queryset(self):
        """Get items based on ID or search by name."""
        results = Addon.objects.none()
        q = self.request.GET.get(self.key)
        if q:
            try:
                pk = int(q)
            except ValueError:
                pk = None
            qs = None
            if pk:
                # Numeric query: exact primary-key lookup.
                qs = Addon.objects.public().filter(id=int(q))
            elif len(q) > 2:
                # Textual query of 3+ characters: full-text search.
                qs = Addon.search_public().filter_query_string(q.lower())
            if qs:
                results = qs.filter(type__in=self.types)
        return results

    def _build_fields(self, item, fields):
        # Recursively resolve the fields mapping against |item|; nested
        # dicts produce nested output dicts (e.g. 'icons').
        data = {}
        for key, prop in fields.iteritems():
            if isinstance(prop, dict):
                data[key] = self._build_fields(item, prop)
            else:
                # prop is a tuple like: ('method', 'arg1, 'argN').
                if isinstance(prop, tuple):
                    val = getattr(item, prop[0])(*prop[1:])
                else:
                    val = getattr(item, prop, '')
                    if callable(val):
                        val = val()
                data[key] = unicode(val)
        return data

    def build_list(self):
        """Populate a list of dictionaries based on label => property."""
        results = []
        for item in self.queryset()[:self.limit]:
            if item.id in self.excluded_ids:
                continue
            d = self._build_fields(item, self.fields)
            # Tag outgoing URLs with the subclass's click-tracking source.
            if self.src and 'url' in d:
                d['url'] = urlparams(d['url'], src=self.src)
            results.append(d)
        return results

    @property
    def items(self):
        return self.build_list()
class SearchSuggestionsAjax(BaseAjaxSearch):
    # 'src' tag appended to result URLs (click tracking for suggestions).
    src = 'ss'
class AddonSuggestionsAjax(SearchSuggestionsAjax):
    # No personas: suggest only the installable add-on types.
    types = [amo.ADDON_EXTENSION, amo.ADDON_THEME, amo.ADDON_DICT,
             amo.ADDON_SEARCH, amo.ADDON_LPAPP]
class PersonaSuggestionsAjax(SearchSuggestionsAjax):
    # Personas (lightweight themes) only.
    types = [amo.ADDON_PERSONA]
@json_view
@non_atomic_requests
def ajax_search(request):
    """Return add-on search results as JSON.

    Currently used only to return add-ons for populating a new
    collection. Themes (formerly Personas) are included by default,
    so this can be used elsewhere too.
    """
    searcher = BaseAjaxSearch(request)
    searcher.types = amo.ADDON_SEARCH_TYPES
    return searcher.items
@json_view
@non_atomic_requests
def ajax_search_suggestions(request):
    """Return app/category/add-on suggestions for the requested category."""
    category = request.GET.get('cat', 'all')
    # 'themes' gets persona-only suggestions; anything else (including
    # the default 'all') suggests regular add-ons.
    if category == 'themes':
        suggester_class = PersonaSuggestionsAjax
    else:
        suggester_class = AddonSuggestionsAjax
    suggester = suggester_class(request, ratings=False)
    return _build_suggestions(request, category, suggester)
def _build_suggestions(request, cat, suggester):
    """Assemble application, category, and add-on suggestions for a query."""
    results = []
    q = request.GET.get('q')
    # Only suggest for numeric IDs or queries longer than two characters.
    if not (q and (q.isdigit() or len(q) > 2)):
        return results
    needle = q.lower()
    if cat != 'apps':
        # Applications.
        for app in amo.APP_USAGE:
            app_name = unicode(app.pretty).lower()
            matched_words = [word for word in needle.split()
                             if app_name in word]
            if needle in app_name or matched_words:
                results.append({
                    'id': app.id,
                    'name': ugettext(u'{0} Add-ons').format(app.pretty),
                    'url': locale_url(app.short),
                    'cls': 'app ' + app.short
                })
        # Categories: limited to the current application (search
        # categories carry no application, so always allow them).
        categories = Category.objects.filter(
            Q(application=request.APP.id) | Q(type=amo.ADDON_SEARCH))
        if cat == 'themes':
            categories = categories.filter(type=amo.ADDON_PERSONA)
        else:
            categories = categories.exclude(type=amo.ADDON_PERSONA)
        for category in categories:
            if not category.name:
                continue
            cat_name = unicode(category.name).lower()
            matched_words = [word for word in needle.split()
                             if cat_name in word]
            if needle in cat_name or matched_words:
                results.append({
                    'id': category.id,
                    'name': unicode(category.name),
                    'url': category.get_url_path(),
                    'cls': 'cat'
                })
    results += suggester.items
    return results
def _filter_search(request, qs, query, filters, sorting,
                   sorting_default='-weekly_downloads', types=None):
    """Filter an ES queryset based on a list of filters.

    Args:
        request: Current request; supplies the application (request.APP).
        qs: The elasticsearch queryset to refine.
        query: Cleaned search-form data (dict of filter name => value).
        filters: Names of the filters this view supports.
        sorting: Mapping of sort keys to ES order_by expressions.
        sorting_default: Ordering applied when no text query or explicit
            sort was requested, to keep results predictable.
        types: Add-on types to restrict to when no valid 'atype' filter
            is present.

    Returns:
        The filtered (and possibly ordered) queryset.
    """
    if types is None:
        types = []
    APP = request.APP
    # Intersection of the form fields present and the filters we want to apply.
    show = [f for f in filters if query.get(f)]
    if query.get('q'):
        qs = qs.filter_query_string(query['q'])
    if 'platform' in show and query['platform'] in amo.PLATFORM_DICT:
        ps = (amo.PLATFORM_DICT[query['platform']].id, amo.PLATFORM_ALL.id)
        # If we've selected "All Systems" don't filter by platform.
        if ps[0] != ps[1]:
            qs = qs.filter(platforms__in=ps)
    if 'appver' in show:
        # Get a min version less than X.0.
        low = version_int(query['appver'])
        # Get a max version greater than X.0a.
        high = version_int(query['appver'] + 'a')
        # Note: when strict compatibility is not enabled on add-ons, we
        # fake the max version we index in compatible_apps.
        qs = qs.filter(**{
            'current_version.compatible_apps.%s.max__gte' % APP.id: high,
            'current_version.compatible_apps.%s.min__lte' % APP.id: low
        })
    if 'atype' in show and query['atype'] in amo.ADDON_TYPES:
        qs = qs.filter(type=query['atype'])
    else:
        qs = qs.filter(type__in=types)
    if 'cat' in show:
        # Drop the category filter when the category doesn't exist for
        # this application (search categories have no application).
        cat = (Category.objects.filter(id=query['cat'])
               .filter(Q(application=APP.id) | Q(type=amo.ADDON_SEARCH)))
        if not cat.exists():
            show.remove('cat')
    if 'cat' in show:
        qs = qs.filter(category=query['cat'])
    if 'tag' in show:
        qs = qs.filter(tags=query['tag'])
    if 'sort' in show:
        qs = qs.order_by(sorting[query['sort']])
    elif not query.get('q'):
        # Sort by a default if there was no query so results are predictable.
        qs = qs.order_by(sorting_default)
    return qs
@vary_on_headers('X-PJAX')
@non_atomic_requests
def search(request, tag_name=None):
    """Render the add-on search results page, backed by elasticsearch.

    Args:
        request: Current request; GET params carry the query and filters.
        tag_name: Optional tag name to pre-filter by (from tag URLs).
    """
    APP = request.APP
    types = (amo.ADDON_EXTENSION, amo.ADDON_THEME, amo.ADDON_DICT,
             amo.ADDON_SEARCH, amo.ADDON_LPAPP)
    category = request.GET.get('cat')
    if category == 'collections':
        extra_params = {'sort': {'newest': 'created'}}
    else:
        extra_params = None
    # Normalize legacy query parameters; a changed dict means we redirect
    # to the canonical URL.
    fixed = fix_search_query(request.GET, extra_params=extra_params)
    if fixed is not request.GET:
        # We generally want a 301, except if it's a "type", because that's only
        # here to support the new frontend, so a permanent redirect could mess
        # things up when the user is going back and forth between the old and
        # new frontend. https://github.com/mozilla/addons-server/issues/6846
        status = 302 if 'type' in request.GET else 301
        return http.HttpResponseRedirect(
            urlparams(request.path, **fixed), status=status)
    facets = request.GET.copy()
    # In order to differentiate between "all versions" and an undefined value,
    # we use "any" instead of "" in the frontend.
    if 'appver' in facets and facets['appver'] == 'any':
        facets['appver'] = ''
    form = ESSearchForm(facets or {})
    form.is_valid()  # Let the form try to clean data.
    form_data = form.cleaned_data
    if tag_name:
        form_data['tag'] = tag_name
    # Collections and themes are rendered by dedicated views.
    if category == 'collections':
        return _collections(request)
    elif category == 'themes' or form_data.get('atype') == amo.ADDON_PERSONA:
        return _personas(request)
    sort, extra_sort = split_choices(form.sort_choices, 'created')
    if form_data.get('atype') == amo.ADDON_SEARCH:
        # Search add-ons should not be searched by ADU, so replace 'Users'
        # sort with 'Weekly Downloads'.
        sort, extra_sort = list(sort), list(extra_sort)
        sort[1] = extra_sort[1]
        del extra_sort[1]
    # Perform search, using aggregation so that we can build the facets UI.
    # Note that we don't need to aggregate on platforms, that facet is built
    # from our constants directly, using the current application for this
    # request (request.APP).
    appversion_field = 'current_version.compatible_apps.%s.max' % APP.id
    qs = (Addon.search_public().filter(app=APP.id)
          .aggregate(tags={'terms': {'field': 'tags'}},
                     appversions={'terms': {'field': appversion_field}},
                     categories={'terms': {'field': 'category', 'size': 200}})
          )
    filters = ['atype', 'appver', 'cat', 'sort', 'tag', 'platform']
    mapping = {'users': '-average_daily_users',
               'rating': '-bayesian_rating',
               'created': '-created',
               'name': 'name_sort',
               'downloads': '-weekly_downloads',
               'updated': '-last_updated',
               'hotness': '-hotness'}
    qs = _filter_search(request, qs, form_data, filters, mapping, types=types)
    pager = amo.utils.paginate(request, qs)
    ctx = {
        'is_pjax': request.META.get('HTTP_X_PJAX'),
        'pager': pager,
        'query': form_data,
        'form': form,
        'sort_opts': sort,
        'extra_sort_opts': extra_sort,
        'sorting': sort_sidebar(request, form_data, form),
        'sort': form_data.get('sort'),
    }
    if not ctx['is_pjax']:
        # Full page load (not PJAX): also build the sidebar facets from
        # the aggregation buckets.
        aggregations = pager.object_list.aggregations
        ctx.update({
            'tag': tag_name,
            'categories': category_sidebar(request, form_data, aggregations),
            'platforms': platform_sidebar(request, form_data),
            'versions': version_sidebar(request, form_data, aggregations),
            'tags': tag_sidebar(request, form_data, aggregations),
        })
    return render(request, 'search/results.html', ctx)
class FacetLink(object):
    """One entry in a search-facet sidebar.

    Holds the link label, the query parameters the link applies, whether
    it is the currently-selected facet, and any nested child links.
    """

    def __init__(self, text, urlparams, selected=False, children=None):
        # Visible label for the link.
        self.text = text
        # Query parameters this link applies when followed.
        self.urlparams = urlparams
        # True when this facet is the active one.
        self.selected = selected
        # Nested FacetLinks (e.g. categories under an add-on type).
        self.children = children or []
def sort_sidebar(request, form_data, form):
    """Build the sort-order facet links for the search sidebar."""
    active = form_data.get('sort')
    links = []
    for key, label in form.sort_choices:
        links.append(FacetLink(label, {'sort': key}, key == active))
    return links
def category_sidebar(request, form_data, aggregations):
    """Build the category facet links, grouped by add-on type.

    Args:
        request: Current request; supplies the application (request.APP).
        form_data: Cleaned search-form data with 'atype'/'cat' selections.
        aggregations: ES aggregation results; the 'categories' buckets
            hold the category IDs present in the current result set.

    Returns:
        A list of FacetLink objects: an "All Add-ons" link followed by one
        link per add-on type, each carrying its categories as children.
    """
    APP = request.APP
    qatype, qcat = form_data.get('atype'), form_data.get('cat')
    cats = [f['key'] for f in aggregations['categories']]
    categories = Category.objects.filter(id__in=cats)
    if qatype in amo.ADDON_TYPES:
        categories = categories.filter(type=qatype)
    # Search categories don't have an application.
    categories = categories.filter(Q(application=APP.id) |
                                   Q(type=amo.ADDON_SEARCH))
    # If category is listed as a facet but type is not, then show All.
    if qcat in cats and not qatype:
        qatype = True
    # If category is not listed as a facet NOR available for this
    # application, then show All.
    if qcat not in categories.values_list('id', flat=True):
        qatype = qcat = None
    categories = [(_atype, sorted(_cats, key=lambda x: x.name))
                  for _atype, _cats in sorted_groupby(categories, 'type')]
    cat_params = {'cat': None}
    all_label = ugettext(u'All Add-ons')
    # (Previously rv was initialised to [] and immediately reassigned;
    # the dead assignment has been removed.)
    rv = [FacetLink(all_label, {'atype': None, 'cat': None}, not qatype)]
    for addon_type, cats in categories:
        selected = addon_type == qatype and not qcat
        # Build the link params: cat reset to None plus this add-on type.
        cat_params = cat_params.copy()
        cat_params.update(atype=addon_type)
        link = FacetLink(amo.ADDON_TYPES[addon_type],
                         cat_params, selected)
        link.children = [
            FacetLink(c.name, dict(cat_params, cat=c.id), c.id == qcat)
            for c in cats]
        rv.append(link)
    return rv
def version_sidebar(request, form_data, aggregations):
    """Build the application-version facet links for the search sidebar.

    Versions come from the ES 'appversions' aggregation; the currently
    filtered version is inserted even when it is not a facet, and
    versions below the app's minimum display version are skipped.
    """
    appver = ''
    # If appver is in the request, we read it cleaned via form_data.
    if 'appver' in request.GET or form_data.get('appver'):
        appver = form_data.get('appver')
    app = unicode(request.APP.pretty)
    exclude_versions = getattr(request.APP, 'exclude_versions', [])
    # L10n: {0} is an application, such as Firefox. This means "any version of
    # Firefox."
    rv = [FacetLink(
        ugettext(u'Any {0}').format(app), {'appver': 'any'}, not appver)]
    vs = [dict_from_int(f['key']) for f in aggregations['appversions']]
    # Insert the filtered app version even if it's not a facet.
    av_dict = version_dict(appver)
    if av_dict and av_dict not in vs and av_dict['major']:
        vs.append(av_dict)
    # Valid versions must be in the form of `major.minor`; a minor of
    # None or 99 is collapsed to 0.
    vs = set((v['major'], v['minor1'] if v['minor1'] not in (None, 99) else 0)
             for v in vs)
    versions = ['%s.%s' % v for v in sorted(vs, reverse=True)]
    for version, floated in zip(versions, map(float, versions)):
        if (floated not in exclude_versions and
                floated > request.APP.min_display_version):
            rv.append(FacetLink('%s %s' % (app, version), {'appver': version},
                                appver == version))
    return rv
def platform_sidebar(request, form_data):
    """Build the platform facet links for the search sidebar."""
    requested = form_data.get('platform')
    platforms = request.APP.platforms.values()
    # The first entry is "All Systems", which is also the default.
    all_systems = platforms.pop(0)
    selected = amo.PLATFORM_DICT.get(requested, all_systems)
    if selected != all_systems and selected not in platforms:
        # Insert the filtered platform even if it's not a facet.
        platforms.append(selected)
    # L10n: "All Systems" means show everything regardless of platform.
    links = [FacetLink(ugettext(u'All Systems'),
                       {'platform': all_systems.shortname},
                       selected == all_systems)]
    for platform in platforms:
        links.append(FacetLink(platform.name,
                               {'platform': platform.shortname},
                               platform == selected))
    return links
def tag_sidebar(request, form_data, aggregations):
    """Build the tag facet links for the search sidebar."""
    selected_tag = form_data.get('tag')
    facet_tags = [bucket['key'] for bucket in aggregations['tags']]
    links = [FacetLink(ugettext(u'All Tags'), {'tag': None},
                       not selected_tag)]
    for tag in facet_tags:
        links.append(FacetLink(tag, {'tag': tag}, tag == selected_tag))
    if selected_tag and selected_tag not in facet_tags:
        # Keep the active tag visible even when it isn't a facet bucket.
        links.append(FacetLink(selected_tag, {'tag': selected_tag}, True))
    return links
def fix_search_query(query, extra_params=None):
    """Normalize legacy search parameter names and values.

    Returns a fixed dict when anything changed; otherwise returns `query`
    itself, so callers can detect "no change" by identity.
    """
    rv = {force_bytes(k): v for k, v in query.items()}
    changed = False
    # Change old keys to new names.
    legacy_keys = {
        'lver': 'appver',
        'pid': 'platform',
        'type': 'atype',
    }
    for old, new in legacy_keys.items():
        if old in query:
            rv[new] = rv.pop(old)
            changed = True
    # Change old parameter values to new values.
    value_fixes = {
        'sort': {
            'newest': 'updated',
            'popularity': 'downloads',
            'weeklydownloads': 'users',
            'averagerating': 'rating',
            'sortby': 'sort',
        },
        'platform': {
            str(p.id): p.shortname
            for p in amo.PLATFORMS.values()
        },
        'atype': {k: str(v) for k, v in amo.ADDON_SEARCH_SLUGS.items()},
    }
    if extra_params:
        value_fixes.update(extra_params)
    for key, fixes in value_fixes.items():
        if key in rv and rv[key] in fixes:
            rv[key] = fixes[rv[key]]
            changed = True
    return rv if changed else query
def split_choices(choices, split):
    """Split a list of [(key, title)] pairs after key == split.

    Returns (head, tail) where head ends with the first matching pair;
    when no key matches, returns (choices, []).
    """
    for position, (key, _title) in enumerate(choices):
        if key == split:
            cut = position + 1
            return choices[:cut], choices[cut:]
    return choices, []
| |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Batch Norm bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import ops
from tensorflow.python.layers import normalization
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops.distributions import bijector
__all__ = [
"BatchNormalization",
]
def _undo_batch_normalization(x,
                              mean,
                              variance,
                              offset,
                              scale,
                              variance_epsilon,
                              name=None):
  r"""Inverse of `tf.nn.batch_normalization`.

  The forward op computes `scale * (x - mean) * rsqrt(variance + eps) +
  offset`; this function applies the algebraic inverse of that affine
  transform.

  Args:
    x: Input `Tensor` of arbitrary dimensionality.
    mean: A mean `Tensor`.
    variance: A variance `Tensor`.
    offset: An offset `Tensor`, often denoted `beta` in equations, or
      None. If present, will be added to the normalized tensor.
    scale: A scale `Tensor`, often denoted `gamma` in equations, or
      `None`. If present, the scale is applied to the normalized tensor.
    variance_epsilon: A small `float` added to the minibatch `variance` to
      prevent dividing by zero.
    name: A name for this operation (optional).

  Returns:
    batch_unnormalized: The de-normalized, de-scaled, de-offset `Tensor`.
  """
  with ops.name_scope(
      name, "undo_batchnorm", [x, mean, variance, scale, offset]):
    # Inverting y = (x - mean) * scale / sqrt(variance + eps) + offset
    # gives x = (y - offset) * sqrt(variance + eps) / scale + mean,
    # i.e. y * rescale + (mean - offset * rescale).
    rescale = math_ops.sqrt(variance + variance_epsilon)
    if scale is not None:
      rescale /= scale
    if offset is not None:
      shift = mean - offset * rescale
    else:
      shift = mean
    return x * rescale + shift
class BatchNormalization(bijector.Bijector):
  """Compute `Y = g(X) s.t. X = g^-1(Y) = (Y - mean(Y)) / std(Y)`.

  Applies Batch Normalization [(Ioffe and Szegedy, 2015)][1] to samples from a
  data distribution. This can be used to stabilize training of normalizing
  flows ([Papamakarios et al., 2016][3]; [Dinh et al., 2017][2]).

  When training Deep Neural Networks (DNNs), it is common practice to
  normalize or whiten features by shifting them to have zero mean and
  scaling them to have unit variance.

  The `inverse()` method of the `BatchNormalization` bijector, which is used in
  the log-likelihood computation of data samples, implements the normalization
  procedure (shift-and-scale) using the mean and standard deviation of the
  current minibatch.

  Conversely, the `forward()` method of the bijector de-normalizes samples
  (e.g. `X*std(Y) + mean(Y)`) with the running-average mean and standard
  deviation computed at training-time. De-normalization is useful for
  sampling.

  ```python
  dist = tfd.TransformedDistribution(
      distribution=tfd.Normal(),
      bijector=tfb.BatchNorm())

  y = tfd.MultivariateNormalDiag(loc=1., scale=2.).sample(100)  # ~ N(1, 2)
  x = dist.bijector.inverse(y)  # ~ N(0, 1)
  y = dist.sample()  # ~ N(1, 2)
  ```

  During training time, `BatchNorm.inverse` and `BatchNorm.forward` are not
  guaranteed to be inverses of each other because `inverse(y)` uses statistics
  of the current minibatch, while `forward(x)` uses running-average statistics
  accumulated from training. In other words,
  `BatchNorm.inverse(BatchNorm.forward(...))` and
  `BatchNorm.forward(BatchNorm.inverse(...))` will be identical when
  `training=False` but may be different when `training=True`.

  #### References

  [1]: Sergey Ioffe and Christian Szegedy. Batch Normalization: Accelerating
       Deep Network Training by Reducing Internal Covariate Shift. In
       _International Conference on Machine Learning_, 2015.
       https://arxiv.org/abs/1502.03167

  [2]: Laurent Dinh, Jascha Sohl-Dickstein, and Samy Bengio. Density Estimation
       using Real NVP. In _International Conference on Learning
       Representations_, 2017. https://arxiv.org/abs/1605.08803

  [3]: George Papamakarios, Theo Pavlakou, and Iain Murray. Masked
       Autoregressive Flow for Density Estimation. In _Neural Information
       Processing Systems_, 2017. https://arxiv.org/abs/1705.07057
  """

  def __init__(self,
               batchnorm_layer=None,
               training=True,
               validate_args=False,
               name="batch_normalization"):
    """Instantiates the `BatchNorm` bijector.

    Args:
      batchnorm_layer: `tf.layers.BatchNormalization` layer object. If `None`,
        defaults to
        `tf.layers.BatchNormalization(gamma_constraint=nn_ops.relu(x) + 1e-6)`.
        This ensures positivity of the scale variable.
      training: If True, updates running-average statistics during call to
        `inverse()`.
      validate_args: Python `bool` indicating whether arguments should be
        checked for correctness.
      name: Python `str` name given to ops managed by this object.

    Raises:
      ValueError: If bn_layer is not an instance of
        `tf.layers.BatchNormalization`, or if it is specified with `renorm=True`
        or a virtual batch size.
    """
    # Scale must be positive.
    g_constraint = lambda x: nn.relu(x) + 1e-6
    self.batchnorm = batchnorm_layer or normalization.BatchNormalization(
        gamma_constraint=g_constraint)
    self._validate_bn_layer(self.batchnorm)
    self._training = training
    super(BatchNormalization, self).__init__(
        validate_args=validate_args, name=name)

  def _validate_bn_layer(self, layer):
    """Check for valid BatchNormalization layer.

    Args:
      layer: Instance of `tf.layers.BatchNormalization`.

    Raises:
      ValueError: If batchnorm_layer argument is not an instance of
        `tf.layers.BatchNormalization`, or if `batchnorm_layer.renorm=True` or
        if `batchnorm_layer.virtual_batch_size` is specified.
    """
    if not isinstance(layer, normalization.BatchNormalization):
      raise ValueError(
          "batchnorm_layer must be an instance of BatchNormalization layer.")
    if layer.renorm:
      raise ValueError("BatchNorm Bijector does not support renormalization.")
    if layer.virtual_batch_size:
      raise ValueError(
          "BatchNorm Bijector does not support virtual batch sizes.")

  def _get_broadcast_fn(self, x):
    """Return a fn that reshapes layer params to broadcast against `x`."""
    # Compute shape to broadcast scale/shift parameters to.
    if not x.shape.is_fully_defined():
      raise ValueError("Input must have shape known at graph construction.")
    input_shape = np.int32(x.shape.as_list())
    ndims = len(input_shape)
    reduction_axes = [i for i in range(ndims) if i not in self.batchnorm.axis]
    # Broadcasting only necessary for single-axis batch norm where the axis is
    # not the last dimension.
    broadcast_shape = [1] * ndims
    broadcast_shape[self.batchnorm.axis[0]] = (
        input_shape[self.batchnorm.axis[0]])
    def _broadcast(v):
      if (v is not None and
          len(v.get_shape()) != ndims and
          reduction_axes != list(range(ndims - 1))):
        return array_ops.reshape(v, broadcast_shape)
      return v
    return _broadcast

  def _normalize(self, y):
    # Normalization via the wrapped layer; `training` controls whether
    # minibatch or moving statistics are used.
    return self.batchnorm.apply(y, training=self._training)

  def _de_normalize(self, x):
    # Uses the saved statistics.
    if not self.batchnorm.built:
      input_shape = x.get_shape()
      self.batchnorm.build(input_shape)
    broadcast_fn = self._get_broadcast_fn(x)
    mean = broadcast_fn(self.batchnorm.moving_mean)
    variance = broadcast_fn(self.batchnorm.moving_variance)
    beta = broadcast_fn(self.batchnorm.beta) if self.batchnorm.center else None
    gamma = broadcast_fn(self.batchnorm.gamma) if self.batchnorm.scale else None
    return _undo_batch_normalization(
        x, mean, variance, beta, gamma, self.batchnorm.epsilon)

  def _forward(self, x):
    return self._de_normalize(x)

  def _inverse(self, y):
    return self._normalize(y)

  def _forward_log_det_jacobian(self, x):
    # Uses saved statistics to compute volume distortion.
    return -self._inverse_log_det_jacobian(x, use_saved_statistics=True)

  def _inverse_log_det_jacobian(self, y, use_saved_statistics=False):
    if not y.shape.is_fully_defined():
      raise ValueError("Input must have shape known at graph construction.")
    input_shape = np.int32(y.shape.as_list())
    if not self.batchnorm.built:
      # Create variables.
      self.batchnorm.build(input_shape)
    event_dims = self.batchnorm.axis
    reduction_axes = [i for i in range(len(input_shape)) if i not in event_dims]
    if use_saved_statistics or not self._training:
      log_variance = math_ops.log(
          self.batchnorm.moving_variance + self.batchnorm.epsilon)
    else:
      # At training-time, ildj is computed from the mean and log-variance across
      # the current minibatch.
      _, v = nn.moments(y, axes=reduction_axes, keep_dims=True)
      log_variance = math_ops.log(v + self.batchnorm.epsilon)
    # `gamma` and `log Var(y)` reductions over event_dims.
    # Log(total change in area from gamma term).
    log_total_gamma = math_ops.reduce_sum(math_ops.log(self.batchnorm.gamma))
    # Log(total change in area from log-variance term).
    log_total_variance = math_ops.reduce_sum(log_variance)
    # The ildj is scalar, as it does not depend on the values of x and is
    # constant across minibatch elements.
    return log_total_gamma - 0.5 * log_total_variance
| |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import atexit
import os.path
import tempfile
import fixtures
import glance_store
from oslo_config import cfg
from oslo_db import options
import glance.common.client
from glance.common import config
from glance.db import migration
import glance.db.sqlalchemy.api
import glance.registry.client.v1.client
from glance import tests as glance_tests
from glance.tests import utils as test_utils
# Paste pipeline/app/filter wiring for the in-process glance-api used by the
# tests below.  NOTE(review): leading whitespace on wrapped lines inside this
# string was reconstructed (paste config requires continuation lines to be
# indented) — confirm against the original file.
TESTING_API_PASTE_CONF = """
[pipeline:glance-api]
pipeline = versionnegotiation gzip unauthenticated-context rootapp

[pipeline:glance-api-caching]
pipeline = versionnegotiation gzip unauthenticated-context cache rootapp

[pipeline:glance-api-cachemanagement]
pipeline =
    versionnegotiation
    gzip
    unauthenticated-context
    cache
    cache_manage
    rootapp

[pipeline:glance-api-fakeauth]
pipeline = versionnegotiation gzip fakeauth context rootapp

[pipeline:glance-api-noauth]
pipeline = versionnegotiation gzip context rootapp

[composite:rootapp]
paste.composite_factory = glance.api:root_app_factory
/: apiversions
/v1: apiv1app
/v2: apiv2app
/v3: apiv3app

[app:apiversions]
paste.app_factory = glance.api.versions:create_resource

[app:apiv1app]
paste.app_factory = glance.api.v1.router:API.factory

[app:apiv2app]
paste.app_factory = glance.api.v2.router:API.factory

[app:apiv3app]
paste.app_factory = glance.api.v3.router:API.factory

[filter:versionnegotiation]
paste.filter_factory =
 glance.api.middleware.version_negotiation:VersionNegotiationFilter.factory

[filter:gzip]
paste.filter_factory = glance.api.middleware.gzip:GzipMiddleware.factory

[filter:cache]
paste.filter_factory = glance.api.middleware.cache:CacheFilter.factory

[filter:cache_manage]
paste.filter_factory =
 glance.api.middleware.cache_manage:CacheManageFilter.factory

[filter:context]
paste.filter_factory = glance.api.middleware.context:ContextMiddleware.factory

[filter:unauthenticated-context]
paste.filter_factory =
 glance.api.middleware.context:UnauthenticatedContextMiddleware.factory

[filter:fakeauth]
paste.filter_factory = glance.tests.utils:FakeAuthMiddleware.factory
"""

# Paste wiring for the in-process glance-registry application.
TESTING_REGISTRY_PASTE_CONF = """
[pipeline:glance-registry]
pipeline = unauthenticated-context registryapp

[pipeline:glance-registry-fakeauth]
pipeline = fakeauth context registryapp

[app:registryapp]
paste.app_factory = glance.registry.api.v1:API.factory

[filter:context]
paste.filter_factory = glance.api.middleware.context:ContextMiddleware.factory

[filter:unauthenticated-context]
paste.filter_factory =
 glance.api.middleware.context:UnauthenticatedContextMiddleware.factory

[filter:fakeauth]
paste.filter_factory = glance.tests.utils:FakeAuthMiddleware.factory
"""

# Global oslo.config handle shared with the glance code under test.
CONF = cfg.CONF
class ApiTest(test_utils.BaseTestCase):
    """Functional-test base wiring up in-process glance API and registry
    WSGI apps (no real HTTP server), backed by a sqlite database."""

    def setUp(self):
        super(ApiTest, self).setUp()
        self.init()

    def init(self):
        # Build the whole test environment: temp dir, logging, policy,
        # database, stores, property protection, then the paste-loaded
        # registry and API applications.
        self.test_dir = self.useFixture(fixtures.TempDir()).path
        self._configure_logging()
        self._configure_policy()
        self._setup_database()
        self._setup_stores()
        self._setup_property_protection()
        self.glance_registry_app = self._load_paste_app(
            'glance-registry',
            flavor=getattr(self, 'registry_flavor', ''),
            conf=getattr(self, 'registry_paste_conf',
                         TESTING_REGISTRY_PASTE_CONF),
        )
        self._connect_registry_client()
        self.glance_api_app = self._load_paste_app(
            'glance-api',
            flavor=getattr(self, 'api_flavor', ''),
            conf=getattr(self, 'api_paste_conf', TESTING_API_PASTE_CONF),
        )
        # Adapter that routes httplib2-style requests into the WSGI app.
        self.http = test_utils.Httplib2WsgiAdapter(self.glance_api_app)

    def _setup_property_protection(self):
        # Copy the canned property-protections file into the test dir.
        self._copy_data_file('property-protections.conf', self.test_dir)
        self.property_file = os.path.join(self.test_dir,
                                          'property-protections.conf')

    def _configure_policy(self):
        policy_file = self._copy_data_file('policy.json', self.test_dir)
        self.config(policy_file=policy_file, group='oslo_policy')

    def _configure_logging(self):
        # Quiet chatty third-party loggers during test runs.
        self.config(default_log_levels=[
            'amqplib=WARN',
            'sqlalchemy=WARN',
            'boto=WARN',
            'suds=INFO',
            'keystone=INFO',
            'eventlet.wsgi.server=DEBUG'
        ])

    def _setup_database(self):
        sql_connection = 'sqlite:////%s/tests.sqlite' % self.test_dir
        options.set_defaults(CONF, connection=sql_connection)
        glance.db.sqlalchemy.api.clear_db_env()
        glance_db_env = 'GLANCE_DB_TEST_SQLITE_FILE'
        if glance_db_env in os.environ:
            # use the empty db created and cached as a tempfile
            # instead of spending the time creating a new one
            db_location = os.environ[glance_db_env]
            test_utils.execute('cp %s %s/tests.sqlite'
                               % (db_location, self.test_dir))
        else:
            migration.db_sync()
            # copy the clean db to a temp location so that it
            # can be reused for future tests
            (osf, db_location) = tempfile.mkstemp()
            os.close(osf)
            test_utils.execute('cp %s/tests.sqlite %s'
                               % (self.test_dir, db_location))
            os.environ[glance_db_env] = db_location

            # cleanup the temp file when the test suite is
            # complete
            def _delete_cached_db():
                try:
                    os.remove(os.environ[glance_db_env])
                except Exception:
                    glance_tests.logger.exception(
                        "Error cleaning up the file %s" %
                        os.environ[glance_db_env])
            atexit.register(_delete_cached_db)

    def _setup_stores(self):
        glance_store.register_opts(CONF)
        image_dir = os.path.join(self.test_dir, "images")
        self.config(group='glance_store',
                    filesystem_store_datadir=image_dir)
        glance_store.create_stores()

    def _load_paste_app(self, name, flavor, conf):
        # Write the paste config to disk, then load the WSGI app from it.
        conf_file_path = os.path.join(self.test_dir, '%s-paste.ini' % name)
        with open(conf_file_path, 'wb') as conf_file:
            conf_file.write(conf)
            conf_file.flush()
        return config.load_paste_app(name, flavor=flavor,
                                     conf_file=conf_file_path)

    def _connect_registry_client(self):
        # Stub the registry client's connection factory so registry calls
        # hit the in-process registry app instead of opening a socket.
        def get_connection_type(self2):
            def wrapped(*args, **kwargs):
                return test_utils.HttplibWsgiAdapter(self.glance_registry_app)
            return wrapped

        self.stubs.Set(glance.common.client.BaseClient,
                       'get_connection_type', get_connection_type)

    def tearDown(self):
        glance.db.sqlalchemy.api.clear_db_env()
        super(ApiTest, self).tearDown()
| |
# Copyright 2012, Intel, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinder.common import constants
from cinder import objects
from cinder import quota
from cinder import rpc
from cinder.volume import utils
# Module-level handle to cinder's shared quota engine.
QUOTAS = quota.QUOTAS
class VolumeAPI(rpc.RPCAPI):
"""Client side of the volume rpc API.
API version history:
.. code-block:: none
1.0 - Initial version.
1.1 - Adds clone volume option to create_volume.
1.2 - Add publish_service_capabilities() method.
1.3 - Pass all image metadata (not just ID) in copy_volume_to_image.
1.4 - Add request_spec, filter_properties and
allow_reschedule arguments to create_volume().
1.5 - Add accept_transfer.
1.6 - Add extend_volume.
1.7 - Adds host_name parameter to attach_volume()
to allow attaching to host rather than instance.
1.8 - Add migrate_volume, rename_volume.
1.9 - Add new_user and new_project to accept_transfer.
1.10 - Add migrate_volume_completion, remove rename_volume.
1.11 - Adds mode parameter to attach_volume()
to support volume read-only attaching.
1.12 - Adds retype.
1.13 - Adds create_export.
1.14 - Adds reservation parameter to extend_volume().
1.15 - Adds manage_existing and unmanage_only flag to delete_volume.
1.16 - Removes create_export.
1.17 - Add replica option to create_volume, promote_replica and
sync_replica.
1.18 - Adds create_consistencygroup, delete_consistencygroup,
create_cgsnapshot, and delete_cgsnapshot. Also adds
the consistencygroup_id parameter in create_volume.
1.19 - Adds update_migrated_volume
1.20 - Adds support for sending objects over RPC in create_snapshot()
and delete_snapshot()
1.21 - Adds update_consistencygroup.
1.22 - Adds create_consistencygroup_from_src.
1.23 - Adds attachment_id to detach_volume.
1.24 - Removed duplicated parameters: snapshot_id, image_id,
source_volid, source_replicaid, consistencygroup_id and
cgsnapshot_id from create_volume. All off them are already
passed either in request_spec or available in the DB.
1.25 - Add source_cg to create_consistencygroup_from_src.
1.26 - Adds support for sending objects over RPC in
create_consistencygroup(), create_consistencygroup_from_src(),
update_consistencygroup() and delete_consistencygroup().
1.27 - Adds support for replication V2
1.28 - Adds manage_existing_snapshot
1.29 - Adds get_capabilities.
1.30 - Adds remove_export
1.31 - Updated: create_consistencygroup_from_src(), create_cgsnapshot()
and delete_cgsnapshot() to cast method only with necessary
args. Forwarding CGSnapshot object instead of CGSnapshot_id.
1.32 - Adds support for sending objects over RPC in create_volume().
1.33 - Adds support for sending objects over RPC in delete_volume().
1.34 - Adds support for sending objects over RPC in retype().
1.35 - Adds support for sending objects over RPC in extend_volume().
1.36 - Adds support for sending objects over RPC in migrate_volume(),
migrate_volume_completion(), and update_migrated_volume().
1.37 - Adds old_reservations parameter to retype to support quota
checks in the API.
1.38 - Scaling backup service, add get_backup_device() and
secure_file_operations_enabled()
1.39 - Update replication methods to reflect new backend rep strategy
1.40 - Add cascade option to delete_volume().
... Mitaka supports messaging version 1.40. Any changes to existing
methods in 1.x after that point should be done so that they can handle
the version_cap being set to 1.40.
2.0 - Remove 1.x compatibility
2.1 - Add get_manageable_volumes() and get_manageable_snapshots().
2.2 - Adds support for sending objects over RPC in manage_existing().
2.3 - Adds support for sending objects over RPC in
initialize_connection().
2.4 - Sends request_spec as object in create_volume().
2.5 - Adds create_group, delete_group, and update_group
2.6 - Adds create_group_snapshot, delete_group_snapshot, and
create_group_from_src().
... Newton supports messaging version 2.6. Any changes to existing
methods in 2.x after that point should be done so that they can handle
the version_cap being set to 2.6.
3.0 - Drop 2.x compatibility
3.1 - Remove promote_replica and reenable_replication. This is
non-backward compatible, but the user-facing API was removed
back in Mitaka when introducing cheesecake replication.
3.2 - Adds support for sending objects over RPC in
get_backup_device().
3.3 - Adds support for sending objects over RPC in attach_volume().
3.4 - Adds support for sending objects over RPC in detach_volume().
3.5 - Adds support for cluster in retype and migrate_volume
3.6 - Switch to use oslo.messaging topics to indicate backends instead
of @backend suffixes in server names.
3.7 - Adds do_cleanup method to do volume cleanups from other nodes
that we were doing in init_host.
3.8 - Make failover_host cluster aware and add failover_completed.
3.9 - Adds new attach/detach methods
3.10 - Returning objects instead of raw dictionaries in
get_manageable_volumes & get_manageable_snapshots
3.11 - Removes create_consistencygroup, delete_consistencygroup,
create_cgsnapshot, delete_cgsnapshot, update_consistencygroup,
and create_consistencygroup_from_src.
3.12 - Adds set_log_levels and get_log_levels
3.13 - Add initialize_connection_snapshot,
terminate_connection_snapshot, and remove_export_snapshot.
3.14 - Adds enable_replication, disable_replication,
failover_replication, and list_replication_targets.
3.15 - Add revert_to_snapshot method
"""
# Newest RPC interface version this client implements (see the version
# history in the class docstring).
RPC_API_VERSION = '3.15'
# Base version used when no higher version is negotiated.
RPC_DEFAULT_VERSION = '3.0'
TOPIC = constants.VOLUME_TOPIC
BINARY = 'cinder-volume'
    def _get_cctxt(self, host=None, version=None, **kwargs):
        """Prepare a client call context, optionally addressed to *host*.

        When the peer supports 3.6+ the backend suffix is moved from the
        server name into the topic (see the 3.6 entry in the class
        docstring); otherwise the legacy host@backend server addressing is
        kept.
        """
        if host:
            server = utils.extract_host(host)
            # TODO(dulek): If we're pinned before 3.6, we should send stuff the
            # old way - addressing server=host@backend, topic=cinder-volume.
            # Otherwise we're addressing server=host,
            # topic=cinder-volume.host@backend. This conditional can go away
            # when we stop supporting 3.x.
            if self.client.can_send_version('3.6'):
                kwargs['topic'] = '%(topic)s.%(host)s' % {'topic': self.TOPIC,
                                                          'host': server}
                server = utils.extract_host(server, 'host')
            kwargs['server'] = server
        return super(VolumeAPI, self)._get_cctxt(version=version, **kwargs)
def create_volume(self, ctxt, volume, request_spec, filter_properties,
allow_reschedule=True):
cctxt = self._get_cctxt(volume.service_topic_queue)
cctxt.cast(ctxt, 'create_volume',
request_spec=request_spec,
filter_properties=filter_properties,
allow_reschedule=allow_reschedule,
volume=volume)
    @rpc.assert_min_rpc_version('3.15')
    def revert_to_snapshot(self, ctxt, volume, snapshot):
        """Asynchronously revert *volume* to the state captured in *snapshot*.

        Requires RPC API 3.15 (see the version history above).
        """
        version = self._compat_ver('3.15')
        # NOTE(review): this routes on volume.host while most sibling methods
        # use volume.service_topic_queue — confirm this is intended.
        cctxt = self._get_cctxt(volume.host, version)
        cctxt.cast(ctxt, 'revert_to_snapshot', volume=volume,
                   snapshot=snapshot)
def delete_volume(self, ctxt, volume, unmanage_only=False, cascade=False):
volume.create_worker()
cctxt = self._get_cctxt(volume.service_topic_queue)
msg_args = {
'volume': volume, 'unmanage_only': unmanage_only,
'cascade': cascade,
}
cctxt.cast(ctxt, 'delete_volume', **msg_args)
def create_snapshot(self, ctxt, volume, snapshot):
snapshot.create_worker()
cctxt = self._get_cctxt(volume.service_topic_queue)
cctxt.cast(ctxt, 'create_snapshot', snapshot=snapshot)
def delete_snapshot(self, ctxt, snapshot, unmanage_only=False):
cctxt = self._get_cctxt(snapshot.service_topic_queue)
cctxt.cast(ctxt, 'delete_snapshot', snapshot=snapshot,
unmanage_only=unmanage_only)
    def attach_volume(self, ctxt, volume, instance_uuid, host_name,
                      mountpoint, mode):
        """Blocking call marking *volume* attached to an instance or host.

        Sending the volume object over RPC was added in 3.2/3.3 (see class
        docstring); when the peer is pinned below 3.3 only ``volume_id`` is
        sent.
        """
        msg_args = {'volume_id': volume.id,
                    'instance_uuid': instance_uuid,
                    'host_name': host_name,
                    'mountpoint': mountpoint,
                    'mode': mode,
                    'volume': volume}
        cctxt = self._get_cctxt(volume.service_topic_queue, ('3.3', '3.0'))
        # Pre-3.3 peers cannot receive the volume object (per the 3.3 entry
        # in the version history), so drop it and fall back to volume_id.
        if not cctxt.can_send_version('3.3'):
            msg_args.pop('volume')
        return cctxt.call(ctxt, 'attach_volume', **msg_args)
def detach_volume(self, ctxt, volume, attachment_id):
msg_args = {'volume_id': volume.id,
'attachment_id': attachment_id,
'volume': volume}
cctxt = self._get_cctxt(volume.service_topic_queue, ('3.4', '3.0'))
if not self.client.can_send_version('3.4'):
msg_args.pop('volume')
return cctxt.call(ctxt, 'detach_volume', **msg_args)
def copy_volume_to_image(self, ctxt, volume, image_meta):
cctxt = self._get_cctxt(volume.service_topic_queue)
cctxt.cast(ctxt, 'copy_volume_to_image', volume_id=volume['id'],
image_meta=image_meta)
def initialize_connection(self, ctxt, volume, connector):
cctxt = self._get_cctxt(volume.service_topic_queue)
return cctxt.call(ctxt, 'initialize_connection', connector=connector,
volume=volume)
def terminate_connection(self, ctxt, volume, connector, force=False):
cctxt = self._get_cctxt(volume.service_topic_queue)
return cctxt.call(ctxt, 'terminate_connection', volume_id=volume['id'],
connector=connector, force=force)
def remove_export(self, ctxt, volume):
cctxt = self._get_cctxt(volume.service_topic_queue)
cctxt.cast(ctxt, 'remove_export', volume_id=volume['id'])
def publish_service_capabilities(self, ctxt):
cctxt = self._get_cctxt(fanout=True)
cctxt.cast(ctxt, 'publish_service_capabilities')
def accept_transfer(self, ctxt, volume, new_user, new_project):
cctxt = self._get_cctxt(volume.service_topic_queue)
return cctxt.call(ctxt, 'accept_transfer', volume_id=volume['id'],
new_user=new_user, new_project=new_project)
def extend_volume(self, ctxt, volume, new_size, reservations):
cctxt = self._get_cctxt(volume.service_topic_queue)
cctxt.cast(ctxt, 'extend_volume', volume=volume, new_size=new_size,
reservations=reservations)
    def migrate_volume(self, ctxt, volume, dest_backend, force_host_copy):
        """Asynchronously migrate *volume* to *dest_backend*.

        Cluster support arrived in RPC 3.5 (see the version history); when
        the peer is pinned below that the ``cluster_name`` entry is stripped
        from the host descriptor before sending.
        """
        backend_p = {'host': dest_backend.host,
                     'cluster_name': dest_backend.cluster_name,
                     'capabilities': dest_backend.capabilities}
        version = '3.5'
        if not self.client.can_send_version(version):
            version = '3.0'
            # 3.0 peers do not understand cluster_name (added in 3.5).
            del backend_p['cluster_name']
        cctxt = self._get_cctxt(volume.service_topic_queue, version)
        cctxt.cast(ctxt, 'migrate_volume', volume=volume, host=backend_p,
                   force_host_copy=force_host_copy)
def migrate_volume_completion(self, ctxt, volume, new_volume, error):
cctxt = self._get_cctxt(volume.service_topic_queue)
return cctxt.call(ctxt, 'migrate_volume_completion', volume=volume,
new_volume=new_volume, error=error,)
    def retype(self, ctxt, volume, new_type_id, dest_backend,
               migration_policy='never', reservations=None,
               old_reservations=None):
        """Asynchronously change *volume*'s type, possibly migrating it.

        Mirrors migrate_volume's 3.5/3.0 version fallback: peers below 3.5
        cannot receive the ``cluster_name`` key (cluster support was added in
        RPC 3.5 per the version history).
        """
        backend_p = {'host': dest_backend.host,
                     'cluster_name': dest_backend.cluster_name,
                     'capabilities': dest_backend.capabilities}
        version = '3.5'
        if not self.client.can_send_version(version):
            version = '3.0'
            # 3.0 peers do not understand cluster_name (added in 3.5).
            del backend_p['cluster_name']
        cctxt = self._get_cctxt(volume.service_topic_queue, version)
        cctxt.cast(ctxt, 'retype', volume=volume, new_type_id=new_type_id,
                   host=backend_p, migration_policy=migration_policy,
                   reservations=reservations,
                   old_reservations=old_reservations)
def manage_existing(self, ctxt, volume, ref):
cctxt = self._get_cctxt(volume.service_topic_queue)
cctxt.cast(ctxt, 'manage_existing', ref=ref, volume=volume)
    def update_migrated_volume(self, ctxt, volume, new_volume,
                               original_volume_status):
        """Blocking call swapping model info after a migration.

        NOTE(review): this routes on ``new_volume['host']`` rather than a
        service_topic_queue like most sibling methods — confirm intended.
        """
        cctxt = self._get_cctxt(new_volume['host'])
        cctxt.call(ctxt, 'update_migrated_volume',
                   volume=volume,
                   new_volume=new_volume,
                   volume_status=original_volume_status)
def freeze_host(self, ctxt, service):
"""Set backend host to frozen."""
cctxt = self._get_cctxt(service.service_topic_queue)
return cctxt.call(ctxt, 'freeze_host')
def thaw_host(self, ctxt, service):
"""Clear the frozen setting on a backend host."""
cctxt = self._get_cctxt(service.service_topic_queue)
return cctxt.call(ctxt, 'thaw_host')
    def failover(self, ctxt, service, secondary_backend_id=None):
        """Failover host to the specified backend_id (secondary).

        RPC 3.8 made failover cluster aware and renamed the manager method
        (see the 3.8 entry in the version history); for older peers the
        legacy 'failover_host' method is invoked instead.
        """
        version = '3.8'
        method = 'failover'
        if not self.client.can_send_version(version):
            version = '3.0'
            method = 'failover_host'
        cctxt = self._get_cctxt(service.service_topic_queue, version)
        cctxt.cast(ctxt, method, secondary_backend_id=secondary_backend_id)
def failover_completed(self, ctxt, service, updates):
"""Complete failover on all services of the cluster."""
cctxt = self._get_cctxt(service.service_topic_queue, '3.8',
fanout=True)
cctxt.cast(ctxt, 'failover_completed', updates=updates)
def manage_existing_snapshot(self, ctxt, snapshot, ref, backend):
cctxt = self._get_cctxt(backend)
cctxt.cast(ctxt, 'manage_existing_snapshot',
snapshot=snapshot,
ref=ref)
def get_capabilities(self, ctxt, backend_id, discover):
cctxt = self._get_cctxt(backend_id)
return cctxt.call(ctxt, 'get_capabilities', discover=discover)
    def get_backup_device(self, ctxt, backup, volume):
        """Blocking call returning a BackupDeviceInfo for *backup*.

        RPC 3.2 added object support for this call (see the version
        history); older peers return a primitive dict that is converted
        locally.
        """
        cctxt = self._get_cctxt(volume.service_topic_queue, ('3.2', '3.0'))
        if cctxt.can_send_version('3.2'):
            # 3.2+ peers can serialize the object directly.
            backup_obj = cctxt.call(ctxt, 'get_backup_device', backup=backup,
                                    want_objects=True)
        else:
            # Legacy path: rebuild the object from the returned primitive.
            backup_dict = cctxt.call(ctxt, 'get_backup_device', backup=backup)
            backup_obj = objects.BackupDeviceInfo.from_primitive(backup_dict,
                                                                 ctxt)
        return backup_obj
def secure_file_operations_enabled(self, ctxt, volume):
cctxt = self._get_cctxt(volume.service_topic_queue)
return cctxt.call(ctxt, 'secure_file_operations_enabled',
volume=volume)
def get_manageable_volumes(self, ctxt, service, marker, limit, offset,
sort_keys, sort_dirs):
version = ('3.10', '3.0')
cctxt = self._get_cctxt(service.service_topic_queue, version=version)
msg_args = {'marker': marker,
'limit': limit,
'offset': offset,
'sort_keys': sort_keys,
'sort_dirs': sort_dirs,
}
if cctxt.can_send_version('3.10'):
msg_args['want_objects'] = True
return cctxt.call(ctxt, 'get_manageable_volumes', **msg_args)
def get_manageable_snapshots(self, ctxt, service, marker, limit, offset,
sort_keys, sort_dirs):
version = ('3.10', '3.0')
cctxt = self._get_cctxt(service.service_topic_queue, version=version)
msg_args = {'marker': marker,
'limit': limit,
'offset': offset,
'sort_keys': sort_keys,
'sort_dirs': sort_dirs,
}
if cctxt.can_send_version('3.10'):
msg_args['want_objects'] = True
return cctxt.call(ctxt, 'get_manageable_snapshots', **msg_args)
def create_group(self, ctxt, group):
cctxt = self._get_cctxt(group.service_topic_queue)
cctxt.cast(ctxt, 'create_group', group=group)
def delete_group(self, ctxt, group):
cctxt = self._get_cctxt(group.service_topic_queue)
cctxt.cast(ctxt, 'delete_group', group=group)
def update_group(self, ctxt, group, add_volumes=None, remove_volumes=None):
cctxt = self._get_cctxt(group.service_topic_queue)
cctxt.cast(ctxt, 'update_group', group=group, add_volumes=add_volumes,
remove_volumes=remove_volumes)
def create_group_from_src(self, ctxt, group, group_snapshot=None,
source_group=None):
cctxt = self._get_cctxt(group.service_topic_queue)
cctxt.cast(ctxt, 'create_group_from_src', group=group,
group_snapshot=group_snapshot, source_group=source_group)
def create_group_snapshot(self, ctxt, group_snapshot):
cctxt = self._get_cctxt(group_snapshot.service_topic_queue)
cctxt.cast(ctxt, 'create_group_snapshot',
group_snapshot=group_snapshot)
def delete_group_snapshot(self, ctxt, group_snapshot):
cctxt = self._get_cctxt(group_snapshot.service_topic_queue)
cctxt.cast(ctxt, 'delete_group_snapshot',
group_snapshot=group_snapshot)
@rpc.assert_min_rpc_version('3.13')
def initialize_connection_snapshot(self, ctxt, snapshot, connector):
cctxt = self._get_cctxt(snapshot.service_topic_queue, version='3.13')
return cctxt.call(ctxt, 'initialize_connection_snapshot',
snapshot_id=snapshot.id,
connector=connector)
@rpc.assert_min_rpc_version('3.13')
def terminate_connection_snapshot(self, ctxt, snapshot, connector,
force=False):
cctxt = self._get_cctxt(snapshot.service_topic_queue, version='3.13')
return cctxt.call(ctxt, 'terminate_connection_snapshot',
snapshot_id=snapshot.id,
connector=connector, force=force)
@rpc.assert_min_rpc_version('3.13')
def remove_export_snapshot(self, ctxt, snapshot):
cctxt = self._get_cctxt(snapshot.service_topic_queue, version='3.13')
cctxt.cast(ctxt, 'remove_export_snapshot', snapshot_id=snapshot.id)
@rpc.assert_min_rpc_version('3.9')
def attachment_update(self, ctxt, vref, connector, attachment_id):
version = self._compat_ver('3.9')
cctxt = self._get_cctxt(vref.host, version=version)
return cctxt.call(ctxt,
'attachment_update',
vref=vref,
connector=connector,
attachment_id=attachment_id)
@rpc.assert_min_rpc_version('3.9')
def attachment_delete(self, ctxt, attachment_id, vref):
version = self._compat_ver('3.9')
cctxt = self._get_cctxt(vref.host, version=version)
return cctxt.call(ctxt,
'attachment_delete',
attachment_id=attachment_id,
vref=vref)
    @rpc.assert_min_rpc_version('3.7')
    def do_cleanup(self, ctxt, cleanup_request):
        """Perform this service/cluster resource cleanup as requested.

        Added in RPC 3.7 to move cleanups previously done in init_host (see
        the version history above).
        """
        destination = cleanup_request.service_topic_queue
        cctxt = self._get_cctxt(destination, '3.7')
        # NOTE(geguileo): This call goes to do_cleanup code in
        # cinder.manager.CleanableManager unless in the future we overwrite it
        # in cinder.volume.manager
        cctxt.cast(ctxt, 'do_cleanup', cleanup_request=cleanup_request)
@rpc.assert_min_rpc_version('3.12')
def set_log_levels(self, context, service, log_request):
cctxt = self._get_cctxt(host=service.host, version='3.12')
cctxt.cast(context, 'set_log_levels', log_request=log_request)
@rpc.assert_min_rpc_version('3.12')
def get_log_levels(self, context, service, log_request):
cctxt = self._get_cctxt(host=service.host, version='3.12')
return cctxt.call(context, 'get_log_levels', log_request=log_request)
@rpc.assert_min_rpc_version('3.14')
def enable_replication(self, ctxt, group):
cctxt = self._get_cctxt(group.host, version='3.14')
cctxt.cast(ctxt, 'enable_replication',
group=group)
@rpc.assert_min_rpc_version('3.14')
def disable_replication(self, ctxt, group):
cctxt = self._get_cctxt(group.host, version='3.14')
cctxt.cast(ctxt, 'disable_replication',
group=group)
@rpc.assert_min_rpc_version('3.14')
def failover_replication(self, ctxt, group, allow_attached_volume=False,
secondary_backend_id=None):
cctxt = self._get_cctxt(group.host, version='3.14')
cctxt.cast(ctxt, 'failover_replication',
group=group, allow_attached_volume=allow_attached_volume,
secondary_backend_id=secondary_backend_id)
@rpc.assert_min_rpc_version('3.14')
def list_replication_targets(self, ctxt, group):
cctxt = self._get_cctxt(group.host, version='3.14')
return cctxt.call(ctxt, 'list_replication_targets',
group=group)
| |
'''
Support classes to handle Actifio Object endpoints.
'''
import sys
# Import the exception classes; the module path differs between the flat
# Python 2 layout and the Python 3 package layout.
# FIX: use sys.version_info instead of fragile string slicing of sys.version
# (the original "2.7" prefix compare excluded other 2.x releases).
if sys.version_info[0] == 2:
    from actexceptions import *
else:
    from Actifio.actexceptions import *
############# Base Class for all ################
# Force new-style classes under Python 2.
__metaclass__ = type
class ActObject():
    """Base class shared by every Actifio endpoint object type.

    Wraps one raw result row returned by the appliance, exposing its fields
    both as attributes and via ``get()``.
    """
    def __init__(self, appliance, objectData, printable, uniqueid):
        """Store the raw payload plus identifying metadata.

        appliance: Actifio object which generated this object
        objectData: raw data row backing this object
        printable: human-readable display string
        uniqueid: unique identifier for this object
        """
        self.appliance = appliance
        self.objectdata = objectData
        self.printable = printable
        self.id = uniqueid
    def __str__(self):
        """Display string supplied at construction time."""
        return self.printable
    def __getattr__(self, name):
        """Fall back to the raw payload for unknown attributes (None if absent)."""
        return self.objectdata.get(name)
    def get(self, parameter):
        """Dictionary-style accessor into the raw payload (None if absent)."""
        return self.objectdata.get(parameter)
class ActObjCollection():
    """Base container for collections of Actifio objects.

    Supports len(), indexing and iteration; each access wraps the raw row in
    the configured return type.
    """
    def __init__(self, objecttype, retobject, appliance, objectData):
        """Record the element type, factory, owning appliance and raw rows.

        objecttype: descriptive string for the contained object kind
        retobject: type returned by __getitem__ and iteration
        appliance: actifio object which generated this collection
        objectData: raw data rows for the contained objects
        """
        self.objtype = objecttype
        self.returnobj = retobject
        self.appliance = appliance
        self.objectdata = objectData
    def __str__(self):
        return "Collection of " + str(len(self)) + " " + self.objtype + "."
    def __len__(self):
        return len(self.objectdata)
    def __getitem__(self, _index):
        # Wrap the raw row lazily on each access.
        return self.returnobj(self.appliance, self.objectdata[_index])
    def __iter__(self):
        return self.ActObjIterator(self)
    class ActObjIterator:
        """Iterator yielding wrapped objects from an ActObjCollection."""
        def __init__(self, ObjectCollection):
            self.objCollection = ObjectCollection
            self._index = 0
        def __iter__(self):
            return self
        def __next__(self):
            coll = self.objCollection
            if self._index >= len(coll):
                raise StopIteration
            item = coll.returnobj(coll.appliance, coll.objectdata[self._index])
            self._index += 1
            return item
        # python2.7 support
        next = __next__
############# Restoreoptions Related ###############
class ActRestoreoption(ActObject):
    """Single restore option row from 'lsrestoreoptions'."""
    def __init__(self, appliance, restoptiondata):
        """Use the option's name both as display string and unique id."""
        name = restoptiondata['name']
        super(ActRestoreoption, self).__init__(appliance, restoptiondata, name, name)
class ActRestoreoptionCollection(ActObjCollection):
    '''
    Iterable collection of restore options.
    '''
    def __init__(self, appliance, lsrestoreoptionsdata):
        """Wrap raw lsrestoreoptions rows as ActRestoreoption objects."""
        super(ActRestoreoptionCollection, self).__init__(
            "restoreoptions", ActRestoreoption, appliance, lsrestoreoptionsdata)
############ Provisining Options Related ##########
class ActProvisiningption(ActObject):
    """Single provisioning option row from 'lsappclass'."""
    def __init__(self, appliance, provoptiondata):
        """Use the option's name both as display string and unique id."""
        name = provoptiondata['name']
        super(ActProvisiningption, self).__init__(appliance, provoptiondata, name, name)
class ActProvisiningptionCollection(ActObjCollection):
    '''
    Iterable class of collection of provisioning options.
    '''
    def __init__(self, appliance, lsappclassdata):
        # BUG FIX: the original called super(ActRestoreoptionCollection, self),
        # which raises TypeError because self is not an instance of that
        # (sibling) class — super(type, obj) requires isinstance(obj, type).
        super(ActProvisiningptionCollection, self).__init__(
            "provisionoptions", ActProvisiningption, appliance, lsappclassdata)
############## Hosts Related ######################
class ActHost(ActObject):
    """A single host known to the appliance."""
    def __init__(self, applaince, hostdata):
        """Display the hostname; key the object on the host id."""
        super(ActHost, self).__init__(applaince, hostdata, hostdata['hostname'], hostdata['id'])
    def details(self):
        """Replace objectdata with the full 'lshost <id>' record."""
        host_details = self.appliance.run_uds_command("info", "lshost", {"argument" : self.id})
        self.objectdata = host_details['result']
class ActHostCollection(ActObjCollection):
    """Iterable collection of hosts."""
    def __init__(self, appliance, lshostdata):
        """Wrap raw lshost rows as ActHost objects."""
        super(ActHostCollection, self).__init__("hosts", ActHost, appliance, lshostdata)
############# Applications Related ###############
class ActApplication(ActObject):
    """A single application protected by the appliance."""
    def __init__(self, applaince, appdata):
        """Display the application name; key the object on its id."""
        super(ActApplication, self).__init__(applaince, appdata, appdata['appname'], appdata['id'])
    def details(self):
        """Replace objectdata with the full 'lsapplication <id>' record."""
        app_details = self.appliance.run_uds_command("info", "lsapplication", {"argument" : self.id})
        self.objectdata = app_details['result']
    def provisioningoptions(self):
        """
        Retrieve provisioning options for this application's appclass.
        Args:
          None:
        Returns:
          Returns a ActProvisiningptionCollection object with the relevant provisioning options for this appclass.
        """
        provops_capabilities = self.appliance.run_uds_command ('info', 'lsappclass', {'name': self.appclass})
        # BUG FIX: the original returned ActRestoreoptionCollection(self, ...),
        # wrapping provisioning rows in the wrong collection type and passing
        # this application object where the appliance is expected.
        return ActProvisiningptionCollection(self.appliance, provops_capabilities['result'])
class ActAppCollection(ActObjCollection):
    """Iterable collection of applications."""
    def __init__(self, appliance, lsapplicationdata):
        """Wrap raw lsapplication rows as ActApplication objects."""
        super(ActAppCollection, self).__init__("applications", ActApplication, appliance, lsapplicationdata)
############# Image Related ######################
class ActImage(ActObject):
    """A single backup image held by the appliance."""
    def __init__(self, applaince, imgdata):
        """Display the backup name; key the object on its id."""
        super(ActImage, self).__init__(applaince, imgdata, imgdata['backupname'], imgdata['id'])
    def details(self):
        """
        Fetch further details of the backups image.
        Args:
          None
        Returns:
          None
        """
        image_details = self.appliance.run_uds_command("info", "lsbackup", {"argument" : self.id})
        self.objectdata = image_details['result']
    def restoreoptions(self, action, targethost):
        """
        Retrieve restore options for a ActImage for mount / clone / restore operations
        Args:
          :action (required): operation [ mount, restore , clone ]
          :targethost (required): Host ID of the targethost, ActHost type
        Returns:
          Returns a ActRestoreoptionCollection object with the relevant restore options for this image, for the specified action.
        """
        if not isinstance(targethost, ActHost):
            raise ActUserError("'targethost' needs to be ActHost type")
        if action not in ['mount', 'clone', 'restore']:
            raise ActUserError("Allowed values for 'action' are mount, clone and restore")
        # BUG FIX: the original hard-coded 'action': 'mount', silently
        # ignoring the validated *action* argument.
        restoreops_capabilities = self.appliance.run_uds_command ('info', 'lsrestoreoptions', {'applicationtype': self.apptype, 'action': action, 'targethost': targethost.id })
        return ActRestoreoptionCollection(self.appliance, restoreops_capabilities['result'])
    def provisioningoptions(self):
        """
        Retrieve provisioning options for this image's appclass.
        Args:
          None:
        Returns:
          Returns a ActProvisiningptionCollection object with the relevant provisioning options for this appclass.
        """
        self.details()
        provops_capabilities = self.appliance.run_uds_command ('info', 'lsappclass', {'name': self.appclass})
        # BUG FIX: as in ActApplication, the original returned the wrong
        # collection type and passed the image object as the appliance.
        return ActProvisiningptionCollection(self.appliance, provops_capabilities['result'])
class ActImageCollection(ActObjCollection):
    """Iterable collection of backup images."""
    def __init__(self, appliance, lsbackupdata):
        """Wrap raw lsbackup rows as ActImage objects."""
        super(ActImageCollection, self).__init__("images", ActImage, appliance, lsbackupdata)
############## Jobs Related ######################
class ActJob(ActObject):
    """A single appliance job, keyed on job id and displayed by job name."""
    def __init__(self, applaince, jobdata):
        super(ActJob, self).__init__(applaince, jobdata, jobdata['jobname'], jobdata['id'])
    def refresh(self):
        """
        Method to refresh the job details.

        Polls 'lsjob' while the job is running/waiting; once it disappears
        from the active list, falls back to 'lsjobhistory'. Re-initialises
        this object from the first matching row.

        NOTE(review): the bare ``except: pass`` around the first lsjob call
        means *this_job* may be unbound (NameError) if that call raises on
        the very first iteration — confirm and consider narrowing.
        Args:
          None
        Returns:
          None
        """
        from time import sleep
        if self.status == 'running' or self.status == 'waiting':
            while 1:
                try:
                    this_job = self.appliance.run_uds_command('info', 'lsjob', {'filtervalue' : {'jobname': str(self)}})
                except:
                    # NOTE(review): silently ignores lookup failures; see note above.
                    pass
                if len(this_job['result']) == 0:
                    # Job no longer active; give it a second to land in history.
                    sleep (1)
                    try:
                        this_job = self.appliance.run_uds_command('info', 'lsjobhistory', {'filtervalue' : {'jobname': str(self)}})
                    except:
                        raise
                    if len(this_job['result']) > 0:
                        break
                else:
                    break
            # Rebuild this object from the freshest row found.
            self.__init__(self.appliance, this_job['result'][0])
class ActJobsCollection(ActObjCollection):
    '''
    Iterable collection of jobs.
    '''
    def __init__(self, appliance, lsjobsalldata):
        """Wrap raw job rows as ActJob objects."""
        super(ActJobsCollection, self).__init__("jobs", ActJob, appliance, lsjobsalldata)
    def refresh(self):
        """
        Refresh the details of every job in the collection, in place.
        """
        for each_job in self:
            each_job.refresh()
| |
# -*- coding: utf-8 -*-
'''
Profiles Module
===============
Manage locally installed configuration profiles (.mobileconfig)
:maintainer: Mosen <mosen@github.com>
:maturity: new
:depends: objc
:platform: darwin
'''
import base64
import binascii
import hashlib
import logging
import os
import plistlib
import pprint
import re
import tempfile
import uuid
import salt.exceptions
import salt.utils
import salt.utils.platform
import six
# Module-level logger for this execution module.
log = logging.getLogger(__name__)
# Name under which this module's functions are exposed (e.g. profile.items).
__virtualname__ = 'profile'
def __virtual__():
    '''
    Only load this module on macOS, where /usr/bin/profiles exists.
    '''
    if not salt.utils.platform.is_darwin():
        return (False, 'module.profile only available on macOS.')
    return __virtualname__
def _content_to_uuid(payload):
    '''
    Generate a deterministic UUID-shaped string from the payload content.

    The payload is serialised to plist XML, MD5-hashed, and the hex digest
    regrouped as 8-4-4-4-12.

    :param payload: payload dict to hash
    :return: UUID-style string derived from the content
    '''
    log.debug('Attempting to Hash {}'.format(payload))
    if six.PY3:
        serialized = plistlib.dumps(payload)
    else:
        serialized = plistlib.writePlistToString(payload)
    digest_hex = binascii.hexlify(hashlib.md5(serialized).digest())
    grouped = re.sub(
        b'([0-9a-f]{8})([0-9a-f]{4})([0-9a-f]{4})([0-9a-f]{4})([0-9a-f]{12})',
        b'\\1-\\2-\\3-\\4-\\5',
        digest_hex)
    return grouped.decode()
def _add_activedirectory_keys(payload):
'''
As per dayglojesus/managedmac, an excerpt from mobileconfig.rb:199
The Advanced Active Directory profile contains flag keys which inform
the installation process which configuration keys should actually be
activated.
http://support.apple.com/kb/HT5981?viewlocale=en_US&locale=en_US
For example, if we wanted to change the default shell for AD accounts, we
would actually need to define two keys: a configuration key and a flag key.
<key>ADDefaultUserShell</key>
<string>/bin/zsh</string>
<key>ADDefaultUserShellFlag</key>
<true/>
If you fail to specify this second key (the activation or "flag" key), the
configuration key will be ignored when the mobileconfig is processed.
To avoid having to activate and deactivate the configuration keys, we
pre-process the content array by overriding the transform_content method
and shoehorn these flag keys into place dynamically, as required.
:param payload:
:return:
'''
needs_flag = ['ADAllowMultiDomainAuth',
'ADCreateMobileAccountAtLogin',
'ADDefaultUserShell',
'ADDomainAdminGroupList',
'ADForceHomeLocal',
'ADNamespace',
'ADPacketEncrypt',
'ADPacketSign',
'ADPreferredDCServer',
'ADRestrictDDNS',
'ADTrustChangePassIntervalDays',
'ADUseWindowsUNCPath',
'ADWarnUserBeforeCreatingMA',
'ADMapUIDAttribute',
'ADMapGIDAttribute',
'ADMapGGIDAttribute']
for k in payload.keys():
if k in needs_flag:
payload[str(k) + 'Flag'] = True
# NOTE(review): this function is redefined verbatim below; this first copy is
# dead code (the later definition shadows it) — one of the two should be
# removed.
def _check_top_level_key(old, new):
    '''
    Compare top-level metadata keys of the installed profile (*old*) against
    the candidate profile (*new*).

    Returns a dict with 'differ' (bool) plus 'old_kv'/'new_kv' maps holding
    any mismatching key/value pairs.
    '''
    try:
        log.debug('Checking top level key for profile "{}"'.format(
            new['PayloadIdentifier']))
    except KeyError as e:
        log.warning(e)
        pass
    ret = {
        'differ': False,
        'old_kv': {},
        'new_kv': {}
    }
    # Only these top-level keys participate in the comparison.
    keys_to_check = [
        'PayloadDescription',
        'PayloadDisplayName',
        'PayloadIdentifier',
        'PayloadOrganization',
        'PayloadRemovalDisallowed'
    ]
    for key, value in new.items():
        log.trace('Checking top level key {}'.format(key))
        if not key in keys_to_check:
            log.trace('key {} not in our list of keys to validate'.format(key))
            continue
        # Normalise plist string booleans before comparing.
        if value == 'true':
            value = True
        if value == 'false':
            value = False
        try:
            # Installed profiles report these keys with a 'Profile' prefix.
            old_value = old[key.replace("Payload","Profile")]
            if old_value == 'true':
                old_value = True
            if old_value == 'false':
                old_value = False
        except KeyError as e:
            log.debug('_check_top_level_key: Caught KeyError on {} trying to replace.'.format(e))
            continue
        if value != old_value:
            log.debug('Found difference in profile Key {}'.format(key))
            ret['differ'] = True
            new_goods = {key: value}
            old_goods = {key: old_value}
            ret['old_kv'].update(old_goods)
            ret['new_kv'].update(new_goods)
    log.trace('will return from profile: _check_top_level_key: {}'.format(ret))
    return ret
def _check_top_level_key(old, new):
    '''
    Compare top-level metadata keys of the installed profile (*old*) against
    the candidate profile (*new*).

    Returns a dict with 'differ' (bool) plus 'old_kv'/'new_kv' maps holding
    any mismatching key/value pairs.
    '''
    try:
        log.debug('Checking top level key for profile "{}"'.format(
            new['PayloadIdentifier']))
    except KeyError as e:
        log.warning(e)
    result = {
        'differ': False,
        'old_kv': {},
        'new_kv': {}
    }
    # Only these top-level keys participate in the comparison.
    keys_to_check = (
        'PayloadDescription',
        'PayloadDisplayName',
        'PayloadIdentifier',
        'PayloadOrganization',
        'PayloadRemovalDisallowed',
    )
    for key, value in new.items():
        log.trace('Checking top level key {}'.format(key))
        if key not in keys_to_check:
            log.trace('key {} not in our list of keys to validate'.format(key))
            continue
        # Normalise plist string booleans before comparing.
        if value == 'true':
            value = True
        elif value == 'false':
            value = False
        try:
            # Installed profiles report these keys with a 'Profile' prefix.
            old_value = old[key.replace("Payload","Profile")]
        except KeyError as e:
            log.debug('_check_top_level_key: Caught KeyError on {} trying to replace.'.format(e))
            continue
        if old_value == 'true':
            old_value = True
        elif old_value == 'false':
            old_value = False
        if value != old_value:
            log.debug('Found difference in profile Key {}'.format(key))
            result['differ'] = True
            result['old_kv'][key] = old_value
            result['new_kv'][key] = value
    log.trace('will return from profile: _check_top_level_key: {}'.format(result))
    return result
def _transform_payload(payload, identifier):
    '''
    Transform a payload by:
    - Calculating the UUID based upon a hash of the content.
    - Adding common keys required for every payload.
    - Adding required flags for the active directory payload
    :param payload: payload dict, mutated in place and returned
    :param identifier: profile identifier used to namespace generated ids
    :return: the transformed payload
    '''
    # The hash must be computed without any pre-existing UUID, otherwise it
    # would never be stable across runs.
    if 'PayloadUUID' in payload:
        log.debug('Found PayloadUUID in Payload removing')
        del payload['PayloadUUID']
    hashed_uuid = _content_to_uuid(payload)
    log.debug('hashed_uuid = {}'.format(hashed_uuid))
    # Always true after the deletion above; the original's guard was dead code.
    payload['PayloadUUID'] = hashed_uuid
    if 'PayloadIdentifier' not in payload:
        # No identifier supplied for the payload, so we generate one.
        # FIX: log only when we actually generate (the original logged
        # unconditionally, which was misleading).
        log.debug('Generating PayloadIdentifier')
        payload['PayloadIdentifier'] = "{0}.{1}".format(identifier, hashed_uuid)
    payload['PayloadEnabled'] = True
    payload['PayloadVersion'] = 1
    # FIX: the original wrapped this in 'except Exception: pass', which also
    # swallowed real errors raised inside _add_activedirectory_keys (e.g. the
    # Python 3 dict-mutation RuntimeError), silently disabling the AD flags.
    # Only the missing-key case should be tolerated.
    if payload.get('PayloadType') == 'com.apple.DirectoryService.managed':
        _add_activedirectory_keys(payload)
    return payload
def _transform_content(content, identifier):
    '''
    As dayglojesus/managedmac notes:
    PayloadUUID for each Payload is modified MD5sum of the payload itself, minus some keys.
    We can use this to check whether or not the content has been modified. Even when the attributes cannot
    be compared (as with passwords, which are omitted).

    :param content: list of payload dicts (may be empty/None)
    :param identifier: profile identifier passed through to each payload
    :return: list of transformed payloads
    '''
    if not content:
        log.debug('module.profile - Found empty content')
        return list()
    log.debug('module.profile - Found GOOD content')
    log.debug('{} {}'.format(content, identifier))
    transformed = []
    for single_payload in content:
        log.debug('module.profile - trying to transform {}'.format(single_payload))
        transformed.append(_transform_payload(single_payload, identifier))
    return transformed
def validate(identifier, profile_dict):
    '''will compare the installed identifier if one and get the uuid of the
    payload content and compare against that.

    Returns a dict with 'installed', 'changed', 'old_payload' and
    'new_payload' keys describing how the candidate profile relates to what
    is currently installed.

    NOTE(review): if *profile_dict* has no 'PayloadContent' key, the KeyError
    is swallowed and the later loop over new_prof_data_payload_con raises
    NameError — confirm whether that input is possible.
    '''
    ret = {'installed': False,
           'changed': False,
           'old_payload': [],
           'new_payload': []
           }
    # Parse the candidate profile with the py2/py3-appropriate plistlib API.
    if six.PY3:
        new_prof_data = plistlib.loads(profile_dict)
    else:
        new_prof_data = plistlib.readPlistFromString(profile_dict)
    try:
        new_prof_data_payload_con = new_prof_data['PayloadContent']
        ret['new_payload'] = new_prof_data_payload_con
    except KeyError:
        pass
    # Collect the content-derived UUIDs from the candidate profile.
    new_uuids = []
    for item in new_prof_data_payload_con:
        try:
            new_uuids.append(item['PayloadUUID'])
        except KeyError:
            pass
    current_items = __salt__['profile.item_keys'](identifier)
    if not current_items:
        log.debug('Could not find any item keys for {}'.format(identifier))
        ret['old_payload'] = 'Not installed'
        return ret
    try:
        current_profile_items = current_items['ProfileItems']
        ret['old_payload'] = current_profile_items
    except KeyError:
        log.debug('Failed to get ProfileItems from installed Profile')
        return ret
    # Collect the UUIDs of the currently installed payloads.
    installed_uuids = []
    for item in current_profile_items:
        try:
            installed_uuids.append(item['PayloadUUID'])
        except KeyError:
            pass
    log.debug('Found installed uuids {}'.format(installed_uuids))
    log.debug('Requested install UUIDs are {}'.format(new_uuids))
    # Any candidate UUID missing from the installed set means the content
    # changed (UUIDs are content hashes, see _content_to_uuid).
    for uuid in new_uuids:
        log.debug('Checking UUID "{}" to is if its installed'.format(uuid))
        if uuid not in installed_uuids:
            ret['changed'] = True
            return ret
        log.debug('Profile UUID of {} appears to be installed'.format(uuid))
    # check the top keys to see if they differ.
    top_keys = _check_top_level_key(current_items, new_prof_data)
    if top_keys['differ']:
        log.debug('Top Level Keys differ.')
        ret['installed'] = False
        ret['old_payload'] = top_keys['old_kv']
        ret['new_payload'] = top_keys['new_kv']
        return ret
    # profile should be correctly installed.
    ret['installed'] = True
    return ret
def items():
    '''
    Retrieve all profiles in full
    CLI Example:
    .. code-block:: bash
        salt '*' profiles.items
    '''
    tmpdir = tempfile.mkdtemp('.profiles')
    tmpfile = os.path.join(tmpdir, 'profiles.plist')
    try:
        status = __salt__['cmd.retcode']('/usr/bin/profiles -P -o {}'.format(tmpfile))
        if not status == 0:
            raise salt.exceptions.CommandExecutionError(
                'Failed to read profiles or write to temporary file'
            )
        # FIX: parse with the same py2/py3 split used elsewhere in this
        # module — plistlib.readPlist was removed in Python 3.9.
        if six.PY3:
            with open(tmpfile, 'rb') as handle:
                profiles = plistlib.load(handle)
        else:
            profiles = plistlib.readPlist(tmpfile)
    finally:
        # FIX: always clean up the temp file/dir; the original leaked both
        # whenever the command or the parse failed.
        if os.path.exists(tmpfile):
            os.unlink(tmpfile)
        os.rmdir(tmpdir)
    return profiles
def exists(identifier):
    '''
    Determine whether a profile with the given identifier is installed.
    Returns True or False
    CLI Example:
    .. code-block:: bash
        salt '*' profiles.installed com.apple.mdm.hostname.local.ABCDEF
    '''
    profiles = __salt__['profile.items']()
    return any(
        payload['ProfileIdentifier'] == identifier
        for payload_content in profiles.values()
        for payload in payload_content
    )
def generate(identifier, profile_uuid=None, **kwargs):
    '''
    Generate a configuration profile.
    Intended to be used by other execution and state modules to prepare a profile for installation.
    Not really intended for CLI usage.
    As per the documentation, only the identifier and uuid are actually compulsory keys. It is possible to make
    a profile without anything else, however the profile will be downright useless.
    identifier
        The profile identifier, which is the primary key for identifying whether a profile is installed.
    profile_uuid
        Normally you would leave this blank, and the module will generate a UUID for you. However, if you specifically
        need to test with a fixed uuid, this can be set.
    Keyword arguments:
    description
        Description of the profile
    displayname
        The name of the profile shown to the user
    organization
        The organization issuing the profile
    content
        The payload content for the profile, as a hash
    removaldisallowed : False
        Whether removal of the profile will be allowed
    scope : System
        The scope of items to install, the default is system wide but may also be user.
        Note that only the System scope really makes sense in salt.
    removaldate
        The date on which the profile will be automatically removed.
    durationuntilremoval
        The number of seconds until profile is automatically removed, the smaller of this and removaldate will be
        used.
    consenttext : { "default": "message" }
        The warning/disclaimer shown when installing the profile interactively.
    '''
    if not profile_uuid:
        profile_uuid = uuid.uuid4()
    log.debug("Creating new profile with UUID: {}".format(str(profile_uuid)))
    VALID_PROPERTIES = ['description', 'displayname', 'organization', 'content', 'removaldisallowed', 'scope',
                        'removaldate', 'durationuntilremoval', 'consenttext']
    log.debug('Looping through kwargs')
    validkwargs = {k: v for k, v in kwargs.items() if k in VALID_PROPERTIES}
    document = {'PayloadScope': 'System', 'PayloadUUID': str(profile_uuid), 'PayloadVersion': 1,
                'PayloadType': 'Configuration', 'PayloadIdentifier': identifier}
    for k, v in validkwargs.items():
        # Defensive skip of salt requisite keys (normally filtered out by
        # VALID_PROPERTIES already).
        if k in ('__id__', 'fun', 'state', '__env__', '__sls__', 'order', 'watch', 'watch_in', 'require',
                 'require_in', 'prereq', 'prereq_in'):
            pass
        elif k == 'content':
            # As per managedmac for puppet, it's necessary to generate UUIDs for each payload based upon the content
            # in order to detect changes to the payload.
            # Transform a dict of { type: data } to { PayloadContent: data, }
            payload_content = _transform_content(kwargs['content'], identifier)
            document['PayloadContent'] = payload_content
        elif k == 'description':
            document['PayloadDescription'] = v
        elif k == 'displayname':
            document['PayloadDisplayName'] = v
        elif k == 'organization':
            document['PayloadOrganization'] = v
        elif k == 'removaldisallowed':
            document['PayloadRemovalDisallowed'] = (v is True)
        # BUG FIX: the following four kwargs were documented and accepted into
        # VALID_PROPERTIES but never applied to the document, so callers'
        # values were silently dropped.
        elif k == 'scope':
            document['PayloadScope'] = v
        elif k == 'removaldate':
            document['RemovalDate'] = v
        elif k == 'durationuntilremoval':
            document['DurationUntilRemoval'] = v
        elif k == 'consenttext':
            document['ConsentText'] = v
    if six.PY3:
        plist_content = plistlib.dumps(document)
    else:
        plist_content = plistlib.writePlistToString(document)
    return plist_content
def install(path):
    '''
    Install a configuration profile.

    path
        Full path to the configuration profile to install
    '''
    # ``profiles -I -F`` installs a profile from a file; a non-zero exit
    # status means macOS rejected it.
    retcode = __salt__['cmd.retcode']('/usr/bin/profiles -I -F {}'.format(path))
    if retcode != 0:
        raise salt.exceptions.CommandExecutionError(
            'Failed to install profile at path: {}'.format(path)
        )
    return True
def remove(identifier):
    '''
    Remove a configuration profile by its profile identifier

    identifier
        The ProfileIdentifier
    '''
    # ``profiles -R -p`` removes the profile whose identifier matches.
    retcode = __salt__['cmd.retcode']('/usr/bin/profiles -R -p {}'.format(identifier))
    if retcode != 0:
        raise salt.exceptions.CommandExecutionError(
            'Failed to remove profile with identifier: {}'.format(identifier)
        )
    return True
def item_keys(identifier):
    '''
    List all of the keys for an identifier and their values

    identifier
        The ProfileIdentifier

    CLI Example:

    .. code-block:: bash

        salt '*' profiles.item_keys com.apple.mdm.hostname.local.ABCDEF
    '''
    # Scan every installed payload in every domain for a matching
    # ProfileIdentifier; the first match wins.
    for domain, payloads in items().items():
        for payload in payloads:
            if payload['ProfileIdentifier'] == identifier:
                return payload
    log.warning('Profile identifier "{}" not found'.format(identifier))
    return False
| |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Monkeypatch IMapIterator so that Ctrl-C can kill everything properly.
# Derived from https://gist.github.com/aljungberg/626518
import multiprocessing.pool
from multiprocessing.pool import IMapIterator
def wrapper(func):
  """Wrap an IMapIterator fetch method so its blocking wait is interruptible.

  A wait with no timeout blocks in C and cannot be interrupted by Ctrl-C;
  substituting an effectively-infinite timeout keeps the wait responsive to
  KeyboardInterrupt.
  """
  def wrap(self, timeout=None):
    return func(self, timeout=timeout or 1e100)
  return wrap
# Patch both the Python 2 (.next) and Python 3 (__next__) iterator hooks.
IMapIterator.next = wrapper(IMapIterator.next)
IMapIterator.__next__ = IMapIterator.next
# TODO(iannucci): Monkeypatch all other 'wait' methods too.
import binascii
import collections
import contextlib
import functools
import logging
import os
import re
import signal
import sys
import tempfile
import textwrap
import threading
import subprocess2
ROOT = os.path.abspath(os.path.dirname(__file__))
# Windows checkouts invoke git through the depot_tools git.bat shim.
GIT_EXE = ROOT+'\\git.bat' if sys.platform.startswith('win') else 'git'
TEST_MODE = False

# Commit-message marker used by freeze()/thaw().
FREEZE = 'FREEZE'
# Maps a freeze commit's suffix to the `git reset` mode that undoes it.
FREEZE_SECTIONS = {
  'indexed': 'soft',
  'unindexed': 'mixed'
}
FREEZE_MATCHER = re.compile(r'%s.(%s)' % (FREEZE, '|'.join(FREEZE_SECTIONS)))

# Retry a git operation if git returns a error response with any of these
# messages. It's all observed 'bad' GoB responses so far.
#
# This list is inspired/derived from the one in ChromiumOS's Chromite:
# <CHROMITE>/lib/git.py::GIT_TRANSIENT_ERRORS
#
# It was last imported from '7add3ac29564d98ac35ce426bc295e743e7c0c02'.
GIT_TRANSIENT_ERRORS = (
    # crbug.com/285832
    r'!.*\[remote rejected\].*\(error in hook\)',

    # crbug.com/289932
    r'!.*\[remote rejected\].*\(failed to lock\)',

    # crbug.com/307156
    r'!.*\[remote rejected\].*\(error in Gerrit backend\)',

    # crbug.com/285832
    r'remote error: Internal Server Error',

    # crbug.com/294449
    r'fatal: Couldn\'t find remote ref ',

    # crbug.com/220543
    r'git fetch_pack: expected ACK/NAK, got',

    # crbug.com/189455
    r'protocol error: bad pack header',

    # crbug.com/202807
    r'The remote end hung up unexpectedly',

    # crbug.com/298189
    r'TLS packet with unexpected length was received',

    # crbug.com/187444
    r'RPC failed; result=\d+, HTTP code = \d+',

    # crbug.com/388876
    r'Connection timed out',

    # crbug.com/430343
    # TODO(dnj): Resync with Chromite.
    r'The requested URL returned error: 5\d+',
)
GIT_TRANSIENT_ERRORS_RE = re.compile('|'.join(GIT_TRANSIENT_ERRORS),
                                     re.IGNORECASE)

# First version where the for-each-ref command's format string supported the
# upstream:track token.
MIN_UPSTREAM_TRACK_GIT_VERSION = (1, 9)
class BadCommitRefException(Exception):
  """Raised when one of the given refs cannot be resolved to a commit."""

  def __init__(self, refs):
    super(BadCommitRefException, self).__init__(
        'one of %s does not seem to be a valid commitref.' % str(refs))
def memoize_one(**kwargs):
  """Memoizes a single-argument pure function.

  Values of None are not cached.

  Kwargs:
    threadsafe (bool) - REQUIRED. Specifies whether to use locking around
      cache manipulation functions. This is a kwarg so that users of
      memoize_one are forced to explicitly and verbosely pick True or False.

  Adds four methods to the decorated function:
    * get(key, default=None) - Gets the value for this key from the cache.
    * set(key, value) - Sets the value for this key from the cache.
    * clear() - Drops the entire contents of the cache.  Useful for unittests.
    * update(other) - Updates the contents of the cache from another dict.
  """
  assert 'threadsafe' in kwargs, 'Must specify threadsafe={True,False}'
  threadsafe = kwargs['threadsafe']

  if threadsafe:
    # Wrap |f| so that every call runs while holding |lock|.
    def withlock(lock, f):
      def inner(*args, **kwargs):
        with lock:
          return f(*args, **kwargs)
      return inner
  else:
    # No locking requested: pass the function through untouched.
    def withlock(_lock, f):
      return f

  def decorator(f):
    # Instantiate the lock in decorator, in case users of memoize_one do:
    #
    # memoizer = memoize_one(threadsafe=True)
    #
    # @memoizer
    # def fn1(val): ...
    #
    # @memoizer
    # def fn2(val): ...
    lock = threading.Lock() if threadsafe else None
    cache = {}
    _get = withlock(lock, cache.get)
    _set = withlock(lock, cache.__setitem__)

    @functools.wraps(f)
    def inner(arg):
      ret = _get(arg)
      if ret is None:
        ret = f(arg)
        if ret is not None:
          # None results are deliberately not cached (see docstring).
          _set(arg, ret)
      return ret
    inner.get = _get
    inner.set = _set
    inner.clear = withlock(lock, cache.clear)
    inner.update = withlock(lock, cache.update)
    return inner
  return decorator
def _ScopedPool_initer(orig, orig_args):  # pragma: no cover
  """Initializer method for ScopedPool's subprocesses.

  This helps ScopedPool handle Ctrl-C's correctly.
  """
  # Workers ignore SIGINT so only the parent reacts to Ctrl-C; the parent
  # then terminates the whole pool (see ScopedPool).
  signal.signal(signal.SIGINT, signal.SIG_IGN)
  if orig:
    # Chain to the caller-supplied initializer, if any.
    orig(*orig_args)
@contextlib.contextmanager
def ScopedPool(*args, **kwargs):
  """Context Manager which returns a multiprocessing.pool instance which
  correctly deals with thrown exceptions.

  *args - Arguments to multiprocessing.pool

  Kwargs:
    kind ('threads', 'procs') - The type of underlying coprocess to use.
    **etc - Arguments to multiprocessing.pool
  """
  if kwargs.pop('kind', None) == 'threads':
    pool = multiprocessing.pool.ThreadPool(*args, **kwargs)
  else:
    # For process pools, wrap any caller-supplied initializer so the workers
    # also ignore SIGINT (see _ScopedPool_initer): Ctrl-C is then handled
    # solely by the parent, which terminates the pool below.
    orig, orig_args = kwargs.get('initializer'), kwargs.get('initargs', ())
    kwargs['initializer'] = _ScopedPool_initer
    kwargs['initargs'] = orig, orig_args
    pool = multiprocessing.pool.Pool(*args, **kwargs)

  try:
    yield pool
    # Normal exit from the with-body: stop accepting work; join() below
    # waits for outstanding work to drain.
    pool.close()
  except:
    # Any exception (including KeyboardInterrupt): kill outstanding work
    # immediately rather than waiting for it.
    pool.terminate()
    raise
  finally:
    pool.join()
class ProgressPrinter(object):
  """Threaded single-stat status message printer."""

  def __init__(self, fmt, enabled=None, fout=sys.stderr, period=0.5):
    """Create a ProgressPrinter.

    Use it as a context manager which produces a simple 'increment' method:

      with ProgressPrinter('(%%(count)d/%d)' % 1000) as inc:
        for i in xrange(1000):
          # do stuff
          if i % 10 == 0:
            inc(10)

    Args:
      fmt - String format with a single '%(count)d' where the counter value
        should go.
      enabled (bool) - If this is None, will default to True if
        logging.getLogger() is set to INFO or more verbose.
      fout (file-like) - The stream to print status messages to.
      period (float) - The time in seconds for the printer thread to wait
        between printing.
    """
    self.fmt = fmt
    if enabled is None:  # pragma: no cover
      self.enabled = logging.getLogger().isEnabledFor(logging.INFO)
    else:
      self.enabled = enabled

    self._count = 0
    self._dead = False
    # Condition doubles as the interruptible sleep between refreshes and as
    # the wakeup channel used by __exit__.
    self._dead_cond = threading.Condition()
    self._stream = fout
    self._thread = threading.Thread(target=self._run)
    self._period = period

  def _emit(self, s):
    # '\r' rewrites the current terminal line in place.
    if self.enabled:
      self._stream.write('\r' + s)
      self._stream.flush()

  def _run(self):
    # Printer-thread loop: refresh the status line every |period| seconds
    # until __exit__ flips _dead, then print one final newline-terminated
    # status line.
    with self._dead_cond:
      while not self._dead:
        self._emit(self.fmt % {'count': self._count})
        self._dead_cond.wait(self._period)
    self._emit((self.fmt + '\n') % {'count': self._count})

  def inc(self, amount=1):
    # Called from the worker (non-printer) thread(s).
    self._count += amount

  def __enter__(self):
    self._thread.start()
    return self.inc

  def __exit__(self, _exc_type, _exc_value, _traceback):
    self._dead = True
    with self._dead_cond:
      # Wake the printer thread immediately instead of waiting out |period|.
      self._dead_cond.notifyAll()
    self._thread.join()
    del self._thread
def once(function):
  """@Decorates |function| so that it only performs its action once, no matter
  how many times the decorated |function| is called.

  The first call returns |function|'s result; every later call is a no-op
  returning None.
  """
  def _inner_gen():
    yield function()
    while True:
      yield

  # The original returned ``_inner_gen().next``, which only exists on
  # Python 2 generators.  Binding the ``next`` builtin via functools.partial
  # is behaviorally identical and works on both Python 2.6+ and Python 3.
  return functools.partial(next, _inner_gen())
## Git functions
def branch_config(branch, option, default=None):
  """Return the git config value ``branch.<branch>.<option>``, or |default|."""
  key = 'branch.%s.%s' % (branch, option)
  return config(key, default=default)
def branch_config_map(option):
  """Return {branch: <|option| value>} for all branches."""
  pattern = re.compile(r'^branch\.(.*)\.%s$' % option)
  try:
    lines = run('config', '--get-regexp', pattern.pattern).splitlines()
    result = {}
    for line in lines:
      key, value = line.split()
      result[pattern.match(key).group(1)] = value
    return result
  except subprocess2.CalledProcessError:
    # No branch has this option set (git config exits non-zero).
    return {}
def branches(*args):
  """Yield the name of every local branch, skipping detached-HEAD entries.

  Exits the process when the repo exceeds the configured branch limit
  ('depot-tools.branch-limit', default 20).
  """
  NO_BRANCH = ('* (no branch', '* (detached from ')

  key = 'depot-tools.branch-limit'
  limit = 20
  try:
    limit = int(config(key, limit))
  except ValueError:
    # Non-numeric config value: keep the default limit.
    pass

  raw_branches = run('branch', *args).splitlines()

  num = len(raw_branches)
  if num > limit:
    print >> sys.stderr, textwrap.dedent("""\
    Your git repo has too many branches (%d/%d) for this tool to work well.

    You may adjust this limit by running:
    git config %s <new_limit>
    """ % (num, limit, key))
    sys.exit(1)

  for line in raw_branches:
    if line.startswith(NO_BRANCH):
      continue
    # The branch name is the last whitespace-separated token (skips the
    # leading '*' on the current branch).
    yield line.split()[-1]
def config(option, default=None):
  """Read one git config value; return |default| when unset or empty."""
  try:
    value = run('config', '--get', option)
  except subprocess2.CalledProcessError:
    # git config exits non-zero when the option is not set at all.
    return default
  return value or default
def config_list(option):
  """Return every value of a multi-valued git config option (possibly [])."""
  try:
    raw = run('config', '--get-all', option)
  except subprocess2.CalledProcessError:
    return []
  return raw.split()
def current_branch():
  """Return the short name of the checked-out branch, or None on failure."""
  try:
    return run('rev-parse', '--abbrev-ref', 'HEAD')
  except subprocess2.CalledProcessError:
    # e.g. an unborn branch or not a git repo.
    return None
def del_branch_config(branch, option, scope='local'):
  """Unset ``branch.<branch>.<option>`` in the given config scope."""
  key = 'branch.%s.%s' % (branch, option)
  del_config(key, scope=scope)
def del_config(option, scope='local'):
  """Unset a git config option; a missing key is silently ignored."""
  try:
    run('config', '--' + scope, '--unset', option)
  except subprocess2.CalledProcessError:
    # Option was not set -- nothing to do.
    pass
def freeze():
  """Snapshot the working tree into FREEZE commits.

  Staged changes become a FREEZE.indexed commit; everything else is then
  staged and committed as FREEZE.unindexed (see thaw() for the inverse).
  Returns a message string when there was nothing to commit.
  """
  took_action = False

  try:
    run('commit', '-m', FREEZE + '.indexed')
    took_action = True
  except subprocess2.CalledProcessError:
    # Nothing staged: git commit exits non-zero.
    pass

  try:
    run('add', '-A')
    run('commit', '-m', FREEZE + '.unindexed')
    took_action = True
  except subprocess2.CalledProcessError:
    # Nothing unstaged/untracked either.
    pass

  if not took_action:
    return 'Nothing to freeze.'
def get_branch_tree():
  """Get the dictionary of {branch: parent}, compatible with topo_iter.

  Returns a tuple of (skipped, <branch_tree dict>) where skipped is a set of
  branches without upstream branches defined.
  """
  skipped = set()
  branch_tree = {}

  for branch in branches():
    parent = upstream(branch)
    if parent:
      branch_tree[branch] = parent
    else:
      skipped.add(branch)

  return skipped, branch_tree
def get_or_create_merge_base(branch, parent=None):
  """Finds the configured merge base for branch.

  If parent is supplied, it's used instead of calling upstream(branch).

  The result is cached in branch config ('base' / 'base-upstream'); a cached
  value is discarded when it is stale or wrong for the current history.
  """
  base = branch_config(branch, 'base')
  base_upstream = branch_config(branch, 'base-upstream')
  parent = parent or upstream(branch)
  if not parent:
    # No upstream configured: there is no merge base to compute.
    return None
  actual_merge_base = run('merge-base', parent, branch)

  if base_upstream != parent:
    # The cached base was recorded against a different upstream; ignore it.
    base = None
    base_upstream = None

  def is_ancestor(a, b):
    return run_with_retcode('merge-base', '--is-ancestor', a, b) == 0

  if base:
    if not is_ancestor(base, branch):
      # Cached base is not even in this branch's history (e.g. after a
      # rebase) -- it cannot be a merge base.
      logging.debug('Found WRONG pre-set merge-base for %s: %s', branch, base)
      base = None
    elif is_ancestor(base, actual_merge_base):
      # Cached base predates the real merge base; the branch has been
      # brought forward since it was recorded.
      logging.debug('Found OLD pre-set merge-base for %s: %s', branch, base)
      base = None
    else:
      logging.debug('Found pre-set merge-base for %s: %s', branch, base)

  if not base:
    base = actual_merge_base
    # Record the freshly computed base for next time.
    manual_merge_base(branch, base, parent)

  return base
def hash_multi(*reflike):
  """Resolve each commitref to a hash; returns one hash string per ref."""
  output = run('rev-parse', *reflike)
  return output.splitlines()
def hash_one(reflike, short=False):
  """Resolve |reflike| to its commit hash (abbreviated when |short|)."""
  if short:
    return run('rev-parse', '--short', reflike)
  return run('rev-parse', reflike)
def in_rebase():
  """Return True when the repository is mid-rebase (merge or apply style)."""
  git_dir = run('rev-parse', '--git-dir')
  rebase_dirs = ('rebase-merge', 'rebase-apply')
  return any(os.path.exists(os.path.join(git_dir, d)) for d in rebase_dirs)
def intern_f(f, kind='blob'):
  """Interns a file object into the git object store.

  Args:
    f (file-like object) - The file-like object to intern
    kind (git object type) - One of 'blob', 'commit', 'tree', 'tag'.

  Returns the git hash of the interned object (hex encoded).
  """
  obj_hash = run('hash-object', '-t', kind, '-w', '--stdin', stdin=f)
  f.close()
  return obj_hash
def is_dormant(branch):
  """Return True when |branch| is marked dormant in branch config."""
  # TODO(iannucci): Do an oldness check?
  flag = branch_config(branch, 'dormant', 'false')
  return flag != 'false'
def manual_merge_base(branch, base, parent):
  """Record |base| as the cached merge base of |branch| against |parent|."""
  set_branch_config(branch, 'base', base)
  # Remember which upstream the base was computed against, so the cache can
  # be invalidated when the upstream changes.
  set_branch_config(branch, 'base-upstream', parent)
def mktree(treedict):
  """Makes a git tree object and returns its hash.

  See |tree()| for the values of mode, type, and ref.

  Args:
    treedict - { name: (mode, type, ref) }
  """
  with tempfile.TemporaryFile() as f:
    # `git mktree -z` consumes NUL-terminated '<mode> <type> <ref>\t<name>'
    # records; stage them in a temp file used as the subprocess's stdin.
    for name, (mode, typ, ref) in treedict.iteritems():
      f.write('%s %s %s\t%s\0' % (mode, typ, ref, name))
    f.seek(0)
    return run('mktree', '-z', stdin=f)
def parse_commitrefs(*commitrefs):
  """Returns binary encoded commit hashes for one or more commitrefs.

  A commitref is anything which can resolve to a commit. Popular examples:
    * 'HEAD'
    * 'origin/master'
    * 'cool_branch~2'

  Raises BadCommitRefException if any ref fails to resolve.
  """
  try:
    # NOTE: on Python 2, map() is eager, so resolution errors surface inside
    # this try block.
    return map(binascii.unhexlify, hash_multi(*commitrefs))
  except subprocess2.CalledProcessError:
    raise BadCommitRefException(commitrefs)
# Result of rebase(): success flag plus the captured stdout/stderr.
RebaseRet = collections.namedtuple('RebaseRet', 'success stdout stderr')


def rebase(parent, start, branch, abort=False):
  """Rebases |start|..|branch| onto the branch |parent|.

  Args:
    parent - The new parent ref for the rebased commits.
    start - The commit to start from
    branch - The branch to rebase
    abort - If True, will call git-rebase --abort in the event that the rebase
      doesn't complete successfully.

  Returns a namedtuple with fields:
    success - a boolean indicating that the rebase command completed
      successfully.
    message - if the rebase failed, this contains the stdout of the failed
      rebase.
  """
  try:
    args = ['--onto', parent, start, branch]
    if TEST_MODE:
      # Deterministic committer dates keep test expectations stable.
      args.insert(0, '--committer-date-is-author-date')
    run('rebase', *args)
    return RebaseRet(True, '', '')
  except subprocess2.CalledProcessError as cpe:
    if abort:
      # Leave the repo in its pre-rebase state instead of mid-conflict.
      run('rebase', '--abort')
    return RebaseRet(False, cpe.stdout, cpe.stderr)
def remove_merge_base(branch):
  """Forget the cached merge base recorded for |branch|."""
  for option in ('base', 'base-upstream'):
    del_branch_config(branch, option)
def root():
  """Return the configured root ref, defaulting to 'origin/master'."""
  return config('depot-tools.upstream', 'origin/master')
def run(*cmd, **kwargs):
  """The same as run_with_stderr, except it only returns stdout."""
  stdout, _ = run_with_stderr(*cmd, **kwargs)
  return stdout
def run_with_retcode(*cmd, **kwargs):
  """Run a command but only return the status code."""
  try:
    run(*cmd, **kwargs)
  except subprocess2.CalledProcessError as cpe:
    return cpe.returncode
  return 0
def run_stream(*cmd, **kwargs):
  """Runs a git command. Returns stdout as a PIPE (file-like object).

  stderr is dropped to avoid races if the process outputs to both stdout and
  stderr.
  """
  kwargs.setdefault('stderr', subprocess2.VOID)
  kwargs.setdefault('stdout', subprocess2.PIPE)
  # Disable color so downstream parsing sees plain text.
  full_cmd = (GIT_EXE, '-c', 'color.ui=never') + cmd
  proc = subprocess2.Popen(full_cmd, **kwargs)
  return proc.stdout
def run_with_stderr(*cmd, **kwargs):
  """Runs a git command.

  Returns (stdout, stderr) as a pair of strings.

  kwargs
    autostrip (bool) - Strip the output. Defaults to True.
    indata (str) - Specifies stdin data for the process.

  Raises subprocess2.CalledProcessError on a non-zero exit.
  """
  kwargs.setdefault('stdin', subprocess2.PIPE)
  kwargs.setdefault('stdout', subprocess2.PIPE)
  kwargs.setdefault('stderr', subprocess2.PIPE)
  autostrip = kwargs.pop('autostrip', True)
  indata = kwargs.pop('indata', None)

  # Force-disable color so output parsing stays stable across user configs.
  cmd = (GIT_EXE, '-c', 'color.ui=never') + cmd
  proc = subprocess2.Popen(cmd, **kwargs)
  ret, err = proc.communicate(indata)
  retcode = proc.wait()
  if retcode != 0:
    raise subprocess2.CalledProcessError(retcode, cmd, os.getcwd(), ret, err)

  if autostrip:
    ret = (ret or '').strip()
    err = (err or '').strip()

  return ret, err
def set_branch_config(branch, option, value, scope='local'):
  """Set ``branch.<branch>.<option>`` to |value| in the given config scope."""
  key = 'branch.%s.%s' % (branch, option)
  set_config(key, value, scope=scope)
def set_config(option, value, scope='local'):
  """Set a git config |option| to |value| in the given |scope|."""
  run('config', '--' + scope, option, value)
def squash_current_branch(header=None, merge_base=None):
  """Squash all commits on the current branch since its merge base into one.

  Args:
    header - First line of the squash commit message. Defaults to
      'git squash commit.'.
    merge_base - The commit to squash down to. Defaults to the (cached)
      merge base of the current branch.
  """
  header = header or 'git squash commit.'
  merge_base = merge_base or get_or_create_merge_base(current_branch())
  # Preserve the squashed commits' hashes and messages in the body of the
  # new commit message.  (The original guarded this with ``if log_msg:``,
  # which was always true since |header| is never empty -- dead code.)
  log_msg = header + '\n\n'
  log_msg += run('log', '--reverse', '--format=%H%n%B', '%s..HEAD' % merge_base)
  run('reset', '--soft', merge_base)
  run('commit', '-a', '-F', '-', indata=log_msg)
def tags(*args):
  """Return the repository's tag names, one per list entry."""
  output = run('tag', *args)
  return output.splitlines()
def thaw():
  """Pop the FREEZE commits created by freeze(), restoring the working tree.

  Walks history from HEAD; each FREEZE commit is undone with the `git reset`
  mode recorded in FREEZE_SECTIONS ('soft' for indexed, 'mixed' for
  unindexed).  Returns a message string when HEAD is not frozen.
  """
  took_action = False
  for sha in (s.strip() for s in run_stream('rev-list', 'HEAD').xreadlines()):
    # Inspect HEAD (not |sha|): each reset below moves HEAD to the previous
    # |sha|, so HEAD is always the commit about to be considered.
    msg = run('show', '--format=%f%b', '-s', 'HEAD')
    match = FREEZE_MATCHER.match(msg)
    if not match:
      if not took_action:
        return 'Nothing to thaw.'
      break

    run('reset', '--' + FREEZE_SECTIONS[match.group(1)], sha)
    took_action = True
def topo_iter(branch_tree, top_down=True):
  """Generates (branch, parent) in topographical order for a branch tree.

  Given a tree:

            A1
        B1      B2
      C1  C2    C3
                  D1

  branch_tree would look like: {
    'D1': 'C3',
    'C3': 'B2',
    'B2': 'A1',
    'C1': 'B1',
    'C2': 'B1',
    'B1': 'A1',
  }

  It is OK to have multiple 'root' nodes in your graph.

  if top_down is True, items are yielded from A->D. Otherwise they're yielded
  from D->A. Within a layer the branches will be yielded in sorted order.
  """
  branch_tree = branch_tree.copy()

  # TODO(iannucci): There is probably a more efficient way to do these.
  if top_down:
    while branch_tree:
      # A branch whose parent is no longer pending belongs to the current
      # top-most layer.
      this_pass = [(b, p) for b, p in branch_tree.iteritems()
                   if p not in branch_tree]
      assert this_pass, "Branch tree has cycles: %r" % branch_tree
      for branch, parent in sorted(this_pass):
        yield branch, parent
        del branch_tree[branch]
  else:
    # Invert the tree so leaves (branches with no remaining children) can be
    # detected and emitted first.
    parent_to_branches = collections.defaultdict(set)
    for branch, parent in branch_tree.iteritems():
      parent_to_branches[parent].add(branch)

    while branch_tree:
      this_pass = [(b, p) for b, p in branch_tree.iteritems()
                   if not parent_to_branches[b]]
      assert this_pass, "Branch tree has cycles: %r" % branch_tree
      for branch, parent in sorted(this_pass):
        yield branch, parent
        parent_to_branches[parent].discard(branch)
        del branch_tree[branch]
def tree(treeref, recurse=False):
  """Returns a dict representation of a git tree object.

  Args:
    treeref (str) - a git ref which resolves to a tree (commits count as
      trees).
    recurse (bool) - include all of the tree's decendants too. File names will
      take the form of 'some/path/to/file'.

  Return format:
    { 'file_name': (mode, type, ref) }

    mode is an integer where:
      * 0040000 - Directory
      * 0100644 - Regular non-executable file
      * 0100664 - Regular non-executable group-writeable file
      * 0100755 - Regular executable file
      * 0120000 - Symbolic link
      * 0160000 - Gitlink

    type is a string where it's one of 'blob', 'commit', 'tree', 'tag'.

    ref is the hex encoded hash of the entry.

  Returns None when |treeref| does not resolve to a tree.
  """
  ret = {}
  opts = ['ls-tree', '--full-tree']
  if recurse:
    opts.append('-r')
  opts.append(treeref)
  try:
    for line in run(*opts).splitlines():
      # ls-tree lines look like '<mode> <type> <ref>\t<name>'; maxsplit=3
      # keeps names containing whitespace intact.
      mode, typ, ref, name = line.split(None, 3)
      ret[name] = (mode, typ, ref)
  except subprocess2.CalledProcessError:
    return None
  return ret
def upstream(branch):
  """Return the upstream ref of |branch|, or None when it has no upstream."""
  ref = branch + '@{upstream}'
  try:
    return run('rev-parse', '--abbrev-ref', '--symbolic-full-name', ref)
  except subprocess2.CalledProcessError:
    return None
def get_git_version():
  """Returns a tuple that contains the numeric components of the current git
  version, e.g. (2, 30, 1).  Returns () when no version number is found.
  """
  version_string = run('--version')
  # The dot must be escaped: the original pattern used a bare '.', which
  # matches any character, not only the version separator.
  version_match = re.search(r'(\d+\.)+(\d+)', version_string)
  if not version_match:
    # The original crashed here (int('') on a non-match); an empty tuple
    # compares False against MIN_UPSTREAM_TRACK_GIT_VERSION, which is the
    # safe degraded behavior.
    return ()
  return tuple(int(x) for x in version_match.group().split('.'))
def get_branches_info(include_tracking_status):
  """Return {branch: BranchesInfo(hash, upstream, ahead, behind)} for all
  local branches.

  ahead/behind are ints parsed from `for-each-ref`'s upstream:track token
  (only when |include_tracking_status| and git is new enough), else None.
  Upstreams that are not themselves local branches are added as keys mapped
  to None.
  """
  format_string = (
      '--format=%(refname:short):%(objectname:short):%(upstream:short):')

  # This is not covered by the depot_tools CQ which only has git version 1.8.
  if (include_tracking_status and
      get_git_version() >= MIN_UPSTREAM_TRACK_GIT_VERSION):  # pragma: no cover
    format_string += '%(upstream:track)'

  info_map = {}
  data = run('for-each-ref', format_string, 'refs/heads')
  BranchesInfo = collections.namedtuple(
      'BranchesInfo', 'hash upstream ahead behind')
  for line in data.splitlines():
    (branch, branch_hash, upstream_branch, tracking_status) = line.split(':')

    # tracking_status looks like '[ahead N, behind M]' (either part may be
    # absent); missing parts yield None.
    ahead_match = re.search(r'ahead (\d+)', tracking_status)
    ahead = int(ahead_match.group(1)) if ahead_match else None

    behind_match = re.search(r'behind (\d+)', tracking_status)
    behind = int(behind_match.group(1)) if behind_match else None

    info_map[branch] = BranchesInfo(
        hash=branch_hash, upstream=upstream_branch, ahead=ahead, behind=behind)

  # Set None for upstreams which are not branches (e.g empty upstream, remotes
  # and deleted upstream branches).
  missing_upstreams = {}
  for info in info_map.values():
    if info.upstream not in info_map and info.upstream not in missing_upstreams:
      missing_upstreams[info.upstream] = None

  # NOTE: Python 2 only -- dict.items() returns concatenable lists here.
  return dict(info_map.items() + missing_upstreams.items())
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Strategy to export custom proto formats."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
from tensorflow.contrib.boosted_trees.proto import tree_config_pb2
from tensorflow.contrib.boosted_trees.python.training.functions import gbdt_batch
from tensorflow.contrib.decision_trees.proto import generic_tree_model_extensions_pb2
from tensorflow.contrib.decision_trees.proto import generic_tree_model_pb2
from tensorflow.contrib.learn.python.learn import export_strategy
from tensorflow.contrib.learn.python.learn.utils import saved_model_export_utils
from tensorflow.python.client import session as tf_session
from tensorflow.python.framework import ops
from tensorflow.python.platform import gfile
from tensorflow.python.saved_model import loader as saved_model_loader
from tensorflow.python.saved_model import tag_constants
def make_custom_export_strategy(name,
                                convert_fn,
                                feature_columns,
                                export_input_fn):
  """Makes custom exporter of GTFlow tree format.

  Args:
    name: A string, for the name of the export strategy.
    convert_fn: A function that converts the tree proto to desired format and
      saves it to the desired location. Can be None to skip conversion.
    feature_columns: A list of feature columns.
    export_input_fn: A function that takes no arguments and returns an
      `InputFnOps`.

  Returns:
    An `ExportStrategy`.
  """
  base_strategy = saved_model_export_utils.make_export_strategy(
      serving_input_fn=export_input_fn)
  input_fn = export_input_fn()
  # Resolve feature names and per-kind column counts once; the nested
  # export_fn closes over them.
  (sorted_feature_names, dense_floats, sparse_float_indices, _, _,
   sparse_int_indices, _, _) = gbdt_batch.extract_features(
       input_fn.features, feature_columns)

  def export_fn(estimator, export_dir, checkpoint_path=None, eval_result=None):
    """A wrapper to export to SavedModel, and convert it to other formats."""
    result_dir = base_strategy.export(estimator, export_dir,
                                      checkpoint_path,
                                      eval_result)
    with ops.Graph().as_default() as graph:
      with tf_session.Session(graph=graph) as sess:
        # Reload the just-exported SavedModel so the serialized ensemble can
        # be pulled out of the graph.
        saved_model_loader.load(
            sess, [tag_constants.SERVING], result_dir)
        # Note: This is GTFlow internal API and might change.
        ensemble_model = graph.get_operation_by_name(
            "ensemble_model/TreeEnsembleSerialize")
        _, dfec_str = sess.run(ensemble_model.outputs)
        dtec = tree_config_pb2.DecisionTreeEnsembleConfig()
        dtec.ParseFromString(dfec_str)
        # Export the result in the same folder as the saved model.
        if convert_fn:
          convert_fn(dtec, sorted_feature_names,
                     len(dense_floats),
                     len(sparse_float_indices),
                     len(sparse_int_indices), result_dir, eval_result)
        feature_importances = _get_feature_importances(
            dtec, sorted_feature_names,
            len(dense_floats),
            len(sparse_float_indices), len(sparse_int_indices))
        sorted_by_importance = sorted(
            feature_importances.items(), key=lambda x: -x[1])
        # Persist importances alongside the SavedModel as an extra asset.
        assets_dir = os.path.join(result_dir, "assets.extra")
        gfile.MakeDirs(assets_dir)
        with gfile.GFile(os.path.join(assets_dir, "feature_importances"),
                         "w") as f:
          f.write("\n".join("%s, %f" % (k, v) for k, v in sorted_by_importance))
    return result_dir

  return export_strategy.ExportStrategy(name, export_fn)
def convert_to_universal_format(dtec, sorted_feature_names,
                                num_dense, num_sparse_float,
                                num_sparse_int):
  """Convert GTFlow trees to universal format.

  Args:
    dtec: A DecisionTreeEnsembleConfig proto produced by GTFlow.
    sorted_feature_names: Feature names indexed by overall feature column
      (dense columns first, then sparse float, then categorical).
    num_dense: Number of dense float feature columns.
    num_sparse_float: Number of sparse float feature columns.
    num_sparse_int: Unused; kept for interface symmetry.

  Returns:
    A generic_tree_model_pb2.ModelAndFeatures proto.
  """
  del num_sparse_int  # unused.
  model_and_features = generic_tree_model_pb2.ModelAndFeatures()
  # TODO(jonasz): Feature descriptions should contain information about how each
  # feature is processed before it's fed to the model (e.g. bucketing
  # information). As of now, this serves as a list of features the model uses.
  for feature_name in sorted_feature_names:
    model_and_features.features[feature_name].SetInParent()
  model = model_and_features.model
  model.ensemble.summation_combination_technique.SetInParent()
  for tree_idx in range(len(dtec.trees)):
    gtflow_tree = dtec.trees[tree_idx]
    tree_weight = dtec.tree_weights[tree_idx]
    member = model.ensemble.members.add()
    member.submodel_id.value = tree_idx
    tree = member.submodel.decision_tree
    for node_idx in range(len(gtflow_tree.nodes)):
      gtflow_node = gtflow_tree.nodes[node_idx]
      node = tree.nodes.add()
      node_type = gtflow_node.WhichOneof("node")
      node.node_id.value = node_idx
      if node_type == "leaf":
        leaf = gtflow_node.leaf
        # Tree weight (shrinkage) is folded into the leaf values so the
        # universal ensemble is a plain unweighted summation.
        if leaf.HasField("vector"):
          for weight in leaf.vector.value:
            new_value = node.leaf.vector.value.add()
            new_value.float_value = weight * tree_weight
        else:
          for index, weight in zip(
              leaf.sparse_vector.index, leaf.sparse_vector.value):
            new_value = node.leaf.sparse_vector.sparse_value[index]
            new_value.float_value = weight * tree_weight
      else:
        node = node.binary_node
        # Binary nodes here.
        if node_type == "dense_float_binary_split":
          split = gtflow_node.dense_float_binary_split
          feature_id = split.feature_column
          inequality_test = node.inequality_left_child_test
          inequality_test.feature_id.id.value = sorted_feature_names[feature_id]
          inequality_test.type = (
              generic_tree_model_pb2.InequalityTest.LESS_OR_EQUAL)
          inequality_test.threshold.float_value = split.threshold
        elif node_type == "sparse_float_binary_split_default_left":
          split = gtflow_node.sparse_float_binary_split_default_left.split
          node.default_direction = (
              generic_tree_model_pb2.BinaryNode.LEFT)
          feature_id = split.feature_column + num_dense
          inequality_test = node.inequality_left_child_test
          inequality_test.feature_id.id.value = sorted_feature_names[feature_id]
          inequality_test.type = (
              generic_tree_model_pb2.InequalityTest.LESS_OR_EQUAL)
          inequality_test.threshold.float_value = split.threshold
        elif node_type == "sparse_float_binary_split_default_right":
          # BUG FIX: the actual split lives in the wrapper's .split field
          # (as in the default_left branch above and in
          # _get_feature_importances); the original read the wrapper proto
          # itself, producing wrong feature/threshold/child ids.
          split = gtflow_node.sparse_float_binary_split_default_right.split
          node.default_direction = (
              generic_tree_model_pb2.BinaryNode.RIGHT)
          feature_id = split.feature_column + num_dense
          inequality_test = node.inequality_left_child_test
          inequality_test.feature_id.id.value = sorted_feature_names[feature_id]
          inequality_test.type = (
              generic_tree_model_pb2.InequalityTest.LESS_OR_EQUAL)
          inequality_test.threshold.float_value = split.threshold
        elif node_type == "categorical_id_binary_split":
          split = gtflow_node.categorical_id_binary_split
          node.default_direction = generic_tree_model_pb2.BinaryNode.RIGHT
          feature_id = split.feature_column + num_dense + num_sparse_float
          categorical_test = (
              generic_tree_model_extensions_pb2.MatchingValuesTest())
          categorical_test.feature_id.id.value = sorted_feature_names[
              feature_id]
          matching_id = categorical_test.value.add()
          matching_id.int64_value = split.feature_id
          node.custom_left_child_test.Pack(categorical_test)
        else:
          raise ValueError("Unexpected node type %s", node_type)
        node.left_child_id.value = split.left_id
        node.right_child_id.value = split.right_id
  return model_and_features
def _get_feature_importances(dtec, feature_names, num_dense_floats,
num_sparse_float, num_sparse_int):
"""Export the feature importance per feature column."""
del num_sparse_int # Unused.
sums = collections.defaultdict(lambda: 0)
for tree_idx in range(len(dtec.trees)):
tree = dtec.trees[tree_idx]
for tree_node in tree.nodes:
node_type = tree_node.WhichOneof("node")
if node_type == "dense_float_binary_split":
split = tree_node.dense_float_binary_split
split_column = feature_names[split.feature_column]
elif node_type == "sparse_float_binary_split_default_left":
split = tree_node.sparse_float_binary_split_default_left.split
split_column = feature_names[split.feature_column + num_dense_floats]
elif node_type == "sparse_float_binary_split_default_right":
split = tree_node.sparse_float_binary_split_default_right.split
split_column = feature_names[split.feature_column + num_dense_floats]
elif node_type == "categorical_id_binary_split":
split = tree_node.categorical_id_binary_split
split_column = feature_names[split.feature_column + num_dense_floats +
num_sparse_float]
elif node_type == "categorical_id_set_membership_binary_split":
split = tree_node.categorical_id_set_membership_binary_split
split_column = feature_names[split.feature_column + num_dense_floats +
num_sparse_float]
elif node_type == "leaf":
assert tree_node.node_metadata.gain == 0
continue
else:
raise ValueError("Unexpected split type %s", node_type)
# Apply shrinkage factor. It is important since it is not always uniform
# across different trees.
sums[split_column] += (
tree_node.node_metadata.gain * dtec.tree_weights[tree_idx])
return dict(sums)
| |
# Copyright 2013 IBM Corp.
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Authors:
# Erik Zaadi <erikz@il.ibm.com>
# Avishay Traeger <avishay@il.ibm.com>
import copy
from mox3 import mox
from oslo_config import cfg
from cinder import context
from cinder import exception
from cinder.i18n import _
from cinder import test
from cinder.volume import configuration as conf
from cinder.volume.drivers.ibm import xiv_ds8k
from cinder.volume import volume_types
# Fixture data shared by the fake-driver tests below.
FAKE = "fake"
FAKE2 = "fake2"
CANNOT_DELETE = "Can not delete"
# Exceeds POOL_SIZE, so create_volume must fail for this size.
TOO_BIG_VOLUME_SIZE = 12000
POOL_SIZE = 100
CONSISTGROUP_ID = 1
VOLUME = {'size': 16,
          'name': FAKE,
          'id': 1,
          'consistencygroup_id': CONSISTGROUP_ID,
          'status': 'available'}
VOLUME2 = {'size': 32,
           'name': FAKE2,
           'id': 2,
           'consistencygroup_id': CONSISTGROUP_ID,
           'status': 'available'}

MANAGED_FAKE = "managed_fake"
MANAGED_VOLUME = {'size': 16,
                  'name': MANAGED_FAKE,
                  'id': 2}

REPLICA_FAKE = "repicated_fake"
REPLICATED_VOLUME = {'size': 64,
                     'name': REPLICA_FAKE,
                     'id': 2}

CONTEXT = {}

FAKESNAPSHOT = 'fakesnapshot'
SNAPSHOT = {'name': 'fakesnapshot',
            'id': 3}

CONSISTGROUP = {'id': CONSISTGROUP_ID, }
CG_SNAPSHOT_ID = 1
CG_SNAPSHOT = {'id': CG_SNAPSHOT_ID,
               'consistencygroup_id': CONSISTGROUP_ID}

CONNECTOR = {'initiator': "iqn.2012-07.org.fake:01:948f189c4695", }

CONF = cfg.CONF
class XIVDS8KFakeProxyDriver(object):
"""Fake IBM XIV and DS8K Proxy Driver."""
    def __init__(self, xiv_ds8k_info, logger, expt, driver=None):
        """Initialize Proxy."""
        # Connection settings dict the real proxy would be configured with.
        self.xiv_ds8k_info = xiv_ds8k_info
        self.logger = logger
        # Exception namespace used to raise driver-style exceptions.
        self.exception = expt
        # Chained assignment: both portal and iqn are the FAKE marker value.
        self.xiv_ds8k_portal = \
            self.xiv_ds8k_iqn = FAKE
        self.volumes = {}
        self.snapshots = {}
        self.driver = driver
def setup(self, context):
if self.xiv_ds8k_info['xiv_ds8k_user'] != self.driver\
.configuration.san_login:
raise self.exception.NotAuthorized()
if self.xiv_ds8k_info['xiv_ds8k_address'] != self.driver\
.configuration.san_ip:
raise self.exception.HostNotFound(host='fake')
def create_volume(self, volume):
if volume['size'] > POOL_SIZE:
raise self.exception.VolumeBackendAPIException(data='blah')
self.volumes[volume['name']] = volume
def volume_exists(self, volume):
return self.volumes.get(volume['name'], None) is not None
def delete_volume(self, volume):
if self.volumes.get(volume['name'], None) is not None:
del self.volumes[volume['name']]
def manage_volume_get_size(self, volume, existing_ref):
if self.volumes.get(existing_ref['source-name'], None) is None:
raise self.exception.VolumeNotFound(volume_id=volume['id'])
return self.volumes[existing_ref['source-name']]['size']
def manage_volume(self, volume, existing_ref):
if self.volumes.get(existing_ref['source-name'], None) is None:
raise self.exception.VolumeNotFound(volume_id=volume['id'])
volume['size'] = MANAGED_VOLUME['size']
return {}
def unmanage_volume(self, volume):
pass
def initialize_connection(self, volume, connector):
if not self.volume_exists(volume):
raise self.exception.VolumeNotFound(volume_id=volume['id'])
lun_id = volume['id']
self.volumes[volume['name']]['attached'] = connector
return {'driver_volume_type': 'iscsi',
'data': {'target_discovered': True,
'target_portal': self.xiv_ds8k_portal,
'target_iqn': self.xiv_ds8k_iqn,
'target_lun': lun_id,
'volume_id': volume['id'],
'multipath': True,
'provider_location': "%s,1 %s %s" % (
self.xiv_ds8k_portal,
self.xiv_ds8k_iqn,
lun_id), },
}
def terminate_connection(self, volume, connector):
if not self.volume_exists(volume):
raise self.exception.VolumeNotFound(volume_id=volume['id'])
if not self.is_volume_attached(volume, connector):
raise self.exception.NotFound(_('Volume not found for '
'instance %(instance_id)s.')
% {'instance_id': 'fake'})
del self.volumes[volume['name']]['attached']
def is_volume_attached(self, volume, connector):
if not self.volume_exists(volume):
raise self.exception.VolumeNotFound(volume_id=volume['id'])
return (self.volumes[volume['name']].get('attached', None)
== connector)
def reenable_replication(self, context, volume):
model_update = {}
if volume['replication_status'] == 'inactive':
model_update['replication_status'] = 'active'
elif volume['replication_status'] == 'invalid_status_val':
raise exception.CinderException()
model_update['replication_extended_status'] = 'some_status'
model_update['replication_driver_data'] = 'some_data'
return model_update
def get_replication_status(self, context, volume):
if volume['replication_status'] == 'invalid_status_val':
raise exception.CinderException()
return {'replication_status': 'active'}
def promote_replica(self, context, volume):
if volume['replication_status'] == 'invalid_status_val':
raise exception.CinderException()
return {'replication_status': 'inactive'}
def create_replica_test_volume(self, volume, src_vref):
if volume['size'] != src_vref['size']:
raise exception.InvalidVolume(
reason="Target and source volumes have different size.")
return
def retype(self, ctxt, volume, new_type, diff, host):
volume['easytier'] = new_type['extra_specs']['easytier']
return True, volume
def create_consistencygroup(self, ctxt, group):
volumes = [volume for k, volume in self.volumes.items()
if volume['consistencygroup_id'] == group['id']]
if volumes:
raise exception.CinderException(
message='The consistency group id of volume may be wrong.')
return {'status': 'available'}
def delete_consistencygroup(self, ctxt, group):
volumes = []
for volume in self.volumes.values():
if (group.get('id', None)
== volume.get('consistencygroup_id', None)):
if volume['name'] == CANNOT_DELETE:
raise exception.VolumeBackendAPIException(
message='Volume can not be deleted')
else:
volume['status'] = 'deleted'
volumes.append(volume)
# Delete snapshots in consistency group
self.snapshots = {k: snap for k, snap in self.snapshots.items()
if not(snap.get('consistencygroup_id', None)
== group.get('id', None))}
# Delete volume in consistency group
self.volumes = {k: vol for k, vol in self.volumes.items()
if not(vol.get('consistencygroup_id', None)
== group.get('id', None))}
return {'status': 'deleted'}, volumes
def update_consistencygroup(
self, context, group,
add_volumes, remove_volumes):
model_update = {'status': 'available'}
return model_update, None, None
def create_consistencygroup_from_src(
self, context, group, volumes, cgsnapshot, snapshots,
source_cg=None, source_vols=None):
return None, None
def create_cgsnapshot(self, ctxt, cgsnapshot):
snapshots = []
for volume in self.volumes.values():
if (cgsnapshot.get('consistencygroup_id', None)
== volume.get('consistencygroup_id', None)):
if volume['size'] > POOL_SIZE / 2:
raise self.exception.VolumeBackendAPIException(data='blah')
snapshot = copy.deepcopy(volume)
snapshot['name'] = CANNOT_DELETE \
if snapshot['name'] == CANNOT_DELETE \
else snapshot['name'] + 'Snapshot'
snapshot['status'] = 'available'
snapshot['cgsnapshot_id'] = cgsnapshot.get('id', None)
snapshot['consistencygroup_id'] = \
cgsnapshot.get('consistencygroup_id', None)
self.snapshots[snapshot['name']] = snapshot
snapshots.append(snapshot)
return {'status': 'available'}, snapshots
def delete_cgsnapshot(self, ctxt, cgsnapshot):
snapshots = []
for snapshot in self.snapshots.values():
if (cgsnapshot.get('id', None)
== snapshot.get('cgsnapshot_id', None)):
if snapshot['name'] == CANNOT_DELETE:
raise exception.VolumeBackendAPIException(
message='Snapshot can not be deleted')
else:
snapshot['status'] = 'deleted'
snapshots.append(snapshot)
# Delete snapshots in consistency group
self.snapshots = {k: snap for k, snap in self.snapshots.items()
if not(snap.get('consistencygroup_id', None)
== cgsnapshot.get('cgsnapshot_id', None))}
return {'status': 'deleted'}, snapshots
class XIVDS8KVolumeDriverTest(test.TestCase):
    """Test IBM XIV and DS8K volume driver."""
    def setUp(self):
        """Initialize IBM XIV and DS8K Driver."""
        super(XIVDS8KVolumeDriverTest, self).setUp()
        # Mock configuration that points the driver at the fake proxy class
        # defined above; all san settings share the FAKE placeholder.
        configuration = mox.MockObject(conf.Configuration)
        configuration.san_is_local = False
        configuration.xiv_ds8k_proxy = \
            'cinder.tests.unit.test_ibm_xiv_ds8k.XIVDS8KFakeProxyDriver'
        configuration.xiv_ds8k_connection_type = 'iscsi'
        configuration.xiv_chap = 'disabled'
        configuration.san_ip = FAKE
        configuration.management_ips = FAKE
        configuration.san_login = FAKE
        configuration.san_clustername = FAKE
        configuration.san_password = FAKE
        configuration.append_config_values(mox.IgnoreArg())
        self.driver = xiv_ds8k.XIVDS8KDriver(
            configuration=configuration)
    def test_initialized_should_set_xiv_ds8k_info(self):
        """Test that the san flags are passed to the IBM proxy."""
        self.assertEqual(
            self.driver.xiv_ds8k_proxy.xiv_ds8k_info['xiv_ds8k_user'],
            self.driver.configuration.san_login)
        self.assertEqual(
            self.driver.xiv_ds8k_proxy.xiv_ds8k_info['xiv_ds8k_pass'],
            self.driver.configuration.san_password)
        self.assertEqual(
            self.driver.xiv_ds8k_proxy.xiv_ds8k_info['xiv_ds8k_address'],
            self.driver.configuration.san_ip)
        self.assertEqual(
            self.driver.xiv_ds8k_proxy.xiv_ds8k_info['xiv_ds8k_vol_pool'],
            self.driver.configuration.san_clustername)
    def test_setup_should_fail_if_credentials_are_invalid(self):
        """Test that the xiv_ds8k_proxy validates credentials."""
        self.driver.xiv_ds8k_proxy.xiv_ds8k_info['xiv_ds8k_user'] = 'invalid'
        self.assertRaises(exception.NotAuthorized, self.driver.do_setup, None)
    def test_setup_should_fail_if_connection_is_invalid(self):
        """Test that the xiv_ds8k_proxy validates connection."""
        self.driver.xiv_ds8k_proxy.xiv_ds8k_info['xiv_ds8k_address'] = \
            'invalid'
        self.assertRaises(exception.HostNotFound, self.driver.do_setup, None)
    def test_create_volume(self):
        """Test creating a volume."""
        self.driver.do_setup(None)
        self.driver.create_volume(VOLUME)
        has_volume = self.driver.xiv_ds8k_proxy.volume_exists(VOLUME)
        self.assertTrue(has_volume)
        self.driver.delete_volume(VOLUME)
    def test_volume_exists(self):
        """Test the volume exist method with a volume that doesn't exist."""
        self.driver.do_setup(None)
        self.assertFalse(
            self.driver.xiv_ds8k_proxy.volume_exists({'name': FAKE})
        )
    def test_delete_volume(self):
        """Verify that a volume is deleted."""
        self.driver.do_setup(None)
        self.driver.create_volume(VOLUME)
        self.driver.delete_volume(VOLUME)
        has_volume = self.driver.xiv_ds8k_proxy.volume_exists(VOLUME)
        self.assertFalse(has_volume)
    def test_delete_volume_should_fail_for_not_existing_volume(self):
        """Verify that deleting a non-existing volume is OK."""
        self.driver.do_setup(None)
        self.driver.delete_volume(VOLUME)
    def test_create_volume_should_fail_if_no_pool_space_left(self):
        """Verify that the xiv_ds8k_proxy validates volume pool space."""
        self.driver.do_setup(None)
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.create_volume,
                          {'name': FAKE,
                           'id': 1,
                           'size': TOO_BIG_VOLUME_SIZE})
    def test_initialize_connection(self):
        """Test that initialize connection attaches volume to host."""
        self.driver.do_setup(None)
        self.driver.create_volume(VOLUME)
        self.driver.initialize_connection(VOLUME, CONNECTOR)
        self.assertTrue(
            self.driver.xiv_ds8k_proxy.is_volume_attached(VOLUME, CONNECTOR))
        self.driver.terminate_connection(VOLUME, CONNECTOR)
        self.driver.delete_volume(VOLUME)
    def test_initialize_connection_should_fail_for_non_existing_volume(self):
        """Verify that initialize won't work for non-existing volume."""
        self.driver.do_setup(None)
        self.assertRaises(exception.VolumeNotFound,
                          self.driver.initialize_connection,
                          VOLUME,
                          CONNECTOR)
    def test_terminate_connection(self):
        """Test terminating a connection."""
        self.driver.do_setup(None)
        self.driver.create_volume(VOLUME)
        self.driver.initialize_connection(VOLUME, CONNECTOR)
        self.driver.terminate_connection(VOLUME, CONNECTOR)
        self.assertFalse(self.driver.xiv_ds8k_proxy.is_volume_attached(
            VOLUME,
            CONNECTOR))
        self.driver.delete_volume(VOLUME)
    def test_terminate_connection_should_fail_on_non_existing_volume(self):
        """Test that terminate won't work for non-existing volumes."""
        self.driver.do_setup(None)
        self.assertRaises(exception.VolumeNotFound,
                          self.driver.terminate_connection,
                          VOLUME,
                          CONNECTOR)
    def test_manage_existing_get_size(self):
        """Test that manage_existing_get_size returns the expected size. """
        self.driver.do_setup(None)
        self.driver.create_volume(MANAGED_VOLUME)
        existing_ref = {'source-name': MANAGED_VOLUME['name']}
        return_size = self.driver.manage_existing_get_size(
            VOLUME,
            existing_ref)
        self.assertEqual(return_size, MANAGED_VOLUME['size'])
        # cover both case, whether driver renames the volume or not
        self.driver.delete_volume(VOLUME)
        self.driver.delete_volume(MANAGED_VOLUME)
    def test_manage_existing_get_size_should_fail_on_non_existing_volume(self):
        """Test that manage_existing_get_size fails on non existing volume. """
        self.driver.do_setup(None)
        # on purpose - do NOT create managed volume
        existing_ref = {'source-name': MANAGED_VOLUME['name']}
        self.assertRaises(exception.VolumeNotFound,
                          self.driver.manage_existing_get_size,
                          VOLUME,
                          existing_ref)
    def test_manage_existing(self):
        """Test that manage_existing returns successfully. """
        self.driver.do_setup(None)
        self.driver.create_volume(MANAGED_VOLUME)
        existing_ref = {'source-name': MANAGED_VOLUME['name']}
        self.driver.manage_existing(VOLUME, existing_ref)
        self.assertEqual(VOLUME['size'], MANAGED_VOLUME['size'])
        # cover both case, whether driver renames the volume or not
        self.driver.delete_volume(VOLUME)
        self.driver.delete_volume(MANAGED_VOLUME)
    def test_manage_existing_should_fail_on_non_existing_volume(self):
        """Test that manage_existing fails on non existing volume. """
        self.driver.do_setup(None)
        # on purpose - do NOT create managed volume
        existing_ref = {'source-name': MANAGED_VOLUME['name']}
        self.assertRaises(exception.VolumeNotFound,
                          self.driver.manage_existing,
                          VOLUME,
                          existing_ref)
    def test_reenable_replication(self):
        """Test that reenable_replication returns successfully. """
        self.driver.do_setup(None)
        # assume the replicated volume is inactive
        replicated_volume = copy.deepcopy(REPLICATED_VOLUME)
        replicated_volume['replication_status'] = 'inactive'
        model_update = self.driver.reenable_replication(
            CONTEXT,
            replicated_volume
        )
        self.assertEqual(
            model_update['replication_status'],
            'active'
        )
        self.assertTrue('replication_extended_status' in model_update)
        self.assertTrue('replication_driver_data' in model_update)
    def test_reenable_replication_fail_on_cinder_exception(self):
        """Test that reenable_replication fails on driver raising exception."""
        self.driver.do_setup(None)
        replicated_volume = copy.deepcopy(REPLICATED_VOLUME)
        # on purpose - set invalid value to replication_status
        # expect an exception.
        replicated_volume['replication_status'] = 'invalid_status_val'
        self.assertRaises(
            exception.CinderException,
            self.driver.reenable_replication,
            CONTEXT,
            replicated_volume
        )
    def test_get_replication_status(self):
        """Test that get_replication_status return successfully. """
        self.driver.do_setup(None)
        # assume the replicated volume is inactive
        replicated_volume = copy.deepcopy(REPLICATED_VOLUME)
        replicated_volume['replication_status'] = 'inactive'
        model_update = self.driver.get_replication_status(
            CONTEXT,
            replicated_volume
        )
        self.assertEqual(
            model_update['replication_status'],
            'active'
        )
    def test_get_replication_status_fail_on_exception(self):
        """Test that get_replication_status fails on exception"""
        self.driver.do_setup(None)
        replicated_volume = copy.deepcopy(REPLICATED_VOLUME)
        # on purpose - set invalid value to replication_status
        # expect an exception.
        replicated_volume['replication_status'] = 'invalid_status_val'
        self.assertRaises(
            exception.CinderException,
            self.driver.get_replication_status,
            CONTEXT,
            replicated_volume
        )
    def test_promote_replica(self):
        """Test that promote_replica returns successfully. """
        self.driver.do_setup(None)
        replicated_volume = copy.deepcopy(REPLICATED_VOLUME)
        # assume the replication_status should be active
        replicated_volume['replication_status'] = 'active'
        model_update = self.driver.promote_replica(
            CONTEXT,
            replicated_volume
        )
        # after promoting, replication_status should be inactive
        self.assertEqual(
            model_update['replication_status'],
            'inactive'
        )
    def test_promote_replica_fail_on_cinder_exception(self):
        """Test that promote_replica fails on CinderException. """
        self.driver.do_setup(None)
        replicated_volume = copy.deepcopy(REPLICATED_VOLUME)
        # on purpose - set invalid value to replication_status
        # expect an exception.
        replicated_volume['replication_status'] = 'invalid_status_val'
        self.assertRaises(
            exception.CinderException,
            self.driver.promote_replica,
            CONTEXT,
            replicated_volume
        )
    def test_create_replica_test_volume(self):
        """Test that create_replica_test_volume returns successfully."""
        self.driver.do_setup(None)
        tgt_volume = copy.deepcopy(VOLUME)
        src_volume = copy.deepcopy(REPLICATED_VOLUME)
        tgt_volume['size'] = src_volume['size']
        model_update = self.driver.create_replica_test_volume(
            tgt_volume,
            src_volume
        )
        self.assertTrue(model_update is None)
    def test_create_replica_test_volume_fail_on_diff_size(self):
        """Test that create_replica_test_volume fails on diff size."""
        self.driver.do_setup(None)
        tgt_volume = copy.deepcopy(VOLUME)
        src_volume = copy.deepcopy(REPLICATED_VOLUME)
        self.assertRaises(
            exception.InvalidVolume,
            self.driver.create_replica_test_volume,
            tgt_volume,
            src_volume
        )
    def test_retype(self):
        """Test that retype returns successfully."""
        self.driver.do_setup(None)
        # prepare parameters
        ctxt = context.get_admin_context()
        host = {
            'host': 'foo',
            'capabilities': {
                'location_info': 'xiv_ds8k_fake_1',
                'extent_size': '1024'
            }
        }
        key_specs_old = {'easytier': False, 'warning': 2, 'autoexpand': True}
        key_specs_new = {'easytier': True, 'warning': 5, 'autoexpand': False}
        old_type_ref = volume_types.create(ctxt, 'old', key_specs_old)
        new_type_ref = volume_types.create(ctxt, 'new', key_specs_new)
        diff, equal = volume_types.volume_types_diff(
            ctxt,
            old_type_ref['id'],
            new_type_ref['id'],
        )
        volume = copy.deepcopy(VOLUME)
        old_type = volume_types.get_volume_type(ctxt, old_type_ref['id'])
        volume['volume_type'] = old_type
        volume['host'] = host
        new_type = volume_types.get_volume_type(ctxt, new_type_ref['id'])
        self.driver.create_volume(volume)
        ret = self.driver.retype(ctxt, volume, new_type, diff, host)
        self.assertTrue(ret)
        self.assertTrue(volume['easytier'])
    def test_retype_fail_on_exception(self):
        """Test that retype fails on exception."""
        self.driver.do_setup(None)
        # prepare parameters
        ctxt = context.get_admin_context()
        host = {
            'host': 'foo',
            'capabilities': {
                'location_info': 'xiv_ds8k_fake_1',
                'extent_size': '1024'
            }
        }
        key_specs_old = {'easytier': False, 'warning': 2, 'autoexpand': True}
        old_type_ref = volume_types.create(ctxt, 'old', key_specs_old)
        # the new type has no 'easytier' extra spec, so the fake proxy's
        # retype raises KeyError
        new_type_ref = volume_types.create(ctxt, 'new')
        diff, equal = volume_types.volume_types_diff(
            ctxt,
            old_type_ref['id'],
            new_type_ref['id'],
        )
        volume = copy.deepcopy(VOLUME)
        old_type = volume_types.get_volume_type(ctxt, old_type_ref['id'])
        volume['volume_type'] = old_type
        volume['host'] = host
        new_type = volume_types.get_volume_type(ctxt, new_type_ref['id'])
        self.driver.create_volume(volume)
        self.assertRaises(
            KeyError,
            self.driver.retype,
            ctxt, volume, new_type, diff, host
        )
    def test_create_consistencygroup(self):
        """Test that create_consistencygroup return successfully."""
        self.driver.do_setup(None)
        ctxt = context.get_admin_context()
        # Create consistency group
        model_update = self.driver.create_consistencygroup(ctxt, CONSISTGROUP)
        self.assertEqual('available',
                         model_update['status'],
                         "Consistency Group created failed")
    def test_create_consistencygroup_fail_on_cg_not_empty(self):
        """Test create_consistencygroup with empty consistency group."""
        self.driver.do_setup(None)
        ctxt = context.get_admin_context()
        # Create volumes
        # And add the volumes into the consistency group before creating cg
        self.driver.create_volume(VOLUME)
        self.assertRaises(exception.CinderException,
                          self.driver.create_consistencygroup,
                          ctxt, CONSISTGROUP)
    def test_delete_consistencygroup(self):
        """Test that delete_consistencygroup return successfully."""
        self.driver.do_setup(None)
        ctxt = context.get_admin_context()
        # Create consistency group
        self.driver.create_consistencygroup(ctxt, CONSISTGROUP)
        # Create volumes and add them to consistency group
        self.driver.create_volume(VOLUME)
        # Delete consistency group
        model_update, volumes = \
            self.driver.delete_consistencygroup(ctxt, CONSISTGROUP)
        # Verify the result
        self.assertEqual('deleted',
                         model_update['status'],
                         'Consistency Group deleted failed')
        for volume in volumes:
            self.assertEqual('deleted',
                             volume['status'],
                             'Consistency Group deleted failed')
    def test_delete_consistencygroup_fail_on_volume_not_delete(self):
        """Test delete_consistencygroup with volume delete failure."""
        self.driver.do_setup(None)
        ctxt = context.get_admin_context()
        # Create consistency group
        self.driver.create_consistencygroup(ctxt, CONSISTGROUP)
        # Set the volume not to be deleted
        volume = copy.deepcopy(VOLUME)
        volume['name'] = CANNOT_DELETE
        # Create volumes and add them to consistency group
        self.driver.create_volume(volume)
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.delete_consistencygroup,
                          ctxt, CONSISTGROUP)
    def test_create_cgsnapshot(self):
        """Test that create_cgsnapshot return successfully."""
        self.driver.do_setup(None)
        ctxt = context.get_admin_context()
        # Create consistency group
        self.driver.create_consistencygroup(ctxt, CONSISTGROUP)
        # Create volumes and add them to consistency group
        self.driver.create_volume(VOLUME)
        # Create consistency group snapshot
        model_update, snapshots = \
            self.driver.create_cgsnapshot(ctxt, CG_SNAPSHOT)
        # Verify the result
        self.assertEqual('available',
                         model_update['status'],
                         'Consistency Group Snapshot created failed')
        for snap in snapshots:
            self.assertEqual('available',
                             snap['status'])
        # Clean the environment
        self.driver.delete_cgsnapshot(ctxt, CG_SNAPSHOT)
        self.driver.delete_consistencygroup(ctxt, CONSISTGROUP)
    def test_create_cgsnapshot_fail_on_no_pool_space_left(self):
        """Test that create_cgsnapshot return fail when no pool space left."""
        self.driver.do_setup(None)
        ctxt = context.get_admin_context()
        # Create consistency group
        self.driver.create_consistencygroup(ctxt, CONSISTGROUP)
        # Set the volume size
        volume = copy.deepcopy(VOLUME)
        volume['size'] = POOL_SIZE / 2 + 1
        # Create volumes and add them to consistency group
        self.driver.create_volume(volume)
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.create_cgsnapshot,
                          ctxt, CG_SNAPSHOT)
        # Clean the environment
        # NOTE(review): this sets 'volumes' on the driver itself, not on
        # xiv_ds8k_proxy, so it looks like a no-op for the fake proxy's
        # bookkeeping — confirm intent.
        self.driver.volumes = None
        self.driver.delete_consistencygroup(ctxt, CONSISTGROUP)
    def test_delete_cgsnapshot(self):
        """Test that delete_cgsnapshot return successfully."""
        self.driver.do_setup(None)
        ctxt = context.get_admin_context()
        # Create consistency group
        self.driver.create_consistencygroup(ctxt, CONSISTGROUP)
        # Create volumes and add them to consistency group
        self.driver.create_volume(VOLUME)
        # Create consistency group snapshot
        self.driver.create_cgsnapshot(ctxt, CG_SNAPSHOT)
        # Delete consistency group snapshot
        model_update, snapshots = \
            self.driver.delete_cgsnapshot(ctxt, CG_SNAPSHOT)
        # Verify the result
        self.assertEqual('deleted',
                         model_update['status'],
                         'Consistency Group Snapshot deleted failed')
        for snap in snapshots:
            self.assertEqual('deleted',
                             snap['status'])
        # Clean the environment
        self.driver.delete_consistencygroup(ctxt, CONSISTGROUP)
    def test_delete_cgsnapshot_fail_on_snapshot_not_delete(self):
        """Test delete_cgsnapshot when the snapshot cannot be deleted."""
        self.driver.do_setup(None)
        ctxt = context.get_admin_context()
        # Create consistency group
        self.driver.create_consistencygroup(ctxt, CONSISTGROUP)
        # Set the snapshot not to be deleted
        volume = copy.deepcopy(VOLUME)
        volume['name'] = CANNOT_DELETE
        # Create volumes and add them to consistency group
        self.driver.create_volume(volume)
        # Create consistency group snapshot
        self.driver.create_cgsnapshot(ctxt, CG_SNAPSHOT)
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.delete_cgsnapshot,
                          ctxt, CG_SNAPSHOT)
    def test_update_consistencygroup_without_volumes(self):
        """Test update_consistencygroup when there are no volumes specified."""
        self.driver.do_setup(None)
        ctxt = context.get_admin_context()
        # Update consistency group
        model_update, added, removed = self.driver.update_consistencygroup(
            ctxt, CONSISTGROUP, [], [])
        self.assertEqual('available',
                         model_update['status'],
                         "Consistency Group update failed")
        self.assertFalse(added,
                         "added volumes list is not empty")
        self.assertFalse(removed,
                         "removed volumes list is not empty")
    def test_update_consistencygroup_with_volumes(self):
        """Test update_consistencygroup when there are volumes specified."""
        self.driver.do_setup(None)
        ctxt = context.get_admin_context()
        # Update consistency group
        model_update, added, removed = self.driver.update_consistencygroup(
            ctxt, CONSISTGROUP, [VOLUME], [VOLUME2])
        self.assertEqual('available',
                         model_update['status'],
                         "Consistency Group update failed")
        self.assertFalse(added,
                         "added volumes list is not empty")
        self.assertFalse(removed,
                         "removed volumes list is not empty")
    def test_create_consistencygroup_from_src_without_volumes(self):
        """Test create_consistencygroup_from_src with no volumes specified."""
        self.driver.do_setup(None)
        ctxt = context.get_admin_context()
        # Create consistency group from source
        model_update, volumes_model_update = (
            self.driver.create_consistencygroup_from_src(
                ctxt, CONSISTGROUP, [], CG_SNAPSHOT, []))
        # model_update can be None or return available in status
        if model_update:
            self.assertEqual('available',
                             model_update['status'],
                             "Consistency Group create from source failed")
        # volumes_model_update can be None or return available in status
        if volumes_model_update:
            self.assertFalse(volumes_model_update,
                             "volumes list is not empty")
    def test_create_consistencygroup_from_src_with_volumes(self):
        """Test create_consistencygroup_from_src with volumes specified."""
        self.driver.do_setup(None)
        ctxt = context.get_admin_context()
        # Create consistency group from source
        model_update, volumes_model_update = (
            self.driver.create_consistencygroup_from_src(
                ctxt, CONSISTGROUP, [VOLUME], CG_SNAPSHOT, [SNAPSHOT]))
        # model_update can be None or return available in status
        if model_update:
            self.assertEqual('available',
                             model_update['status'],
                             "Consistency Group create from source failed")
        # volumes_model_update can be None or return available in status
        if volumes_model_update:
            self.assertEqual('available',
                             volumes_model_update['status'],
                             "volumes list status failed")
# ---- file boundary: the following AMQP rpc module is unrelated to the tests above ----
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2011 - 2012, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Shared code between AMQP based openstack.common.rpc implementations.
The code in this module is shared between the rpc implementations based on AMQP.
Specifically, this includes impl_kombu and impl_qpid. impl_carrot also uses
AMQP, but is deprecated and predates this code.
"""
import collections
import inspect
import sys
import uuid
from eventlet import greenpool
from eventlet import pools
from eventlet import queue
from eventlet import semaphore
from oslo.config import cfg
from heat.openstack.common import excutils
from heat.openstack.common.gettextutils import _ # noqa
from heat.openstack.common import local
from heat.openstack.common import log as logging
from heat.openstack.common.rpc import common as rpc_common
# Configuration options shared by all AMQP-based rpc implementations.
amqp_opts = [
    cfg.BoolOpt('amqp_durable_queues',
                default=False,
                deprecated_name='rabbit_durable_queues',
                deprecated_group='DEFAULT',
                help='Use durable queues in amqp.'),
    cfg.BoolOpt('amqp_auto_delete',
                default=False,
                help='Auto-delete queues in amqp.'),
]
cfg.CONF.register_opts(amqp_opts)
# Message key carrying a per-message unique id (duplicate detection,
# see _MsgIdCache below).
UNIQUE_ID = '_unique_id'
LOG = logging.getLogger(__name__)
class Pool(pools.Pool):
    """A pool of rpc Connection objects, one pool per connection class."""

    def __init__(self, conf, connection_cls, *args, **kwargs):
        self.connection_cls = connection_cls
        self.conf = conf
        # Honor caller-supplied values; otherwise fall back to the
        # configured pool size and stack (LIFO) ordering.
        if "max_size" not in kwargs:
            kwargs["max_size"] = self.conf.rpc_conn_pool_size
        if "order_as_stack" not in kwargs:
            kwargs["order_as_stack"] = True
        super(Pool, self).__init__(*args, **kwargs)
        self.reply_proxy = None

    # TODO(comstud): Timeout connections not used in a while
    def create(self):
        """Build a fresh connection; called by the base pool on demand."""
        LOG.debug(_('Pool creating new connection'))
        return self.connection_cls(self.conf)

    def empty(self):
        """Close all cached connections and drop the shared pool."""
        while self.free_items:
            self.get().close()
        # Force a new connection pool to be created.
        # Note that this was added due to failing unit test cases. The issue
        # is the above "while loop" gets all the cached connections from the
        # pool and closes them, but never returns them to the pool, a pool
        # leak. The unit tests hang waiting for an item to be returned to the
        # pool. The unit tests get here via the tearDown() method. In the run
        # time code, it gets here via cleanup() and only appears in service.py
        # just before doing a sys.exit(), so cleanup() only happens once and
        # the leakage is not a problem.
        self.connection_cls.pool = None
# Guards lazy creation of the per-class connection pool.
_pool_create_sem = semaphore.Semaphore()


def get_connection_pool(conf, connection_cls):
    """Return the pool shared by all users of connection_cls (lazy singleton)."""
    with _pool_create_sem:
        # Only one green thread may build the pool; others wait and reuse it.
        if connection_cls.pool:
            return connection_cls.pool
        connection_cls.pool = Pool(conf, connection_cls)
    return connection_cls.pool
class ConnectionContext(rpc_common.Connection):
    """The class that is actually returned to the create_connection() caller.

    This is essentially a wrapper around Connection that supports 'with'.
    It can also return a new Connection, or one from a pool.

    The function will also catch when an instance of this class is to be
    deleted. With that we can return Connections to the pool on exceptions
    and so forth without making the caller be responsible for catching them.
    If possible the function makes sure to return a connection to the pool.
    """
    def __init__(self, conf, connection_pool, pooled=True, server_params=None):
        """Create a new connection, or get one from the pool."""
        self.connection = None
        self.conf = conf
        self.connection_pool = connection_pool
        if pooled:
            self.connection = connection_pool.get()
        else:
            # Dedicated (non-pooled) connection, optionally to a
            # non-default server via server_params.
            self.connection = connection_pool.connection_cls(
                conf,
                server_params=server_params)
        self.pooled = pooled
    def __enter__(self):
        """When with ConnectionContext() is used, return self."""
        return self
    def _done(self):
        """If the connection came from a pool, clean it up and put it back.
        If it did not come from a pool, close it.
        """
        if self.connection:
            if self.pooled:
                # Reset the connection so it's ready for the next caller
                # to grab from the pool
                self.connection.reset()
                self.connection_pool.put(self.connection)
            else:
                try:
                    self.connection.close()
                except Exception:
                    # Best-effort close; the connection is discarded anyway.
                    pass
            # Dropping the reference guards against a double release from
            # __exit__/__del__/close(), and makes __getattr__ raise on reuse.
            self.connection = None
    def __exit__(self, exc_type, exc_value, tb):
        """End of 'with' statement. We're done here."""
        self._done()
    def __del__(self):
        """Caller is done with this connection. Make sure we cleaned up."""
        self._done()
    def close(self):
        """Caller is done with this connection."""
        self._done()
    # The methods below simply forward to the wrapped Connection.
    def create_consumer(self, topic, proxy, fanout=False):
        self.connection.create_consumer(topic, proxy, fanout)
    def create_worker(self, topic, proxy, pool_name):
        self.connection.create_worker(topic, proxy, pool_name)
    def join_consumer_pool(self, callback, pool_name, topic, exchange_name,
                           ack_on_error=True):
        self.connection.join_consumer_pool(callback,
                                           pool_name,
                                           topic,
                                           exchange_name,
                                           ack_on_error)
    def consume_in_thread(self):
        self.connection.consume_in_thread()
    def __getattr__(self, key):
        """Proxy all other calls to the Connection instance."""
        if self.connection:
            return getattr(self.connection, key)
        else:
            # _done() already released the connection; reuse is an error.
            raise rpc_common.InvalidRPCConnectionReuse()
class ReplyProxy(ConnectionContext):
    """Connection class for RPC replies / callbacks."""
    def __init__(self, conf, connection_pool):
        # These attributes must be set before super().__init__() below:
        # the base-class init gives us a live connection and the consumer
        # callback (_process_data) reads _call_waiters.
        self._call_waiters = {}
        self._num_call_waiters = 0
        self._num_call_waiters_wrn_threshhold = 10
        # One direct reply queue shared by every call made via this proxy.
        self._reply_q = 'reply_' + uuid.uuid4().hex
        super(ReplyProxy, self).__init__(conf, connection_pool, pooled=False)
        # declare_direct_consumer/consume_in_thread are resolved on the
        # wrapped Connection via ConnectionContext.__getattr__.
        self.declare_direct_consumer(self._reply_q, self._process_data)
        self.consume_in_thread()
    def _process_data(self, message_data):
        # Route an incoming reply to the waiter registered for its msg_id.
        msg_id = message_data.pop('_msg_id', None)
        waiter = self._call_waiters.get(msg_id)
        if not waiter:
            LOG.warn(_('No calling threads waiting for msg_id : %(msg_id)s'
                       ', message : %(data)s'), {'msg_id': msg_id,
                                                 'data': message_data})
            LOG.warn(_('_call_waiters: %s') % str(self._call_waiters))
        else:
            waiter.put(message_data)
    def add_call_waiter(self, waiter, msg_id):
        self._num_call_waiters += 1
        if self._num_call_waiters > self._num_call_waiters_wrn_threshhold:
            LOG.warn(_('Number of call waiters is greater than warning '
                       'threshhold: %d. There could be a MulticallProxyWaiter '
                       'leak.') % self._num_call_waiters_wrn_threshhold)
            # Double the threshold so the warning is not spammed repeatedly.
            self._num_call_waiters_wrn_threshhold *= 2
        self._call_waiters[msg_id] = waiter
    def del_call_waiter(self, msg_id):
        self._num_call_waiters -= 1
        del self._call_waiters[msg_id]
    def get_reply_q(self):
        return self._reply_q
def msg_reply(conf, msg_id, reply_q, connection_pool, reply=None,
              failure=None, ending=False, log_failure=True):
    """Sends a reply or an error on the channel signified by msg_id.

    Failure should be a sys.exc_info() tuple.
    """
    with ConnectionContext(conf, connection_pool) as conn:
        payload_failure = failure
        if payload_failure:
            payload_failure = rpc_common.serialize_remote_exception(
                payload_failure, log_failure)
        msg = {'result': reply, 'failure': payload_failure}
        if ending:
            msg['ending'] = True
        _add_unique_id(msg)
        # If a reply_q exists, add the msg_id to the reply and pass the
        # reply_q to direct_send() to use it as the response queue.
        # Otherwise use the msg_id for backward compatibilty.
        target = msg_id
        if reply_q:
            msg['_msg_id'] = msg_id
            target = reply_q
        conn.direct_send(target, rpc_common.serialize_msg(msg))
class RpcContext(rpc_common.CommonRpcContext):
    """Context that supports replying to a rpc.call."""

    def __init__(self, **kwargs):
        # msg_id / reply_q identify where results should be sent back;
        # both default to None (no reply expected, e.g. a cast).
        self.msg_id = kwargs.pop('msg_id', None)
        self.reply_q = kwargs.pop('reply_q', None)
        self.conf = kwargs.pop('conf')
        super(RpcContext, self).__init__(**kwargs)

    def deepcopy(self):
        """Return a new RpcContext carrying the same values."""
        values = self.to_dict()
        values['conf'] = self.conf
        values['msg_id'] = self.msg_id
        values['reply_q'] = self.reply_q
        return self.__class__(**values)

    def reply(self, reply=None, failure=None, ending=False,
              connection_pool=None, log_failure=True):
        """Send *reply* (or *failure*) back to the caller, if one exists.

        When *ending* is True the msg_id is cleared afterwards so no
        further replies can be sent for this call.
        """
        if self.msg_id:
            msg_reply(self.conf, self.msg_id, self.reply_q, connection_pool,
                      reply, failure, ending, log_failure)
            if ending:
                self.msg_id = None
def unpack_context(conf, msg):
    """Unpack context from msg.

    Pops every '_context_*' key out of *msg* (mutating it) and builds an
    RpcContext from them, plus the '_msg_id'/'_reply_q' routing fields.
    """
    context_dict = {}
    for key in list(msg.keys()):
        # NOTE(vish): Some versions of python don't like unicode keys
        #             in kwargs.
        key = str(key)
        if key.startswith('_context_'):
            value = msg.pop(key)
            # Strip the '_context_' prefix (9 characters).
            context_dict[key[9:]] = value
    context_dict['msg_id'] = msg.pop('_msg_id', None)
    context_dict['reply_q'] = msg.pop('_reply_q', None)
    context_dict['conf'] = conf
    ctx = RpcContext.from_dict(context_dict)
    rpc_common._safe_log(LOG.debug, _('unpacked context: %s'), ctx.to_dict())
    return ctx
def pack_context(msg, context):
    """Pack context into msg.

    Values for message keys need to be less than 255 chars, so we pull
    context out into a bunch of separate keys. If we want to support
    more arguments in rabbit messages, we may want to do the same
    for args at some point.
    """
    # NOTE: dict.iteritems() keeps this function Python 2 only, matching
    # the rest of this module.
    if isinstance(context, dict):
        context_d = dict([('_context_%s' % key, value)
                          for (key, value) in context.iteritems()])
    else:
        context_d = dict([('_context_%s' % key, value)
                          for (key, value) in context.to_dict().iteritems()])
    msg.update(context_d)
class _MsgIdCache(object):
    """Detects duplicate delivery of AMQP messages via their unique ids."""

    # NOTE: This value is considered can be a configuration item, but
    #       it is not necessary to change its value in most cases,
    #       so let this value as static for now.
    DUP_MSG_CHECK_SIZE = 16

    def __init__(self, **kwargs):
        # Bounded FIFO of recently-seen unique ids; the oldest entry is
        # evicted automatically once maxlen is reached.
        self.prev_msgids = collections.deque(
            [], maxlen=self.DUP_MSG_CHECK_SIZE)

    def check_duplicate_message(self, message_data):
        """Raise DuplicateMessageError when this message was seen already.

        AMQP consumers may read the same message twice when an exception
        occurs before the ack is returned; this guards against processing
        it twice.  Messages without a UNIQUE_ID key are ignored.
        """
        if UNIQUE_ID not in message_data:
            return
        msg_id = message_data[UNIQUE_ID]
        if msg_id in self.prev_msgids:
            raise rpc_common.DuplicateMessageError(msg_id=msg_id)
        self.prev_msgids.append(msg_id)
def _add_unique_id(msg):
    """Stamp *msg* (in place) with a fresh id used for duplicate detection."""
    unique_id = uuid.uuid4().hex
    msg[UNIQUE_ID] = unique_id
    LOG.debug(_('UNIQUE_ID is %s.') % (unique_id))
class _ThreadPoolWithWait(object):
    """Base class for a delayed invocation manager.

    Used by the Connection class to start up green threads
    to handle incoming messages.
    """

    def __init__(self, conf, connection_pool):
        self.conf = conf
        self.connection_pool = connection_pool
        # Green-thread pool sized from configuration.
        self.pool = greenpool.GreenPool(conf.rpc_thread_pool_size)

    def wait(self):
        """Wait for all callback threads to exit."""
        self.pool.waitall()
class CallbackWrapper(_ThreadPoolWithWait):
    """Wraps a straight callback.

    Allows it to be invoked in a green thread.
    """

    def __init__(self, conf, callback, connection_pool,
                 wait_for_consumers=False):
        """Initiates CallbackWrapper object.

        :param conf: cfg.CONF instance
        :param callback: a callable (probably a function)
        :param connection_pool: connection pool as returned by
                                get_connection_pool()
        :param wait_for_consumers: wait for all green threads to
                                   complete and raise the last
                                   caught exception, if any.
        """
        super(CallbackWrapper, self).__init__(
            conf=conf,
            connection_pool=connection_pool,
        )
        self.callback = callback
        self.wait_for_consumers = wait_for_consumers
        # Holds the sys.exc_info() of the last callback failure, if any.
        self.exc_info = None

    def _wrap(self, message_data, **kwargs):
        """Wrap the callback invocation to catch exceptions.
        """
        try:
            self.callback(message_data, **kwargs)
        except Exception:
            self.exc_info = sys.exc_info()

    def __call__(self, message_data):
        """Invoke the callback in a green thread.

        When wait_for_consumers is set, block until all threads finish
        and re-raise the last caught exception with its traceback.
        """
        self.exc_info = None
        self.pool.spawn_n(self._wrap, message_data)

        if self.wait_for_consumers:
            self.pool.waitall()
            if self.exc_info:
                # Python 2 three-expression raise: re-raise with the
                # original traceback (this file is Python 2 only).
                raise self.exc_info[1], None, self.exc_info[2]
class ProxyCallback(_ThreadPoolWithWait):
    """Calls methods on a proxy object based on method and args."""

    def __init__(self, conf, proxy, connection_pool):
        super(ProxyCallback, self).__init__(
            conf=conf,
            connection_pool=connection_pool,
        )
        self.proxy = proxy
        self.msg_id_cache = _MsgIdCache()

    def __call__(self, message_data):
        """Consumer callback to call a method on a proxy object.

        Parses the message for validity and fires off a thread to call the
        proxy object method.

        Message data should be a dictionary with two keys:
            method: string representing the method to call
            args: dictionary of arg: value

        Example: {'method': 'echo', 'args': {'value': 42}}
        """
        # It is important to clear the context here, because at this point
        # the previous context is stored in local.store.context
        if hasattr(local.store, 'context'):
            del local.store.context
        rpc_common._safe_log(LOG.debug, _('received %s'), message_data)
        self.msg_id_cache.check_duplicate_message(message_data)
        ctxt = unpack_context(self.conf, message_data)
        method = message_data.get('method')
        args = message_data.get('args', {})
        version = message_data.get('version')
        namespace = message_data.get('namespace')
        if not method:
            LOG.warn(_('no method for message: %s') % message_data)
            # Reply with an error so a caller is not left waiting forever.
            ctxt.reply(_('No method for message: %s') % message_data,
                       connection_pool=self.connection_pool)
            return
        self.pool.spawn_n(self._process_data, ctxt, version, method,
                          namespace, args)

    def _process_data(self, ctxt, version, method, namespace, args):
        """Process a message in a new thread.

        If the proxy object we have has a dispatch method
        (see rpc.dispatcher.RpcDispatcher), pass it the version,
        method, and args and let it dispatch as appropriate.  If not, use
        the old behavior of magically calling the specified method on the
        proxy we have here.
        """
        ctxt.update_store()
        try:
            rval = self.proxy.dispatch(ctxt, version, method, namespace,
                                       **args)
            # Check if the result was a generator
            if inspect.isgenerator(rval):
                for x in rval:
                    ctxt.reply(x, None, connection_pool=self.connection_pool)
            else:
                ctxt.reply(rval, None, connection_pool=self.connection_pool)
            # This final None tells multicall that it is done.
            ctxt.reply(ending=True, connection_pool=self.connection_pool)
        except rpc_common.ClientException as e:
            # Expected (whitelisted) exception: reply with the failure but
            # skip logging a full traceback.
            LOG.debug(_('Expected exception during message handling (%s)') %
                      e._exc_info[1])
            ctxt.reply(None, e._exc_info,
                       connection_pool=self.connection_pool,
                       log_failure=False)
        except Exception:
            # sys.exc_info() is deleted by LOG.exception().
            exc_info = sys.exc_info()
            LOG.error(_('Exception during message handling'),
                      exc_info=exc_info)
            ctxt.reply(None, exc_info, connection_pool=self.connection_pool)
class MulticallProxyWaiter(object):
    """Collects the stream of replies for a single multicall.

    Registers itself with the shared ReplyProxy under its msg_id and
    yields the replies via iteration until an 'ending' message arrives
    or the timeout expires.
    """

    def __init__(self, conf, msg_id, timeout, connection_pool):
        self._msg_id = msg_id
        self._timeout = timeout or conf.rpc_response_timeout
        self._reply_proxy = connection_pool.reply_proxy
        self._done = False
        self._got_ending = False
        self._conf = conf
        self._dataqueue = queue.LightQueue()
        # Add this caller to the reply proxy's call_waiters
        self._reply_proxy.add_call_waiter(self, self._msg_id)
        self.msg_id_cache = _MsgIdCache()

    def put(self, data):
        """Called by the ReplyProxy to hand over one reply message."""
        self._dataqueue.put(data)

    def done(self):
        """Deregister from the reply proxy; safe to call more than once."""
        if self._done:
            return
        self._done = True
        # Remove this caller from reply proxy's call_waiters
        self._reply_proxy.del_call_waiter(self._msg_id)

    def _process_data(self, data):
        """Turn one raw reply into a result value (or remote exception).

        Returns None for the 'ending' sentinel after setting
        self._got_ending.
        """
        result = None
        self.msg_id_cache.check_duplicate_message(data)
        if data['failure']:
            failure = data['failure']
            result = rpc_common.deserialize_remote_exception(self._conf,
                                                             failure)
        elif data.get('ending', False):
            self._got_ending = True
        else:
            result = data['result']
        return result

    def __iter__(self):
        """Return a result until we get a reply with an 'ending' flag."""
        if self._done:
            # NOTE: plain 'return' instead of 'raise StopIteration' --
            # raising StopIteration inside a generator is turned into a
            # RuntimeError by PEP 479 (Python 3.7+); 'return' behaves
            # identically on both Python 2 and 3.
            return
        while True:
            try:
                data = self._dataqueue.get(timeout=self._timeout)
                result = self._process_data(data)
            except queue.Empty:
                self.done()
                raise rpc_common.Timeout()
            except Exception:
                with excutils.save_and_reraise_exception():
                    self.done()
            if self._got_ending:
                self.done()
                # See NOTE above: 'return', not 'raise StopIteration'.
                return
            if isinstance(result, Exception):
                self.done()
                raise result
            yield result
def create_connection(conf, new, connection_pool):
    """Create a connection.

    When *new* is True a dedicated (non-pooled) connection is returned;
    otherwise one is drawn from *connection_pool*.
    """
    return ConnectionContext(conf, connection_pool, pooled=not new)
# Guards lazy creation of the per-process ReplyProxy in multicall().
_reply_proxy_create_sem = semaphore.Semaphore()
def multicall(conf, context, topic, msg, timeout, connection_pool):
    """Make a call that returns multiple times.

    Returns a MulticallProxyWaiter that yields each reply as it arrives.
    """
    LOG.debug(_('Making synchronous call on %s ...'), topic)
    msg_id = uuid.uuid4().hex
    msg.update({'_msg_id': msg_id})
    LOG.debug(_('MSG_ID is %s') % (msg_id))
    _add_unique_id(msg)
    pack_context(msg, context)

    # Lazily create the single shared ReplyProxy; the semaphore prevents
    # two green threads from racing to create it.
    with _reply_proxy_create_sem:
        if not connection_pool.reply_proxy:
            connection_pool.reply_proxy = ReplyProxy(conf, connection_pool)
    msg.update({'_reply_q': connection_pool.reply_proxy.get_reply_q()})
    wait_msg = MulticallProxyWaiter(conf, msg_id, timeout, connection_pool)
    with ConnectionContext(conf, connection_pool) as conn:
        conn.topic_send(topic, rpc_common.serialize_msg(msg), timeout)
    return wait_msg
def call(conf, context, topic, msg, timeout, connection_pool):
    """Sends a message on a topic and wait for a response."""
    results = list(multicall(conf, context, topic, msg, timeout,
                             connection_pool))
    # NOTE(vish): return the last result from the multicall
    if results:
        return results[-1]
def cast(conf, context, topic, msg, connection_pool):
    """Sends a message on a topic without waiting for a response."""
    LOG.debug(_('Making asynchronous cast on %s...'), topic)
    _add_unique_id(msg)
    pack_context(msg, context)
    with ConnectionContext(conf, connection_pool) as conn:
        conn.topic_send(topic, rpc_common.serialize_msg(msg))
def fanout_cast(conf, context, topic, msg, connection_pool):
    """Sends a message on a fanout exchange without waiting for a response."""
    LOG.debug(_('Making asynchronous fanout cast...'))
    _add_unique_id(msg)
    pack_context(msg, context)
    with ConnectionContext(conf, connection_pool) as conn:
        conn.fanout_send(topic, rpc_common.serialize_msg(msg))
def cast_to_server(conf, context, server_params, topic, msg, connection_pool):
    """Sends a message on a topic to a specific server."""
    _add_unique_id(msg)
    pack_context(msg, context)
    # pooled=False: a fresh connection configured with server_params,
    # rather than one from the shared pool.
    with ConnectionContext(conf, connection_pool, pooled=False,
                           server_params=server_params) as conn:
        conn.topic_send(topic, rpc_common.serialize_msg(msg))
def fanout_cast_to_server(conf, context, server_params, topic, msg,
                          connection_pool):
    """Sends a message on a fanout exchange to a specific server."""
    _add_unique_id(msg)
    pack_context(msg, context)
    # pooled=False: a fresh connection configured with server_params,
    # rather than one from the shared pool.
    with ConnectionContext(conf, connection_pool, pooled=False,
                           server_params=server_params) as conn:
        conn.fanout_send(topic, rpc_common.serialize_msg(msg))
def notify(conf, context, topic, msg, connection_pool, envelope):
    """Sends a notification event on a topic.

    When *envelope* is true, the message is wrapped via serialize_msg
    before sending.
    """
    LOG.debug(_('Sending %(event_type)s on %(topic)s'),
              dict(event_type=msg.get('event_type'),
                   topic=topic))
    _add_unique_id(msg)
    pack_context(msg, context)
    with ConnectionContext(conf, connection_pool) as conn:
        if envelope:
            msg = rpc_common.serialize_msg(msg)
        conn.notify_send(topic, msg)
def cleanup(connection_pool):
    """Drain *connection_pool* if one was provided; no-op otherwise."""
    if not connection_pool:
        return
    connection_pool.empty()
def get_control_exchange(conf):
    """Return the configured control exchange name."""
    exchange = conf.control_exchange
    return exchange
| |
# Copyright (c) 2010 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Stubouts, mocks and fixtures for the test suite."""
import pickle
import random
from nova.openstack.common import jsonutils
from nova import test
import nova.tests.image.fake
from nova.virt.xenapi.client import session
from nova.virt.xenapi import fake
from nova.virt.xenapi import vm_utils
from nova.virt.xenapi import vmops
def stubout_firewall_driver(stubs, conn):
    """Stub the firewall driver on *conn*'s vmops to no-op filter calls."""
    def fake_none(self, *args):
        return

    _vmops = conn._vmops
    stubs.Set(_vmops.firewall_driver, 'prepare_instance_filter', fake_none)
    stubs.Set(_vmops.firewall_driver, 'instance_filter_exists', fake_none)
def stubout_instance_snapshot(stubs):
    """Stub the vm_utils image-fetch and VHD-coalesce helpers."""
    def fake_fetch_image(context, session, instance, name_label, image, type):
        # Hand back a fresh fake VDI for each image component.
        return {'root': dict(uuid=_make_fake_vdi(), file=None),
                'kernel': dict(uuid=_make_fake_vdi(), file=None),
                'ramdisk': dict(uuid=_make_fake_vdi(), file=None)}

    stubs.Set(vm_utils, '_fetch_image', fake_fetch_image)

    def fake_wait_for_vhd_coalesce(*args):
        #TODO(sirp): Should we actually fake out the data here
        return "fakeparent", "fakebase"

    stubs.Set(vm_utils, '_wait_for_vhd_coalesce', fake_wait_for_vhd_coalesce)
def stubout_session(stubs, cls, product_version=(5, 6, 2),
                    product_brand='XenServer', **opt_args):
    """Stubs out methods from XenAPISession.

    Replaces session creation with *cls* and pins the reported product
    version/brand.
    """
    stubs.Set(session.XenAPISession, '_create_session',
              lambda s, url: cls(url, **opt_args))
    stubs.Set(session.XenAPISession, '_get_product_version_and_brand',
              lambda s: (product_version, product_brand))
def stubout_get_this_vm_uuid(stubs):
    """Stub get_this_vm_uuid to return the fake control domain's uuid."""
    def f(session):
        # NOTE: iteritems() keeps this Python 2 only, like the rest of
        # this module.
        vms = [rec['uuid'] for ref, rec
               in fake.get_all_records('VM').iteritems()
               if rec['is_control_domain']]
        return vms[0]
    stubs.Set(vm_utils, 'get_this_vm_uuid', f)
def stubout_image_service_download(stubs):
    """No-op the fake image service's download method."""
    def fake_download(*args, **kwargs):
        pass
    stubs.Set(nova.tests.image.fake._FakeImageService,
              'download', fake_download)
def stubout_stream_disk(stubs):
    """No-op vm_utils._stream_disk so no data is actually written."""
    def fake_stream_disk(*args, **kwargs):
        pass
    stubs.Set(vm_utils, '_stream_disk', fake_stream_disk)
def stubout_determine_is_pv_objectstore(stubs):
    """Stub _determine_is_pv_objectstore to report no PV kernel (False)."""
    def f(*args):
        return False
    stubs.Set(vm_utils, '_determine_is_pv_objectstore', f)
def stubout_is_snapshot(stubs):
    """Always returns true.

    xenapi fake driver does not create vmrefs for snapshots.
    """
    def f(*args):
        return True
    stubs.Set(vm_utils, 'is_snapshot', f)
def stubout_lookup_image(stubs):
    """Simulates a failure in lookup image."""
    def f(_1, _2, _3, _4):
        raise Exception("Test Exception raised by fake lookup_image")
    stubs.Set(vm_utils, 'lookup_image', f)
def stubout_fetch_disk_image(stubs, raise_failure=False):
    """Simulates a failure in fetch image_glance_disk.

    With raise_failure=False the stub succeeds and returns a fake VDI
    mapping keyed by the image type's string name.
    """
    def _fake_fetch_disk_image(context, session, instance, name_label, image,
                               image_type):
        if raise_failure:
            raise fake.Failure("Test Exception raised by "
                               "fake fetch_image_glance_disk")
        elif image_type == vm_utils.ImageType.KERNEL:
            filename = "kernel"
        elif image_type == vm_utils.ImageType.RAMDISK:
            filename = "ramdisk"
        else:
            filename = "unknown"

        vdi_type = vm_utils.ImageType.to_string(image_type)
        return {vdi_type: dict(uuid=None, file=filename)}

    stubs.Set(vm_utils, '_fetch_disk_image', _fake_fetch_disk_image)
def stubout_create_vm(stubs):
    """Simulates a failure in create_vm."""
    def f(*args):
        raise fake.Failure("Test Exception raised by fake create_vm")
    stubs.Set(vm_utils, 'create_vm', f)
def stubout_attach_disks(stubs):
    """Simulates a failure in _attach_disks."""
    def f(*args):
        raise fake.Failure("Test Exception raised by fake _attach_disks")
    stubs.Set(vmops.VMOps, '_attach_disks', f)
def _make_fake_vdi():
    """Create a fake VDI on the first fake SR and return its uuid."""
    sr_ref = fake.get_all('SR')[0]
    vdi_ref = fake.create_vdi('', sr_ref)
    vdi_rec = fake.get_record('VDI', vdi_ref)
    return vdi_rec['uuid']
class FakeSessionForVMTests(fake.SessionBase):
    """Stubs out a XenAPISession for VM tests."""

    # Canned iptables-save output returned by the xenhost plugin stub.
    _fake_iptables_save_output = ("# Generated by iptables-save v1.4.10 on "
                                  "Sun Nov  6 22:49:02 2011\n"
                                  "*filter\n"
                                  ":INPUT ACCEPT [0:0]\n"
                                  ":FORWARD ACCEPT [0:0]\n"
                                  ":OUTPUT ACCEPT [0:0]\n"
                                  "COMMIT\n"
                                  "# Completed on Sun Nov  6 22:49:02 2011\n")

    def host_call_plugin(self, _1, _2, plugin, method, _5):
        # Intercept the two plugin calls the VM tests exercise; anything
        # else falls through to the base fake session.
        if (plugin, method) == ('glance', 'download_vhd'):
            root_uuid = _make_fake_vdi()
            return pickle.dumps(dict(root=dict(uuid=root_uuid)))
        elif (plugin, method) == ("xenhost", "iptables_config"):
            return fake.as_json(out=self._fake_iptables_save_output,
                                err='')
        else:
            return (super(FakeSessionForVMTests, self).
                    host_call_plugin(_1, _2, plugin, method, _5))

    def VM_start(self, _1, ref, _2, _3):
        """Mark the fake VM Running; fail unless it was Halted."""
        vm = fake.get_record('VM', ref)
        if vm['power_state'] != 'Halted':
            raise fake.Failure(['VM_BAD_POWER_STATE', ref, 'Halted',
                                vm['power_state']])
        vm['power_state'] = 'Running'
        vm['is_a_template'] = False
        vm['is_control_domain'] = False
        vm['domid'] = random.randrange(1, 1 << 16)
        return vm

    def VM_start_on(self, _1, vm_ref, host_ref, _2, _3):
        """Start the VM and record which host it runs on."""
        vm_rec = self.VM_start(_1, vm_ref, _2, _3)
        vm_rec['resident_on'] = host_ref

    def VDI_snapshot(self, session_ref, vm_ref, _1):
        # Snapshots are modelled as read-only VDIs on a fake SR.
        sr_ref = "fakesr"
        return fake.create_vdi('fakelabel', sr_ref, read_only=True)

    def SR_scan(self, session_ref, sr_ref):
        pass
class FakeSessionForFirewallTests(FakeSessionForVMTests):
    """Stubs out a XenApi Session for doing IPTable Firewall tests."""

    def __init__(self, uri, test_case=None):
        super(FakeSessionForFirewallTests, self).__init__(uri)
        # Seed the fake rule tables from the test case, when provided.
        if hasattr(test_case, '_in_rules'):
            self._in_rules = test_case._in_rules
        if hasattr(test_case, '_in6_filter_rules'):
            self._in6_filter_rules = test_case._in6_filter_rules
        self._test_case = test_case

    def host_call_plugin(self, _1, _2, plugin, method, args):
        """Mock method for host_call_plugin to be used in unit tests
        for the dom0 iptables Firewall drivers for XenAPI
        """
        if plugin == "xenhost" and method == "iptables_config":
            # The command to execute is a json-encoded list
            cmd_args = args.get('cmd_args', None)
            cmd = jsonutils.loads(cmd_args)
            if not cmd:
                ret_str = ''
            else:
                output = ''
                process_input = args.get('process_input', None)
                if cmd == ['ip6tables-save', '-c']:
                    output = '\n'.join(self._in6_filter_rules)
                if cmd == ['iptables-save', '-c']:
                    output = '\n'.join(self._in_rules)
                if cmd == ['iptables-restore', '-c', ]:
                    lines = process_input.split('\n')
                    if '*filter' in lines:
                        if self._test_case is not None:
                            # Capture the rules the driver tried to restore
                            # so the test can assert on them.
                            self._test_case._out_rules = lines
                        output = '\n'.join(lines)
                if cmd == ['ip6tables-restore', '-c', ]:
                    lines = process_input.split('\n')
                    if '*filter' in lines:
                        output = '\n'.join(lines)
                ret_str = fake.as_json(out=output, err='')
            return ret_str
        else:
            # NOTE(review): delegates via super(FakeSessionForVMTests, ...),
            # which skips FakeSessionForVMTests.host_call_plugin entirely --
            # looks deliberate (bypasses the glance/xenhost VM-test stubs),
            # but confirm before relying on it.
            return (super(FakeSessionForVMTests, self).
                    host_call_plugin(_1, _2, plugin, method, args))
def stub_out_vm_methods(stubs):
    """No-op the bootlock/ephemeral/device helpers used by VM operations."""
    def fake_acquire_bootlock(self, vm):
        pass

    def fake_release_bootlock(self, vm):
        pass

    def fake_generate_ephemeral(*args):
        pass

    def fake_wait_for_device(dev):
        pass

    stubs.Set(vmops.VMOps, "_acquire_bootlock", fake_acquire_bootlock)
    stubs.Set(vmops.VMOps, "_release_bootlock", fake_release_bootlock)
    stubs.Set(vm_utils, 'generate_ephemeral', fake_generate_ephemeral)
    stubs.Set(vm_utils, '_wait_for_device', fake_wait_for_device)
class FakeSessionForVolumeTests(fake.SessionBase):
    """Stubs out a XenAPISession for Volume tests."""

    def VDI_introduce(self, _1, uuid, _2, _3, _4, _5,
                      _6, _7, _8, _9, _10, _11):
        """Raise INVALID_VDI unless a fake VDI with *uuid* already exists."""
        valid_vdi = False
        refs = fake.get_all('VDI')
        for ref in refs:
            rec = fake.get_record('VDI', ref)
            if rec['uuid'] == uuid:
                valid_vdi = True
        if not valid_vdi:
            raise fake.Failure([['INVALID_VDI', 'session', self._session]])
class FakeSessionForVolumeFailedTests(FakeSessionForVolumeTests):
    """Stubs out a XenAPISession for Volume tests: it injects failures."""

    def VDI_introduce(self, _1, uuid, _2, _3, _4, _5,
                      _6, _7, _8, _9, _10, _11):
        # This is for testing failure
        raise fake.Failure([['INVALID_VDI', 'session', self._session]])

    def PBD_unplug(self, _1, ref):
        # Mark the fake PBD as detached.
        rec = fake.get_record('PBD', ref)
        rec['currently-attached'] = False

    def SR_forget(self, _1, ref):
        pass
def stub_out_migration_methods(stubs):
    """Stub the vmops/vm_utils helpers exercised by the migration tests."""
    fakesr = fake.create_sr()

    def fake_import_all_migrated_disks(session, instance):
        vdi_ref = fake.create_vdi(instance['name'], fakesr)
        vdi_rec = fake.get_record('VDI', vdi_ref)
        vdi_rec['other_config']['nova_disk_type'] = 'root'
        return {"root": {'uuid': vdi_rec['uuid'], 'ref': vdi_ref},
                "ephemerals": {}}

    def fake_wait_for_instance_to_start(self, *args):
        pass

    def fake_get_vdi(session, vm_ref, userdevice='0'):
        # Build a child VDI with a vhd-parent in sm_config so migration
        # code sees a parent/child chain.
        vdi_ref_parent = fake.create_vdi('derp-parent', fakesr)
        vdi_rec_parent = fake.get_record('VDI', vdi_ref_parent)
        vdi_ref = fake.create_vdi('derp', fakesr,
                                  sm_config={'vhd-parent': vdi_rec_parent['uuid']})
        vdi_rec = session.call_xenapi("VDI.get_record", vdi_ref)
        return vdi_ref, vdi_rec

    def fake_sr(session, *args):
        return fakesr

    def fake_get_sr_path(*args):
        return "fake"

    def fake_destroy(*args, **kwargs):
        pass

    def fake_generate_ephemeral(*args):
        pass

    stubs.Set(vmops.VMOps, '_destroy', fake_destroy)
    stubs.Set(vmops.VMOps, '_wait_for_instance_to_start',
              fake_wait_for_instance_to_start)
    stubs.Set(vm_utils, 'import_all_migrated_disks',
              fake_import_all_migrated_disks)
    stubs.Set(vm_utils, 'scan_default_sr', fake_sr)
    stubs.Set(vm_utils, 'get_vdi_for_vm_safely', fake_get_vdi)
    stubs.Set(vm_utils, 'get_sr_path', fake_get_sr_path)
    stubs.Set(vm_utils, 'generate_ephemeral', fake_generate_ephemeral)
class FakeSessionForFailedMigrateTests(FakeSessionForVMTests):
    """Fake session whose migration-related XenAPI calls always raise."""

    def VM_assert_can_migrate(self, session, vmref, migrate_data,
                              live, vdi_map, vif_map, options):
        raise fake.Failure("XenAPI VM.assert_can_migrate failed")

    def host_migrate_receive(self, session, hostref, networkref, options):
        raise fake.Failure("XenAPI host.migrate_receive failed")

    def VM_migrate_send(self, session, vmref, migrate_data, islive, vdi_map,
                        vif_map, options):
        raise fake.Failure("XenAPI VM.migrate_send failed")
# FIXME(sirp): XenAPITestBase is deprecated, all tests should be converted
# over to use XenAPITestBaseNoDB
class XenAPITestBase(test.TestCase):
    """Base test case that swaps in the fake XenAPI module (DB-backed)."""

    def setUp(self):
        super(XenAPITestBase, self).setUp()
        self.useFixture(test.ReplaceModule('XenAPI', fake))
        fake.reset()
class XenAPITestBaseNoDB(test.NoDBTestCase):
    """Base test case that swaps in the fake XenAPI module (no database)."""

    def setUp(self):
        super(XenAPITestBaseNoDB, self).setUp()
        self.useFixture(test.ReplaceModule('XenAPI', fake))
        fake.reset()
| |
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import json
import unittest
from webkitpy.common.net import layouttestresults_unittest
from webkitpy.common.host_mock import MockHost
from webkitpy.layout_tests.layout_package.json_results_generator import strip_json_wrapper
from webkitpy.layout_tests.port.base import Port
from webkitpy.tool.commands.rebaselineserver import TestConfig, RebaselineServer
from webkitpy.tool.servers import rebaselineserver
class RebaselineTestTest(unittest.TestCase):
    """Tests for rebaselineserver._rebaseline_test: update, new, move and
    failure scenarios for both text and image baselines."""

    def test_text_rebaseline_update(self):
        # An existing platform baseline is overwritten in place.
        self._assertRebaseline(
            test_files=(
                'fast/text-expected.txt',
                'platform/mac/fast/text-expected.txt',
            ),
            results_files=(
                'fast/text-actual.txt',
            ),
            test_name='fast/text.html',
            baseline_target='mac',
            baseline_move_to='none',
            expected_success=True,
            expected_log=[
                'Rebaselining fast/text...',
                '  Updating baselines for mac',
                '    Updated text-expected.txt',
            ])

    def test_text_rebaseline_new(self):
        # No platform baseline yet: a new one is created under mac.
        self._assertRebaseline(
            test_files=(
                'fast/text-expected.txt',
            ),
            results_files=(
                'fast/text-actual.txt',
            ),
            test_name='fast/text.html',
            baseline_target='mac',
            baseline_move_to='none',
            expected_success=True,
            expected_log=[
                'Rebaselining fast/text...',
                '  Updating baselines for mac',
                '    Updated text-expected.txt',
            ])

    def test_text_rebaseline_move_no_op_1(self):
        # The only platform baseline is for win, so nothing is moved.
        self._assertRebaseline(
            test_files=(
                'fast/text-expected.txt',
                'platform/win/fast/text-expected.txt',
            ),
            results_files=(
                'fast/text-actual.txt',
            ),
            test_name='fast/text.html',
            baseline_target='mac',
            baseline_move_to='mac-leopard',
            expected_success=True,
            expected_log=[
                'Rebaselining fast/text...',
                '  Updating baselines for mac',
                '    Updated text-expected.txt',
            ])

    def test_text_rebaseline_move_no_op_2(self):
        # Only a checksum exists for mac; there is no text baseline to move.
        self._assertRebaseline(
            test_files=(
                'fast/text-expected.txt',
                'platform/mac/fast/text-expected.checksum',
            ),
            results_files=(
                'fast/text-actual.txt',
            ),
            test_name='fast/text.html',
            baseline_target='mac',
            baseline_move_to='mac-leopard',
            expected_success=True,
            expected_log=[
                'Rebaselining fast/text...',
                '  Moving current mac baselines to mac-leopard',
                '    No current baselines to move',
                '  Updating baselines for mac',
                '    Updated text-expected.txt',
            ])

    def test_text_rebaseline_move(self):
        # The current mac baseline is relocated to mac-leopard first.
        self._assertRebaseline(
            test_files=(
                'fast/text-expected.txt',
                'platform/mac/fast/text-expected.txt',
            ),
            results_files=(
                'fast/text-actual.txt',
            ),
            test_name='fast/text.html',
            baseline_target='mac',
            baseline_move_to='mac-leopard',
            expected_success=True,
            expected_log=[
                'Rebaselining fast/text...',
                '  Moving current mac baselines to mac-leopard',
                '    Moved text-expected.txt',
                '  Updating baselines for mac',
                '    Updated text-expected.txt',
            ])

    def test_text_rebaseline_move_only_images(self):
        # Only the image baselines (png + checksum) are moved and updated.
        self._assertRebaseline(
            test_files=(
                'fast/image-expected.txt',
                'platform/mac/fast/image-expected.txt',
                'platform/mac/fast/image-expected.png',
                'platform/mac/fast/image-expected.checksum',
            ),
            results_files=(
                'fast/image-actual.png',
                'fast/image-actual.checksum',
            ),
            test_name='fast/image.html',
            baseline_target='mac',
            baseline_move_to='mac-leopard',
            expected_success=True,
            expected_log=[
                'Rebaselining fast/image...',
                '  Moving current mac baselines to mac-leopard',
                '    Moved image-expected.checksum',
                '    Moved image-expected.png',
                '  Updating baselines for mac',
                '    Updated image-expected.checksum',
                '    Updated image-expected.png',
            ])

    def test_text_rebaseline_move_already_exist(self):
        # mac-leopard already has a baseline: the move (and rebaseline)
        # must fail rather than clobber it.
        self._assertRebaseline(
            test_files=(
                'fast/text-expected.txt',
                'platform/mac-leopard/fast/text-expected.txt',
                'platform/mac/fast/text-expected.txt',
            ),
            results_files=(
                'fast/text-actual.txt',
            ),
            test_name='fast/text.html',
            baseline_target='mac',
            baseline_move_to='mac-leopard',
            expected_success=False,
            expected_log=[
                'Rebaselining fast/text...',
                '  Moving current mac baselines to mac-leopard',
                '    Already had baselines in mac-leopard, could not move existing mac ones',
            ])

    def test_image_rebaseline(self):
        # Image-only rebaseline: png and checksum updated, txt untouched.
        self._assertRebaseline(
            test_files=(
                'fast/image-expected.txt',
                'platform/mac/fast/image-expected.png',
                'platform/mac/fast/image-expected.checksum',
            ),
            results_files=(
                'fast/image-actual.png',
                'fast/image-actual.checksum',
            ),
            test_name='fast/image.html',
            baseline_target='mac',
            baseline_move_to='none',
            expected_success=True,
            expected_log=[
                'Rebaselining fast/image...',
                '  Updating baselines for mac',
                '    Updated image-expected.checksum',
                '    Updated image-expected.png',
            ])

    def test_gather_baselines(self):
        # _gather_baselines annotates failing tests with their state and
        # drops passing tests from the results dictionary.
        example_json = layouttestresults_unittest.LayoutTestResultsTest.example_full_results_json
        results_json = json.loads(strip_json_wrapper(example_json))
        server = RebaselineServer()
        server._test_config = get_test_config()
        server._gather_baselines(results_json)
        self.assertEqual(results_json['tests'][
            'svg/dynamic-updates/SVGFEDropShadowElement-dom-stdDeviation-attr.html']['state'], 'needs_rebaseline')
        self.assertNotIn('prototype-chocolate.html', results_json['tests'])

    def _assertRebaseline(self, test_files, results_files, test_name, baseline_target, baseline_move_to, expected_success, expected_log):
        # Helper: run _rebaseline_test against a fresh mock filesystem and
        # compare both the emitted log lines and the success flag.
        log = []
        test_config = get_test_config(test_files, results_files)
        success = rebaselineserver._rebaseline_test(
            test_name,
            baseline_target,
            baseline_move_to,
            test_config,
            log=lambda l: log.append(l))
        self.assertEqual(expected_log, log)
        self.assertEqual(expected_success, success)
class GetActualResultFilesTest(unittest.TestCase):
    """Tests for rebaselineserver._get_actual_result_files."""

    def test(self):
        test_config = get_test_config(result_files=(
            'fast/text-actual.txt',
            'fast2/text-actual.txt',
            'fast/text2-actual.txt',
            'fast/text-notactual.txt',
        ))
        # Only the '-actual' file for this exact test should match.
        # NOTE(review): assertItemsEqual is Python 2 only (renamed
        # assertCountEqual in Python 3).
        self.assertItemsEqual(
            ('text-actual.txt',),
            rebaselineserver._get_actual_result_files(
                'fast/text.html', test_config))
class GetBaselinesTest(unittest.TestCase):
    """Tests for rebaselineserver.get_test_baselines."""

    def test_no_baselines(self):
        self._assertBaselines(
            test_files=(),
            test_name='fast/missing.html',
            expected_baselines={})

    def test_text_baselines(self):
        self._assertBaselines(
            test_files=(
                'fast/text-expected.txt',
                'platform/mac/fast/text-expected.txt',
            ),
            test_name='fast/text.html',
            expected_baselines={
                'mac': {'.txt': True},
                'base': {'.txt': False},
            })

    def test_image_and_text_baselines(self):
        self._assertBaselines(
            test_files=(
                'fast/image-expected.txt',
                'platform/mac/fast/image-expected.png',
                'platform/mac/fast/image-expected.checksum',
                'platform/win/fast/image-expected.png',
                'platform/win/fast/image-expected.checksum',
            ),
            test_name='fast/image.html',
            expected_baselines={
                'base': {'.txt': True},
                'mac': {'.checksum': True, '.png': True},
                'win': {'.checksum': False, '.png': False},
            })

    def test_extra_baselines(self):
        # Baselines under an unknown platform directory are ignored.
        self._assertBaselines(
            test_files=(
                'fast/text-expected.txt',
                'platform/nosuchplatform/fast/text-expected.txt',
            ),
            test_name='fast/text.html',
            expected_baselines={'base': {'.txt': True}})

    def _assertBaselines(self, test_files, test_name, expected_baselines):
        # Helper: compare the computed platform -> {extension: is_current}
        # mapping for *test_name* against the expectation.
        actual_baselines = rebaselineserver.get_test_baselines(test_name, get_test_config(test_files))
        self.assertEqual(expected_baselines, actual_baselines)
def get_test_config(test_files=(), result_files=()):
    """Build a TestConfig backed by a MockHost populated with files.

    Args:
        test_files: iterable of paths, relative to the layout tests
            directory, created as empty files.
        result_files: iterable of paths, relative to the results
            directory, created as empty files.

    Returns:
        A TestConfig wired to a mac test port over the mock filesystem.
    """
    # NOTE: immutable tuple defaults replace the original mutable `[]`
    # defaults (shared-mutable-default pitfall); callers passing lists or
    # tuples behave exactly as before.
    host = MockHost()
    port = host.port_factory.get()
    layout_tests_directory = port.layout_tests_dir()
    results_directory = port.results_directory()

    # `path` (not `file`) avoids shadowing the builtin.
    for path in test_files:
        host.filesystem.write_binary_file(
            host.filesystem.join(layout_tests_directory, path), '')
    for path in result_files:
        host.filesystem.write_binary_file(
            host.filesystem.join(results_directory, path), '')

    class TestMacPort(Port):
        port_name = "mac"
        FALLBACK_PATHS = {'': ['mac']}

    return TestConfig(
        TestMacPort(host, 'mac'),
        layout_tests_directory,
        results_directory,
        ('mac', 'mac-leopard', 'win', 'linux'),
        host)
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import importlib
import numpy as np
from tensorflow.python.eager import backprop
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops.distributions import gamma as gamma_lib
from tensorflow.python.ops.distributions import kullback_leibler
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
def try_import(name):  # pylint: disable=invalid-name
  """Import module `name`, returning it, or None if it is unavailable."""
  try:
    return importlib.import_module(name)
  except ImportError as e:
    tf_logging.warning("Could not import %s: %s" % (name, str(e)))
    return None
# Optional SciPy modules: each is None when SciPy is not installed, in which
# case the tests below skip their numerical comparisons (see `if not stats:`).
special = try_import("scipy.special")
stats = try_import("scipy.stats")
@test_util.run_all_in_graph_and_eager_modes
class GammaTest(test.TestCase):
  """Tests for the Gamma distribution: shapes, pdf/cdf, moments, sampling, KL.

  Numerical expectations are checked against scipy.stats / scipy.special when
  SciPy is importable; otherwise those comparisons are silently skipped.
  """

  def testGammaShape(self):
    # Batch of 5 concentrations with a scalar rate -> batch shape (5,),
    # scalar event shape.
    alpha = constant_op.constant([3.0] * 5)
    beta = constant_op.constant(11.0)
    gamma = gamma_lib.Gamma(concentration=alpha, rate=beta)
    self.assertEqual(self.evaluate(gamma.batch_shape_tensor()), (5,))
    self.assertEqual(gamma.batch_shape, tensor_shape.TensorShape([5]))
    self.assertAllEqual(self.evaluate(gamma.event_shape_tensor()), [])
    self.assertEqual(gamma.event_shape, tensor_shape.TensorShape([]))
  def testGammaLogPDF(self):
    """log_prob/prob agree with scipy.gamma.logpdf (scipy's scale = 1/rate)."""
    batch_size = 6
    alpha = constant_op.constant([2.0] * batch_size)
    beta = constant_op.constant([3.0] * batch_size)
    alpha_v = 2.0
    beta_v = 3.0
    x = np.array([2.5, 2.5, 4.0, 0.1, 1.0, 2.0], dtype=np.float32)
    gamma = gamma_lib.Gamma(concentration=alpha, rate=beta)
    log_pdf = gamma.log_prob(x)
    self.assertEqual(log_pdf.get_shape(), (6,))
    pdf = gamma.prob(x)
    self.assertEqual(pdf.get_shape(), (6,))
    if not stats:
      return
    expected_log_pdf = stats.gamma.logpdf(x, alpha_v, scale=1 / beta_v)
    self.assertAllClose(self.evaluate(log_pdf), expected_log_pdf)
    self.assertAllClose(self.evaluate(pdf), np.exp(expected_log_pdf))
  def testGammaLogPDFBoundary(self):
    # When concentration = 1, we have an exponential distribution. Check that at
    # 0 we have finite log prob.
    rate = np.array([0.1, 0.5, 1., 2., 5., 10.], dtype=np.float32)
    gamma = gamma_lib.Gamma(concentration=1., rate=rate)
    log_pdf = gamma.log_prob(0.)
    self.assertAllClose(np.log(rate), self.evaluate(log_pdf))
  def testGammaLogPDFMultidimensional(self):
    """log_prob broadcasts a (6, 1) sample against (6, 2) parameters."""
    batch_size = 6
    alpha = constant_op.constant([[2.0, 4.0]] * batch_size)
    beta = constant_op.constant([[3.0, 4.0]] * batch_size)
    alpha_v = np.array([2.0, 4.0])
    beta_v = np.array([3.0, 4.0])
    x = np.array([[2.5, 2.5, 4.0, 0.1, 1.0, 2.0]], dtype=np.float32).T
    gamma = gamma_lib.Gamma(concentration=alpha, rate=beta)
    log_pdf = gamma.log_prob(x)
    log_pdf_values = self.evaluate(log_pdf)
    self.assertEqual(log_pdf.get_shape(), (6, 2))
    pdf = gamma.prob(x)
    pdf_values = self.evaluate(pdf)
    self.assertEqual(pdf.get_shape(), (6, 2))
    if not stats:
      return
    expected_log_pdf = stats.gamma.logpdf(x, alpha_v, scale=1 / beta_v)
    self.assertAllClose(log_pdf_values, expected_log_pdf)
    self.assertAllClose(pdf_values, np.exp(expected_log_pdf))
  def testGammaLogPDFMultidimensionalBroadcasting(self):
    """Same as above, but with a scalar rate broadcast over the batch."""
    batch_size = 6
    alpha = constant_op.constant([[2.0, 4.0]] * batch_size)
    beta = constant_op.constant(3.0)
    alpha_v = np.array([2.0, 4.0])
    beta_v = 3.0
    x = np.array([[2.5, 2.5, 4.0, 0.1, 1.0, 2.0]], dtype=np.float32).T
    gamma = gamma_lib.Gamma(concentration=alpha, rate=beta)
    log_pdf = gamma.log_prob(x)
    log_pdf_values = self.evaluate(log_pdf)
    self.assertEqual(log_pdf.get_shape(), (6, 2))
    pdf = gamma.prob(x)
    pdf_values = self.evaluate(pdf)
    self.assertEqual(pdf.get_shape(), (6, 2))
    if not stats:
      return
    expected_log_pdf = stats.gamma.logpdf(x, alpha_v, scale=1 / beta_v)
    self.assertAllClose(log_pdf_values, expected_log_pdf)
    self.assertAllClose(pdf_values, np.exp(expected_log_pdf))
  def testGammaCDF(self):
    """cdf agrees with scipy.gamma.cdf."""
    batch_size = 6
    alpha = constant_op.constant([2.0] * batch_size)
    beta = constant_op.constant([3.0] * batch_size)
    alpha_v = 2.0
    beta_v = 3.0
    x = np.array([2.5, 2.5, 4.0, 0.1, 1.0, 2.0], dtype=np.float32)
    gamma = gamma_lib.Gamma(concentration=alpha, rate=beta)
    cdf = gamma.cdf(x)
    self.assertEqual(cdf.get_shape(), (6,))
    if not stats:
      return
    expected_cdf = stats.gamma.cdf(x, alpha_v, scale=1 / beta_v)
    self.assertAllClose(self.evaluate(cdf), expected_cdf)
  def testGammaMean(self):
    alpha_v = np.array([1.0, 3.0, 2.5])
    beta_v = np.array([1.0, 4.0, 5.0])
    gamma = gamma_lib.Gamma(concentration=alpha_v, rate=beta_v)
    self.assertEqual(gamma.mean().get_shape(), (3,))
    if not stats:
      return
    expected_means = stats.gamma.mean(alpha_v, scale=1 / beta_v)
    self.assertAllClose(self.evaluate(gamma.mean()), expected_means)
  def testGammaModeAllowNanStatsIsFalseWorksWhenAllBatchMembersAreDefined(self):
    # Mode of Gamma is (concentration - 1) / rate, defined for
    # concentration >= 1, which holds for every batch member here.
    alpha_v = np.array([5.5, 3.0, 2.5])
    beta_v = np.array([1.0, 4.0, 5.0])
    gamma = gamma_lib.Gamma(concentration=alpha_v, rate=beta_v)
    expected_modes = (alpha_v - 1) / beta_v
    self.assertEqual(gamma.mode().get_shape(), (3,))
    self.assertAllClose(self.evaluate(gamma.mode()), expected_modes)
  def testGammaModeAllowNanStatsFalseRaisesForUndefinedBatchMembers(self):
    # Mode will not be defined for the first entry.
    alpha_v = np.array([0.5, 3.0, 2.5])
    beta_v = np.array([1.0, 4.0, 5.0])
    gamma = gamma_lib.Gamma(
        concentration=alpha_v, rate=beta_v, allow_nan_stats=False)
    with self.assertRaisesOpError("x < y"):
      self.evaluate(gamma.mode())
  def testGammaModeAllowNanStatsIsTrueReturnsNaNforUndefinedBatchMembers(self):
    # Mode will not be defined for the first entry.
    alpha_v = np.array([0.5, 3.0, 2.5])
    beta_v = np.array([1.0, 4.0, 5.0])
    gamma = gamma_lib.Gamma(
        concentration=alpha_v, rate=beta_v, allow_nan_stats=True)
    expected_modes = (alpha_v - 1) / beta_v
    expected_modes[0] = np.nan
    self.assertEqual(gamma.mode().get_shape(), (3,))
    self.assertAllClose(self.evaluate(gamma.mode()), expected_modes)
  def testGammaVariance(self):
    alpha_v = np.array([1.0, 3.0, 2.5])
    beta_v = np.array([1.0, 4.0, 5.0])
    gamma = gamma_lib.Gamma(concentration=alpha_v, rate=beta_v)
    self.assertEqual(gamma.variance().get_shape(), (3,))
    if not stats:
      return
    expected_variances = stats.gamma.var(alpha_v, scale=1 / beta_v)
    self.assertAllClose(self.evaluate(gamma.variance()), expected_variances)
  def testGammaStd(self):
    alpha_v = np.array([1.0, 3.0, 2.5])
    beta_v = np.array([1.0, 4.0, 5.0])
    gamma = gamma_lib.Gamma(concentration=alpha_v, rate=beta_v)
    self.assertEqual(gamma.stddev().get_shape(), (3,))
    if not stats:
      return
    expected_stddev = stats.gamma.std(alpha_v, scale=1. / beta_v)
    self.assertAllClose(self.evaluate(gamma.stddev()), expected_stddev)
  def testGammaEntropy(self):
    alpha_v = np.array([1.0, 3.0, 2.5])
    beta_v = np.array([1.0, 4.0, 5.0])
    gamma = gamma_lib.Gamma(concentration=alpha_v, rate=beta_v)
    self.assertEqual(gamma.entropy().get_shape(), (3,))
    if not stats:
      return
    expected_entropy = stats.gamma.entropy(alpha_v, scale=1 / beta_v)
    self.assertAllClose(self.evaluate(gamma.entropy()), expected_entropy)
  def testGammaSampleSmallAlpha(self):
    """Sampling with tiny concentration: check moments and K-S fit."""
    alpha_v = 0.05
    beta_v = 1.0
    alpha = constant_op.constant(alpha_v)
    beta = constant_op.constant(beta_v)
    n = 100000
    gamma = gamma_lib.Gamma(concentration=alpha, rate=beta)
    samples = gamma.sample(n, seed=137)
    sample_values = self.evaluate(samples)
    self.assertEqual(samples.get_shape(), (n,))
    self.assertEqual(sample_values.shape, (n,))
    self.assertTrue(self._kstest(alpha_v, beta_v, sample_values))
    if not stats:
      return
    self.assertAllClose(
        sample_values.mean(),
        stats.gamma.mean(alpha_v, scale=1 / beta_v),
        atol=.01)
    self.assertAllClose(
        sample_values.var(),
        stats.gamma.var(alpha_v, scale=1 / beta_v),
        atol=.15)
  def testGammaSample(self):
    alpha_v = 4.0
    beta_v = 3.0
    alpha = constant_op.constant(alpha_v)
    beta = constant_op.constant(beta_v)
    n = 100000
    gamma = gamma_lib.Gamma(concentration=alpha, rate=beta)
    samples = gamma.sample(n, seed=137)
    sample_values = self.evaluate(samples)
    self.assertEqual(samples.get_shape(), (n,))
    self.assertEqual(sample_values.shape, (n,))
    self.assertTrue(self._kstest(alpha_v, beta_v, sample_values))
    if not stats:
      return
    self.assertAllClose(
        sample_values.mean(),
        stats.gamma.mean(alpha_v, scale=1 / beta_v),
        atol=.01)
    self.assertAllClose(
        sample_values.var(),
        stats.gamma.var(alpha_v, scale=1 / beta_v),
        atol=.15)
  def testGammaFullyReparameterized(self):
    # Gradients of samples w.r.t. both parameters must exist
    # (implicit reparameterization).
    alpha = constant_op.constant(4.0)
    beta = constant_op.constant(3.0)
    with backprop.GradientTape() as tape:
      tape.watch(alpha)
      tape.watch(beta)
      gamma = gamma_lib.Gamma(concentration=alpha, rate=beta)
      samples = gamma.sample(100)
    grad_alpha, grad_beta = tape.gradient(samples, [alpha, beta])
    self.assertIsNotNone(grad_alpha)
    self.assertIsNotNone(grad_beta)
  def testGammaSampleMultiDimensional(self):
    """Sampling with broadcast (1, 100) x (10, 1) parameters."""
    alpha_v = np.array([np.arange(1, 101, dtype=np.float32)])  # 1 x 100
    beta_v = np.array([np.arange(1, 11, dtype=np.float32)]).T  # 10 x 1
    gamma = gamma_lib.Gamma(concentration=alpha_v, rate=beta_v)
    n = 10000
    samples = gamma.sample(n, seed=137)
    sample_values = self.evaluate(samples)
    self.assertEqual(samples.get_shape(), (n, 10, 100))
    self.assertEqual(sample_values.shape, (n, 10, 100))
    zeros = np.zeros_like(alpha_v + beta_v)  # 10 x 100
    alpha_bc = alpha_v + zeros
    beta_bc = beta_v + zeros
    if not stats:
      return
    self.assertAllClose(
        sample_values.mean(axis=0),
        stats.gamma.mean(alpha_bc, scale=1 / beta_bc),
        atol=0.,
        rtol=.05)
    self.assertAllClose(
        sample_values.var(axis=0),
        stats.gamma.var(alpha_bc, scale=1 / beta_bc),
        atol=10.0,
        rtol=0.)
    # Per-cell goodness-of-fit: allow up to 3% of (alpha, beta) pairs to
    # fail the K-S test.
    fails = 0
    trials = 0
    for ai, a in enumerate(np.reshape(alpha_v, [-1])):
      for bi, b in enumerate(np.reshape(beta_v, [-1])):
        s = sample_values[:, bi, ai]
        trials += 1
        fails += 0 if self._kstest(a, b, s) else 1
    self.assertLess(fails, trials * 0.03)
  def _kstest(self, alpha, beta, samples):
    """Return True when samples pass a Kolmogorov-Smirnov fit test."""
    # Uses the Kolmogorov-Smirnov test for goodness of fit.
    if not stats:
      return True  # If we can't test, return that the test passes.
    ks, _ = stats.kstest(samples, stats.gamma(alpha, scale=1 / beta).cdf)
    # Return True when the test passes.
    return ks < 0.02
  def testGammaPdfOfSampleMultiDims(self):
    gamma = gamma_lib.Gamma(concentration=[7., 11.], rate=[[5.], [6.]])
    num = 50000
    samples = gamma.sample(num, seed=137)
    pdfs = gamma.prob(samples)
    sample_vals, pdf_vals = self.evaluate([samples, pdfs])
    self.assertEqual(samples.get_shape(), (num, 2, 2))
    self.assertEqual(pdfs.get_shape(), (num, 2, 2))
    self._assertIntegral(sample_vals[:, 0, 0], pdf_vals[:, 0, 0], err=0.02)
    self._assertIntegral(sample_vals[:, 0, 1], pdf_vals[:, 0, 1], err=0.02)
    self._assertIntegral(sample_vals[:, 1, 0], pdf_vals[:, 1, 0], err=0.02)
    self._assertIntegral(sample_vals[:, 1, 1], pdf_vals[:, 1, 1], err=0.02)
    if not stats:
      return
    self.assertAllClose(
        stats.gamma.mean([[7., 11.], [7., 11.]],
                         scale=1 / np.array([[5., 5.], [6., 6.]])),
        sample_vals.mean(axis=0),
        atol=.1)
    self.assertAllClose(
        stats.gamma.var([[7., 11.], [7., 11.]],
                        scale=1 / np.array([[5., 5.], [6., 6.]])),
        sample_vals.var(axis=0),
        atol=.1)
  def _assertIntegral(self, sample_vals, pdf_vals, err=1e-3):
    """Check that the pdf evaluated at sorted samples integrates to ~1.

    Uses trapezoidal integration over the empirical sample support.
    """
    s_p = zip(sample_vals, pdf_vals)
    prev = (0, 0)
    total = 0
    for k in sorted(s_p, key=lambda x: x[0]):
      pair_pdf = (k[1] + prev[1]) / 2
      total += (k[0] - prev[0]) * pair_pdf
      prev = k
    self.assertNear(1., total, err=err)
  def testGammaNonPositiveInitializationParamsRaises(self):
    # With validate_args=True, non-positive concentration or rate must fail.
    alpha_v = constant_op.constant(0.0, name="alpha")
    beta_v = constant_op.constant(1.0, name="beta")
    with self.assertRaisesOpError("x > 0"):
      gamma = gamma_lib.Gamma(
          concentration=alpha_v, rate=beta_v, validate_args=True)
      self.evaluate(gamma.mean())
    alpha_v = constant_op.constant(1.0, name="alpha")
    beta_v = constant_op.constant(0.0, name="beta")
    with self.assertRaisesOpError("x > 0"):
      gamma = gamma_lib.Gamma(
          concentration=alpha_v, rate=beta_v, validate_args=True)
      self.evaluate(gamma.mean())
  def testGammaWithSoftplusConcentrationRate(self):
    # The softplus variant must transform both parameters with softplus,
    # making negative inputs valid.
    alpha_v = constant_op.constant([0.0, -2.1], name="alpha")
    beta_v = constant_op.constant([1.0, -3.6], name="beta")
    gamma = gamma_lib.GammaWithSoftplusConcentrationRate(
        concentration=alpha_v, rate=beta_v)
    self.assertAllEqual(
        self.evaluate(nn_ops.softplus(alpha_v)),
        self.evaluate(gamma.concentration))
    self.assertAllEqual(
        self.evaluate(nn_ops.softplus(beta_v)), self.evaluate(gamma.rate))
  def testGammaGammaKL(self):
    """KL(g0 || g1) matches both a Monte-Carlo estimate and the closed form."""
    alpha0 = np.array([3.])
    beta0 = np.array([1., 2., 3., 1.5, 2.5, 3.5])
    alpha1 = np.array([0.4])
    beta1 = np.array([0.5, 1., 1.5, 2., 2.5, 3.])
    # Build graph.
    g0 = gamma_lib.Gamma(concentration=alpha0, rate=beta0)
    g1 = gamma_lib.Gamma(concentration=alpha1, rate=beta1)
    x = g0.sample(int(1e4), seed=0)
    kl_sample = math_ops.reduce_mean(g0.log_prob(x) - g1.log_prob(x), 0)
    kl_actual = kullback_leibler.kl_divergence(g0, g1)
    # Execute graph.
    [kl_sample_, kl_actual_] = self.evaluate([kl_sample, kl_actual])
    self.assertEqual(beta0.shape, kl_actual.get_shape())
    if not special:
      return
    # Closed-form KL divergence between two Gamma distributions.
    kl_expected = ((alpha0 - alpha1) * special.digamma(alpha0)
                   + special.gammaln(alpha1)
                   - special.gammaln(alpha0)
                   + alpha1 * np.log(beta0)
                   - alpha1 * np.log(beta1)
                   + alpha0 * (beta1 / beta0 - 1.))
    self.assertAllClose(kl_expected, kl_actual_, atol=0., rtol=1e-6)
    self.assertAllClose(kl_sample_, kl_actual_, atol=0., rtol=1e-1)
# Standard TensorFlow test entry point.
if __name__ == "__main__":
  test.main()
| |
import os
import m5
from m5.objects import *
m5.util.addToPath('../common')
# Root of the SPEC CPU2006 installation; overridable via the M5_CPU2006
# environment variable.
spec_dist = os.environ.get('M5_CPU2006', '/dist/m5/cpu2006')
binary_dir = spec_dist
data_dir = binary_dir
# Next PID handed out to a benchmark Process (incremented by the
# benchmark factory functions that use it).
current_pid = 100
# 400.perlbench
def perlbench():
    """Return a Process for 400.perlbench (checkspam ref workload)."""
    # Claim a unique pid and advance the counter, mirroring bzip2().
    # Previously this read current_pid without incrementing it, so
    # perlbench and bzip2 would both be created with pid 100.
    global current_pid
    process = Process(pid=current_pid)
    current_pid = current_pid + 1
    process.cwd = binary_dir + '400.perlbench/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
    process.executable = process.cwd + 'perlbench_base.amd64-m64-gcc42-nn'
    process.cmd = [process.executable] + ['-I./lib', 'checkspam.pl', '2500', '5', '25', '11', '150', '1', '1', '1', '1']
    return process
#401.bzip2
def bzip2():
    """Return a Process for 401.bzip2 (input.program ref workload)."""
    global current_pid
    pid = current_pid
    current_pid = pid + 1
    run_dir = binary_dir + '401.bzip2/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
    process = Process(pid=pid)
    process.cwd = run_dir
    process.executable = run_dir + 'bzip2_base.amd64-m64-gcc42-nn'
    process.cmd = [process.executable, run_dir + 'input.program', '280']
    return process
#403.gcc
def gcc():
    """Return a Process for 403.gcc compiling 166.i into 166.s."""
    run_dir = binary_dir + '403.gcc/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
    process = Process()
    process.cwd = run_dir
    process.executable = run_dir + 'gcc_base.amd64-m64-gcc42-nn'
    process.cmd = [process.executable, run_dir + '166.i', '-o', run_dir + '166.s']
    return process
#410.bwaves
def bwaves():
    """Return a Process for 410.bwaves (takes no command-line arguments)."""
    run_dir = binary_dir + '410.bwaves/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
    process = Process()
    process.cwd = run_dir
    process.executable = run_dir + 'bwaves_base.amd64-m64-gcc42-nn'
    process.cmd = [process.executable]
    return process
#416.gamess
def gamess():
    """Return a Process for 416.gamess; reads cytosine.2.config on stdin."""
    # Local renamed from the original's misspelled 'prorcess'.
    run_dir = binary_dir + '416.gamess/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
    process = Process()
    process.cwd = run_dir
    process.executable = run_dir + 'gamess_base.amd64-m64-gcc42-nn'
    process.cmd = [process.executable]
    process.input = run_dir + 'cytosine.2.config'
    return process
#429.mcf
def mcf():
    """Return a Process for 429.mcf (inp.in ref workload)."""
    run_dir = binary_dir + '429.mcf/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
    process = Process()
    process.cwd = run_dir
    process.executable = run_dir + 'mcf_base.amd64-m64-gcc42-nn'
    process.cmd = [process.executable, run_dir + 'inp.in']
    return process
#433.milc
def milc():
    """Return a Process for 433.milc; reads su3imp.in on stdin."""
    run_dir = binary_dir + '433.milc/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
    process = Process()
    process.cwd = run_dir
    process.executable = run_dir + 'milc_base.amd64-m64-gcc42-nn'
    process.cmd = [process.executable]
    process.input = run_dir + 'su3imp.in'
    return process
#434.zeusmp
def zeusmp():
    """Return a Process for 434.zeusmp (takes no command-line arguments)."""
    run_dir = binary_dir + '434.zeusmp/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
    process = Process()
    process.cwd = run_dir
    process.executable = run_dir + 'zeusmp_base.amd64-m64-gcc42-nn'
    process.cmd = [process.executable]
    return process
#435.gromacs
def gromacs():
    """Return a Process for 435.gromacs (gromacs.tpr ref workload)."""
    run_dir = binary_dir + '435.gromacs/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
    process = Process()
    process.cwd = run_dir
    process.executable = run_dir + 'gromacs_base.amd64-m64-gcc42-nn'
    process.cmd = [process.executable,
                   '-silent', '-deffnm', run_dir + 'gromacs.tpr', '-nice', '0']
    return process
#436.cactusADM
def cactusADM():
    """Return a Process for 436.cactusADM (benchADM.par ref workload)."""
    run_dir = binary_dir + '436.cactusADM/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
    process = Process()
    process.cwd = run_dir
    process.executable = run_dir + 'cactusADM_base.amd64-m64-gcc42-nn'
    process.cmd = [process.executable, run_dir + 'benchADM.par']
    return process
# 437.leslie3d
def leslie3d():
    """Return a Process for 437.leslie3d; reads leslie3d.in on stdin."""
    run_dir = binary_dir + '437.leslie3d/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
    process = Process()
    process.cwd = run_dir
    process.executable = run_dir + 'leslie3d_base.amd64-m64-gcc42-nn'
    process.cmd = [process.executable]
    process.input = run_dir + 'leslie3d.in'
    return process
#444.namd
def namd():
    """Return a Process for 444.namd (namd.input ref workload)."""
    # 'input_file' avoids shadowing the builtin input().
    run_dir = binary_dir + '444.namd/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
    input_file = run_dir + 'namd.input'
    process = Process()
    process.cwd = run_dir
    process.executable = run_dir + 'namd_base.amd64-m64-gcc42-nn'
    process.cmd = [process.executable,
                   '--input', input_file, '--iterations', '38',
                   '--output', 'namd.out']
    return process
#445.gobmk
def gobmk():
    """Return a Process for 445.gobmk in GTP mode; reads nngs.tst on stdin."""
    run_dir = binary_dir + '445.gobmk/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
    process = Process()
    process.cwd = run_dir
    process.executable = run_dir + 'gobmk_base.amd64-m64-gcc42-nn'
    process.cmd = [process.executable, '--quiet', '--mode', 'gtp']
    process.input = run_dir + 'nngs.tst'
    return process
# 447.dealII TODO
#450.soplex
def soplex():
    """Return a Process for 450.soplex (ref.mps workload)."""
    run_dir = binary_dir + '450.soplex/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
    process = Process()
    process.cwd = run_dir
    process.executable = run_dir + 'soplex_base.amd64-m64-gcc42-nn'
    process.cmd = [process.executable, '-m3500', run_dir + 'ref.mps']
    return process
#453.povray
def povray():
    """Return a Process for 453.povray (SPEC-benchmark-ref.ini workload)."""
    run_dir = binary_dir + '453.povray/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
    process = Process()
    process.cwd = run_dir
    process.executable = run_dir + 'povray_base.amd64-m64-gcc42-nn'
    process.cmd = [process.executable, run_dir + 'SPEC-benchmark-ref.ini']
    return process
#454.calculix
def calculix():
    """Return a Process for 454.calculix (hyperviscoplastic input deck)."""
    run_dir = binary_dir + '454.calculix/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
    process = Process()
    process.cwd = run_dir
    process.executable = run_dir + 'calculix_base.amd64-m64-gcc42-nn'
    process.cmd = [process.executable, '-i', run_dir + 'hyperviscoplastic']
    return process
#456.hmmer
def hmmer():
    """Return a Process for 456.hmmer (retro.hmm ref workload)."""
    run_dir = binary_dir + '456.hmmer/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
    process = Process()
    process.cwd = run_dir
    process.executable = run_dir + 'hmmer_base.amd64-m64-gcc42-nn'
    process.cmd = [process.executable,
                   '--fixed', '0', '--mean', '500', '--num', '500000',
                   '--sd', '350', '--seed', '0', run_dir + 'retro.hmm']
    return process
#458.sjeng
def sjeng():
    """Return a Process for 458.sjeng (ref.txt workload)."""
    run_dir = binary_dir + '458.sjeng/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
    process = Process()
    process.cwd = run_dir
    process.executable = run_dir + 'sjeng_base.amd64-m64-gcc42-nn'
    process.cmd = [process.executable, run_dir + 'ref.txt']
    return process
#459.GemsFDTD
def GemsFDTD():
    """Return a Process for 459.GemsFDTD (takes no command-line arguments)."""
    run_dir = binary_dir + '459.GemsFDTD/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
    process = Process()
    process.cwd = run_dir
    process.executable = run_dir + 'GemsFDTD_base.amd64-m64-gcc42-nn'
    process.cmd = [process.executable]
    return process
#462.libquantum
def libquantum():
    """Return a Process for 462.libquantum (ref workload: 1397 8)."""
    process = Process()
    process.cwd = binary_dir + '462.libquantum/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
    process.executable = process.cwd + 'libquantum_base.amd64-m64-gcc42-nn'
    # Bug fix: this was previously `[process.executable],'1397','8'`, which
    # builds a *tuple* ([exe], '1397', '8') instead of a flat argv list, so
    # the benchmark never received its arguments.
    process.cmd = [process.executable] + ['1397', '8']
    return process
#464.h264ref
def h264ref():
    """Return a Process for 464.h264ref (foreman baseline encoder config)."""
    run_dir = binary_dir + '464.h264ref/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
    process = Process()
    process.cwd = run_dir
    process.executable = run_dir + 'h264ref_base.amd64-m64-gcc42-nn'
    process.cmd = [process.executable,
                   '-d', run_dir + 'foreman_ref_encoder_baseline.cfg']
    return process
#470.lbm
def lbm():
    """Return a Process for 470.lbm (100_100_130_ldc.of ref workload)."""
    run_dir = binary_dir + '470.lbm/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
    process = Process()
    process.cwd = run_dir
    process.executable = run_dir + 'lbm_base.amd64-m64-gcc42-nn'
    process.cmd = [process.executable,
                   '3000', 'reference.dat', '0', '0',
                   run_dir + '100_100_130_ldc.of']
    return process
#471.omnetpp
def omnetpp():
    """Return a Process for 471.omnetpp (omnetpp.ini workload)."""
    run_dir = binary_dir + '471.omnetpp/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
    process = Process()
    process.cwd = run_dir
    process.executable = run_dir + 'omnetpp_base.amd64-m64-gcc42-nn'
    process.cmd = [process.executable, run_dir + 'omnetpp.ini']
    return process
#473.astar
def astar():
    """Return a Process for 473.astar (BigLakes2048.cfg workload)."""
    run_dir = binary_dir + '473.astar/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
    process = Process()
    process.cwd = run_dir
    process.executable = run_dir + 'astar_base.amd64-m64-gcc42-nn'
    process.cmd = [process.executable, 'BigLakes2048.cfg']
    return process
#481.wrf
def wrf():
    """Return a Process for 481.wrf (namelist.input workload)."""
    run_dir = binary_dir + '481.wrf/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
    process = Process()
    process.cwd = run_dir
    process.executable = run_dir + 'wrf_base.amd64-m64-gcc42-nn'
    process.cmd = [process.executable, 'namelist.input']
    return process
#482.sphinx3
def sphinx3():
    """Return a Process for 482.sphinx3 (livepretend ctlfile workload)."""
    run_dir = binary_dir + '482.sphinx3/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
    process = Process()
    process.cwd = run_dir
    process.executable = run_dir + 'sphinx_livepretend_base.amd64-m64-gcc42-nn'
    process.cmd = [process.executable, 'ctlfile', '.', 'args.an4']
    return process
#483.xalancbmk TODO
#998.specrand
def specrand_i():
    """Return a Process for 998.specrand (integer rate check)."""
    run_dir = binary_dir + '998.specrand/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
    process = Process()
    process.cwd = run_dir
    process.executable = run_dir + 'specrand_base.amd64-m64-gcc42-nn'
    process.cmd = [process.executable, '1255432124', '234923']
    return process
#999.specrand
def specrand_f():
    """Return a Process for 999.specrand (floating-point rate check)."""
    run_dir = binary_dir + '999.specrand/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
    process = Process()
    process.cwd = run_dir
    process.executable = run_dir + 'specrand_base.amd64-m64-gcc42-nn'
    process.cmd = [process.executable, '1255432124', '234923']
    return process
| |
# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2014 TrilioData, Inc
# Copyright (c) 2015 EMC Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The backups api."""
from oslo_log import log as logging
import webob
from webob import exc
from cinder.api import common
from cinder.api import extensions
from cinder.api.openstack import wsgi
from cinder.api.views import backups as backup_views
from cinder import backup as backupAPI
from cinder import exception
from cinder.i18n import _, _LI
from cinder import utils
LOG = logging.getLogger(__name__)
class BackupsController(wsgi.Controller):
    """The Backups API controller for the OpenStack API.

    Maps backup-domain exceptions onto HTTP errors: BackupNotFound -> 404,
    InvalidBackup/InvalidVolume/InvalidInput -> 400, ServiceNotFound -> 500.
    """
    _view_builder_class = backup_views.ViewBuilder
    def __init__(self):
        # Handle to the backup service API used by every action below.
        self.backup_api = backupAPI.API()
        super(BackupsController, self).__init__()
    def show(self, req, id):
        """Return data about the given backup."""
        LOG.debug('show called for member %s', id)
        context = req.environ['cinder.context']
        try:
            backup = self.backup_api.get(context, backup_id=id)
            # Cache the DB object on the request for later middleware use.
            req.cache_db_backup(backup)
        except exception.BackupNotFound as error:
            raise exc.HTTPNotFound(explanation=error.msg)
        return self._view_builder.detail(req, backup)
    def delete(self, req, id):
        """Delete a backup.

        Returns 202 (Accepted): deletion proceeds asynchronously.
        """
        LOG.debug('Delete called for member %s.', id)
        context = req.environ['cinder.context']
        LOG.info(_LI('Delete backup with id: %s'), id, context=context)
        try:
            backup = self.backup_api.get(context, id)
            self.backup_api.delete(context, backup)
        except exception.BackupNotFound as error:
            raise exc.HTTPNotFound(explanation=error.msg)
        except exception.InvalidBackup as error:
            raise exc.HTTPBadRequest(explanation=error.msg)
        return webob.Response(status_int=202)
    def index(self, req):
        """Returns a summary list of backups."""
        return self._get_backups(req, is_detail=False)
    def detail(self, req):
        """Returns a detailed list of backups."""
        return self._get_backups(req, is_detail=True)
    @staticmethod
    def _get_backup_filter_options():
        """Return volume search options allowed by non-admin."""
        return ('name', 'status', 'volume_id')
    def _get_backups(self, req, is_detail):
        """Returns a list of backups, transformed through view builder."""
        context = req.environ['cinder.context']
        filters = req.params.copy()
        # Pagination/sort markers are popped out of the filter dict.
        marker, limit, offset = common.get_pagination_params(filters)
        sort_keys, sort_dirs = common.get_sort_params(filters)
        # Non-admins may only filter on the whitelisted options above.
        utils.remove_invalid_filter_options(context,
                                            filters,
                                            self._get_backup_filter_options())
        if 'name' in filters:
            # The API exposes 'name' but the DB column is 'display_name'.
            filters['display_name'] = filters['name']
            del filters['name']
        backups = self.backup_api.get_all(context, search_opts=filters,
                                          marker=marker,
                                          limit=limit,
                                          offset=offset,
                                          sort_keys=sort_keys,
                                          sort_dirs=sort_dirs,
                                          )
        req.cache_db_backups(backups.objects)
        if is_detail:
            backups = self._view_builder.detail_list(req, backups.objects)
        else:
            backups = self._view_builder.summary_list(req, backups.objects)
        return backups
    # TODO(frankm): Add some checks here including
    # - whether requested volume_id exists so we can return some errors
    #   immediately
    # - maybe also do validation of swift container name
    @wsgi.response(202)
    def create(self, req, body):
        """Create a new backup.

        Expects body['backup'] with a required 'volume_id' and optional
        container/name/description/incremental/force/snapshot_id fields.
        """
        LOG.debug('Creating new backup %s', body)
        self.assert_valid_body(body, 'backup')
        context = req.environ['cinder.context']
        backup = body['backup']
        try:
            volume_id = backup['volume_id']
        except KeyError:
            # Missing volume_id is a malformed request, not a 404.
            msg = _("Incorrect request body format")
            raise exc.HTTPBadRequest(explanation=msg)
        container = backup.get('container', None)
        if container:
            utils.check_string_length(container, 'Backup container',
                                      min_length=0, max_length=255)
        self.validate_name_and_description(backup)
        name = backup.get('name', None)
        description = backup.get('description', None)
        incremental = backup.get('incremental', False)
        force = backup.get('force', False)
        snapshot_id = backup.get('snapshot_id', None)
        LOG.info(_LI("Creating backup of volume %(volume_id)s in container"
                     " %(container)s"),
                 {'volume_id': volume_id, 'container': container},
                 context=context)
        try:
            new_backup = self.backup_api.create(context, name, description,
                                                volume_id, container,
                                                incremental, None, force,
                                                snapshot_id)
        except (exception.InvalidVolume,
                exception.InvalidSnapshot) as error:
            raise exc.HTTPBadRequest(explanation=error.msg)
        except (exception.VolumeNotFound,
                exception.SnapshotNotFound) as error:
            raise exc.HTTPNotFound(explanation=error.msg)
        except exception.ServiceNotFound as error:
            raise exc.HTTPInternalServerError(explanation=error.msg)
        retval = self._view_builder.summary(req, dict(new_backup))
        return retval
    @wsgi.response(202)
    def restore(self, req, id, body):
        """Restore an existing backup to a volume.

        Expects body['restore'] with optional 'volume_id' and 'name'.
        """
        LOG.debug('Restoring backup %(backup_id)s (%(body)s)',
                  {'backup_id': id, 'body': body})
        self.assert_valid_body(body, 'restore')
        context = req.environ['cinder.context']
        restore = body['restore']
        volume_id = restore.get('volume_id', None)
        name = restore.get('name', None)
        LOG.info(_LI("Restoring backup %(backup_id)s to volume %(volume_id)s"),
                 {'backup_id': id, 'volume_id': volume_id},
                 context=context)
        try:
            new_restore = self.backup_api.restore(context,
                                                  backup_id=id,
                                                  volume_id=volume_id,
                                                  name=name)
        except exception.InvalidInput as error:
            raise exc.HTTPBadRequest(explanation=error.msg)
        except exception.InvalidVolume as error:
            raise exc.HTTPBadRequest(explanation=error.msg)
        except exception.InvalidBackup as error:
            raise exc.HTTPBadRequest(explanation=error.msg)
        except exception.BackupNotFound as error:
            raise exc.HTTPNotFound(explanation=error.msg)
        except exception.VolumeNotFound as error:
            raise exc.HTTPNotFound(explanation=error.msg)
        except exception.VolumeSizeExceedsAvailableQuota as error:
            # 413 with Retry-After so clients may retry once quota frees up.
            raise exc.HTTPRequestEntityTooLarge(
                explanation=error.msg, headers={'Retry-After': '0'})
        except exception.VolumeLimitExceeded as error:
            raise exc.HTTPRequestEntityTooLarge(
                explanation=error.msg, headers={'Retry-After': '0'})
        retval = self._view_builder.restore_summary(
            req, dict(new_restore))
        return retval
    @wsgi.response(200)
    def export_record(self, req, id):
        """Export a backup record (service name + opaque url) for transfer."""
        LOG.debug('export record called for member %s.', id)
        context = req.environ['cinder.context']
        try:
            backup_info = self.backup_api.export_record(context, id)
        except exception.BackupNotFound as error:
            raise exc.HTTPNotFound(explanation=error.msg)
        except exception.InvalidBackup as error:
            raise exc.HTTPBadRequest(explanation=error.msg)
        retval = self._view_builder.export_summary(
            req, dict(backup_info))
        LOG.debug('export record output: %s.', retval)
        return retval
    @wsgi.response(201)
    def import_record(self, req, body):
        """Import a backup from a previously exported record.

        Expects body['backup-record'] with required 'backup_service' and
        'backup_url' fields.
        """
        LOG.debug('Importing record from %s.', body)
        self.assert_valid_body(body, 'backup-record')
        context = req.environ['cinder.context']
        import_data = body['backup-record']
        # Verify that body elements are provided
        try:
            backup_service = import_data['backup_service']
            backup_url = import_data['backup_url']
        except KeyError:
            msg = _("Incorrect request body format.")
            raise exc.HTTPBadRequest(explanation=msg)
        LOG.debug('Importing backup using %(service)s and url %(url)s.',
                  {'service': backup_service, 'url': backup_url})
        try:
            new_backup = self.backup_api.import_record(context,
                                                       backup_service,
                                                       backup_url)
        except exception.BackupNotFound as error:
            raise exc.HTTPNotFound(explanation=error.msg)
        except exception.InvalidBackup as error:
            raise exc.HTTPBadRequest(explanation=error.msg)
        except exception.ServiceNotFound as error:
            raise exc.HTTPInternalServerError(explanation=error.msg)
        retval = self._view_builder.summary(req, dict(new_backup))
        LOG.debug('import record output: %s.', retval)
        return retval
class Backups(extensions.ExtensionDescriptor):
    """Backups support."""

    name = 'Backups'
    alias = 'backups'
    updated = '2012-12-12T00:00:00+00:00'

    def get_resources(self):
        """Register the backups resource with its collection/member actions."""
        resource = extensions.ResourceExtension(
            Backups.alias, BackupsController(),
            collection_actions={'detail': 'GET', 'import_record': 'POST'},
            member_actions={'restore': 'POST', 'export_record': 'GET',
                            'action': 'POST'})
        return [resource]
| |
# Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import glob
import os
import subprocess
import sys
from contextlib import contextmanager
from dataclasses import dataclass
from typing import Any, Iterator, List, Mapping, Union
import pytest
from pants.base.build_environment import get_buildroot
from pants.base.exiter import PANTS_SUCCEEDED_EXIT_CODE
from pants.option.config import TomlSerializer
from pants.option.options_bootstrapper import OptionsBootstrapper
from pants.pantsd.pants_daemon_client import PantsDaemonClient
from pants.testutil._process_handler import SubprocessProcessHandler
from pants.util.contextutil import temporary_dir
from pants.util.dirutil import fast_relpath, safe_file_dump, safe_mkdir, safe_open
from pants.util.osutil import Pid
from pants.util.strutil import ensure_binary
# NB: If `shell=True`, it's a single `str`.
Command = Union[str, List[str]]
@dataclass(frozen=True)
class PantsResult:
    """Outcome of a completed pants invocation: command, streams, exit code."""

    command: Command
    exit_code: int
    stdout: str
    stderr: str
    workdir: str
    pid: Pid

    def _format_unexpected_error_code_msg(self, msg: str | None) -> str:
        # Tab-indent every captured line so multi-line output reads as a block.
        def tabbed(text: str) -> str:
            return "\n\t".join(text.splitlines())

        parts = []
        if msg:
            parts.append(msg)
        parts.append(" ".join(self.command))
        parts.append(f"exit_code: {self.exit_code}")
        parts.append(f"stdout:\n\t{tabbed(self.stdout)}")
        parts.append(f"stderr:\n\t{tabbed(self.stderr)}")
        return "\n".join(parts)

    def assert_success(self, msg: str | None = None) -> None:
        """Fail with a detailed message unless the run exited with code 0."""
        assert self.exit_code == 0, self._format_unexpected_error_code_msg(msg)

    def assert_failure(self, msg: str | None = None) -> None:
        """Fail with a detailed message unless the run exited non-zero."""
        assert self.exit_code != 0, self._format_unexpected_error_code_msg(msg)
@dataclass(frozen=True)
class PantsJoinHandle:
    """Handle to an in-flight pants subprocess; join() collects its result."""

    command: Command
    process: subprocess.Popen
    workdir: str

    def join(self, stdin_data: bytes | str | None = None, tee_output: bool = False) -> PantsResult:
        """Wait for the pants process to complete, and return a PantsResult for it.

        Args:
            stdin_data: optional data fed to the child's stdin; str input is
                encoded to bytes first.
            tee_output: when True, stdout/stderr are streamed to the test's
                own streams while also being captured.
        """
        communicate_fn = self.process.communicate
        if tee_output:
            # TODO: MyPy complains that SubprocessProcessHandler.communicate_teeing_stdout_and_stderr does
            # not have the same type signature as subprocess.Popen.communicate_teeing_stdout_and_stderr.
            # It's possibly not worth trying to fix this because the type stubs for subprocess.Popen are
            # very complex and also not very precise, given how many different configurations Popen can
            # take.
            communicate_fn = SubprocessProcessHandler(self.process).communicate_teeing_stdout_and_stderr  # type: ignore[assignment]
        if stdin_data is not None:
            stdin_data = ensure_binary(stdin_data)
        (stdout, stderr) = communicate_fn(stdin_data)
        # On failure, dump the daemon/workdir logs to aid debugging.
        # NOTE(review): render_logs is not defined in this chunk — presumably
        # provided later in this module; verify it exists.
        if self.process.returncode != PANTS_SUCCEEDED_EXIT_CODE:
            render_logs(self.workdir)
        return PantsResult(
            command=self.command,
            exit_code=self.process.returncode,
            stdout=stdout.decode(),
            stderr=stderr.decode(),
            workdir=self.workdir,
            pid=self.process.pid,
        )
def run_pants_with_workdir_without_waiting(
    command: Command,
    *,
    workdir: str,
    hermetic: bool = True,
    use_pantsd: bool = True,
    config: Mapping | None = None,
    extra_env: Mapping[str, str] | None = None,
    print_stacktrace: bool = True,
    **kwargs: Any,
) -> PantsJoinHandle:
    """Launch Pants in a subprocess and return immediately without waiting.

    :param command: Args passed after `./pants` (a single string when
        ``shell=True`` is supplied via `kwargs`).
    :param workdir: Directory passed as `--pants-workdir`.
    :param hermetic: If True, ignore any `pants.toml` and run with a small
        allow-listed environment.
    :param use_pantsd: Whether to enable pantsd, unless the command line or
        `config` already decides it.
    :param config: Optional data for a generated TOML file: a map of
        <section-name> -> map of key -> value.
    :param extra_env: Extra env vars to set in the Pants process environment.
    :param print_stacktrace: Passed through as `--print-stacktrace`.
    :param kwargs: Extra keyword args for `subprocess.Popen`.
    """
    args = [
        "--no-pantsrc",
        f"--pants-workdir={workdir}",
        f"--print-stacktrace={print_stacktrace}",
    ]
    # Only force --[no-]pantsd when neither the command nor the config
    # already pins a pantsd setting.
    pantsd_in_command = "--no-pantsd" in command or "--pantsd" in command
    pantsd_in_config = config and "GLOBAL" in config and "pantsd" in config["GLOBAL"]
    if not pantsd_in_command and not pantsd_in_config:
        args.append("--pantsd" if use_pantsd else "--no-pantsd")
    if hermetic:
        args.append("--pants-config-files=[]")
    if config:
        # Serialize the requested config to a TOML file in the workdir and
        # point Pants at it.
        toml_file_name = os.path.join(workdir, "pants.toml")
        with safe_open(toml_file_name, mode="w") as fp:
            fp.write(TomlSerializer(config).serialize())
        args.append(f"--pants-config-files={toml_file_name}")
    pants_script = [sys.executable, "-m", "pants"]
    # Permit usage of shell=True and string-based commands to allow e.g. `./pants | head`.
    pants_command: Command
    if kwargs.get("shell") is True:
        assert not isinstance(command, list), "must pass command as a string when using shell=True"
        pants_command = " ".join([*pants_script, " ".join(args), command])
    else:
        pants_command = [*pants_script, *args, *command]
    # Only allow-listed entries will be included in the environment if hermetic=True. Note that
    # the env will already be fairly hermetic thanks to the v2 engine; this provides an
    # additional layer of hermiticity.
    if hermetic:
        # With an empty environment, we would generally get the true underlying system default
        # encoding, which is unlikely to be what we want (it's generally ASCII, still). So we
        # explicitly set an encoding here.
        env = {"LC_ALL": "en_US.UTF-8"}
        # Apply our allowlist.
        for h in (
            "HOME",
            "PATH",  # Needed to find Python interpreters and other binaries.
            "PANTS_PROFILE",
            "RUN_PANTS_FROM_PEX",
        ):
            value = os.getenv(h)
            if value is not None:
                env[h] = value
        # HERMETIC_ENV may name extra comma-separated variables to pass through.
        hermetic_env = os.getenv("HERMETIC_ENV")
        if hermetic_env:
            for h in hermetic_env.strip(",").split(","):
                value = os.getenv(h)
                if value is not None:
                    env[h] = value
    else:
        env = os.environ.copy()
    if extra_env:
        env.update(extra_env)
    # Make the current interpreter's import path visible to the subprocess.
    env.update(PYTHONPATH=os.pathsep.join(sys.path))
    # Pants command that was called from the test shouldn't have a parent.
    if "PANTS_PARENT_BUILD_ID" in env:
        del env["PANTS_PARENT_BUILD_ID"]
    return PantsJoinHandle(
        command=pants_command,
        process=subprocess.Popen(
            pants_command,
            env=env,
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            **kwargs,
        ),
        workdir=workdir,
    )
def run_pants_with_workdir(
    command: Command,
    *,
    workdir: str,
    hermetic: bool = True,
    use_pantsd: bool = True,
    config: Mapping | None = None,
    stdin_data: bytes | str | None = None,
    tee_output: bool = False,
    **kwargs: Any,
) -> PantsResult:
    """Run Pants synchronously against `workdir` and return its result."""
    if config:
        kwargs["config"] = config
    # Launch without waiting, then immediately join to get the final result.
    return run_pants_with_workdir_without_waiting(
        command, workdir=workdir, hermetic=hermetic, use_pantsd=use_pantsd, **kwargs
    ).join(stdin_data=stdin_data, tee_output=tee_output)
def run_pants(
    command: Command,
    *,
    hermetic: bool = True,
    use_pantsd: bool = True,
    config: Mapping | None = None,
    extra_env: Mapping[str, str] | None = None,
    stdin_data: bytes | str | None = None,
    **kwargs: Any,
) -> PantsResult:
    """Runs Pants in a subprocess inside a fresh temporary workdir.

    :param command: A list of command line arguments coming after `./pants`.
    :param hermetic: If hermetic, your actual `pants.toml` will not be used.
    :param use_pantsd: If True, the Pants process will use pantsd.
    :param config: Optional data for a generated TOML file. A map of <section-name> ->
        map of key -> value.
    :param extra_env: Set these env vars in the Pants process's environment.
    :param stdin_data: Make this data available to be read from the process's stdin.
    :param kwargs: Extra keyword args to pass to `subprocess.Popen`.
    """
    with temporary_workdir() as workdir:
        result = run_pants_with_workdir(
            command,
            workdir=workdir,
            hermetic=hermetic,
            use_pantsd=use_pantsd,
            config=config,
            stdin_data=stdin_data,
            extra_env=extra_env,
            **kwargs,
        )
    return result
# -----------------------------------------------------------------------------------------------
# Environment setup.
# -----------------------------------------------------------------------------------------------
@contextmanager
def setup_tmpdir(files: Mapping[str, str]) -> Iterator[str]:
    """Create a temporary directory populated with `files` and yield its path
    relative to the build root.

    `files` maps relative file paths to their content; each path is created
    beneath the tmpdir. Content may reference `{tmpdir}` as a format-string
    placeholder, which is substituted with the actual (relative) tmpdir.

    Useful for setting up controlled test environments, e.g. source files and
    BUILD files.
    """
    with temporary_dir(root_dir=get_buildroot()) as tmpdir:
        relative_tmpdir = os.path.relpath(tmpdir, get_buildroot())
        for relative_path, template in files.items():
            destination = os.path.join(tmpdir, relative_path)
            safe_file_dump(
                destination, template.format(tmpdir=relative_tmpdir), makedirs=True
            )
        yield relative_tmpdir
@contextmanager
def temporary_workdir(cleanup: bool = True) -> Iterator[str]:
    """Yield a fresh temporary Pants workdir under `.pants.d/tmp`."""
    # We can hard-code '.pants.d' here because we know that will always be its value
    # in the pantsbuild/pants repo (e.g., that's what we .gitignore in that repo).
    # Grabbing the pants_workdir config would require this pants's config object,
    # which we don't have a reference to here.
    parent = os.path.join(get_buildroot(), ".pants.d", "tmp")
    safe_mkdir(parent)
    with temporary_dir(root_dir=parent, cleanup=cleanup, suffix=".pants.d") as workdir:
        yield workdir
# -----------------------------------------------------------------------------------------------
# Pantsd and logs.
# -----------------------------------------------------------------------------------------------
def kill_daemon(pid_dir=None):
    """Terminate any running pantsd, optionally scoped to `pid_dir`."""
    args = ["./pants"]
    if pid_dir:
        args.append(f"--pants-subprocessdir={pid_dir}")
    bootstrap_options = OptionsBootstrapper.create(
        env=os.environ, args=args, allow_pantsrc=False
    ).bootstrap_options
    client = PantsDaemonClient(bootstrap_options)
    # Hold the lifecycle lock so termination doesn't race a restart.
    with client.lifecycle_lock:
        client.terminate()
def ensure_daemon(func):
    """A decorator to assist with running tests with and without the daemon enabled."""
    parametrize = pytest.mark.parametrize("use_pantsd", [True, False])
    return parametrize(func)
def render_logs(workdir: str) -> None:
    """Renders all potentially relevant logs from the given workdir to stdout."""
    # Exception logs first, then the main pants log.
    filenames = [
        match
        for pattern in ("logs/exceptions*log", "pants.log")
        for match in glob.glob(os.path.join(workdir, pattern))
    ]
    for filename in filenames:
        rel_filename = fast_relpath(filename, workdir)
        print(f"{rel_filename} +++ ")
        for line in _read_log(filename):
            print(f"{rel_filename} >>> {line}")
        print(f"{rel_filename} --- ")
def read_pants_log(workdir: str) -> Iterator[str]:
    """Yields all lines from the pants log under the given workdir."""
    # Surface the pants log for easy viewing via pytest's `-s` (don't capture stdio) option.
    log_path = f"{workdir}/pants.log"
    yield from _read_log(log_path)
def _read_log(filename: str) -> Iterator[str]:
    """Yield each line of `filename` with trailing whitespace stripped."""
    with open(filename) as fh:
        yield from (line.rstrip() for line in fh)
| |
"""Neovim TKinter UI."""
# EXAMPLE FROM TATRRUIDA
import sys
from Tkinter import Canvas, Tk
from collections import deque
from threading import Thread
# import StringIO, cProfile, pstats
from neovim import attach
from tkFont import Font
# Map Tk keysym names to Nvim key-notation names for keys whose names differ.
SPECIAL_KEYS = {
    'Escape': 'Esc',
    'Return': 'CR',
    'BackSpace': 'BS',
    'Prior': 'PageUp',
    'Next': 'PageDown',
    'Delete': 'Del',
}
# On Python 2, substitute the lazy xrange for range.
if sys.version_info < (3, 0):
    range = xrange
class NvimTk(object):
    """Wraps all nvim/tk event handling."""

    def __init__(self, nvim):
        """Initialize with a Nvim instance."""
        self._nvim = nvim
        # Highlight attributes from the most recent 'highlight_set' event.
        self._attrs = {}
        # Redraw batches queued by the nvim thread, drained on the Tk thread.
        self._nvim_updates = deque()
        self._canvas = None
        # Default foreground/background colors as '#rrggbb' strings.
        self._fg = '#000000'
        self._bg = '#ffffff'

    def run(self):
        """Start the UI."""
        self._tk_setup()
        # Run the nvim session on a daemon thread; Tk owns the main thread
        # and receives updates via virtual events.
        t = Thread(target=self._nvim_event_loop)
        t.daemon = True
        t.start()
        self._root.mainloop()

    def _tk_setup(self):
        """Create the Tk root window and bind event handlers."""
        self._root = Tk()
        self._root.bind('<<nvim_redraw>>', self._tk_nvim_redraw)
        self._root.bind('<<nvim_detach>>', self._tk_nvim_detach)
        self._root.bind('<Key>', self._tk_key)

    def _tk_nvim_redraw(self, *args):
        """Apply one queued batch of redraw events (runs on the Tk thread)."""
        update = self._nvim_updates.popleft()
        # Each entry is [event_name, args1, args2, ...]; dispatch every args
        # tuple to the matching _tk_nvim_<event_name> handler.
        for update in update:
            handler = getattr(self, '_tk_nvim_' + update[0])
            for args in update[1:]:
                handler(*args)

    def _tk_nvim_detach(self, *args):
        """Tear down the window when the nvim session ends."""
        self._root.destroy()

    def _tk_nvim_resize(self, width, height):
        """Handle a 'resize' event by rebuilding the canvas grid."""
        self._tk_redraw_canvas(width, height)

    def _tk_nvim_clear(self):
        """Clear the entire screen."""
        self._tk_clear_region(0, self._height - 1, 0, self._width - 1)

    def _tk_nvim_eol_clear(self):
        """Clear from the cursor to the right edge of the scroll region."""
        row, col = (self._cursor_row, self._cursor_col,)
        self._tk_clear_region(row, row, col, self._scroll_right)

    def _tk_nvim_cursor_goto(self, row, col):
        """Track the nvim cursor position."""
        self._cursor_row = row
        self._cursor_col = col

    # The following redraw events are received but intentionally not rendered
    # by this minimal UI.
    def _tk_nvim_cursor_on(self):
        pass

    def _tk_nvim_cursor_off(self):
        pass

    def _tk_nvim_mouse_on(self):
        pass

    def _tk_nvim_mouse_off(self):
        pass

    def _tk_nvim_insert_mode(self):
        pass

    def _tk_nvim_normal_mode(self):
        pass

    def _tk_nvim_set_scroll_region(self, top, bot, left, right):
        """Remember the rectangle affected by subsequent 'scroll' events."""
        self._scroll_top = top
        self._scroll_bot = bot
        self._scroll_left = left
        self._scroll_right = right

    def _tk_nvim_scroll(self, count):
        """Scroll the scroll region by `count` rows (positive moves content up)."""
        top, bot = (self._scroll_top, self._scroll_bot,)
        left, right = (self._scroll_left, self._scroll_right,)
        if count > 0:
            # Rows [destroy_top, destroy_bot] leave through the top; the rest
            # shift up and blank rows would appear at the bottom.
            destroy_top = top
            destroy_bot = top + count - 1
            move_top = destroy_bot + 1
            move_bot = bot
            fill_top = move_bot + 1
            fill_bot = fill_top + count - 1
        else:
            # Negative count: rows leave through the bottom and blank rows
            # would appear at the top.
            destroy_top = bot + count + 1
            destroy_bot = bot
            move_top = top
            move_bot = destroy_top - 1
            fill_bot = move_top - 1
            fill_top = fill_bot + count + 1
        # destroy items that would be moved outside the scroll region after
        # scrolling
        # self._tk_clear_region(destroy_top, destroy_bot, left, right)
        # self._tk_clear_region(move_top, move_bot, left, right)
        self._tk_destroy_region(destroy_top, destroy_bot, left, right)
        self._tk_tag_region('move', move_top, move_bot, left, right)
        self._canvas.move('move', 0, -count * self._rowsize)
        self._canvas.dtag('move', 'move')
        # self._tk_fill_region(fill_top, fill_bot, left, right)

    def _tk_nvim_highlight_set(self, attrs):
        """Remember highlight attributes for subsequent 'put' events."""
        self._attrs = attrs

    def _tk_nvim_put(self, data):
        """Render `data` in the cell under the cursor, then advance the cursor."""
        # choose a Font instance
        font = self._fnormal
        if self._attrs.get('bold', False):
            font = self._fbold
        if self._attrs.get('italic', False):
            font = self._fbolditalic if font == self._fbold else self._fitalic
        # colors
        # NOTE(review): when 'foreground'/'background' are absent, the default
        # is the '#rrggbb' *string* self._fg/_bg, which the 'x' format spec
        # cannot format (it expects an integer) — confirm whether these
        # defaults should instead be stored as ints.
        fg = "#{0:0{1}x}".format(self._attrs.get('foreground', self._fg), 6)
        bg = "#{0:0{1}x}".format(self._attrs.get('background', self._bg), 6)
        # get the "text" and "rect" which correspond to the current cell
        x, y = self._tk_get_coords(self._cursor_row, self._cursor_col)
        items = self._canvas.find_overlapping(x, y, x + 1, y + 1)
        if len(items) != 2:
            # caught part the double-width character in the cell to the left,
            # filter items which dont have the same horizontal coordinate as
            # "x"
            predicate = lambda item: self._canvas.coords(item)[0] == x
            items = filter(predicate, items)
        # rect has lower id than text, sort to unpack correctly
        rect, text = sorted(items)
        self._canvas.itemconfig(text, fill=fg, font=font, text=data or ' ')
        self._canvas.itemconfig(rect, fill=bg)
        self._tk_nvim_cursor_goto(self._cursor_row, self._cursor_col + 1)

    def _tk_nvim_bell(self):
        """Ring the bell."""
        self._root.bell()

    def _tk_nvim_update_fg(self, fg):
        """Store the default foreground color (received as an integer)."""
        self._fg = "#{0:0{1}x}".format(fg, 6)

    def _tk_nvim_update_bg(self, bg):
        """Store the default background color (received as an integer)."""
        self._bg = "#{0:0{1}x}".format(bg, 6)

    def _tk_redraw_canvas(self, width, height):
        """(Re)create the canvas holding a width x height grid of cells."""
        if self._canvas:
            self._canvas.destroy()
        self._fnormal = Font(family='Monospace', size=13)
        self._fbold = Font(family='Monospace', weight='bold', size=13)
        self._fitalic = Font(family='Monospace', slant='italic', size=13)
        self._fbolditalic = Font(family='Monospace', weight='bold',
                                 slant='italic', size=13)
        # Cell size in pixels, derived from the normal font.
        self._colsize = self._fnormal.measure('A')
        self._rowsize = self._fnormal.metrics('linespace')
        self._canvas = Canvas(self._root, width=self._colsize * width,
                              height=self._rowsize * height)
        self._tk_fill_region(0, height - 1, 0, width - 1)
        self._cursor_row = 0
        self._cursor_col = 0
        self._scroll_top = 0
        self._scroll_bot = height - 1
        self._scroll_left = 0
        self._scroll_right = width - 1
        self._width, self._height = (width, height,)
        self._canvas.pack()

    def _tk_fill_region(self, top, bot, left, right):
        """Create rect+text canvas items for every cell in the region."""
        # create columns from right to left so the left columns have a
        # higher z-index than the right columns. This is required to
        # properly display characters that cross cell boundary
        for rownum in range(bot, top - 1, -1):
            for colnum in range(right, left - 1, -1):
                x1 = colnum * self._colsize
                y1 = rownum * self._rowsize
                x2 = (colnum + 1) * self._colsize
                y2 = (rownum + 1) * self._rowsize
                # for each cell, create two items: The rectangle is used for
                # filling background and the text is for cell contents.
                self._canvas.create_rectangle(x1, y1, x2, y2,
                                              fill=self._bg, width=0)
                self._canvas.create_text(x1, y1, anchor='nw',
                                         font=self._fnormal, width=1,
                                         fill=self._fg, text=' ')

    def _tk_clear_region(self, top, bot, left, right):
        """Reset every item in the region to the default background color."""
        self._tk_tag_region('clear', top, bot, left, right)
        self._canvas.itemconfig('clear', fill=self._bg)
        self._canvas.dtag('clear', 'clear')

    def _tk_destroy_region(self, top, bot, left, right):
        """Delete every canvas item in the region."""
        self._tk_tag_region('destroy', top, bot, left, right)
        self._canvas.delete('destroy')
        self._canvas.dtag('destroy', 'destroy')

    def _tk_tag_region(self, tag, top, bot, left, right):
        """Attach `tag` to all canvas items overlapping the cell region."""
        x1, y1 = self._tk_get_coords(top, left)
        x2, y2 = self._tk_get_coords(bot, right)
        self._canvas.addtag_overlapping(tag, x1, y1, x2 + 1, y2 + 1)

    def _tk_get_coords(self, row, col):
        """Convert a (row, col) cell address to pixel coordinates."""
        x = col * self._colsize
        y = row * self._rowsize
        return x, y

    def _tk_key(self, event):
        """Translate a Tk key event to nvim key notation and send it."""
        if 0xffe1 <= event.keysym_num <= 0xffee:
            # this is a modifier key, ignore. Source:
            # https://www.tcl.tk/man/tcl8.4/TkCmd/keysyms.htm
            return
        # Translate to Nvim representation of keys
        send = []
        if event.state & 0x1:
            send.append('S')
        if event.state & 0x4:
            send.append('C')
        if event.state & (0x8 | 0x80):
            send.append('A')
        special = len(send) > 0
        key = event.char
        if _is_invalid_key(key):
            special = True
            key = event.keysym
        send.append(SPECIAL_KEYS.get(key, key))
        send = '-'.join(send)
        if special:
            send = '<' + send + '>'
        nvim = self._nvim
        # Hand input to the session thread; the Tk thread must not block here.
        nvim.session.threadsafe_call(lambda: nvim.input(send))

    def _nvim_event_loop(self):
        """Run the nvim session loop (worker thread) until it exits."""
        self._nvim.session.run(self._nvim_request,
                               self._nvim_notification,
                               lambda: self._nvim.attach_ui(80, 24))
        # Session ended: ask the Tk thread to destroy the window.
        self._root.event_generate('<<nvim_detach>>', when='tail')

    def _nvim_request(self, method, args):
        """Requests are unsupported by this UI; fail loudly."""
        raise Exception('This UI does not implement any methods')

    def _nvim_notification(self, method, args):
        """Queue 'redraw' batches and wake the Tk thread via a virtual event."""
        if method == 'redraw':
            self._nvim_updates.append(args)
            self._root.event_generate('<<nvim_redraw>>', when='tail')
def _is_invalid_key(c):
    # True when `c` (a byte string from Tk's event.char under Python 2) is not
    # a single printable character: undecodable/multi-byte input or control
    # characters below 0x20 must be sent via keysym-based <...> notation.
    try:
        return len(c.decode('utf-8')) != 1 or ord(c[0]) < 0x20
    except UnicodeDecodeError:
        return True
# Attach to an embedded nvim child process and run the Tk UI until it exits.
nvim = attach('child', argv=['../neovim/build/bin/nvim', '--embed'])
ui = NvimTk(nvim)
# pr = cProfile.Profile()
# pr.enable()
ui.run()
# pr.disable()
# s = StringIO.StringIO()
# ps = pstats.Stats(pr, stream=s)
# ps.strip_dirs().sort_stats('ncalls').print_stats(15)
# print s.getvalue()
| |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import disposable_email_domains
import wtforms
import wtforms.fields.html5
import jinja2
from warehouse import forms
from warehouse.accounts.interfaces import TooManyFailedLogins
from warehouse.accounts.models import DisableReason
from warehouse.email import send_password_compromised_email
class UsernameMixin:
    """Form mixin requiring a username that belongs to an existing account."""

    username = wtforms.StringField(validators=[wtforms.validators.DataRequired()])

    def validate_username(self, field):
        # Reject usernames that don't map to a known account.
        if self.user_service.find_userid(field.data) is None:
            raise wtforms.validators.ValidationError("No user found with that username")
class NewUsernameMixin:
    """Form mixin for choosing a new, not-yet-used username."""

    username = wtforms.StringField(
        validators=[
            wtforms.validators.DataRequired(),
            wtforms.validators.Length(
                max=50, message=("Choose a username with 50 characters or less.")
            ),
            # the regexp below must match the CheckConstraint
            # for the username field in accounts.models.User
            wtforms.validators.Regexp(
                r"^[a-zA-Z0-9][a-zA-Z0-9._-]*[a-zA-Z0-9]$",
                message=(
                    "The username is invalid. Usernames "
                    "must be composed of letters, numbers, "
                    "dots, hyphens and underscores. And must "
                    "also start and finish with a letter or number. "
                    "Choose a different username."
                ),
            ),
        ]
    )

    def validate_username(self, field):
        # Usernames must be globally unique across accounts.
        if self.user_service.find_userid(field.data) is not None:
            raise wtforms.validators.ValidationError(
                "This username is already being used by another "
                "account. Choose a different username."
            )
class PasswordMixin:
    """Form mixin that checks the submitted password for the named user."""

    password = wtforms.PasswordField(validators=[wtforms.validators.DataRequired()])

    def __init__(self, *args, check_password_metrics_tags=None, **kwargs):
        # Metrics tags forwarded to user_service.check_password.
        self._check_password_metrics_tags = check_password_metrics_tags
        super().__init__(*args, **kwargs)

    def validate_password(self, field):
        userid = self.user_service.find_userid(self.username.data)
        if userid is None:
            # No such user; the username validator reports that case.
            return
        try:
            valid = self.user_service.check_password(
                userid, field.data, tags=self._check_password_metrics_tags
            )
        except TooManyFailedLogins:
            raise wtforms.validators.ValidationError(
                "There have been too many unsuccessful login attempts, "
                "try again later."
            ) from None
        if not valid:
            raise wtforms.validators.ValidationError(
                "The password is invalid. Try again."
            )
class NewPasswordMixin:
    """Form mixin for setting a new password, with strength and breach checks."""

    new_password = wtforms.PasswordField(
        validators=[
            wtforms.validators.DataRequired(),
            forms.PasswordStrengthValidator(
                user_input_fields=["full_name", "username", "email"]
            ),
        ]
    )
    password_confirm = wtforms.PasswordField(
        validators=[
            wtforms.validators.DataRequired(),
            wtforms.validators.EqualTo(
                "new_password", "Your passwords don't match. Try again."
            ),
        ]
    )
    # These fields are here to provide the various user-defined fields to the
    # PasswordStrengthValidator of the new_password field, to ensure that the
    # newly set password doesn't contain any of them
    full_name = wtforms.StringField()  # May be empty
    username = wtforms.StringField(validators=[wtforms.validators.DataRequired()])
    email = wtforms.StringField(validators=[wtforms.validators.DataRequired()])

    def __init__(self, *args, breach_service, **kwargs):
        super().__init__(*args, **kwargs)
        # Service consulted to reject passwords found in known breaches.
        self._breach_service = breach_service

    def validate_new_password(self, field):
        # Reject the new password if it appears in the breach corpus.
        if self._breach_service.check_password(
            field.data, tags=["method:new_password"]
        ):
            raise wtforms.validators.ValidationError(
                jinja2.Markup(self._breach_service.failure_message)
            )
class NewEmailMixin:
    """Form mixin for registering a new, not-yet-used email address."""

    email = wtforms.fields.html5.EmailField(
        validators=[
            wtforms.validators.DataRequired(),
            wtforms.validators.Email(
                message=("The email address isn't valid. Try again.")
            ),
        ]
    )

    def validate_email(self, field):
        # Each address may back at most one account.
        if self.user_service.find_userid_by_email(field.data) is not None:
            raise wtforms.validators.ValidationError(
                "This email address is already being used by another account. "
                "Use a different email."
            )
        # Block throwaway providers via the disposable-domain blacklist.
        domain = field.data.rpartition("@")[2]
        if domain in disposable_email_domains.blacklist:
            raise wtforms.validators.ValidationError(
                "You can't create an account with an email address "
                "from this domain. Use a different email."
            )
class HoneypotMixin:
    """ A mixin to catch spammers. This field should always be blank """

    # Bots tend to fill in every field; a non-empty value marks the
    # submission as likely spam.
    confirm_form = wtforms.StringField()
class RegistrationForm(
    NewUsernameMixin, NewEmailMixin, NewPasswordMixin, HoneypotMixin, forms.Form
):
    """Account-creation form composed from the new-account mixins."""

    full_name = wtforms.StringField(
        validators=[
            wtforms.validators.Length(
                max=100,
                message=(
                    "The name is too long. "
                    "Choose a name with 100 characters or less."
                ),
            )
        ]
    )

    def __init__(self, *args, user_service, **kwargs):
        super().__init__(*args, **kwargs)
        # Used by the mixins' validators to look up existing users.
        self.user_service = user_service
class LoginForm(PasswordMixin, UsernameMixin, forms.Form):
    """Login form: username/password plus compromised-password handling."""

    def __init__(self, *args, request, user_service, breach_service, **kwargs):
        super().__init__(*args, **kwargs)
        self.request = request
        self.user_service = user_service
        self.breach_service = breach_service

    def validate_password(self, field):
        userid = self.user_service.find_userid(self.username.data)

        # If the account was already disabled for a compromised password,
        # refuse to even check the submitted password.
        if userid is not None:
            is_disabled, reason = self.user_service.is_disabled(userid)
            if is_disabled and reason == DisableReason.CompromisedPassword:
                raise wtforms.validators.ValidationError(
                    jinja2.Markup(self.breach_service.failure_message)
                )

        # Do our typical validation of the password.
        super().validate_password(field)

        if userid is None:
            return

        # Check the (correct) password against the breached-password service;
        # a hit disables the account, notifies the user, and rejects the login.
        if not self.breach_service.check_password(
            field.data, tags=["method:auth", "auth_method:login_form"]
        ):
            return
        user = self.user_service.get_user(userid)
        send_password_compromised_email(self.request, user)
        self.user_service.disable_password(
            user.id, reason=DisableReason.CompromisedPassword
        )
        raise wtforms.validators.ValidationError(
            jinja2.Markup(self.breach_service.failure_message)
        )
class RequestPasswordResetForm(forms.Form):
    """Form that locates an account by username or email for a reset."""

    username_or_email = wtforms.StringField(
        validators=[wtforms.validators.DataRequired()]
    )

    def __init__(self, *args, user_service, **kwargs):
        super().__init__(*args, **kwargs)
        self.user_service = user_service

    def validate_username_or_email(self, field):
        # Try a username lookup first, then fall back to email.
        user = self.user_service.get_user_by_username(field.data)
        if user is None:
            user = self.user_service.get_user_by_email(field.data)
        if user is None:
            raise wtforms.validators.ValidationError(
                "No user found with that username or email"
            )
class ResetPasswordForm(NewPasswordMixin, forms.Form):
    """Password-reset form: exactly the NewPasswordMixin fields, nothing more."""
    pass
| |
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import contextlib
import httplib
import logging
import os
import tempfile
import time
from pylib import android_commands
from pylib import constants
from pylib import ports
from pylib.chrome_test_server_spawner import SpawningServer
from pylib.flag_changer import FlagChanger
from pylib.forwarder import Forwarder
from pylib.valgrind_tools import CreateTool
# TODO(frankf): Move this to pylib/utils
import lighttpd_server
# Name of a file on the device that records the ports used by the net test
# server, formatted as "<test-spawner-server-port>:<test-server-port>".
NET_TEST_SERVER_PORT_INFO_FILE = 'net-test-server-ports'
class BaseTestRunner(object):
  """Base class for running tests on a single device.

  A subclass should implement RunTests() with no parameter, so that calling
  the Run() method will set up tests, run them and tear them down.
  """

  def __init__(self, device, tool, build_type):
    """
    Args:
      device: Tests will run on the device of this ID.
      tool: Name passed to pylib.valgrind_tools.CreateTool to wrap test runs.
      build_type: 'Release' or 'Debug'.
    """
    self.device = device
    self.adb = android_commands.AndroidCommands(device=device)
    self.tool = CreateTool(tool, self.adb)
    self._http_server = None
    self._forwarder = None
    # Fixed device-side port used for forwarded HTTP traffic.
    self._forwarder_device_port = 8000
    self.forwarder_base_url = ('http://localhost:%d' %
                               self._forwarder_device_port)
    self.flags = FlagChanger(self.adb)
    self.flags.AddFlags(['--disable-fre'])
    self._spawning_server = None
    self._spawner_forwarder = None
    # We will allocate port for test server spawner when calling method
    # LaunchChromeTestServerSpawner and allocate port for test server when
    # starting it in TestServerThread.
    self.test_server_spawner_port = 0
    self.test_server_port = 0
    self.build_type = build_type

  def _PushTestServerPortInfoToDevice(self):
    """Pushes the latest port information to device."""
    self.adb.SetFileContents(self.adb.GetExternalStorage() + '/' +
                             NET_TEST_SERVER_PORT_INFO_FILE,
                             '%d:%d' % (self.test_server_spawner_port,
                                        self.test_server_port))

  def RunTest(self, test):
    """Runs a test. Needs to be overridden.

    Args:
      test: A test to run.

    Returns:
      Tuple containing:
        (base_test_result.TestRunResults, tests to rerun or None)
    """
    raise NotImplementedError

  def PushDependencies(self):
    """Push all dependencies to device once before all tests are run."""
    pass

  def SetUp(self):
    """Run once before all tests are run."""
    # Kill any stale forwarder on the device before pushing dependencies.
    Forwarder.KillDevice(self.adb, self.tool)
    self.PushDependencies()

  def TearDown(self):
    """Run once after all tests are run."""
    self.ShutdownHelperToolsForTestSuite()

  def CopyTestData(self, test_data_paths, dest_dir):
    """Copies |test_data_paths| list of files/directories to |dest_dir|.

    Args:
      test_data_paths: A list of files or directories relative to |dest_dir|
          which should be copied to the device. The paths must exist in
          |CHROME_DIR|.
      dest_dir: Absolute path to copy to on the device.
    """
    for p in test_data_paths:
      self.adb.PushIfNeeded(
          os.path.join(constants.CHROME_DIR, p),
          os.path.join(dest_dir, p))

  def LaunchTestHttpServer(self, document_root, port=None,
                           extra_config_contents=None):
    """Launches an HTTP server to serve HTTP tests.

    Args:
      document_root: Document root of the HTTP server.
      port: port on which we want to the http server bind.
      extra_config_contents: Extra config contents for the HTTP server.

    Returns:
      Tuple of (device-side forwarded port, host-side http server port).
    """
    self._http_server = lighttpd_server.LighttpdServer(
        document_root, port=port, extra_config_contents=extra_config_contents)
    if self._http_server.StartupHttpServer():
      logging.info('http server started: http://localhost:%s',
                   self._http_server.port)
    else:
      logging.critical('Failed to start http server')
    self.StartForwarderForHttpServer()
    return (self._forwarder_device_port, self._http_server.port)

  def _CreateAndRunForwarder(
      self, adb, port_pairs, tool, host_name, build_type):
    """Creates and run a forwarder."""
    forwarder = Forwarder(adb, build_type)
    forwarder.Run(port_pairs, tool, host_name)
    return forwarder

  def StartForwarder(self, port_pairs):
    """Starts TCP traffic forwarding for the given |port_pairs|.

    Args:
      port_pairs: A list of (device_port, local_port) tuples to forward.
    """
    # Replace any forwarder that is already running.
    if self._forwarder:
      self._forwarder.Close()
    self._forwarder = self._CreateAndRunForwarder(
        self.adb, port_pairs, self.tool, '127.0.0.1', self.build_type)

  def StartForwarderForHttpServer(self):
    """Starts a forwarder for the HTTP server.

    The forwarder forwards HTTP requests and responses between host and device.
    """
    self.StartForwarder([(self._forwarder_device_port, self._http_server.port)])

  def RestartHttpServerForwarderIfNecessary(self):
    """Restarts the forwarder if it's not open."""
    # Checks to see if the http server port is being used. If not forwards the
    # request.
    # TODO(dtrainor): This is not always reliable because sometimes the port
    # will be left open even after the forwarder has been killed.
    if not ports.IsDevicePortUsed(self.adb,
                                  self._forwarder_device_port):
      self.StartForwarderForHttpServer()

  def ShutdownHelperToolsForTestSuite(self):
    """Shuts down the server and the forwarder."""
    # Forwarders should be killed before the actual servers they're forwarding
    # to as they are clients potentially with open connections and to allow for
    # proper hand-shake/shutdown.
    Forwarder.KillDevice(self.adb, self.tool)
    if self._forwarder:
      self._forwarder.Close()
    if self._http_server:
      self._http_server.ShutdownHttpServer()
    if self._spawning_server:
      self._spawning_server.Stop()
    self.flags.Restore()

  def CleanupSpawningServerState(self):
    """Tells the spawning server to clean up any state.

    If the spawning server is reused for multiple tests, this should be called
    after each test to prevent tests affecting each other.
    """
    if self._spawning_server:
      self._spawning_server.CleanupState()

  def LaunchChromeTestServerSpawner(self):
    """Launches test server spawner."""
    server_ready = False
    error_msgs = []
    # Try 3 times to launch test spawner server.
    for i in xrange(0, 3):
      # Do not allocate port for test server here. We will allocate
      # different port for individual test in TestServerThread.
      self.test_server_spawner_port = ports.AllocateTestServerPort()
      self._spawning_server = SpawningServer(self.test_server_spawner_port,
                                             self.adb,
                                             self.tool,
                                             self.build_type)
      self._spawning_server.Start()
      server_ready, error_msg = ports.IsHttpServerConnectable(
          '127.0.0.1', self.test_server_spawner_port, path='/ping',
          expected_read='ready')
      if server_ready:
        break
      else:
        error_msgs.append(error_msg)
        self._spawning_server.Stop()
        # Wait for 2 seconds then restart.
        time.sleep(2)
    if not server_ready:
      logging.error(';'.join(error_msgs))
      raise Exception('Can not start the test spawner server.')
    # Record the chosen ports on the device so tests can find the spawner.
    self._PushTestServerPortInfoToDevice()
    self._spawner_forwarder = self._CreateAndRunForwarder(
        self.adb,
        [(self.test_server_spawner_port, self.test_server_spawner_port)],
        self.tool, '127.0.0.1', self.build_type)
| |
#!/usr/bin/python
#
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
import dateutil.parser
import dateutil.tz
import re
import sys
import logging
# Module-level logger, named after this module.
log = logging.getLogger(__name__)
# Shared UTC tzinfo used as the default zone when parsing dates.
_tzutc = dateutil.tz.tzutc()
def parseDate(value):
    """Parse *value* into a datetime; unspecified fields default to
    midnight, Jan 1 of the current year, in UTC."""
    default = datetime.datetime.now().replace(
        month=1, day=1, hour=0, minute=0, second=0, microsecond=0,
        tzinfo=_tzutc)
    return dateutil.parser.parse(value, default=default)
class InvalidData(Exception):
    """Exception carrying an optional human-readable message in `msg`."""

    def __init__(self, *args, **kwargs):
        # `msg` is keyword-only and is not forwarded to Exception.__init__.
        self.msg = kwargs.pop('msg', None)
        super(InvalidData, self).__init__(*args, **kwargs)
class Operator(object):
    """Base class for nodes of a filter-expression tree evaluated against a
    SQLAlchemy model's columns."""

    # Token used for this operator in the textual filter syntax.
    filterTerm = None
    # Name of the column/clause method invoked to build the SQL expression.
    operator = None
    description = None
    # Number of operands: 2 means (field, value); see the markers below for
    # variable-length forms.
    arity = 2
    # Variable length arguments
    ARITY_VAROP = object()
    ARITY_VAR = object()
    # This may look weird, but we need two backslashes when trying to
    # match a single one, for escaping reasons
    _singleBackslashRe = re.compile(r'\\')

    def __init__(self, *operands):
        self.operands = list(operands)

    def addOperand(self, operand):
        # Append one more operand to this node.
        self.operands.append(operand)

    def asString(self):
        # Serialize the tree back to "TERM(a,b,...)" filter syntax; child
        # nodes recurse via asString(), plain values get quoted.
        return "%s(%s)" % (self.filterTerm,
            ','.join((hasattr(x, 'asString') and x.asString() or self._quote(x))
            for x in self.operands))

    @classmethod
    def _quote(cls, s):
        # Escape backslashes and double quotes; the value is wrapped in
        # double quotes only when a '"' was actually escaped.
        s = cls._singleBackslashRe.sub(r'\\\\', s)
        slen = len(s)
        s = s.replace('"', r'\"')
        if len(s) != slen:
            # We've replaced something
            s = '"%s"' % s
        return s

    def __eq__(self, other):
        # Structural equality: same class (both ways, to handle subclasses)
        # and pairwise-equal operands.
        if not isinstance(other, self.__class__):
            return False
        if not isinstance(self, other.__class__):
            return False
        if len(self.operands) != len(other.operands):
            return False
        for ssub, osub in zip(self.operands, other.operands):
            if ssub != osub:
                return False
        return True

    def __ne__(self, other):
        return not (self == other)

    def castValue(self, value):
        # Hook for subclasses to convert the raw string value; identity here.
        return value

    def expression(self, model):
        """Build a SQLAlchemy filter expression for `model` from this node.

        Raises InvalidData for unknown columns or unparsable datetime values.
        """
        field = None
        if self.arity == 2:
            (field, value) = self.operands
        elif self.arity is self.ARITY_VAROP:
            # One field followed by a variable number of values (e.g. IN).
            (field, value) = self.operands[0], self.operands[1:]
        elif self.arity is self.ARITY_VAR:
            if not self.operands:
                raise Exception("Operator expected at least one argument")
            # All operands are sub-expressions; fold them together with this
            # node's operator (e.g. AND/OR chaining).
            exprs = [ x.expression(model) for x in self.operands ]
            value = None
            for v in exprs:
                if value is None:
                    value = v
                else:
                    value = getattr(value, self.operator)(v)
            return value
        else:
            raise Exception("Unsupported arity %s" % self.arity)
        column = model.__table__.columns.get(field, None)
        if column is None:
            raise InvalidData(msg="Invalid column %s" % field)
        columnInstr = getattr(model, field)
        if column.type.__class__.__name__ == 'DateTime':
            # DateTime columns need the string value parsed first.
            try:
                value = parseDate(value)
            except (TypeError, ValueError), e:
                raise InvalidData(msg="Invalid time specification '%s'" %
                    value)
        func = getattr(columnInstr, self.operator)
        return func(value)
class BooleanOperator(Operator):
    """Operator whose value operand is coerced to a bool.

    Any string other than (case-insensitive) ``'false'`` counts as True.
    """
    def castValue(self, value):
        return value.lower() != 'false'
# --- Concrete operator vocabulary -------------------------------------------
# Each subclass only customizes class attributes:
#   filterTerm  -- token accepted by the query parser; the Lexer uppercases
#                  every term before the operatorMap lookup, so these must
#                  be uppercase to be reachable
#   operator    -- SQLAlchemy column/expression method used by expression()
#   arity       -- operand convention (defaults to binary: field, value)
class InOperator(Operator):
    filterTerm = 'IN'
    operator = 'in_'
    description = 'In list'
    arity = Operator.ARITY_VAROP
class NotInOperator(InOperator):
    # BUG FIX: filterTerm was 'notin_' (the SQLAlchemy method name) while the
    # SQLAlchemy method itself was inherited as 'in_'.  The Lexer uppercases
    # terms before lookup, so 'notin_' was unreachable -- and had it matched,
    # it would have produced an IN clause instead of NOT IN.
    filterTerm = 'NOT_IN'
    operator = 'notin_'
    description = 'Not in list'
class NullOperator(BooleanOperator):
    filterTerm = 'IS_NULL'
    operator = 'is_'
    description = 'Is NULL'
class EqualOperator(Operator):
    filterTerm = 'EQ'
    operator = '__eq__'
    description = 'Equal to'
class NotEqualOperator(EqualOperator):
    filterTerm = 'NE'
    operator = '__ne__'
    description = 'Not equal to'
class LessThanOperator(Operator):
    filterTerm = 'LT'
    operator = '__lt__'
    description = 'Less than'
class LessThanEqualOperator(Operator):
    filterTerm = 'LE'
    operator = '__le__'
    description = 'Less than or equal to'
class GreaterThanOperator(Operator):
    filterTerm = 'GT'
    operator = '__gt__'
    description = 'Greater than'
class GreaterThanEqualOperator(Operator):
    filterTerm = 'GE'
    operator = '__ge__'
    description = 'Greater than or equal to'
class LikeOperator(Operator):
    filterTerm = 'LIKE'
    operator = 'like'
    description = 'Like'
class NotLikeOperator(LikeOperator):
    filterTerm = 'NOT_LIKE'
    operator = 'notlike'
    description = 'Not like'
class ContainsOperator(Operator):
    # NOTE(review): operator is None here, so Operator.expression() would
    # fail for this term; presumably handled specially elsewhere -- confirm.
    filterTerm = 'CONTAINS'
    operator = None
    description = "Contains"
    arity = Operator.ARITY_VAROP
class AndOperator(Operator):
    filterTerm = 'AND'
    operator = '__and__'
    description = "And"
    arity = Operator.ARITY_VAR
class OrOperator(Operator):
    filterTerm = 'OR'
    operator = '__or__'
    description = "Or"
    arity = Operator.ARITY_VAR
def operatorFactory(operator):
    """Return the Operator subclass registered for filter term *operator*.

    Raises KeyError for unknown terms (operatorMap is populated at module
    load time from every class that defines a filterTerm attribute).
    """
    return operatorMap[operator]
class Lexer(object):
    """
    Class used for parsing a query tree.
    The general syntax is, in BNF-like syntax:
    optree ::== OPERATOR(operand[,operand*])
    OPERATOR ::== (word)
    operand ::== string | quotedstring | optree
    string ::== (pretty obvious)
    quotedstring :== " | string | "
    Strings MUST be quoted if they contain a quote (which must be escaped with
    a backslash), paranthesis or commas. Simple words do not have to be quoted,
    as they do not break the parser. Backslashes have to be doubled up within
    quotes.
    Example of operands that evaluate to strings::
    simple word
    "quoted words"
    "an embedded \"quote\" and an escaped \\ (backslash)"
    Note that semicolons will have to be URL-escaped before the query is passed
    in the URL.
    """
    _doubleBackslash = r'\\\\'
    # Placeholder character (an Armenian letter, assumed never to appear in
    # real queries): escaped backslashes are swapped to it up front so the
    # separator regexes below never trip over a backslash escape.
    _convertedDoubleBackslash = u'\u0560'
    _escaped = re.compile(_doubleBackslash)
    _unescaped = re.compile(_convertedDoubleBackslash)
    # .*? means non-greedy expansion, to avoid skipping over separators
    _startSep = re.compile(r'^(?P<head>.*?)(?P<sep>(\(|\)|,|(?<!\\)"))(?P<tail>.*)$')
    _endQuote = re.compile(r'^(?P<head>.*?)(?P<sep>(?<!\\)")(?P<tail>.*)$')
    def scan(self, s):
        """Parse query string *s* and return the root Operator of the tree."""
        return self._split(s)
    @classmethod
    def _split(cls, code):
        # The stack contains only tree nodes. Literal nodes are added as
        # operands directly to the last tree node in the stack.
        stack = []
        # First pass: we replace all double-backslashes with a
        # non-ascii unicode char, to simplify the regular expressions
        # _unescape will then revert this operation
        escCode = cls._escaped.sub(cls._convertedDoubleBackslash, code).strip()
        # There are only 2 states to worry about.
        # We look for a separator that is either ( , ) or " (unescaped,
        # hence the negative look-ahead in the regex)
        # If an (unescaped) quote is found, we need to find its matching
        # (unescaped) quote, which is the sep == '"' case.
        while escCode:
            m = cls._startSep.match(escCode)
            if m is None:
                raise InvalidData(msg="Unable to parse %s" % code)
            g = m.groupdict()
            head, sep, tail = g['head'], g['sep'], g['tail']
            # Get rid of leading whitespaces, unless the string is
            # quoted
            if sep != '"':
                escCode = tail.lstrip()
            else:
                escCode = tail
            if sep == '(':
                # New operator found.
                op = cls._unescape(head.strip()).upper()
                opFactory = operatorMap.get(op, None)
                if opFactory is None:
                    raise InvalidData(msg="Unknown operator %s" % op)
                tree = opFactory()
                if stack:
                    # Add the tree node to the parent (using the stack)
                    cls._addOperand(stack, tree)
                # ... and we push it onto the stack
                stack.append(tree)
                continue
            if sep == '"':
                # Ignore everything but a close quote
                m = cls._endQuote.match(escCode)
                if m:
                    g = m.groupdict()
                    head, sep, tail = g['head'], g['sep'], g['tail']
                    escCode = tail.lstrip()
                    cls._addOperand(stack, cls._unescapeString(head))
                    continue
                raise InvalidData(msg="Closing quote not found")
            if head:
                cls._addOperand(stack, cls._unescape(head.strip()))
            if sep == ',':
                continue
            assert sep == ')'
            top = stack.pop()
            if not stack:
                if escCode != '':
                    raise InvalidData(msg="Garbage found at the end of the expression: '%s'" % escCode)
                return top
        # NOTE(review): if the expression never closes its outermost ')',
        # the loop ends and the method implicitly returns None -- confirm
        # that callers handle this.
    @classmethod
    def _addOperand(cls, stack, child):
        # Attach *child* (subtree or string literal) to the innermost operator.
        top = stack[-1]
        assert isinstance(top, Operator)
        top.addOperand(child)
    @classmethod
    def _unescape(cls, s):
        # Restore the hidden double-backslashes.
        # NOTE(review): .encode('ascii') returns bytes on Python 3 -- this is
        # Python-2-era code; confirm before running under Python 3.
        return cls._unescaped.sub(r'\\', s).encode('ascii')
    @classmethod
    def _unescapeString(cls, s):
        # Quoted strings additionally carry backslash-escaped double quotes.
        s = s.replace(r'\"', '"')
        return cls._unescape(s)
# Registry mapping filter terms to Operator subclasses, built by scanning
# this module's namespace for anything with a 'filterTerm' attribute.
# NOTE(review): the base Operator class (filterTerm=None) is registered
# under the key None as a side effect.
operatorMap = {}
for mod_obj in sys.modules[__name__].__dict__.values():
    if hasattr(mod_obj, 'filterTerm'):
        operatorMap[mod_obj.filterTerm] = mod_obj
| |
#!/usr/bin/env python3
import os
import re
import sys
from io import StringIO
from plex import Scanner, Str, Lexicon, Opt, Bol, State, AnyChar, TEXT, IGNORE
from plex.traditional import re as Re
class MyScanner(Scanner):
    """Thin wrapper over plex.Scanner that takes its lexicon from the
    subclass attribute ``lexicon`` (see LenSubsScanner)."""
    def __init__(self, info, name='<default>'):
        Scanner.__init__(self, self.lexicon, info, name)
    def begin(self, state_name):
        # Plain delegation; kept so subclasses have a single override point.
        Scanner.begin(self, state_name)
def sep_seq(sequence, sep):
    """Build a plex pattern matching the strings of *sequence* joined by *sep*."""
    combined = Str(sequence[0])
    for item in sequence[1:]:
        combined = combined + (sep + Str(item))
    return combined
def runScanner(data, scanner_class, lexicon=None):
    """Run *scanner_class* over the text *data*.

    Returns (output text, scanner instance).  Tokens whose value is the
    plex IGNORE sentinel are dropped; all others are written out verbatim.
    """
    source = StringIO(data)
    sink = StringIO()
    if lexicon is not None:
        scanner = scanner_class(lexicon, source)
    else:
        scanner = scanner_class(source)
    while True:
        value, text = scanner.read()
        if value is None:
            # End of input.
            break
        if value is not IGNORE:
            sink.write(value)
    return sink.getvalue(), scanner
class LenSubsScanner(MyScanner):
    """Following clapack, we remove ftnlen arguments, which f2c puts after
    a char * argument to hold the length of the passed string. This is just
    a nuisance in C.
    """
    def __init__(self, info, name='<ftnlen>'):
        MyScanner.__init__(self, info, name)
        # Tracks '(' nesting depth while inside an argument list whose
        # ftnlen arguments must be preserved (see keep_ftnlen below).
        self.paren_count = 0
    def beginArgs(self, text):
        # Entering the argument list of a routine we must not scrub.
        if self.paren_count == 0:
            self.begin('args')
        self.paren_count += 1
        return text
    def endArgs(self, text):
        # Leaving that argument list; back to the default state at depth 0.
        self.paren_count -= 1
        if self.paren_count == 0:
            self.begin('')
        return text
    # plex pattern fragments used to assemble the lexicon below.
    digits = Re('[0-9]+')
    iofun = Re(r'\([^;]*;')
    decl = Re(r'\([^)]*\)[,;'+'\n]')
    any = Re('[.]*')
    S = Re('[ \t\n]*')
    cS = Str(',') + S
    len_ = Re('[a-z][a-z0-9]*_len')
    iofunctions = Str("s_cat", "s_copy", "s_stop", "s_cmp",
                      "i_len", "do_fio", "do_lio") + iofun
    # Routines to not scrub the ftnlen argument from
    keep_ftnlen = (Str('ilaenv_') | Str('iparmq_') | Str('s_rnge')) + Str('(')
    lexicon = Lexicon([
        (iofunctions, TEXT),
        (keep_ftnlen, beginArgs),
        State('args', [
            (Str(')'), endArgs),
            (Str('('), beginArgs),
            (AnyChar, TEXT),
        ]),
        (cS+Re(r'[1-9][0-9]*L'), IGNORE),
        (cS+Str('ftnlen')+Opt(S+len_), IGNORE),
        (cS+sep_seq(['(', 'ftnlen', ')'], S)+S+digits, IGNORE),
        (Bol+Str('ftnlen ')+len_+Str(';\n'), IGNORE),
        (cS+len_, TEXT),
        (AnyChar, TEXT),
    ])
def scrubFtnlen(source):
    """Strip f2c-generated ftnlen arguments from *source* (see LenSubsScanner)."""
    scrubbed, _scanner = runScanner(source, LenSubsScanner)
    return scrubbed
def cleanSource(source):
    """Normalize whitespace and drop decorative f2c comment lines."""
    # Strip trailing spaces/tabs at the end of every line.
    cleaned = re.sub(r'[\t ]+\n', '\n', source)
    # Drop decorative comment lines such as "/* .. Scalar Arguments .. */".
    cleaned = re.sub(r'(?m)^[\t ]*/\* *\.\. .*?\n', '', cleaned)
    # Collapse runs of three-or-more blank lines down to two.
    return re.sub(r'\n\n\n\n+', r'\n\n\n', cleaned)
class LineQueue:
    """Accumulates text lines and flushes them onward on demand."""
    def __init__(self):
        object.__init__(self)
        self._queue = []
    def add(self, line):
        self._queue.append(line)
    def clear(self):
        self._queue = []
    def flushTo(self, other_queue):
        """Move every buffered line into *other_queue*, emptying this queue."""
        for buffered in self._queue:
            other_queue.add(buffered)
        self.clear()
    def getValue(self):
        """Return the buffered text as one string and clear the queue.

        Goes through flushTo() so subclasses that override it (e.g.
        CommentQueue) get their formatting applied.
        """
        staging = LineQueue()
        self.flushTo(staging)
        text = ''.join(staging._queue)
        self.clear()
        return text
class CommentQueue(LineQueue):
    """LineQueue specialized for one-line C comments: strips the comment
    markers on add() and re-wraps the whole batch as a single comment on
    flushTo()."""
    def __init__(self):
        LineQueue.__init__(self)
    def add(self, line):
        if line.strip() == '':
            LineQueue.add(self, '\n')
        else:
            # Drop the leading '/*' and trailing '*/\n' of a one-line comment.
            line = ' ' + line[2:-3].rstrip() + '\n'
            LineQueue.add(self, line)
    def flushTo(self, other_queue):
        if len(self._queue) == 0:
            pass
        elif len(self._queue) == 1:
            # A single buffered line goes back out as a one-line comment.
            other_queue.add('/*' + self._queue[0][2:].rstrip() + ' */\n')
        else:
            # Several lines are merged into one block comment.
            other_queue.add('/*\n')
            LineQueue.flushTo(self, other_queue)
            other_queue.add('*/\n')
        self.clear()
# This really seems to be about 4x longer than it needs to be
def cleanComments(source):
    """Merge runs of consecutive one-line C comments into block comments.

    Implemented as a line-by-line state machine.  States: SourceLines
    (normal code), HaveCommentLines (buffering comment lines), and
    HaveBlankLines (buffering blank lines seen after comments, so they can
    be attached to whichever side the next line belongs to).
    """
    lines = LineQueue()
    comments = CommentQueue()
    def isCommentLine(line):
        return line.startswith('/*') and line.endswith('*/\n')
    blanks = LineQueue()
    def isBlank(line):
        return line.strip() == ''
    def SourceLines(line):
        if isCommentLine(line):
            comments.add(line)
            return HaveCommentLines
        else:
            lines.add(line)
            return SourceLines
    def HaveCommentLines(line):
        if isBlank(line):
            blanks.add('\n')
            return HaveBlankLines
        elif isCommentLine(line):
            comments.add(line)
            return HaveCommentLines
        else:
            # Code follows the comments directly: flush the batch ahead of it.
            comments.flushTo(lines)
            lines.add(line)
            return SourceLines
    def HaveBlankLines(line):
        if isBlank(line):
            blanks.add('\n')
            return HaveBlankLines
        elif isCommentLine(line):
            # The blanks sat between comment lines: keep them in the batch.
            blanks.flushTo(comments)
            comments.add(line)
            return HaveCommentLines
        else:
            comments.flushTo(lines)
            blanks.flushTo(lines)
            lines.add(line)
            return SourceLines
    state = SourceLines
    for line in StringIO(source):
        state = state(line)
    # Flush any comments still buffered at end-of-input.
    comments.flushTo(lines)
    return lines.getValue()
def removeHeader(source):
    """Remove the f2c '-- translated by ...' banner comment and the
    '#include "f2c.h"' line from *source*.

    Line-by-line state machine: LookingForHeader -> InHeader (skip the
    banner body up to its closing '*/') -> OutOfHeader (keep everything
    except the f2c.h include).
    """
    lines = LineQueue()
    def LookingForHeader(line):
        m = re.match(r'/\*[^\n]*-- translated', line)
        if m:
            return InHeader
        else:
            lines.add(line)
            return LookingForHeader
    def InHeader(line):
        if line.startswith('*/'):
            return OutOfHeader
        else:
            return InHeader
    def OutOfHeader(line):
        if line.startswith('#include "f2c.h"'):
            pass
        else:
            lines.add(line)
        return OutOfHeader
    state = LookingForHeader
    for line in StringIO(source):
        state = state(line)
    return lines.getValue()
def removeSubroutinePrototypes(source):
    """Drop '/* Subroutine */ int foo_(...);' prototype lines.

    NOTE(review): the regex embeds '^' mid-pattern without re.MULTILINE and
    is applied per line with .match(); it appears it can never match, making
    this pass a no-op -- confirm the intended pattern before relying on it.
    """
    expression = re.compile(
        r'/[*] Subroutine [*]/^\s*(?:(?:inline|static)\s+){0,2}(?!else|typedef|return)\w+\s+\*?\s*(\w+)\s*\([^0]+\)\s*;?'
    )
    lines = LineQueue()
    for line in StringIO(source):
        if not expression.match(line):
            lines.add(line)
    return lines.getValue()
def removeBuiltinFunctions(source):
    """Drop each '/* Builtin functions */' declaration section.

    The section runs from the marker comment line up to (and including)
    the first blank line; everything else passes through unchanged.
    """
    kept = []
    in_section = False
    for line in StringIO(source):
        if in_section:
            # Section ends at the first blank line, which is also dropped.
            if line.strip() == '':
                in_section = False
        elif line.strip() == '/* Builtin functions */':
            in_section = True
        else:
            kept.append(line)
    return ''.join(kept)
def replaceDlamch(source):
    """Replace dlamch_ calls with appropriate macros.

    f2c turns Fortran DLAMCH('E') etc. into dlamch_("E") calls; LAPACK only
    distinguishes the first letter of the argument (E/P/S/B).  The extern
    declaration of dlamch_ is removed as well.
    """
    def repl(m):
        s = m.group(1)
        return dict(E='EPSILON', P='PRECISION', S='SAFEMINIMUM',
                    B='BASE')[s[0]]
    source = re.sub(r'dlamch_\("(.*?)"\)', repl, source)
    # BUG FIX: the inline (?m) flag was at the END of the pattern; Python
    # deprecated non-leading global flags in 3.6 and raises re.error for
    # them since 3.11.  Moved to the front, where it is valid everywhere.
    source = re.sub(r'(?m)^\s+extern.*? dlamch_.*?;$', '', source)
    return source
# do it
def scrubSource(source, nsteps=None, verbose=False):
    """Run the full cleanup pipeline over *source* and return the result.

    nsteps limits how many stages run (handy for debugging); verbose
    prints each stage name before it executes.
    """
    pipeline = [
        ('scrubbing ftnlen', scrubFtnlen),
        ('remove header', removeHeader),
        ('clean source', cleanSource),
        ('clean comments', cleanComments),
        ('replace dlamch_() calls', replaceDlamch),
        ('remove prototypes', removeSubroutinePrototypes),
        ('remove builtin function prototypes', removeBuiltinFunctions),
    ]
    selected = pipeline if nsteps is None else pipeline[:nsteps]
    for msg, step in selected:
        if verbose:
            print(msg)
        source = step(source)
    return source
if __name__ == '__main__':
    # Usage: script.py <input-file> <output-dir> [nsteps]
    filename = sys.argv[1]
    outfilename = os.path.join(sys.argv[2], os.path.basename(filename))
    with open(filename, 'r') as fo:
        source = fo.read()
    # Optional third argument limits the number of pipeline steps run.
    if len(sys.argv) > 3:
        nsteps = int(sys.argv[3])
    else:
        nsteps = None
    # BUG FIX: this called scrub_source(), which does not exist anywhere in
    # the file (NameError at runtime); the function defined above is
    # scrubSource().
    source = scrubSource(source, nsteps, verbose=True)
    # Use a context manager so the output handle is closed even on error.
    with open(outfilename, 'w') as writefo:
        writefo.write(source)
| |
from django.contrib import messages
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.core.exceptions import ObjectDoesNotExist
from django.forms import modelformset_factory
from django.http import HttpResponseRedirect, HttpResponse, JsonResponse
from django.shortcuts import render, get_object_or_404
from django.template import loader
from django.urls import reverse
from django.views import generic
from .forms import *
from .models import *
from .control import *
# Create your views here.
def index(request):
    """Landing page.

    GET: login form for anonymous users, workout dashboard otherwise.
    POST: handles the login form plus the log/edit/skip workout buttons.
    """
    if request.method == 'POST':
        # Login
        if 'login' in request.POST:
            form = LoginForm (request.POST)
            if form.is_valid():
                email = request.POST['email'].lower()
                password = request.POST['password']
                user = authenticate (request, username=email, password=password)
                if user is not None:
                    login (request, user)
                    request.session['lifter'] = Lifter.objects.get(email__exact=email).id
                else:
                    messages.error(request, 'Invalid email/password combination.', extra_tags=Notification.error_class)
        # Log workout
        elif 'log' in request.POST:
            workout = get_object_or_404(Workout, pk=request.POST['log'])
            log = workout.log('COMPL')
        # Edit workout
        elif 'edit' in request.POST:
            workout = get_object_or_404(Workout, pk=request.POST['edit'])
            log = workout.log('COMPL')
            return HttpResponseRedirect (reverse ('hamask:log_update', kwargs={'pk':log.id}))
        # Skip workout
        elif 'skip' in request.POST:
            workout = get_object_or_404(Workout, pk=request.POST['skip'])
            log = workout.log('SKIPD')
        # BUG FIX: the 'login' and 'log' branches previously fell off the end
        # of the view and returned None, which makes Django raise a
        # ValueError.  Follow POST-redirect-GET back to the dashboard (this
        # also covers 'skip', which already redirected here).
        return HttpResponseRedirect (reverse ('hamask:index'))
    else:
        # If user is not authenticated, show login form
        if not request.user.is_authenticated:
            form = LoginForm()
            return render (request, 'hamask/login.html', {'form': form})
        else:
            lifter = Lifter.objects.get(pk=request.session['lifter'])
            workouts = lifter.get_next_workouts()
            exercises = {}
            last_workout = lifter.get_last_workout()
            last_exercises = {}
            if workouts:
                for workout in workouts:
                    exercises[workout.id] = workout.get_workout_exercises()
            if last_workout:
                last_exercises[last_workout.id] = last_workout.get_exercise_log()
            return render (request, 'hamask/index.html', {'workouts': workouts
                    , 'exercises': exercises
                    , 'last_workout': last_workout
                    , 'last_exercises': last_exercises,})
def logout_view(request):
    """Terminate the session and return to the landing page."""
    logout(request)
    return HttpResponseRedirect (reverse ('hamask:index'))
@login_required
def programs(request):
    """List every program owned by the session's lifter."""
    lifter = Lifter.objects.get(pk=request.session['lifter'])
    programs = lifter.get_programs()
    return render (request, 'hamask/programs.html', {'programs': programs})
@login_required
def program_create(request, template_name='hamask/program.html'):
    """Create a new program owned by the session's lifter."""
    form = ProgramForm(request.POST or None)
    if form.is_valid():
        program = form.save(commit=False)
        program.lifter = Lifter.objects.get(pk=request.session['lifter'])
        program.save()
        if 'save' in request.POST:
            messages.success(request, Notification.success_message, extra_tags=Notification.success_class)
            return HttpResponseRedirect (reverse ('hamask:program_update', kwargs={'pk':program.id}))
        # NOTE(review): a valid form without 'save' in POST falls through and
        # returns None -- presumably 'save' is the only submit button; confirm.
    else:
        return render (request, template_name, {'form': form})
@login_required
def program_update(request, pk, template_name='hamask/program.html'):
    """Edit a program: save fields, add groups/workouts, lifecycle actions.

    Each branch below corresponds to a distinct submit button in the
    template (save, add_group, add_workout, start, end, restart, copy,
    copy_group); 'delete' is handled before form validation.
    """
    program = get_object_or_404(Program, pk=pk)
    # Ownership check: lifters may only edit their own programs.
    if program.lifter.id != request.session['lifter']:
        raise Http404("Invalid program.")
    form = ProgramForm(request.POST or None, instance=program)
    if 'delete' in request.POST:
        program.delete()
        messages.success(request, Notification.success_message, extra_tags=Notification.success_class)
        return HttpResponseRedirect (reverse ('hamask:programs'))
    else:
        if form.is_valid():
            form.save()
            if 'save' in request.POST:
                messages.success(request, Notification.success_message, extra_tags=Notification.success_class)
                return HttpResponseRedirect (reverse ('hamask:program_update', kwargs={'pk':program.id}))
            elif 'add_group' in request.POST:
                order = program.get_next_workout_group_order()
                group = Workout_Group(program=program, name='Block ' + str(order + 1), order=order)
                group.save()
                messages.success(request, Notification.success_message, extra_tags=Notification.success_class)
                return HttpResponseRedirect (reverse ('hamask:program_update', kwargs={'pk':program.id}))
            elif 'add_workout' in request.POST:
                group = Workout_Group.objects.get(pk=request.POST['add_workout'])
                order = group.get_next_workout_order()
                workout = Workout(workout_group=group, name='Day ' + str(order + 1), order=order)
                workout.save()
                return HttpResponseRedirect (reverse ('hamask:workout_update', kwargs={'pk':workout.id}))
            elif 'start' in request.POST:
                program.start()
                messages.success(request, Notification.success_message, extra_tags=Notification.success_class)
                return HttpResponseRedirect (reverse ('hamask:program_update', kwargs={'pk':program.id}))
            elif 'end' in request.POST:
                program.end()
                messages.success(request, Notification.success_message, extra_tags=Notification.success_class)
                return HttpResponseRedirect (reverse ('hamask:program_update', kwargs={'pk':program.id}))
            elif 'restart' in request.POST:
                program.restart()
                messages.success(request, Notification.success_message, extra_tags=Notification.success_class)
                return HttpResponseRedirect (reverse ('hamask:program_update', kwargs={'pk':program.id}))
            elif 'copy' in request.POST:
                new_program = program.copy_program()
                messages.success(request, Notification.success_message, extra_tags=Notification.success_class)
                return HttpResponseRedirect (reverse ('hamask:program_update', kwargs={'pk':new_program.id}))
            elif 'copy_group' in request.POST:
                group = Workout_Group.objects.get(pk=request.POST['copy_group'])
                group.copy_group(program=None)
                messages.success(request, Notification.success_message, extra_tags=Notification.success_class)
                return HttpResponseRedirect (reverse ('hamask:program_update', kwargs={'pk':program.id}))
        else:
            # GET (or invalid form): render the program with its groups,
            # workouts and exercises for display.
            groups = program.get_workout_groups()
            workouts = {}
            exercises = {}
            for group in groups:
                workouts[group.id] = group.get_workouts()
            for k, w in workouts.items():
                for workout in w:
                    exercises[workout.id] = workout.get_workout_exercises()
            return render (request, template_name, {'form': form
                    , 'program': program
                    , 'groups': groups
                    , 'workouts': workouts
                    , 'exercises': exercises,})
def reorder_group(request):
    """AJAX endpoint: move a workout group up or down within its program.

    NOTE(review): unlike the page views, this endpoint has no
    @login_required and no ownership check -- confirm it is protected
    elsewhere (e.g. URL config or middleware).
    """
    group = Workout_Group.objects.get(pk=request.GET.get('group_id', None))
    order = request.GET.get('order', None)
    data = {}
    try:
        if order == 'UP':
            group.set_order_up()
        elif order == 'DOWN':
            group.set_order_down()
    except ObjectDoesNotExist:
        # Already at the boundary; respond with an empty payload.
        pass
    else:
        data = {'group_id': group.id}
    return JsonResponse(data)
def delete_group(request):
    """AJAX endpoint: delete a workout group, echoing its id back."""
    group = Workout_Group.objects.get(pk=request.GET.get('group_id', None))
    payload = {'group_id': group.id}
    group.delete()
    return JsonResponse(payload)
def update_group(request):
    """AJAX endpoint: rename a workout group, echoing its id back."""
    group = Workout_Group.objects.get(pk=request.GET.get('group_id', None))
    group.name = request.GET.get('group_name', None)
    group.save()
    return JsonResponse({'group_id': group.id})
@login_required
def program_import(request):
    """Copy a shared/template program into the session lifter's account."""
    lifter = Lifter.objects.get(pk=request.session['lifter'])
    form = ProgramImportForm(request.POST or None)
    if form.is_valid():
        program = Program.objects.get(pk=form.cleaned_data['program'])
        copy = program.copy_program(lifter)
        copy.save()
        messages.success(request, Notification.success_message, extra_tags=Notification.success_class)
        return HttpResponseRedirect (reverse ('hamask:programs'))
    else:
        return render (request, 'hamask/program_import.html', {'form': form})
@login_required
def workout_update(request, pk, template_name='hamask/workout.html'):
    """Edit a planned workout and its exercise rows (inline model formset)."""
    workout = get_object_or_404(Workout, pk=pk)
    lifter_id = request.session['lifter']
    # Ownership check: the workout must belong to the session's lifter.
    if workout.workout_group.program.lifter.id != lifter_id:
        raise Http404("Invalid workout.")
    # Build forms
    form = WorkoutForm(request.POST or None, instance=workout, prefix='workout')
    ExerciseFormset = modelformset_factory(Workout_Exercise, form=WorkoutExerciseForm, can_delete=True)
    exercise_formset = ExerciseFormset(request.POST or None
                        , prefix='exercise'
                        , queryset=workout.get_workout_exercises()
                        , form_kwargs={'lifter': lifter_id})
    if 'delete' in request.POST:
        program = workout.workout_group.program
        workout.delete()
        messages.success(request, Notification.success_message, extra_tags=Notification.success_class)
        return HttpResponseRedirect (reverse ('hamask:program_update', kwargs={'pk':program.id}))
    else:
        if form.is_valid() and exercise_formset.is_valid():
            form.save()
            # commit=False so new rows can be wired to the workout first.
            exercise_formset.save(commit=False)
            # Update
            exercises_changed = dict(exercise_formset.changed_objects)
            for exercise in exercises_changed:
                exercise.save()
            # Create
            for exercise in exercise_formset.new_objects:
                exercise.workout = workout
                # Manage order
                if exercise.order == None:
                    exercise.order = workout.get_next_exercise_order()
                exercise.save()
            # Delete
            # NOTE(review): deletion of rows flagged for removal is commented
            # out, so such rows are currently ignored -- confirm intent.
            #for exercise in exercise_formset.deleted_objects:
                #exercise.delete()
            messages.success(request, Notification.success_message, extra_tags=Notification.success_class)
            return HttpResponseRedirect (reverse ('hamask:workout_update', kwargs={'pk':workout.id}))
        else:
            return render (request, template_name, {'form': form
                        , 'exercise_formset': exercise_formset
                        , 'id': workout.id
                        , 'program_id': workout.workout_group.program.id,})
def reorder_exercise(request):
    """AJAX endpoint: move a planned exercise up or down within its workout."""
    exercise = Workout_Exercise.objects.get(pk=request.GET.get('exercise_id', None))
    order = request.GET.get('order', None)
    data = {}
    try:
        if order == 'UP':
            exercise.set_order_up()
        elif order == 'DOWN':
            exercise.set_order_down()
    except ObjectDoesNotExist:
        # Already at the boundary; respond with an empty payload.
        pass
    else:
        data = {'exercise_id': exercise.id}
    return JsonResponse(data)
def delete_exercise(request):
    """AJAX endpoint: delete a planned workout exercise, echoing its id back."""
    exercise = Workout_Exercise.objects.get(pk=request.GET.get('exercise_id', None))
    payload = {'exercise_id': exercise.id}
    exercise.delete()
    return JsonResponse(payload)
def update_workout_notes(request):
    """AJAX endpoint: save notes on a planned exercise and return the
    formatted notes for display."""
    exercise = Workout_Exercise.objects.get(pk=request.GET.get('workout_exercise_id', None))
    exercise.notes = request.GET.get('notes', None)
    exercise.save()
    return JsonResponse({'notes_formt': exercise.notes_formt()})
@login_required
def logs(request):
    """Workout history page; POST with 'create_log' starts a blank log."""
    lifter = Lifter.objects.get(pk=request.session['lifter'])
    if request.POST:
        if 'create_log' in request.POST:
            log = Workout_Log(lifter=lifter, status='COMPL')
            log.save()
            return HttpResponseRedirect (reverse ('hamask:log_update', kwargs={'pk':log.id}))
        # NOTE(review): a POST without 'create_log' falls through and returns
        # None -- presumably unreachable from the template; confirm.
    else:
        logs = lifter.get_last_workouts()
        return render (request, 'hamask/logs.html', {'logs': logs})
@login_required
def log_update(request, pk, template_name='hamask/log.html'):
    """Edit a workout log and its logged-exercise rows (inline formset)."""
    lifter_id = request.session['lifter']
    log = get_object_or_404(Workout_Log, pk=pk)
    # Ownership check.
    if log.get_lifter().id != lifter_id:
        raise Http404("Invalid log.")
    # Build forms
    form = WorkoutLogForm(request.POST or None, instance=log, prefix='log')
    ExerciseFormset = modelformset_factory(Workout_Exercise_Log, form=WorkoutExerciseLogForm, can_delete=True)
    exercise_formset = ExerciseFormset(request.POST or None
                        , prefix='exercise_log'
                        , queryset=log.get_exercise_log()
                        , form_kwargs={'lifter': lifter_id})
    if 'delete' in request.POST:
        log.delete()
        messages.success(request, Notification.success_message, extra_tags=Notification.success_class)
        return HttpResponseRedirect (reverse ('hamask:logs'))
    else:
        if form.is_valid() and exercise_formset.is_valid():
            form.save()
            # commit=False so new rows can be wired to the log first.
            exercise_formset.save(commit=False)
            # Update
            exercises_changed = dict(exercise_formset.changed_objects)
            for exercise in exercises_changed:
                exercise.save()
            # Create
            for exercise in exercise_formset.new_objects:
                exercise.workout_log = log
                if exercise.order == None:
                    exercise.order = log.get_next_exercise_order()
                exercise.save()
            # Delete
            # NOTE(review): deletion of rows flagged for removal is commented
            # out, so such rows are currently ignored -- confirm intent.
            #for exercise in exercise_formset.deleted_objects:
            #    exercise.delete()
            messages.success(request, Notification.success_message, extra_tags=Notification.success_class)
            return HttpResponseRedirect (reverse ('hamask:log_update', kwargs={'pk':log.id}))
        else:
            return render (request, template_name, {'form': form
                        , 'exercise_formset': exercise_formset
                        , 'id': log.id})
def reorder_exercise_log(request):
    """AJAX endpoint: move a logged exercise up or down within its log."""
    exercise_log = Workout_Exercise_Log.objects.get(pk=request.GET.get('exercise_log_id', None))
    order = request.GET.get('order', None)
    data = {}
    try:
        if order == 'UP':
            exercise_log.set_order_up()
        elif order == 'DOWN':
            exercise_log.set_order_down()
    except ObjectDoesNotExist:
        # Already at the boundary; respond with an empty payload.
        pass
    else:
        data = {'exercise_log_id': exercise_log.id}
    return JsonResponse(data)
def delete_exercise_log(request):
    """AJAX endpoint: delete a logged exercise entry, echoing its id back."""
    exercise_log = Workout_Exercise_Log.objects.get(pk=request.GET.get('exercise_log_id', None))
    payload = {'exercise_log_id': exercise_log.id}
    exercise_log.delete()
    return JsonResponse(payload)
def update_log_notes(request):
    """AJAX endpoint: save notes on a logged exercise and return the
    formatted notes for display."""
    exercise_log = Workout_Exercise_Log.objects.get(pk=request.GET.get('workout_exercise_log_id', None))
    exercise_log.notes = request.GET.get('notes', None)
    exercise_log.save()
    return JsonResponse({'notes_formt': exercise_log.notes_formt()})
@login_required
def logs_by_exercise(request, exercise='0'):
    """Browse workout logs filtered by exercise ('0' means no filter yet)."""
    lifter = Lifter.objects.get(pk=request.session['lifter'])
    form = LogByExerciseForm(request.POST or None, lifter = lifter.id)
    if request.POST:
        # Create
        if 'create_log' in request.POST:
            log = Workout_Log(lifter=lifter, status='COMPL')
            log.save()
            return HttpResponseRedirect (reverse ('hamask:log_update', kwargs={'pk':log.id}))
        # Search
        else:
            form = LogByExerciseForm(request.POST, lifter = lifter.id)
            if form.is_valid():
                exercise = form.cleaned_data['exercise']
                return HttpResponseRedirect (reverse ('hamask:logs_by_exercise', kwargs={'exercise':exercise}))
            else:
                return HttpResponseRedirect (reverse ('hamask:logs_by_exercise'))
    else:
        if exercise != '0':
            form = LogByExerciseForm(initial={'exercise': exercise}, lifter = lifter.id)
            logs = lifter.get_exercise_logs(exercise=exercise)
        else:
            form = LogByExerciseForm(lifter = lifter.id)
            logs = None
        return render (request, 'hamask/logs_by_exercise.html', {'form': form, 'logs': logs})
@login_required
def next_workouts(request):
    """Show the upcoming workouts of every started program."""
    lifter = Lifter.objects.get(pk=request.session['lifter'])
    programs = lifter.get_started_programs()
    workouts = {}
    exercises = {}
    for program in programs:
        upcoming = program.get_next_workouts()
        workouts[program.id] = upcoming
        for workout in upcoming:
            exercises[workout.id] = workout.get_workout_exercises()
    context = {'programs': programs
        , 'workouts': workouts
        , 'exercises': exercises,}
    return render (request, 'hamask/next_workouts.html', context)
@login_required
def stats(request, exercise='0'):
    """Dashboard of PL maxes, total, wilks, bodyweight and recent PRs, plus
    an optional per-exercise PR lookup ('0' means no exercise selected)."""
    lifter = Lifter.objects.get(pk=request.session['lifter'])
    lifter_name = lifter.first_name + ' ' + lifter.last_name
    maxes = lifter.get_pl_maxes()
    total = lifter.get_pl_total()
    # getattr with default: lifter may have no recorded bodyweight yet.
    bodyweight = getattr(lifter.get_current_bodyweight(), "weight", None)
    wilks = lifter.get_current_wilks()
    prs = lifter.get_last_prs()
    stats = None
    form = StatsByExerciseForm(request.POST or None, lifter = lifter.id)
    if request.POST:
        if form.is_valid():
            exercise = form.cleaned_data['exercise']
            return HttpResponseRedirect (reverse ('hamask:stats', kwargs={'exercise':exercise}))
        else:
            return HttpResponseRedirect (reverse ('hamask:stats'))
    else:
        if exercise != '0':
            form = StatsByExerciseForm(initial={'exercise': exercise}, lifter = lifter.id)
            stats = lifter.get_exercise_prs(exercise_id=exercise)
        else:
            form = StatsByExerciseForm(lifter = lifter.id)
            stats = None
        return render (request, 'hamask/stats.html', {'lifter_name': lifter_name
            , 'maxes': maxes
            , 'prs': prs
            , 'stats': stats
            , 'wilks': wilks
            , 'total': total
            , 'bodyweight': bodyweight
            , 'form': form})
@login_required
def stat_create(request, template_name='hamask/stat.html'):
    """Record a new lifter stat; 'saveadd' re-opens an empty form."""
    lifter_id = request.session['lifter']
    form = StatForm(request.POST or None
            , lifter = lifter_id)
    if form.is_valid():
        stat = form.save(commit=False)
        stat.lifter = Lifter.objects.get(pk=request.session['lifter'])
        stat.save()
        if 'saveadd' in request.POST:
            messages.success(request, Notification.success_message, extra_tags=Notification.success_class)
            return HttpResponseRedirect (reverse ('hamask:stat_create'))
        else:
            messages.success(request, Notification.success_message, extra_tags=Notification.success_class)
            return HttpResponseRedirect (reverse ('hamask:stats'))
    else:
        return render (request, template_name, {'form': form})
@login_required
def stat_update(request, pk, template_name='hamask/stat.html'):
    """Edit or delete an existing lifter stat."""
    lifter_id = request.session['lifter']
    lifter_stat = get_object_or_404(Lifter_Stats, pk=pk)
    # Ownership check.
    if lifter_stat.lifter.id != lifter_id:
        raise Http404("Invalid stat.")
    form = StatForm(request.POST or None
            , instance=lifter_stat
            , lifter = lifter_id)
    if 'delete' in request.POST:
        lifter_stat.delete()
        messages.success(request, Notification.success_message, extra_tags=Notification.success_class)
        return HttpResponseRedirect (reverse ('hamask:stats'))
    else:
        if form.is_valid():
            form.save()
            if 'saveadd' in request.POST:
                messages.success(request, Notification.success_message, extra_tags=Notification.success_class)
                return HttpResponseRedirect (reverse ('hamask:stat_create'))
            else:
                messages.success(request, Notification.success_message, extra_tags=Notification.success_class)
                return HttpResponseRedirect (reverse ('hamask:stats'))
        else:
            return render (request, template_name, {'form': form, 'id': lifter_stat.id,})
@login_required
def all_stats(request, exercise='0'):
    """List all stats, optionally filtered by exercise ('0' = no filter)."""
    lifter = Lifter.objects.get(pk=request.session['lifter'])
    form = StatsByExerciseForm(request.POST or None, lifter = lifter.id)
    if request.POST:
        if form.is_valid():
            exercise = form.cleaned_data['exercise']
            return HttpResponseRedirect (reverse ('hamask:all_stats', kwargs={'exercise':exercise}))
        else:
            return HttpResponseRedirect (reverse ('hamask:all_stats'))
    else:
        if exercise != '0':
            form = StatsByExerciseForm(initial={'exercise': exercise}, lifter = lifter.id)
            stats = lifter.get_exercise_stats(exercise_id=exercise)
        else:
            form = StatsByExerciseForm(lifter = lifter.id)
            stats = lifter.get_stats()
        return render (request, 'hamask/all_stats.html', {'form': form, 'stats': stats,})
@login_required
def max_progression(request):
    """Chart page: max progression of the main lifts, total, bodyweight
    and wilks, serialized as a JSON-ish array of Chartist series."""
    lifter = Lifter.objects.get(pk=request.session['lifter'])
    series = []
    # Main lifts
    for exercise in Exercise.get_exercises('MAIN', lifter):
        query = lifter.get_maxes_chart(exercise)
        series.append(Chartist.get_chartist_data(exercise.name, query))
    # PL Total
    series.append(Chartist.get_chartist_data_from_dict('Total', lifter.get_pl_total_chart()))
    # Bodyweight
    series.append(Chartist.get_chartist_data('Bodyweight', lifter.get_bodyweight_chart()))
    # Wilks
    series.append(Chartist.get_chartist_data_from_dict('Wilks', lifter.get_wilks_chart()))
    data = '[' + ','.join(series) + ']'
    return render (request, 'hamask/max_progression.html', {'data': data})
@login_required
def work_intensity(request, pk=None):
    """Chart of training intensity and volume over time for one exercise."""
    lifter = Lifter.objects.get(pk=request.session['lifter'])
    exercise = None
    if pk:
        exercise = get_object_or_404(Exercise, pk=pk)
    if exercise:
        form = WorkIntensityForm(request.POST or None, lifter = lifter.id, exercise = exercise.id)
    else:
        form = WorkIntensityForm(request.POST or None, lifter = lifter.id, exercise = '')
    if request.POST:
        if form.is_valid():
            exercise = form.cleaned_data['exercise']
            if exercise == '0':
                return HttpResponseRedirect (reverse ('hamask:work_intensity'))
            return HttpResponseRedirect (reverse ('hamask:work_intensity', kwargs={'pk': exercise}))
        # NOTE(review): a POST with an invalid form returns None -- confirm.
    else:
        data = ''
        if exercise:
            # Intensity
            data = '['
            query = lifter.get_exercise_intensity_chart(exercise)
            data += Chartist.get_chartist_data('Intensity', query) + ','
            # Volume
            query = lifter.get_exercise_volume_chart(exercise)
            data += Chartist.get_chartist_data('Volume', query)
            data = data + ']'
        return render (request, 'hamask/work_intensity.html', {'form': form, 'data': data})
@login_required
def program_intensity(request, pk=None):
    """Show intensity/volume charts for one of the lifter's programs.

    GET with ``pk``: chart that program (404 if it belongs to another
    lifter). POST: redirect to the page for the selected program ('0'
    means "no selection").
    """
    lifter = Lifter.objects.get(pk=request.session['lifter'])
    program = None
    if pk:
        program = get_object_or_404(Program, pk=pk)
        # A lifter may only chart their own programs.
        if program.lifter != lifter:
            raise Http404("Invalid program.")

    if program:
        form = ProgramIntensityForm(request.POST or None, lifter=lifter.id, program=program.id)
    else:
        form = ProgramIntensityForm(request.POST or None, lifter=lifter.id, program='')

    if request.POST and form.is_valid():
        program = form.cleaned_data['program']
        if program == '0':
            return HttpResponseRedirect (reverse ('hamask:program_intensity'))
        return HttpResponseRedirect (reverse ('hamask:program_intensity', kwargs={'pk':program}))

    # GET, or an invalid POST. BUG FIX: previously an invalid POST skipped the
    # branch that assigned ``data`` and crashed with NameError on the render.
    data = ''
    if program:
        # Intensity
        query = program.get_intensity_chart()
        data = '[' + Chartist.get_chartist_data_number('Intensity', query) + ','
        # Volume
        query = program.get_volume_chart()
        data += Chartist.get_chartist_data_number('Volume', query) + ']'
    return render (request, 'hamask/program_intensity.html', {'form': form, 'data': data})
@login_required
def profile(request):
    """Display and update the lifter profile; also handles password changes.

    Two submit buttons post to this view: 'save' (profile form) and
    'change_password' (password form). Successful actions redirect back
    here; invalid forms re-render the page with their errors.
    """
    lifter = Lifter.objects.get(pk=request.session['lifter'])
    form = ProfileForm(request.POST or None, instance=lifter, prefix='lifter')
    password_form = ChangePasswordForm(request.POST or None, prefix='password')

    if 'save' in request.POST:
        if form.is_valid():
            form.save()
            messages.success(request, Notification.success_message, extra_tags=Notification.success_class)
            return HttpResponseRedirect (reverse ('hamask:profile'))
    elif 'change_password' in request.POST:
        if password_form.is_valid():
            password = password_form.cleaned_data['password']
            confirm = password_form.cleaned_data['confirm_password']
            if password and confirm:
                if password == confirm:
                    user = User.objects.get(username=lifter.email)
                    user.set_password(password)
                    user.save()
                    # set_password invalidates the session: log back in and
                    # restore the lifter id the other views rely on.
                    login (request, user)
                    request.session['lifter'] = lifter.id
                    messages.success(request, Notification.success_message, extra_tags=Notification.success_class)
                else:
                    messages.error(request, 'Password and confirmation must match.', extra_tags=Notification.error_class)
            else:
                messages.error(request, 'Password and confirmation are required.', extra_tags=Notification.error_class)
            return HttpResponseRedirect (reverse ('hamask:profile'))

    # GET, or a POST whose form failed validation. BUG FIX: previously an
    # invalid form fell through and the view returned None, which makes
    # Django raise; now the page is re-rendered with the form errors.
    return render (request, 'hamask/profile.html', {'form': form, 'password_form': password_form})
@login_required
def bodyweight(request):
    """Log a new bodyweight entry and list the lifter's existing ones."""
    lifter = Lifter.objects.get(pk=request.session['lifter'])
    logs = lifter.get_all_bodyweights()
    form = BodyweightForm(request.POST or None, prefix='bodyweight')

    # GET, or invalid POST: show the page (with errors when bound).
    if not form.is_valid():
        return render (request, 'hamask/bodyweight.html', {'form': form, 'logs': logs})

    # Attach the new entry to the current lifter before persisting it.
    entry = form.save(commit=False)
    entry.lifter = lifter
    entry.save()
    messages.success(request, Notification.success_message, extra_tags=Notification.success_class)
    return HttpResponseRedirect (reverse ('hamask:bodyweight'))
@login_required
def bodyweight_update(request, pk, template_name='hamask/bodyweight.html'):
    """Edit or delete an existing bodyweight log entry."""
    lifter = Lifter.objects.get(pk=request.session['lifter'])
    entry = get_object_or_404(Lifter_Weight, pk=pk)
    # Only the owner may touch the entry.
    if entry.lifter.id != lifter.id:
        raise Http404("Invalid request.")

    logs = lifter.get_all_bodyweights()
    form = BodyweightForm(request.POST or None, instance=entry)

    if 'delete' in request.POST:
        entry.delete()
    elif form.is_valid():
        form.save()
    else:
        # GET, or invalid edit: re-render with the bound form.
        return render (request, template_name, {'form': form, 'logs': logs, 'id': entry.id})

    # Both the delete and the successful-save paths end the same way.
    messages.success(request, Notification.success_message, extra_tags=Notification.success_class)
    return HttpResponseRedirect (reverse ('hamask:bodyweight'))
@login_required
def custom_exercises(request, template_name='hamask/custom_exercises.html'):
    """Create, update and delete the lifter's custom exercises via a formset."""
    lifter = get_object_or_404(Lifter, pk=request.session['lifter'])

    # Build forms
    ExerciseFormset = modelformset_factory(Exercise, form=CustomExerciseForm, can_delete=True)
    exercise_formset = ExerciseFormset(request.POST or None, prefix='custom_exercise', queryset=Exercise.get_lifter_exercises(lifter))

    if exercise_formset.is_valid():
        exercise_formset.save(commit=False)
        # Update edited exercises. ``changed_objects`` is a list of
        # (instance, changed_fields) pairs.
        for exercise, _changed_fields in exercise_formset.changed_objects:
            exercise.save()
        # Create new exercises, attaching them to the current lifter.
        for exercise in exercise_formset.new_objects:
            exercise.lifter = lifter
            exercise.save()
        # BUG FIX: with commit=False the formset does NOT delete the rows
        # whose delete checkbox was ticked — they only end up in
        # ``deleted_objects`` and must be removed explicitly, otherwise
        # can_delete=True is a no-op.
        for exercise in exercise_formset.deleted_objects:
            exercise.delete()
        messages.success(request, Notification.success_message, extra_tags=Notification.success_class)
        return HttpResponseRedirect (reverse ('hamask:custom_exercises'))
    else:
        return render (request, template_name, {'exercise_formset': exercise_formset})
def delete_custom_exercise(request):
    # AJAX endpoint: deletes the exercise whose id is given by the
    # ``exercise_id`` GET parameter and echoes the id back as JSON.
    # NOTE(review): unlike the other views this one has no @login_required
    # and no ownership check, so any caller who can reach the URL can delete
    # any exercise by id — confirm this is protected elsewhere (URLconf /
    # middleware) or add the checks.
    exercise = Exercise.objects.get(pk=request.GET.get('exercise_id', None))
    data = {'exercise_id': exercise.id}
    exercise.delete()
    return JsonResponse(data)
@login_required
def rm_calculator(request):
    # Rep-max calculator page; the numbers themselves are fetched
    # client-side via get_rm_calculator_data().
    # NOTE(review): ``lifter`` is unused, but the lookup doubles as a check
    # that the session points at an existing Lifter (it raises otherwise) —
    # confirm before removing it.
    lifter = Lifter.objects.get(pk=request.session['lifter'])
    form = RmCalculatorForm(request.POST or None)
    return render (request, 'hamask/rm_calculator.html', {'form': form})
def get_rm_calculator_data(request):
    # AJAX endpoint returning the rep-max chart data as JSON.
    # NOTE(review): int(None) raises TypeError when 'weight' or 'reps' is
    # absent, and non-numeric input raises ValueError — both surface as a
    # 500 response; consider validating the GET parameters.
    data = Tools.get_rm_chart_json(int(request.GET.get('weight', None)), int(request.GET.get('reps', None)))
    return JsonResponse(data, safe=False)
@login_required
def meet_planner(request):
    """Show and edit the lifter's (single) meet planner.

    'reset' recreates the planner from scratch; 'save' persists the form.
    On a first GET a planner is created on demand.
    """
    lifter = Lifter.objects.get(pk=request.session['lifter'])
    # A lifter has at most one planner; first() returns None when absent.
    meet_planner = Meet_Planner.objects.filter(lifter__exact=lifter).first()
    form = MeetPlannerForm(request.POST or None, instance=meet_planner)
    if request.POST:
        if 'reset' in request.POST:
            # NOTE(review): if no planner exists yet, ``meet_planner`` is
            # None here and .delete() raises AttributeError — presumably the
            # template only offers 'reset' when one exists; confirm.
            meet_planner.delete()
            meet_planner = Meet_Planner.initialize_meet_planner(lifter)
        elif 'save' in request.POST and form.is_valid():
            form.save()
            messages.success(request, Notification.success_message, extra_tags=Notification.success_class)
            return HttpResponseRedirect (reverse ('hamask:meet_planner'))
    else:
        # First visit: create the planner on demand and rebind the form.
        if not meet_planner:
            meet_planner = Meet_Planner.initialize_meet_planner(lifter)
            form = MeetPlannerForm(request.POST or None, instance=meet_planner)
    converted_data = meet_planner.get_converted_data_with_unit()
    return render (request, 'hamask/meet_planner.html', {'form': form, 'meet_planner': meet_planner, 'converted_data': converted_data})
| |
import asyncio
import functools
import requests
import subprocess
from io import BytesIO
from pdfjinja import PdfJinja
from PIL import Image
from secretary import Renderer
from tempfile import NamedTemporaryFile
# Referer header sent with every WMS/WMTS request (some map servers
# refuse requests without one).
WMS_HEADERS = {'Referer': 'http://localhost'}
# Standard OGC rendering density: 25.4 mm/inch divided by the OGC
# reference pixel size of 0.28 mm.
LAYER_DPI = 25.4 / 0.28
class MapPrint:
    """Render a print-ready map PDF from a MapFish-Print-style payload.

    ``payload['attributes']['map']`` supplies projection, scale, center,
    dpi and the layer list. Layer images are fetched concurrently on a
    private asyncio event loop and composited with Pillow, then embedded
    in an ODT/PDF template.
    """
    def __init__(self, payload):
        self._payload = payload
        # Private loop used to parallelise the blocking requests.get calls
        # via run_in_executor.
        self._loop = asyncio.new_event_loop()
        # Composited PIL image; built lazily by print_pdf().
        self._map_image = None
        self._map = payload['attributes']['map']
        self._projection = self._map['projection']
        self._scale = self._map['scale']
        self._center = self._map['center']
        self._dpi = self._map['dpi']
        self._init_bbox()
        self._init_map_size()
    def _init_bbox(self):
        """Compute the geographic bbox (min_x, min_y, max_x, max_y) around the center."""
        # In meters, from template
        # TODO: read this value from the configuration
        paint_area_width = 210.01 * 10**-3
        paint_area_height = 297.0 * 10**-3
        # Paper size (m) times map scale gives the ground footprint (m).
        geo_width = paint_area_width * self._scale
        geo_height = paint_area_height * self._scale
        x, y = self._center
        min_geo_x = x - (geo_width / 2.0)
        min_geo_y = y - (geo_height / 2.0)
        max_geo_x = min_geo_x + geo_width
        max_geo_y = min_geo_y + geo_height
        self._bbox = (min_geo_x, min_geo_y, max_geo_x, max_geo_y)
    def _init_map_size(self):
        """Derive the raster size in pixels from the bbox, dpi and scale."""
        width = self._bbox[2] - self._bbox[0]
        height = self._bbox[3] - self._bbox[1]
        # TODO: improve conversion to INCHES
        # 39.37 inches per meter.
        width_inch = width * 39.37
        height_inch = height * 39.37
        self._map_size = (int(width_inch * self._dpi / self._scale), int(height_inch * self._dpi / self._scale))
    def print_pdf(self):
        """Entry point: build the map image if needed and return the PDF path."""
        if self._map_image is None:
            self._create_map_image()
        return self.create_pdf()
    def _create_map_image(self):
        """Composite all fetched layer images (in payload order) into one RGBA image."""
        images = self._get_images()
        self._map_image = Image.new('RGBA', self._map_size)
        for img in images:
            # WMS layers arrive as HTTP responses, WMTS as ready PIL images.
            if not isinstance(img, Image.Image):
                img = Image.open(BytesIO(img.content)).convert('RGBA')
            self._map_image = Image.alpha_composite(self._map_image, img)
    def _get_images(self):
        """Schedule one fetch per supported layer and wait for all of them."""
        images = []
        for layer in self._payload['attributes']['map']['layers']:
            if layer['type'].lower() == 'wms':
                future_img = self._get_wms_image(layer)
                images.append(future_img)
            elif layer['type'].lower() == 'wmts':
                future_img = self._get_wmts_image(layer)
                images.append(future_img)
        # Layers of any other type are silently skipped.
        return self._loop.run_until_complete(asyncio.gather(*images))
    def _get_wms_image(self, layer_info):
        """Return a future resolving to the WMS GetMap HTTP response for a layer."""
        base_url = layer_info['baseURL']
        params = {
            'VERSION': '1.1.1',
            'REQUEST': 'GetMap',
            'LAYERS': ','.join(layer_info['layers']),
            'SRS': self._projection,
            'STYLES': '',
            'WIDTH': self._map_size[0],
            'HEIGHT': self._map_size[1],
            'BBOX': ','.join([str(nb) for nb in self._bbox]),
            'FORMAT': layer_info['imageFormat'],
        }
        # Layer-specific parameters may override the defaults above.
        custom_params = layer_info.get('customParams', {})
        params.update(custom_params)
        # The blocking requests.get runs in the default thread pool.
        future_img = self._loop.run_in_executor(
            None,
            functools.partial(
                requests.get,
                base_url,
                params=params,
                headers=WMS_HEADERS
            )
        )
        return future_img
    def _get_wmts_image(self, layer_info):
        """Pick the tile matrix closest to the requested scale and fetch it.

        NOTE(review): only the REST request encoding is handled; for any
        other encoding this returns None, which later crashes
        _create_map_image — confirm KVP is never sent by clients.
        """
        matrix = layer_info['matrices'][0]
        for candidate_matrix in layer_info['matrices'][1:]:
            if abs(candidate_matrix['scaleDenominator'] - self._scale) < abs(matrix['scaleDenominator'] - self._scale):
                matrix = candidate_matrix
        if layer_info['requestEncoding'].upper() == 'REST':
            return self._get_wmts_image_rest(layer_info, matrix)
    def _get_wmts_image_rest(self, layer_info, matrix):
        """Fetch all tiles covering the bbox, stitch, crop and resize them.

        Returns a completed future holding a PIL image sized to
        ``self._map_size`` (so it composites directly with WMS layers).
        """
        size_on_screen = matrix['tileSize'][0], matrix['tileSize'][1]
        # Ground meters per tile pixel at this matrix's scale denominator.
        layer_resolution = matrix['scaleDenominator'] / (LAYER_DPI * 39.37)
        tile_size_in_world = (size_on_screen[0] * layer_resolution, size_on_screen[1] * layer_resolution)
        x_min, y_max = matrix['topLeftCorner'][0], matrix['topLeftCorner'][1]
        x_max, y_min = (x_min + tile_size_in_world[0], y_max - tile_size_in_world[1])
        col_min = 0
        col_max = 0
        row_min = 0
        row_max = 0
        col = 0
        row = 0
        # Geographic extent actually covered by the selected tile range.
        wmts_bbox = [0, 0, 0, 0]
        # Walk columns left-to-right until the bbox is covered horizontally.
        while True:
            if x_min <= self._bbox[0] and x_max > self._bbox[0]:
                wmts_bbox[0] = x_min
                col_min = col
            if x_min <= self._bbox[2] and x_max > self._bbox[2]:
                col_max = col
                wmts_bbox[2] = x_max
                break
            col += 1
            x_min = x_max
            x_max += tile_size_in_world[0]
        # Walk rows top-to-bottom until the bbox is covered vertically.
        while True:
            if y_min < self._bbox[1] and y_max > self._bbox[1]:
                row_max = row
                wmts_bbox[1] = y_min
                break
            if y_min < self._bbox[3] and y_max > self._bbox[3]:
                row_min = row
                wmts_bbox[3] = y_max
            row += 1
            y_max = y_min
            y_min -= tile_size_in_world[1]
        url = layer_info['baseURL'].replace('{TileMatrix}', matrix['identifier'])
        # Substitute any extra WMTS dimensions (TIME, STYLE, ...) in the URL.
        for dimension in layer_info['dimensions']:
            url = url.replace('{' + dimension + '}', layer_info['dimensionParams'][dimension])
        width, height = size_on_screen
        combined_size = (width * (col_max - col_min + 1), height * (row_max - row_min + 1))
        combined_image = Image.new('RGBA', combined_size)
        wmts_images = []
        wmts_images_infos = []
        # Fetch every tile in the range concurrently.
        for col in range(col_min, col_max + 1):
            for row in range(row_min, row_max + 1):
                future_img = self._loop.run_in_executor(
                    None,
                    functools.partial(
                        requests.get,
                        url.replace('{TileRow}', str(row)).replace('{TileCol}', str(col)),
                        headers=WMS_HEADERS
                    )
                )
                wmts_images.append(future_img)
                wmts_images_infos.append((row, col))
        wmts_images = self._loop.run_until_complete(asyncio.gather(*wmts_images))
        # Paste the tiles that arrived; failed tiles leave transparent holes.
        for index, wmts_image in enumerate(wmts_images):
            resp = wmts_image
            row, col = wmts_images_infos[index]
            if resp.status_code != 200:
                continue
            img = Image.open(BytesIO(resp.content)).convert('RGBA')
            combined_image.paste(img, box=(width * (col - col_min), height * (row - row_min)))
        # Offset of the requested bbox inside the stitched tile mosaic.
        # NOTE(review): the literal "- 800" is an unexplained magic offset on
        # the y axis — looks like a calibration hack; confirm and derive it.
        diff = self._bbox[0] - wmts_bbox[0], self._bbox[1] - wmts_bbox[1] - 800
        width, height = self._bbox[2] - self._bbox[0], self._bbox[3] - self._bbox[1]
        crop_box = int(diff[0] / layer_resolution), int(diff[1] / layer_resolution), int((diff[0] + width) / layer_resolution), int((diff[1] + height) / layer_resolution)
        wmts_cropped = combined_image.crop(box=crop_box)
        if wmts_cropped.size != self._map_size:
            wmts_cropped = wmts_cropped.resize(self._map_size)
        # Wrap the finished image in an already-resolved future so callers can
        # gather() it together with the WMS futures.
        future = asyncio.Future(loop=self._loop)
        future.set_result(wmts_cropped)
        return future
    def create_pdf(self):
        """Produce the final PDF; currently always via the LibreOffice path."""
        return self._create_pdf_libreoffice()
        #return self._create_pdf_pdftk()
    def _create_pdf_libreoffice(self):
        """Render the map into an ODT template, convert with unoconv, return the PDF path."""
        output_image = BytesIO()
        self._map_image.save(output_image, 'PNG')
        render = Renderer(media_path='.')
        # TODO: use the configuration to select the template
        # TODO: use the configuration to select the name of the key in the template
        result = render.render('template.odt', my_map=output_image)
        with NamedTemporaryFile(
            mode='wb+',
            prefix='geo-pyprint_',
            delete=True
        ) as generated_odt:
            generated_odt.write(result)
            generated_odt.flush()
            # unoconv writes the PDF next to the temp ODT; the ODT itself is
            # deleted when the context manager exits, the PDF is kept.
            output_name = generated_odt.name + '.pdf'
            cmd = [
                'unoconv',
                '-f',
                'pdf',
                '-o',
                output_name,
                generated_odt.name
            ]
            subprocess.call(cmd, timeout=None)
            return output_name
    def _create_pdf_pdftk(self):
        """Alternative path: fill a PDF form template with pdfjinja."""
        with NamedTemporaryFile(
            mode='wb+',
            prefix='geo-pyprint_',
            delete=True
        ) as map_image_file:
            self._map_image.save(map_image_file, 'PNG')
            map_image_file.flush()
            # TODO: use the configuration to select the template
            # TODO: use the configuration to select the name of the key in the template
            pdfjinja = PdfJinja('pdfjinja-template.pdf')
            pdfout = pdfjinja(dict(map=map_image_file.name))
            # delete=False: the caller is handed the path and owns cleanup.
            with NamedTemporaryFile(
                mode='wb+',
                prefix='geo-pyprint_',
                suffix='.pdf',
                delete=False
            ) as output_file:
                pdfout.write(output_file)
                output_file.flush()
                return output_file.name
| |
import signal
import sys
import socket
# Qt imports. The sip API version must be selected BEFORE PyQt4 is
# imported, otherwise PyQt4 falls back to the v1 QString/QVariant wrappers.
import sip
sip.setapi('QVariant',2)
sip.setapi('QString',2)
from PyQt4 import QtCore,QtGui,QtNetwork
from klusta_process_manager.experiment import Experiment
from klusta_process_manager.general import ConsoleView
from .experimentModelServer import ExperimentModelServer
from .clientSocket import Client
from klusta_process_manager.config import *
class ServerTCP(QtGui.QWidget):
    """Processing server GUI: accepts client connections over TCP, syncs
    experiment folders from a backup location and runs one processing job
    (QProcess) at a time, reporting state changes back to the clients.
    """
    def __init__(self,parent=None):
        super(ServerTCP,self).__init__(parent)
        #IP adress, PORT, HOST
        # Discover the outward-facing IP by opening a UDP socket to a public
        # address (no packet is actually sent); fall back to the configured IP.
        try:
            self.ip=[(s.connect(('8.8.8.8', 80)), s.getsockname()[0], s.close()) for s in [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]][0][1]
        except:
            self.ip=IP
        self.port=PORT
        self.host=QtNetwork.QHostAddress(self.ip)
        #TCP Server
        self.server=QtNetwork.QTcpServer(self)
        self.server.newConnection.connect(self.on_new_connection)
        if not self.server.listen(address=self.host,port=self.port):
            QtGui.QMessageBox.critical(self,"Server","Unable to start server. Maybe it's already running")
            self.close()
            return
        #Process: runs the (single) current processing job.
        self.process=QtCore.QProcess()
        self.process.finished.connect(self.try_process)
        self.wasKill=False
        self.process.setProcessChannelMode(QtCore.QProcess.MergedChannels)
        self.process.readyRead.connect(self.display_output)
        #Transfer: separate QProcess used for folder synchronisation.
        self.processSync=QtCore.QProcess()
        self.processSync.finished.connect(self.try_sync)
        #dealing with the klusta environment
        # Rewrite PATH so the child process runs inside the 'klusta' conda env.
        env = QtCore.QProcess.systemEnvironment()
        itemToReplace=[item for item in env if item.startswith('PATH=')]
        for item in itemToReplace:
            newitem=item.replace('/anaconda/bin:','/anaconda/envs/klusta/bin:')
            env.remove(item)
            env.append(newitem)
        env.append("CONDA_DEFAULT_ENV=klusta")
        self.process.setEnvironment(env)
        #console
        self.console=ConsoleView(self)
        #model and clients
        self.clientDict={}  # peer IP (str) -> Client wrapper
        self.model=ExperimentModelServer(self)
        self.model.expStateChanged.connect(self.update_one_client)
        self.model.expDone.connect(self.one_exp_done)
        self.model.expFail.connect(self.one_exp_fail)
        #view
        self.tableView=QtGui.QTableView(self)
        self.tableView.setModel(self.model)
        self.tableView.horizontalHeader().setResizeMode(QtGui.QHeaderView.Stretch)
        #Experiments
        self.root=QtCore.QDir(SERVER_PATH)
        self.experimentDict={}  # folderName -> Experiment
        self._layout()
        self.show()
    def _layout(self):
        """Build the static widget layout (buttons, labels, console, table)."""
        self.button_kill=QtGui.QPushButton("Kill current")
        self.button_kill.clicked.connect(self.kill_current)
        self.button_clear=QtGui.QPushButton("Clear")
        self.button_clear.clicked.connect(self.clear)
        self.label_path=QtGui.QLabel("Data: "+SERVER_PATH)
        self.label_IP=QtGui.QLabel("IP: "+str(self.ip))
        self.label_port=QtGui.QLabel("Port: "+str(self.port))
        self.label_connectedClients=QtGui.QLabel("No clients connected")
        labelLayout = QtGui.QHBoxLayout()
        labelLayout.addWidget(self.label_IP)
        labelLayout.addSpacing(20)
        labelLayout.addWidget(self.label_port)
        vbox1=QtGui.QVBoxLayout()
        vbox1.addWidget(self.label_path)
        vbox1.addLayout(labelLayout)
        vbox1.addWidget(self.console)
        vbox2=QtGui.QVBoxLayout()
        vbox2.addWidget(self.tableView)
        vbox2.addWidget(self.button_kill)
        vbox2.addWidget(self.label_connectedClients)
        hbox=QtGui.QHBoxLayout()
        hbox.addLayout(vbox2)
        hbox.addLayout(vbox1)
        self.setLayout(hbox)
    def on_new_connection(self):
        """Accept all pending TCP connections, reusing Client wrappers per IP."""
        while self.server.hasPendingConnections():
            #accept connection
            newTcpSocket=self.server.nextPendingConnection()
            ip=newTcpSocket.peerAddress().toString()
            #check if old/new client
            if ip in self.clientDict.keys():
                # Known client reconnecting: swap in the fresh socket.
                self.clientDict[ip].update_socket(newTcpSocket)
            else:
                self.clientDict[ip]=Client(newTcpSocket)
                self.clientDict[ip].hasNewPaths.connect(self.client_has_new_paths)
            self.clientDict[ip].tcpSocket.disconnected.connect(self.update_label_client)
            self.update_label_client()
    def update_label_client(self):
        """Refresh the label listing the currently connected client IPs."""
        ipList=[key for key in self.clientDict if self.clientDict[key].connected]
        if len(ipList)==0:
            self.label_connectedClients.setText("No clients connected")
        else:
            self.label_connectedClients.setText("Connected: "+", ".join(ipList))
    def clear(self):
        pass #clear crash ?
    #(try to) close everything properly
    def close(self):
        # Intentionally disabled: the original teardown is kept below for
        # reference but currently does nothing.
        pass
#		if not self.process.waitForFinished(1):
#			self.process.kill()
#			self.wasKill=True
#			self.process.waitForFinished(1)
#		if not self.processSync.waitForFinished(1):
#			self.processSync.kill()
#			self.processSync.waitForFinished(1)
#		for ip,client in self.clientDict.items():
#			if client.tcpSocket.isValid():
#				client.tcpSocket.flush()
#				client.tcpSocket.disconnectFromHost()
#		self.server.close()
    def client_has_new_paths(self,ip):
        """Register the experiment folders a client just announced.

        Paths that cannot be found in the backup are reported back to the
        client as invalid; valid ones are added to the model and the
        sync/process pipeline is kicked.
        """
        newPaths=self.clientDict[ip].get_new_paths()
        expToAdd=[]
        expFail=[]
        for path in newPaths:
            expInfoDict=self.create_expInfoDict(path)
            if expInfoDict is None:
                folderName=QtCore.QFileInfo(path).baseName()
                expFail+=[folderName,"server: could not find folder in backup"]
            else:
                if expInfoDict["folderName"] in self.experimentDict:
                    # Same folder announced twice: reuse the existing object.
                    print("client resend",path)
                    exp=self.experimentDict[expInfoDict["folderName"]]
                else:
                    exp=Experiment(expInfoDict)
                    self.experimentDict[exp.folderName]=exp
                expToAdd.append(exp)
        self.model.add_experiments(expToAdd,ip)
        self.clientDict[ip].add_experiments(expToAdd)
        self.clientDict[ip].unvalid_experiments(expFail)
        self.try_sync()
    def create_expInfoDict(self,path):
        """Build the experiment-info dict for a backup path, or None if missing.

        Also creates the matching local working folder under SERVER_PATH.
        """
        expInfo=None
        backUP=QtCore.QFileInfo(path)
        if backUP.exists() and backUP.isDir():
            expInfo={}
            expInfo["pathBackUP"]=backUP.canonicalFilePath()
            name=backUP.baseName()
            expInfo["folderName"]=name
            expInfo["icon"]=None
            expInfo["animalID"]=None
            self.root.mkdir(name)
            expInfo["pathLocal"]=self.root.filePath(name)
            # Folder names look like "<animal>_<date>_<time>..."; everything
            # after the first underscore is treated as the timestamp.
            expInfo["dateTime"]="_".join(name.split("_")[1:])
        return expInfo
    def try_sync(self,exitcode=0):
        """Advance the sync queue: if no sync is running, finish the previous
        one (possibly triggering processing) and start the next."""
        if self.processSync.state()==QtCore.QProcess.Running:
            return
        else:
            if self.model.sync_done(exitcode):
                self.try_process()
            self.model.sync_one_experiment(self.processSync)
    def try_process(self,exitcode=0):
        """Advance the processing queue (one experiment at a time).

        A manual kill is reported to the model as exit code 42.
        """
        if self.process.state()==QtCore.QProcess.Running:
            return
        else:
            if self.wasKill:
                self.wasKill=False
                exitcode=42
            if self.model.process_is_done(exitcode):
                self.try_sync()
            if self.model.process_one_experiment(self.process):
                self.console.separator(self.model.expProcessing)
    def update_one_client(self,ip):
        """Push the current experiment states to one client."""
        self.clientDict[ip].send_update_state()
    def one_exp_done(self,ip,folderName,pathBackUP):
        # NOTE(review): pathBackUP is carried by the signal but unused here.
        self.clientDict[ip].update_expDone(folderName)
    def one_exp_fail(self,ip,folderName):
        self.clientDict[ip].update_expFail(folderName)
    def display_output(self):
        """Forward the processing child's stdout/stderr to the console view."""
        byteArray=self.process.readAll()
        # NOTE(review): joining a QByteArray char-by-char — presumably
        # Python 2 / sip v2 behaviour; verify under the targeted runtime.
        string="".join(byteArray)
        self.console.display(string)
    def kill_current(self):
        """Kill the running processing job (after a short grace period)."""
        if self.model.expProcessing is not None:
            self.process.waitForFinished(50)
            if self.process.state()==QtCore.QProcess.Running:
                self.process.kill()
                self.wasKill=True
                self.process.waitForFinished(1000)
    def closeEvent(self,event):
        # Intentionally a no-op: the confirmation dialog below is disabled.
        pass
#		#check if is running
#		if self.processView.process.state()==QtCore.QProcess.Running
#or self.processView.processSync.state()==QtCore.QProcess.Running :
#			msgBox = QtGui.QMessageBox()
#			msgBox.setText("Closing the app")
#			msgBox.setInformativeText("A process or transfer is running, are you sure you want to quit ?)
#			msgBox.setStandardButtons(QtGui.QMessageBox.Yes | QtGui.QMessageBox.Cancel)
#			msgBox.setDefaultButton(QtGui.QMessageBox.Cancel)
#			answer = msgBox.exec_()
#			if answer==QtGui.QMessageBox.Cancel:
#				event.ignore()
#				return
#		self.processView.close()
#		self.close()
#		event.accept()
#-----------------------------------------------------------------------------------
if __name__=='__main__':
    QtGui.QApplication.setStyle("cleanlooks")
    app = QtGui.QApplication(sys.argv)
    # Sanity-check the two configured folders before starting the server GUI.
    nas=QtCore.QDir(BACK_UP_PATH)
    server=QtCore.QDir(SERVER_PATH)
    if not nas.exists():
        msgBox=QtGui.QMessageBox()
        msgBox.setText("BACK_UP_PATH do not refers to a folder: "+str(BACK_UP_PATH))
        msgBox.exec_()
    elif not server.exists():
        msgBox=QtGui.QMessageBox()
        msgBox.setText("SERVER_PATH do not refers to a folder: "+str(SERVER_PATH))
        msgBox.exec_()
    else:
        #to be able to close wth ctrl+c
        signal.signal(signal.SIGINT, signal.SIG_DFL)
        win=ServerTCP()
        sys.exit(app.exec_())
| |
""" This module contains various functions that are special cases
of incomplete gamma functions. It should probably be renamed. """
from sympy.core import Add, S, C, sympify, cacheit, pi, I
from sympy.core.function import Function, ArgumentIndexError
from sympy.functions.elementary.miscellaneous import sqrt
# TODO series expansions
# TODO fresnel integrals
# TODO see the "Note:" in Ei
###############################################################################
################################ ERROR FUNCTION ###############################
###############################################################################
class erf(Function):
    """
    The Gauss error function.
    This function is defined as:
    :math:`\\mathrm{erf}(x)=\\frac{2}{\\sqrt{\\pi}} \\int_0^x e^{-t^2} \\, \\mathrm{d}x`
    Or, in ASCII::
                                  x
                                  /
                                 |
                                 |     2
                                 |   -t
                          2* |  e    dt
                                 |
                                 /
                                 0
                  -------------
                       ____
                     \/ pi
    Examples
    ========
    >>> from sympy import I, oo, erf
    >>> from sympy.abc import z
    Several special values are known:
    >>> erf(0)
    0
    >>> erf(oo)
    1
    >>> erf(-oo)
    -1
    >>> erf(I*oo)
    oo*I
    >>> erf(-I*oo)
    -oo*I
    In general one can pull out factors of -1 and I from the argument:
    >>> erf(-z)
    -erf(z)
    The error function obeys the mirror symmetry:
    >>> from sympy import conjugate
    >>> conjugate(erf(z))
    erf(conjugate(z))
    Differentiation with respect to z is supported:
    >>> from sympy import diff
    >>> diff(erf(z), z)
    2*exp(-z**2)/sqrt(pi)
    We can numerically evaluate the error function to arbitrary precision
    on the whole complex plane:
    >>> erf(4).evalf(30)
    0.999999984582742099719981147840
    >>> erf(-4*I).evalf(30)
    -1296959.73071763923152794095062*I
    References
    ==========
    .. [1] http://en.wikipedia.org/wiki/Error_function
    .. [2] http://dlmf.nist.gov/7
    .. [3] http://mathworld.wolfram.com/Erf.html
    .. [4] http://functions.wolfram.com/GammaBetaErf/Erf
    """
    nargs = 1
    # erf is entire, so it has no branch cuts.
    unbranched = True
    def fdiff(self, argindex=1):
        # d/dz erf(z) = 2*exp(-z**2)/sqrt(pi)
        if argindex == 1:
            return 2*C.exp(-self.args[0]**2)/sqrt(S.Pi)
        else:
            raise ArgumentIndexError(self, argindex)
    @classmethod
    def eval(cls, arg):
        # Known special values.
        if arg.is_Number:
            if arg is S.NaN:
                return S.NaN
            elif arg is S.Infinity:
                return S.One
            elif arg is S.NegativeInfinity:
                return S.NegativeOne
            elif arg is S.Zero:
                return S.Zero
        # erf(+-I*oo) = +-I*oo: pull out a factor of I and test for infinity.
        t = arg.extract_multiplicatively(S.ImaginaryUnit)
        if t == S.Infinity or t == S.NegativeInfinity:
            return arg
        # erf is odd: erf(-z) = -erf(z).
        if arg.could_extract_minus_sign():
            return -cls(-arg)
    @staticmethod
    @cacheit
    def taylor_term(n, x, *previous_terms):
        # Maclaurin series of erf has only odd-power terms.
        if n < 0 or n % 2 == 0:
            return S.Zero
        else:
            x = sympify(x)
            k = C.floor((n - 1)/S(2))
            if len(previous_terms) > 2:
                # Recurrence from the previous odd term (two entries back).
                return -previous_terms[-2] * x**2 * (n-2)/(n*k)
            else:
                return 2*(-1)**k * x**n/(n*C.factorial(k)*sqrt(S.Pi))
    def _eval_conjugate(self):
        # Mirror symmetry: conjugate(erf(z)) == erf(conjugate(z)).
        return self.func(self.args[0].conjugate())
    def _eval_is_real(self):
        return self.args[0].is_real
    def _eval_rewrite_as_uppergamma(self, z):
        # sqrt(z**2)/z carries the sign of z for real arguments.
        return sqrt(z**2)/z*(S.One - C.uppergamma(S.Half, z**2)/sqrt(S.Pi))
    def _eval_rewrite_as_tractable(self, z):
        # _erfs is the scaled helper used for series expansion at infinity.
        return S.One - _erfs(z)*C.exp(-z**2)
    def _eval_as_leading_term(self, x):
        arg = self.args[0].as_leading_term(x)
        # Near the origin erf(z) ~ 2*z/sqrt(pi).
        if x in arg.free_symbols and C.Order(1, x).contains(arg):
            return 2*x/sqrt(pi)
        else:
            return self.func(arg)
###############################################################################
#################### EXPONENTIAL INTEGRALS ####################################
###############################################################################
class Ei(Function):
    r"""
    The classical exponential integral.
    For the use in SymPy, this function is defined as
    .. math:: \operatorname{Ei}(x) = \sum_{n=1}^\infty \frac{x^n}{n\, n!}
              + \log(x) + \gamma,
    where :math:`\gamma` is the Euler-Mascheroni constant.
    If :math:`x` is a polar number, this defines an analytic function on the
    riemann surface of the logarithm. Otherwise this defines an analytic
    function in the cut plane :math:`\mathbb{C} \setminus (-\infty, 0]`.
    **Background**
    The name 'exponential integral' comes from the following statement:
    .. math:: \operatorname{Ei}(x) = \int_{-\infty}^x \frac{e^t}{t} \mathrm{d}t
    If the integral is interpreted as a Cauchy principal value, this statement
    holds for :math:`x > 0` and :math:`\operatorname{Ei}(x)` as defined above.
    Note that we carefully avoided defining :math:`\operatorname{Ei}(x)` for
    negative real x. This is because above integral formula does not hold for
    any polar lift of such :math:`x`, indeed all branches of
    :math:`\operatorname{Ei}(x)` above the negative reals are imaginary.
    However, the following statement holds for all :math:`x \in \mathbb{R}^*`:
    .. math:: \int_{-\infty}^x \frac{e^t}{t} \mathrm{d}t =
              \frac{\operatorname{Ei}\left(|x|e^{i \arg(x)}\right) +
                    \operatorname{Ei}\left(|x|e^{- i \arg(x)}\right)}{2},
    where the integral is again understood to be a principal value if
    :math:`x > 0`, and :math:`|x|e^{i \arg(x)}`,
    :math:`|x|e^{- i \arg(x)}` denote two conjugate polar lifts of :math:`x`.
    See Also
    ========
    expint, sympy.functions.special.gamma_functions.uppergamma
    References
    ==========
    - Abramowitz & Stegun, section 5: http://www.math.sfu.ca/~cbm/aands/page_228.htm
    - http://en.wikipedia.org/wiki/Exponential_integral
    Examples
    ========
    >>> from sympy import Ei, polar_lift, exp_polar, I, pi
    >>> from sympy.abc import x
    The exponential integral in SymPy is strictly undefined for negative values
    of the argument. For convenience, exponential integrals with negative
    arguments are immediately converted into an expression that agrees with
    the classical integral definition:
    >>> Ei(-1)
    -I*pi + Ei(exp_polar(I*pi))
    This yields a real value:
    >>> Ei(-1).n(chop=True)
    -0.219383934395520
    On the other hand the analytic continuation is not real:
    >>> Ei(polar_lift(-1)).n(chop=True)
    -0.21938393439552 + 3.14159265358979*I
    The exponential integral has a logarithmic branch point at the origin:
    >>> Ei(x*exp_polar(2*I*pi))
    Ei(x) + 2*I*pi
    Differentiation is supported:
    >>> Ei(x).diff(x)
    exp(x)/x
    The exponential integral is related to many other special functions.
    For example:
    >>> from sympy import uppergamma, expint, Shi
    >>> Ei(x).rewrite(expint)
    -expint(1, x*exp_polar(I*pi)) - I*pi
    >>> Ei(x).rewrite(Shi)
    Chi(x) + Shi(x)
    """
    nargs = 1
    @classmethod
    def eval(cls, z):
        from sympy import polar_lift, exp_polar
        # Negative real arguments are rewritten onto the principal branch
        # (see class docstring for why Ei is undefined there).
        if z.is_negative:
            # Note: is this a good idea?
            return Ei(polar_lift(z)) - pi*I
        # Unwind full turns around the logarithmic branch point at 0:
        # each turn contributes 2*pi*I.
        nz, n = z.extract_branch_factor()
        if n:
            return Ei(nz) + 2*I*pi*n
    def fdiff(self, argindex=1):
        from sympy import unpolarify
        arg = unpolarify(self.args[0])
        # d/dz Ei(z) = exp(z)/z
        if argindex == 1:
            return C.exp(arg)/arg
        else:
            raise ArgumentIndexError(self, argindex)
    def _eval_evalf(self, prec):
        from sympy import polar_lift
        # On the negative-real branch, add the I*pi correction so the
        # numerical value matches the symbolic rewriting above.
        if (self.args[0]/polar_lift(-1)).is_positive:
            return Function._eval_evalf(self, prec) + (I*pi)._eval_evalf(prec)
        return Function._eval_evalf(self, prec)
    def _eval_rewrite_as_uppergamma(self, z):
        from sympy import uppergamma, polar_lift
        # XXX this does not currently work usefully because uppergamma
        # immediately turns into expint
        return -uppergamma(0, polar_lift(-1)*z) - I*pi
    def _eval_rewrite_as_expint(self, z):
        from sympy import polar_lift
        return -expint(1, polar_lift(-1)*z) - I*pi
    def _eval_rewrite_as_Si(self, z):
        # Ei(z) = Shi(z) + Chi(z) (hyperbolic sine/cosine integrals).
        return Shi(z) + Chi(z)
    # The trig-integral rewrites all share the same implementation.
    _eval_rewrite_as_Ci = _eval_rewrite_as_Si
    _eval_rewrite_as_Chi = _eval_rewrite_as_Si
    _eval_rewrite_as_Shi = _eval_rewrite_as_Si
class expint(Function):
r"""
Generalized exponential integral.
This function is defined as
.. math:: \operatorname{E}_\nu(z) = z^{\nu - 1} \Gamma(1 - \nu, z),
where `\Gamma(1 - \nu, z)` is the upper incomplete gamma function
(``uppergamma``).
Hence for :math:`z` with positive real part we have
.. math:: \operatorname{E}_\nu(z)
= \int_1^\infty \frac{e^{-zt}}{z^\nu} \mathrm{d}t,
which explains the name.
The representation as an incomplete gamma function provides an analytic
continuation for :math:`\operatorname{E}_\nu(z)`. If :math:`\nu` is a
non-positive integer the exponential integral is thus an unbranched
function of :math:`z`, otherwise there is a branch point at the origin.
Refer to the incomplete gamma function documentation for details of the
branching behavior.
See Also
========
E1: The classical case, returns expint(1, z).
Ei: Another related function called exponential integral.
sympy.functions.special.gamma_functions.uppergamma
References
==========
- http://dlmf.nist.gov/8.19
- http://functions.wolfram.com/GammaBetaErf/ExpIntegralE/
- http://en.wikipedia.org/wiki/Exponential_integral
Examples
========
>>> from sympy import expint, S
>>> from sympy.abc import nu, z
Differentiation is supported. Differentiation with respect to z explains
further the name: for integral orders, the exponential integral is an
iterated integral of the exponential function.
>>> expint(nu, z).diff(z)
-expint(nu - 1, z)
Differentiation with respect to nu has no classical expression:
>>> expint(nu, z).diff(nu)
-z**(nu - 1)*meijerg(((), (1, 1)), ((0, 0, -nu + 1), ()), z)
At non-postive integer orders, the exponential integral reduces to the
exponential function:
>>> expint(0, z)
exp(-z)/z
>>> expint(-1, z)
exp(-z)/z + exp(-z)/z**2
At half-integers it reduces to error functions:
>>> expint(S(1)/2, z)
-sqrt(pi)*erf(sqrt(z))/sqrt(z) + sqrt(pi)/sqrt(z)
At positive integer orders it can be rewritten in terms of exponentials
and expint(1, z). Use expand_func() to do this:
>>> from sympy import expand_func
>>> expand_func(expint(5, z))
z**4*expint(1, z)/24 + (-z**3 + z**2 - 2*z + 6)*exp(-z)/24
The generalised exponential integral is essentially equivalent to the
incomplete gamma function:
>>> from sympy import uppergamma
>>> expint(nu, z).rewrite(uppergamma)
z**(nu - 1)*uppergamma(-nu + 1, z)
As such it is branched at the origin:
>>> from sympy import exp_polar, pi, I
>>> expint(4, z*exp_polar(2*pi*I))
I*pi*z**3/3 + expint(4, z)
>>> expint(nu, z*exp_polar(2*pi*I))
z**(nu - 1)*(exp(2*I*pi*nu) - 1)*gamma(-nu + 1) + expint(nu, z)
"""
nargs = 2
@classmethod
def eval(cls, nu, z):
from sympy import (unpolarify, expand_mul, uppergamma, exp, gamma,
factorial)
nu2 = unpolarify(nu)
if nu != nu2:
return expint(nu2, z)
if nu.is_Integer and nu <= 0 or (not nu.is_Integer and (2*nu).is_Integer):
return unpolarify(expand_mul(z**(nu - 1)*uppergamma(1 - nu, z)))
# Extract branching information. This can be deduced from what is
# explained in lowergamma.eval().
z, n = z.extract_branch_factor()
if n == 0:
return
if nu.is_integer:
if (nu > 0) is not True:
return
return expint(nu, z) \
- 2*pi*I*n*(-1)**(nu-1)/factorial(nu-1)*unpolarify(z)**(nu - 1)
else:
return (exp(2*I*pi*nu*n) - 1)*z**(nu-1)*gamma(1 - nu) + expint(nu, z)
def fdiff(self, argindex):
from sympy import meijerg
nu, z = self.args
if argindex == 1:
return -z**(nu - 1)*meijerg([], [1, 1], [0, 0, 1 - nu], [], z)
elif argindex == 2:
return -expint(nu - 1, z)
else:
raise ArgumentIndexError(self, argindex)
def _eval_rewrite_as_uppergamma(self, nu, z):
from sympy import uppergamma
return z**(nu - 1)*uppergamma(1 - nu, z)
def _eval_rewrite_as_Ei(self, nu, z):
    """Rewrite positive-integer-order expint in terms of Ei.

    Non-integer orders are returned unchanged.
    """
    from sympy import exp_polar, unpolarify, exp, factorial, Add
    if nu == 1:
        # E1(z) relates to Ei along the rotated ray exp_polar(-I*pi)*z.
        return -Ei(z*exp_polar(-I*pi)) - I*pi
    elif nu.is_Integer and nu > 1:
        # DLMF, 8.19.7
        x = -unpolarify(z)
        return x**(nu - 1)/factorial(nu - 1)*E1(z).rewrite(Ei) + \
            exp(x)/factorial(nu - 1) * \
            Add(*[factorial(nu - k - 2)*x**k for k in range(nu - 1)])
    else:
        # No Ei representation for other orders; leave as-is.
        return self
def _eval_expand_func(self, **hints):
    """Expand integer-order expint via Ei, then fold back to expint."""
    as_ei = self.rewrite(Ei)
    return as_ei.rewrite(expint, **hints)
def _eval_rewrite_as_Si(self, nu, z):
    """At order one, split into hyperbolic sine/cosine integrals."""
    if nu == 1:
        return Shi(z) - Chi(z)
    return self
# One rewrite serves all four trigonometric-integral targets.
_eval_rewrite_as_Ci = _eval_rewrite_as_Si
_eval_rewrite_as_Chi = _eval_rewrite_as_Si
_eval_rewrite_as_Shi = _eval_rewrite_as_Si
def E1(z):
    """Return the classical exponential integral ``E_1(z)``.

    Shorthand for ``expint(1, z)``.
    """
    return expint(1, z)
###############################################################################
#################### TRIGONOMETRIC INTEGRALS ##################################
###############################################################################
class TrigonometricIntegral(Function):
    """ Base class for trigonometric integrals.

    Subclasses supply:
      _trigfunc    -- the integrand's trigonometric function
      _atzero      -- the value at z == 0
      _minusfactor -- rule for pulling out a factor of -1
      _Ifactor     -- rule for pulling out a factor of +/-I
    """

    nargs = 1

    @classmethod
    def eval(cls, z):
        from sympy import Dummy, polar_lift
        if z == 0:
            return cls._atzero

        # Try to pull out factors of I (a bare, non-polar I is only safe
        # when the integrand vanishes at 0, i.e. no branch point).
        nz = z.extract_multiplicatively(polar_lift(I))
        if nz is None and cls._trigfunc(0) == 0:
            nz = z.extract_multiplicatively(I)
        if nz is not None:
            return cls._Ifactor(nz, 1)
        nz = z.extract_multiplicatively(polar_lift(-I))
        if nz is not None:
            return cls._Ifactor(nz, -1)

        # Similarly try to pull out a factor of -1.
        nz = z.extract_multiplicatively(polar_lift(-1))
        if nz is None and cls._trigfunc(0) == 0:
            nz = z.extract_multiplicatively(-1)
        if nz is not None:
            return cls._minusfactor(nz)

        # Unwind polar branch factors; each full turn contributes
        # 2*pi*I*_trigfunc(0).
        nz, n = z.extract_branch_factor()
        if n == 0 and nz == z:
            return
        return 2*pi*I*n*cls._trigfunc(0) + cls(nz)

    def fdiff(self, argindex=1):
        from sympy import unpolarify
        arg = unpolarify(self.args[0])
        if argindex == 1:
            # d/dz Integral(f(t)/t, (t, 0, z)) == f(z)/z.
            return self._trigfunc(arg)/arg

    def _eval_rewrite_as_Ei(self, z):
        # Route through the expint representation provided by subclasses.
        return self._eval_rewrite_as_expint(z).rewrite(Ei)

    def _eval_rewrite_as_uppergamma(self, z):
        from sympy import uppergamma
        return self._eval_rewrite_as_expint(z).rewrite(uppergamma)

    def _eval_nseries(self, x, n, logx):
        # NOTE this is fairly inefficient
        from sympy import log, EulerGamma, Pow
        n += 1
        if self.args[0].subs(x, 0) != 0:
            return super(TrigonometricIntegral, self)._eval_nseries(x, n, logx)
        baseseries = self._trigfunc(x)._eval_nseries(x, n, logx)
        if self._trigfunc(0) != 0:
            # Drop the constant term before termwise integration.
            baseseries -= 1
        # Integrate term by term: t**k -> t**k/k.
        baseseries = baseseries.replace(Pow, lambda t, n: t**n/n)
        if self._trigfunc(0) != 0:
            # Restore the gamma + log(x) part of the definition.
            baseseries += EulerGamma + log(x)
        return baseseries.subs(x, self.args[0])._eval_nseries(x, n, logx)
class Si(TrigonometricIntegral):
    r"""
    Sine integral.

    This function is defined by

    .. math:: \operatorname{Si}(z) = \int_0^z \frac{\sin{t}}{t} \mathrm{d}t.

    It is an entire function.

    See Also
    ========

    Ci: Cosine integral.
    Shi: Sinh integral.
    Chi: Cosh integral.
    expint: The generalised exponential integral.

    References
    ==========

    - http://en.wikipedia.org/wiki/Trigonometric_integral

    Examples
    ========

    >>> from sympy import Si
    >>> from sympy.abc import z

    The sine integral is an antiderivative of sin(z)/z:

    >>> Si(z).diff(z)
    sin(z)/z

    It is unbranched:

    >>> from sympy import exp_polar, I, pi
    >>> Si(z*exp_polar(2*I*pi))
    Si(z)

    Sine integral behaves much like ordinary sine under multiplication by I:

    >>> Si(I*z)
    I*Shi(z)
    >>> Si(-z)
    -Si(z)

    It can also be expressed in terms of exponential integrals, but beware
    that the latter is branched:

    >>> from sympy import expint
    >>> Si(z).rewrite(expint)
    -I*(-expint(1, z*exp_polar(-I*pi/2))/2 + expint(1, z*exp_polar(I*pi/2))/2) + pi/2
    """

    # Integrand sin(t)/t is entire, so Si(0) == 0 and there is no branching.
    _trigfunc = C.sin
    _atzero = S(0)

    @classmethod
    def _minusfactor(cls, z):
        # Si is odd: Si(-z) == -Si(z).
        return -Si(z)

    @classmethod
    def _Ifactor(cls, z, sign):
        # Rotation by +/-I maps Si onto the hyperbolic analogue Shi.
        return I*Shi(z)*sign

    def _eval_rewrite_as_expint(self, z):
        from sympy import polar_lift
        # XXX should we polarify z?
        return pi/2 + (E1(polar_lift(I)*z) - E1(polar_lift(-I)*z))/2/I
class Ci(TrigonometricIntegral):
    r"""
    Cosine integral.

    This function is defined for positive :math:`x` by

    .. math:: \operatorname{Ci}(x) = \gamma + \log{x}
                     + \int_0^x \frac{\cos{t} - 1}{t} \mathrm{d}t
           = -\int_x^\infty \frac{\cos{t}}{t} \mathrm{d}t,

    where :math:`\gamma` is the Euler-Mascheroni constant.

    We have

    .. math:: \operatorname{Ci}(z) =
        -\frac{\operatorname{E}_1\left(e^{i\pi/2} z\right)
               + \operatorname{E}_1\left(e^{-i \pi/2} z\right)}{2}

    which holds for all polar :math:`z` and thus provides an analytic
    continuation to the Riemann surface of the logarithm.

    The formula also holds as stated
    for :math:`z \in \mathbb{C}` with :math:`Re(z) > 0`.

    By lifting to the principal branch we obtain an analytic function on the
    cut complex plane.

    See Also
    ========

    Si: Sine integral.
    Shi: Sinh integral.
    Chi: Cosh integral.
    expint: The generalised exponential integral.

    References
    ==========

    - http://en.wikipedia.org/wiki/Trigonometric_integral

    Examples
    ========

    >>> from sympy import Ci
    >>> from sympy.abc import z

    The cosine integral is a primitive of cos(z)/z:

    >>> Ci(z).diff(z)
    cos(z)/z

    It has a logarithmic branch point at the origin:

    >>> from sympy import exp_polar, I, pi
    >>> Ci(z*exp_polar(2*I*pi))
    Ci(z) + 2*I*pi

    Cosine integral behaves somewhat like ordinary cos under multiplication by I:

    >>> from sympy import polar_lift
    >>> Ci(polar_lift(I)*z)
    Chi(z) + I*pi/2
    >>> Ci(polar_lift(-1)*z)
    Ci(z) + I*pi

    It can also be expressed in terms of exponential integrals:

    >>> from sympy import expint
    >>> Ci(z).rewrite(expint)
    -expint(1, z*exp_polar(-I*pi/2))/2 - expint(1, z*exp_polar(I*pi/2))/2
    """

    # Integrand cos(t)/t diverges at 0: logarithmic branch point there.
    _trigfunc = C.cos
    _atzero = S.ComplexInfinity

    @classmethod
    def _minusfactor(cls, z):
        # Reflection crosses the branch cut and picks up I*pi.
        return Ci(z) + I*pi

    @classmethod
    def _Ifactor(cls, z, sign):
        # Rotation by +/-I maps Ci onto Chi plus a constant shift.
        return Chi(z) + I*pi/2*sign

    def _eval_rewrite_as_expint(self, z):
        from sympy import polar_lift
        return -(E1(polar_lift(I)*z) + E1(polar_lift(-I)*z))/2
class Shi(TrigonometricIntegral):
    r"""
    Sinh integral.

    This function is defined by

    .. math:: \operatorname{Shi}(z) = \int_0^z \frac{\sinh{t}}{t} \mathrm{d}t.

    It is an entire function.

    See Also
    ========

    Si: Sine integral.
    Ci: Cosine integral.
    Chi: Cosh integral.
    expint: The generalised exponential integral.

    References
    ==========

    - http://en.wikipedia.org/wiki/Trigonometric_integral

    Examples
    ========

    >>> from sympy import Shi
    >>> from sympy.abc import z

    The Sinh integral is a primitive of sinh(z)/z:

    >>> Shi(z).diff(z)
    sinh(z)/z

    It is unbranched:

    >>> from sympy import exp_polar, I, pi
    >>> Shi(z*exp_polar(2*I*pi))
    Shi(z)

    Sinh integral behaves much like ordinary sinh under multiplication by I:

    >>> Shi(I*z)
    I*Si(z)
    >>> Shi(-z)
    -Shi(z)

    It can also be expressed in terms of exponential integrals, but beware
    that the latter is branched:

    >>> from sympy import expint
    >>> Shi(z).rewrite(expint)
    expint(1, z)/2 - expint(1, z*exp_polar(I*pi))/2 - I*pi/2
    """

    # Integrand sinh(t)/t is entire, so Shi(0) == 0 and no branching.
    _trigfunc = C.sinh
    _atzero = S(0)

    @classmethod
    def _minusfactor(cls, z):
        # Shi is odd: Shi(-z) == -Shi(z).
        return -Shi(z)

    @classmethod
    def _Ifactor(cls, z, sign):
        # Rotation by +/-I maps Shi back onto the circular Si.
        return I*Si(z)*sign

    def _eval_rewrite_as_expint(self, z):
        from sympy import exp_polar
        # XXX should we polarify z?
        return (E1(z) - E1(exp_polar(I*pi)*z))/2 - I*pi/2
class Chi(TrigonometricIntegral):
    r"""
    Cosh integral.

    This function is defined for positive :math:`x` by

    .. math:: \operatorname{Chi}(x) = \gamma + \log{x}
                     + \int_0^x \frac{\cosh{t} - 1}{t} \mathrm{d}t,

    where :math:`\gamma` is the Euler-Mascheroni constant.

    We have

    .. math:: \operatorname{Chi}(z) = \operatorname{Ci}\left(e^{i \pi/2}z\right)
                 - i\frac{\pi}{2},

    which holds for all polar :math:`z` and thus provides an analytic
    continuation to the Riemann surface of the logarithm.

    By lifting to the principal branch we obtain an analytic function on the
    cut complex plane.

    See Also
    ========

    Si: Sine integral.
    Ci: Cosine integral.
    Shi: Sinh integral.
    expint: The generalised exponential integral.

    References
    ==========

    - http://en.wikipedia.org/wiki/Trigonometric_integral

    Examples
    ========

    >>> from sympy import Chi
    >>> from sympy.abc import z

    The cosh integral is a primitive of cosh(z)/z:

    >>> Chi(z).diff(z)
    cosh(z)/z

    It has a logarithmic branch point at the origin:

    >>> from sympy import exp_polar, I, pi
    >>> Chi(z*exp_polar(2*I*pi))
    Chi(z) + 2*I*pi

    Cosh integral behaves somewhat like ordinary cosh under multiplication by I:

    >>> from sympy import polar_lift
    >>> Chi(polar_lift(I)*z)
    Ci(z) + I*pi/2
    >>> Chi(polar_lift(-1)*z)
    Chi(z) + I*pi

    It can also be expressed in terms of exponential integrals:

    >>> from sympy import expint
    >>> Chi(z).rewrite(expint)
    -expint(1, z)/2 - expint(1, z*exp_polar(I*pi))/2 - I*pi/2
    """

    # Integrand cosh(t)/t diverges at 0: logarithmic branch point there.
    _trigfunc = C.cosh
    _atzero = S.ComplexInfinity

    @classmethod
    def _minusfactor(cls, z):
        # Reflection crosses the branch cut and picks up I*pi.
        return Chi(z) + I*pi

    @classmethod
    def _Ifactor(cls, z, sign):
        # Rotation by +/-I maps Chi onto the circular Ci plus a constant.
        return Ci(z) + I*pi/2*sign

    def _eval_rewrite_as_expint(self, z):
        from sympy import exp_polar
        return -I*pi/2 - (E1(z) + E1(exp_polar(I*pi)*z))/2
###############################################################################
#################### HELPER FUNCTIONS #########################################
###############################################################################
class _erfs(Function):
    """
    Helper function to make the :math:`erf(z)` function
    tractable for the Gruntz algorithm.

    Defined by the relation _erfs(z) == (1 - erf(z))*exp(z**2)
    (see _eval_rewrite_as_intractable below).
    """

    nargs = 1

    def _eval_aseries(self, n, args0, x, logx):
        # The asymptotic expansion below is only valid about z -> oo;
        # defer to the generic machinery otherwise.
        if args0[0] != S.Infinity:
            return super(_erfs, self)._eval_aseries(n, args0, x, logx)

        z = self.args[0]
        # Truncated asymptotic series in 1/z; NOTE: uses the Python 2
        # builtin xrange (this module predates Python 3 support).
        l = [ 1/sqrt(S.Pi) * C.factorial(2*k)*(-S(4))**(-k)/C.factorial(k) * (1/z)**(2*k+1) for k in xrange(0,n) ]
        o = C.Order(1/z**(2*n+1), x)
        # It is very inefficient to first add the order and then do the nseries
        return (Add(*l))._eval_nseries(x, n, logx) + o

    def fdiff(self, argindex=1):
        if argindex == 1:
            z = self.args[0]
            # Follows from differentiating (1 - erf(z))*exp(z**2).
            return -2/sqrt(S.Pi)+2*z*_erfs(z)
        else:
            raise ArgumentIndexError(self, argindex)

    def _eval_rewrite_as_intractable(self, z):
        # The defining relation, used to fall back to erf/exp form.
        return (S.One-erf(z))*C.exp(z**2)
| |
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2012 VMware, Inc.
# Copyright (c) 2011 Citrix Systems, Inc.
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Test suite for VMwareAPI.
"""
import collections
import datetime
from eventlet import greenthread
import mock
from mox3 import mox
from oslo_config import cfg
from oslo_utils import fixture as utils_fixture
from oslo_utils import units
from oslo_utils import uuidutils
from oslo_vmware import exceptions as vexc
from oslo_vmware.objects import datastore as ds_obj
from oslo_vmware import pbm
from oslo_vmware import vim_util as oslo_vim_util
from nova import block_device
from nova.compute import api as compute_api
from nova.compute import power_state
from nova.compute import task_states
from nova.compute import vm_states
from nova import context
from nova import exception
from nova.image import glance
from nova.network import model as network_model
from nova import objects
from nova import test
from nova.tests.unit import fake_instance
import nova.tests.unit.image.fake
from nova.tests.unit import matchers
from nova.tests.unit import test_flavors
from nova.tests.unit import utils
from nova.tests.unit.virt.vmwareapi import fake as vmwareapi_fake
from nova.tests.unit.virt.vmwareapi import stubs
from nova.virt import driver as v_driver
from nova.virt.vmwareapi import constants
from nova.virt.vmwareapi import driver
from nova.virt.vmwareapi import ds_util
from nova.virt.vmwareapi import error_util
from nova.virt.vmwareapi import imagecache
from nova.virt.vmwareapi import images
from nova.virt.vmwareapi import vif
from nova.virt.vmwareapi import vim_util
from nova.virt.vmwareapi import vm_util
from nova.virt.vmwareapi import vmops
from nova.virt.vmwareapi import volumeops
# Global config handle; import options consumed indirectly by these tests.
CONF = cfg.CONF
CONF.import_opt('host', 'nova.netconf')
CONF.import_opt('remove_unused_original_minimum_age_seconds',
                'nova.virt.imagecache')
def _fake_create_session(inst):
    """Install a canned fake session on *inst* without any API traffic."""
    fake_session = vmwareapi_fake.DataObject()
    fake_session.key = 'fake_key'
    fake_session.userName = 'fake_username'
    fake_session._pbm_wsdl_loc = None
    fake_session._pbm = None
    inst._session = fake_session
class VMwareDriverStartupTestCase(test.NoDBTestCase):
    """Verify configuration validation during driver start-up."""

    def _start_driver_with_flags(self, expected_exception_type, startup_flags):
        # Instantiate the driver with only the given flags set; session
        # creation is patched out so just configuration checking runs.
        self.flags(**startup_flags)
        with mock.patch(
                'nova.virt.vmwareapi.driver.VMwareAPISession.__init__'):
            raised = self.assertRaises(
                Exception, driver.VMwareVCDriver, None)  # noqa
            self.assertIs(type(raised), expected_exception_type)

    def test_start_driver_no_user(self):
        # host_username missing -> generic startup failure.
        flags = dict(host_ip='ip', host_password='password',
                     group='vmware')
        self._start_driver_with_flags(Exception, flags)

    def test_start_driver_no_host(self):
        # host_ip missing -> generic startup failure.
        flags = dict(host_username='username', host_password='password',
                     group='vmware')
        self._start_driver_with_flags(Exception, flags)

    def test_start_driver_no_password(self):
        # host_password missing -> generic startup failure.
        flags = dict(host_ip='ip', host_username='username',
                     group='vmware')
        self._start_driver_with_flags(Exception, flags)

    def test_start_driver_with_user_host_password(self):
        # Getting the InvalidInput exception signifies that no exception
        # is raised regarding missing user/password/host
        flags = dict(host_ip='ip', host_password='password',
                     host_username="user", datastore_regex="bad(regex",
                     group='vmware')
        self._start_driver_with_flags(nova.exception.InvalidInput, flags)
class VMwareSessionTestCase(test.NoDBTestCase):
    """Verify _call_method's dispatch to invoke_api."""

    @mock.patch.object(driver.VMwareAPISession, '_is_vim_object',
                       return_value=False)
    def test_call_method(self, mock_is_vim):
        # Non-vim module: the session's vim handle is appended to the call.
        with test.nested(
                mock.patch.object(driver.VMwareAPISession, '_create_session',
                                  _fake_create_session),
                mock.patch.object(driver.VMwareAPISession, 'invoke_api'),
        ) as (fake_create, fake_invoke):
            session = driver.VMwareAPISession()
            session._vim = mock.Mock()
            module = mock.Mock()
            session._call_method(module, 'fira')
            fake_invoke.assert_called_once_with(module, 'fira', session._vim)

    @mock.patch.object(driver.VMwareAPISession, '_is_vim_object',
                       return_value=True)
    def test_call_method_vim(self, mock_is_vim):
        # Vim module: invoked directly, no extra vim argument.
        with test.nested(
                mock.patch.object(driver.VMwareAPISession, '_create_session',
                                  _fake_create_session),
                mock.patch.object(driver.VMwareAPISession, 'invoke_api'),
        ) as (fake_create, fake_invoke):
            session = driver.VMwareAPISession()
            module = mock.Mock()
            session._call_method(module, 'fira')
            fake_invoke.assert_called_once_with(module, 'fira')
class VMwareAPIVMTestCase(test.NoDBTestCase):
"""Unit tests for Vmware API connection calls."""
REQUIRES_LOCKING = True
@mock.patch.object(driver.VMwareVCDriver, '_register_openstack_extension')
def setUp(self, mock_register):
    """Build a VMwareVCDriver wired to the fake vSphere backend."""
    super(VMwareAPIVMTestCase, self).setUp()
    # Reset module-level caches so morefs do not leak between tests.
    ds_util.dc_cache_reset()
    vm_util.vm_refs_cache_reset()
    self.context = context.RequestContext('fake', 'fake', is_admin=False)
    self.flags(cluster_name='test_cluster',
               host_ip='test_url',
               host_username='test_username',
               host_password='test_pass',
               api_retry_count=1,
               use_linked_clone=False, group='vmware')
    self.flags(enabled=False, group='vnc')
    self.flags(image_cache_subdirectory_name='vmware_base',
               my_ip='')
    self.user_id = 'fake'
    self.project_id = 'fake'
    self.context = context.RequestContext(self.user_id, self.project_id)
    stubs.set_stubs(self)
    vmwareapi_fake.reset()
    nova.tests.unit.image.fake.stub_out_image_service(self)
    self.conn = driver.VMwareVCDriver(None, False)
    self._set_exception_vars()
    self.node_name = self.conn._nodename
    self.ds = 'ds1'
    self._display_name = 'fake-display-name'

    self.vim = vmwareapi_fake.FakeVim()

    # NOTE(vish): none of the network plugging code is actually
    # being tested
    self.network_info = utils.get_test_network_info()
    # Pull a valid image from the fake glance service for spawns.
    image_ref = nova.tests.unit.image.fake.get_valid_image_id()
    (image_service, image_id) = glance.get_remote_image_service(
        self.context, image_ref)
    metadata = image_service.show(self.context, image_id)
    self.image = objects.ImageMeta.from_dict({
        'id': image_ref,
        'disk_format': 'vmdk',
        'size': int(metadata['size']),
    })
    self.fake_image_uuid = self.image.id
    nova.tests.unit.image.fake.stub_out_image_service(self)
    self.vnc_host = 'ha-host'
def tearDown(self):
    """Tear down the fake backend and reset the stubbed image service."""
    super(VMwareAPIVMTestCase, self).tearDown()
    vmwareapi_fake.cleanup()
    nova.tests.unit.image.fake.FakeImageService_reset()
def test_legacy_block_device_info(self):
    """The VC driver consumes new-style block device info."""
    needs_legacy = self.conn.need_legacy_block_device_info
    self.assertFalse(needs_legacy)
def test_get_host_ip_addr(self):
    """The configured host_ip is reported back verbatim."""
    reported_ip = self.conn.get_host_ip_addr()
    self.assertEqual('test_url', reported_ip)
def test_init_host_with_no_session(self):
    """A session without a vim handle is re-created during init_host."""
    fake_session = mock.Mock()
    fake_session.vim = None
    self.conn._session = fake_session
    self.conn.init_host('fake_host')
    fake_session._create_session.assert_called_once_with()
def test_init_host(self):
    """init_host must succeed against the live (fake) session."""
    try:
        self.conn.init_host("fake_host")
    except Exception as exc:
        self.fail("init_host raised: %s" % exc)
def _set_exception_vars(self):
    """Keep handles to the real session helpers and reset test state.

    Tests wrap _wait_for_task/_call_method and use these saved
    references to delegate to the genuine implementation.
    """
    self.wait_task = self.conn._session._wait_for_task
    self.call_method = self.conn._session._call_method
    self.task_ref = None
    self.exception = False
def test_cleanup_host(self):
    """cleanup_host must not raise after a successful init_host."""
    self.conn.init_host("fake_host")
    try:
        self.conn.cleanup_host("fake_host")
    except Exception as exc:
        self.fail("cleanup_host raised: %s" % exc)
def test_driver_capabilities(self):
    """The VC driver advertises imagecache and same-host migration."""
    caps = self.conn.capabilities
    self.assertTrue(caps['has_imagecache'])
    self.assertFalse(caps['supports_recreate'])
    self.assertTrue(caps['supports_migrate_to_same_host'])
@mock.patch.object(pbm, 'get_profile_id_by_name')
def test_configuration_pbm(self, get_profile_mock):
    """A resolvable default PBM policy passes validation."""
    get_profile_mock.return_value = 'fake-profile'
    self.flags(pbm_enabled=True,
               pbm_default_policy='fake-policy',
               pbm_wsdl_location='fake-location', group='vmware')
    self.conn._validate_configuration()
@mock.patch.object(pbm, 'get_profile_id_by_name')
def test_configuration_pbm_bad_default(self, get_profile_mock):
    """An unresolvable default PBM policy fails validation."""
    get_profile_mock.return_value = None
    self.flags(pbm_enabled=True,
               pbm_wsdl_location='fake-location',
               pbm_default_policy='fake-policy', group='vmware')
    self.assertRaises(error_util.PbmDefaultPolicyDoesNotExist,
                      self.conn._validate_configuration)
def test_login_retries(self):
    """A transient connection failure on login is retried once."""
    self.attempts = 0
    self.login_session = vmwareapi_fake.FakeVim()._login()

    def _fake_login(_self):
        # First attempt fails with a connection error; second succeeds.
        self.attempts += 1
        if self.attempts == 1:
            raise vexc.VimConnectionException('Here is my fake exception')
        return self.login_session

    def _fake_check_session(_self):
        return True

    self.stub_out('nova.tests.unit.virt.vmwareapi.fake.FakeVim._login',
                  _fake_login)
    self.stub_out('nova.tests.unit.virt.vmwareapi.'
                  'fake.FakeVim._check_session',
                  _fake_check_session)
    # Patch out the retry back-off sleep so the test runs instantly.
    with mock.patch.object(greenthread, 'sleep'):
        self.conn = driver.VMwareAPISession()
    self.assertEqual(2, self.attempts)
def _get_instance_type_by_name(self, type):
    """Look up a default test flavor by name.

    Falls back to a synthetic zero-root-disk 'm1.micro' dict, which is
    not among the default flavor objects; returns None for anything
    else unknown.
    """
    match = next((flavor for flavor in test_flavors.DEFAULT_FLAVOR_OBJS
                  if flavor.name == type), None)
    if match is not None:
        return match
    if type == 'm1.micro':
        return {'memory_mb': 128, 'root_gb': 0, 'deleted_at': None,
                'name': 'm1.micro', 'deleted': 0, 'created_at': None,
                'ephemeral_gb': 0, 'updated_at': None,
                'disabled': False, 'vcpus': 1, 'extra_specs': {},
                'swap': 0, 'rxtx_factor': 1.0, 'is_public': True,
                'flavorid': '1', 'vcpu_weight': None, 'id': 2}
def _create_instance(self, node=None, set_image_ref=True,
                     uuid=None, instance_type='m1.large',
                     ephemeral=None, instance_type_updates=None):
    """Build a fake instance object and record it on ``self``.

    Sets self.instance, self.uuid, self.instance_node and
    self.type_data for later assertions.
    """
    if not node:
        node = self.node_name
    if not uuid:
        uuid = uuidutils.generate_uuid()
    # Copy the flavor data so per-test tweaks do not leak.
    self.type_data = dict(self._get_instance_type_by_name(instance_type))
    if instance_type_updates:
        self.type_data.update(instance_type_updates)
    if ephemeral is not None:
        self.type_data['ephemeral_gb'] = ephemeral
    values = {'name': 'fake_name',
              'display_name': self._display_name,
              'id': 1,
              'uuid': uuid,
              'project_id': self.project_id,
              'user_id': self.user_id,
              'kernel_id': "fake_kernel_uuid",
              'ramdisk_id': "fake_ramdisk_uuid",
              'mac_address': "de:ad:be:ef:be:ef",
              'flavor': objects.Flavor(**self.type_data),
              'node': node,
              'memory_mb': self.type_data['memory_mb'],
              'root_gb': self.type_data['root_gb'],
              'ephemeral_gb': self.type_data['ephemeral_gb'],
              'vcpus': self.type_data['vcpus'],
              'swap': self.type_data['swap'],
              'expected_attrs': ['system_metadata'],
              }
    if set_image_ref:
        values['image_ref'] = self.fake_image_uuid
    self.instance_node = node
    self.uuid = uuid
    self.instance = fake_instance.fake_instance_obj(
        self.context, **values)
def _create_vm(self, node=None, num_instances=1, uuid=None,
               instance_type='m1.large', powered_on=True,
               ephemeral=None, bdi=None, instance_type_updates=None):
    """Create and spawn the VM."""
    if not node:
        node = self.node_name
    self._create_instance(node=node, uuid=uuid,
                          instance_type=instance_type,
                          ephemeral=ephemeral,
                          instance_type_updates=instance_type_updates)
    # The moref cache must be cold before and warm after the spawn.
    self.assertIsNone(vm_util.vm_ref_cache_get(self.uuid))
    self.conn.spawn(self.context, self.instance, self.image,
                    injected_files=[], admin_password=None,
                    network_info=self.network_info,
                    block_device_info=bdi)
    self._check_vm_record(num_instances=num_instances,
                          powered_on=powered_on,
                          uuid=uuid)
    self.assertIsNotNone(vm_util.vm_ref_cache_get(self.uuid))
def _get_vm_record(self):
    """Return the fake VirtualMachine backing for the spawned instance."""
    expected_name = vm_util._get_vm_name(self._display_name, self.uuid)
    backings = vmwareapi_fake._get_objects("VirtualMachine")
    for backing in backings.objects:
        if backing.get('name') == expected_name:
            return backing
    self.fail('Unable to find VM backing!')
def _get_info(self, uuid=None, node=None, name=None):
    """Fetch driver info for an instance.

    Any of uuid/node/name may be overridden; unset values fall back to
    the attributes recorded by the last _create_instance() call (or the
    default name '1').
    """
    uuid = uuid if uuid else self.uuid
    node = node if node else self.instance_node
    # BUG FIX: the original tested ``node`` here (``name if node``), so
    # after node was defaulted above the default name '1' was never
    # applied and callers always got name=None.  Test ``name`` itself.
    name = name if name else '1'
    return self.conn.get_info(fake_instance.fake_instance_obj(
        None,
        **{'uuid': uuid,
           'name': name,
           'node': node}))
def _check_vm_record(self, num_instances=1, powered_on=True, uuid=None):
    """Check if the spawned VM's properties correspond to the instance in
    the db.
    """
    instances = self.conn.list_instances()
    # Only assert the instance count when called with a real uuid.
    if uuidutils.is_uuid_like(uuid):
        self.assertEqual(num_instances, len(instances))

    # Get Nova record for VM
    vm_info = self._get_info()
    vm = self._get_vm_record()

    # Check that m1.large above turned into the right thing.
    mem_kib = int(self.type_data['memory_mb']) << 10
    vcpus = self.type_data['vcpus']
    self.assertEqual(vm_info.max_mem_kb, mem_kib)
    self.assertEqual(vm_info.mem_kb, mem_kib)
    self.assertEqual(vm.get("summary.config.instanceUuid"), self.uuid)
    self.assertEqual(vm.get("summary.config.numCpu"), vcpus)
    self.assertEqual(vm.get("summary.config.memorySizeMB"),
                     self.type_data['memory_mb'])

    # The NIC is expected to be the third virtual device.
    self.assertEqual("ns0:VirtualE1000",
                     vm.get("config.hardware.device").VirtualDevice[2].obj_name)

    if powered_on:
        # Check that the VM is running according to Nova
        self.assertEqual(power_state.RUNNING, vm_info.state)

        # Check that the VM is running according to vSphere API.
        self.assertEqual('poweredOn', vm.get("runtime.powerState"))
    else:
        # Check that the VM is not running according to Nova
        self.assertEqual(power_state.SHUTDOWN, vm_info.state)

        # Check that the VM is not running according to vSphere API.
        self.assertEqual('poweredOff', vm.get("runtime.powerState"))

    # The extraConfig must carry the NVP uuid and VIF identifiers.
    found_vm_uuid = False
    found_iface_id = False
    extras = vm.get("config.extraConfig")
    for c in extras.OptionValue:
        if (c.key == "nvp.vm-uuid" and c.value == self.instance['uuid']):
            found_vm_uuid = True
        if (c.key == "nvp.iface-id.0" and c.value == "vif-xxx-yyy-zzz"):
            found_iface_id = True

    self.assertTrue(found_vm_uuid)
    self.assertTrue(found_iface_id)
def _check_vm_info(self, info, pwr_state=power_state.RUNNING):
    """Assert that get_info output matches the recorded flavor data."""
    expected_mem_kb = int(self.type_data['memory_mb']) << 10
    self.assertEqual(info.state, pwr_state)
    self.assertEqual(info.max_mem_kb, expected_mem_kb)
    self.assertEqual(info.mem_kb, expected_mem_kb)
    self.assertEqual(info.num_cpu, self.type_data['vcpus'])
def test_instance_exists(self):
    """instance_exists distinguishes spawned from unknown instances."""
    self._create_vm()
    self.assertTrue(self.conn.instance_exists(self.instance))
    bogus_instance = fake_instance.fake_instance_obj(None, uuid='foo',
                                                     name='bar',
                                                     node=self.node_name)
    self.assertFalse(self.conn.instance_exists(bogus_instance))
def test_list_instances_1(self):
    """Exactly one instance is listed after a single spawn."""
    self._create_vm()
    self.assertEqual(1, len(self.conn.list_instances()))
def test_list_instance_uuids(self):
    """Exactly one uuid is listed after a single spawn."""
    self._create_vm()
    self.assertEqual(1, len(self.conn.list_instance_uuids()))
def _cached_files_exist(self, exists=True):
    """Assert presence (or absence) of the cached image vmdk."""
    cached_vmdk = ds_obj.DatastorePath(self.ds, 'vmware_base',
                                       self.fake_image_uuid,
                                       '%s.vmdk' % self.fake_image_uuid)
    checker = (vmwareapi_fake.assertPathExists if exists
               else vmwareapi_fake.assertPathNotExists)
    checker(self, str(cached_vmdk))
@mock.patch.object(nova.virt.vmwareapi.images.VMwareImage,
                   'from_image')
def test_instance_dir_disk_created(self, mock_from_image):
    """Test image file is cached when even when use_linked_clone
    is False
    """
    img_props = images.VMwareImage(
        image_id=self.fake_image_uuid,
        linked_clone=False)

    mock_from_image.return_value = img_props
    self._create_vm()
    # The instance gets its own full copy of the root disk...
    path = ds_obj.DatastorePath(self.ds, self.uuid, '%s.vmdk' % self.uuid)
    vmwareapi_fake.assertPathExists(self, str(path))
    # ...and the image is still present in the cache directory.
    self._cached_files_exist()
@mock.patch.object(nova.virt.vmwareapi.images.VMwareImage,
                   'from_image')
def test_cache_dir_disk_created(self, mock_from_image):
    """Test image disk is cached when use_linked_clone is True."""
    self.flags(use_linked_clone=True, group='vmware')

    img_props = images.VMwareImage(
        image_id=self.fake_image_uuid,
        file_size=1 * units.Ki,
        disk_type=constants.DISK_TYPE_SPARSE)

    mock_from_image.return_value = img_props
    self._create_vm()
    # Both the sparse source and the resized (80 GB) root disk are
    # expected in the image cache directory.
    path = ds_obj.DatastorePath(self.ds, 'vmware_base',
                                self.fake_image_uuid,
                                '%s.vmdk' % self.fake_image_uuid)
    root = ds_obj.DatastorePath(self.ds, 'vmware_base',
                                self.fake_image_uuid,
                                '%s.80.vmdk' % self.fake_image_uuid)
    vmwareapi_fake.assertPathExists(self, str(path))
    vmwareapi_fake.assertPathExists(self, str(root))
def _iso_disk_type_created(self, instance_type='m1.large'):
    """Spawn from an iso image and assert the iso landed in the cache."""
    self.image.disk_format = 'iso'
    self._create_vm(instance_type=instance_type)
    cached_iso = ds_obj.DatastorePath(self.ds, 'vmware_base',
                                      self.fake_image_uuid,
                                      '%s.iso' % self.fake_image_uuid)
    vmwareapi_fake.assertPathExists(self, str(cached_iso))
def test_iso_disk_type_created(self):
    """A blank root vmdk is created alongside the attached iso."""
    self._iso_disk_type_created()
    root_vmdk = ds_obj.DatastorePath(self.ds, self.uuid,
                                     '%s.vmdk' % self.uuid)
    vmwareapi_fake.assertPathExists(self, str(root_vmdk))
def test_iso_disk_type_created_with_root_gb_0(self):
    """No root vmdk is created when the flavor has root_gb == 0."""
    self._iso_disk_type_created(instance_type='m1.micro')
    root_vmdk = ds_obj.DatastorePath(self.ds, self.uuid,
                                     '%s.vmdk' % self.uuid)
    vmwareapi_fake.assertPathNotExists(self, str(root_vmdk))
def test_iso_disk_cdrom_attach(self):
    """The cached iso path is what gets attached as a cdrom."""
    iso_path = ds_obj.DatastorePath(self.ds, 'vmware_base',
                                    self.fake_image_uuid,
                                    '%s.iso' % self.fake_image_uuid)

    def fake_attach_cdrom(vm_ref, instance, data_store_ref,
                          iso_uploaded_path):
        # Assert inside the stub: called with the cached iso path.
        self.assertEqual(iso_uploaded_path, str(iso_path))

    self.stub_out('nova.virt.vmwareapi.vmops._attach_cdrom_to_vm',
                  fake_attach_cdrom)
    self.image.disk_format = 'iso'
    self._create_vm()
@mock.patch.object(nova.virt.vmwareapi.images.VMwareImage,
                   'from_image')
def test_iso_disk_cdrom_attach_with_config_drive(self,
                                                 mock_from_image):
    """With a forced config drive, two cdroms are attached in order:
    first the image iso, then the config drive iso.
    """
    img_props = images.VMwareImage(
        image_id=self.fake_image_uuid,
        file_size=80 * units.Gi,
        file_type='iso',
        linked_clone=False)

    mock_from_image.return_value = img_props

    self.flags(force_config_drive=True)
    iso_path = [
        ds_obj.DatastorePath(self.ds, 'vmware_base',
                             self.fake_image_uuid,
                             '%s.iso' % self.fake_image_uuid),
        ds_obj.DatastorePath(self.ds, 'fake-config-drive')]
    self.iso_index = 0

    def fake_attach_cdrom(vm_ref, instance, data_store_ref,
                          iso_uploaded_path):
        # Each call must present the next expected iso path.
        self.assertEqual(iso_uploaded_path, str(iso_path[self.iso_index]))
        self.iso_index += 1

    with test.nested(
            mock.patch.object(self.conn._vmops,
                              '_attach_cdrom_to_vm',
                              side_effect=fake_attach_cdrom),
            mock.patch.object(self.conn._vmops,
                              '_create_config_drive',
                              return_value='fake-config-drive'),
    ) as (fake_attach_cdrom_to_vm, fake_create_config_drive):
        self.image.disk_format = 'iso'
        self._create_vm()
        self.assertEqual(2, self.iso_index)
        self.assertEqual(fake_attach_cdrom_to_vm.call_count, 2)
        self.assertEqual(fake_create_config_drive.call_count, 1)
def test_ephemeral_disk_attach(self):
    """A flavor ephemeral disk shows up as ephemeral_0.vmdk."""
    self._create_vm(ephemeral=50)
    eph_path = ds_obj.DatastorePath(self.ds, self.uuid,
                                    'ephemeral_0.vmdk')
    vmwareapi_fake.assertPathExists(self, str(eph_path))
def test_ephemeral_disk_attach_from_bdi(self):
    """Each BDI ephemeral entry yields its own ephemeral_N.vmdk."""
    ephemeral = {'device_type': 'disk',
                 'disk_bus': constants.DEFAULT_ADAPTER_TYPE,
                 'size': 25}
    bdi = {'ephemerals': [dict(ephemeral), dict(ephemeral)]}
    self._create_vm(bdi=bdi, ephemeral=50)
    for filename in ('ephemeral_0.vmdk', 'ephemeral_1.vmdk'):
        eph_path = ds_obj.DatastorePath(self.ds, self.uuid, filename)
        vmwareapi_fake.assertPathExists(self, str(eph_path))
def test_ephemeral_disk_attach_from_bdii_with_no_ephs(self):
    """With an empty BDI list the flavor ephemeral is still created."""
    self._create_vm(bdi={'ephemerals': []}, ephemeral=50)
    eph_path = ds_obj.DatastorePath(self.ds, self.uuid,
                                    'ephemeral_0.vmdk')
    vmwareapi_fake.assertPathExists(self, str(eph_path))
def test_cdrom_attach_with_config_drive(self):
    """For a vmdk image with a forced config drive, only the config
    drive iso is attached as a cdrom.
    """
    self.flags(force_config_drive=True)

    iso_path = ds_obj.DatastorePath(self.ds, 'fake-config-drive')
    self.cd_attach_called = False

    def fake_attach_cdrom(vm_ref, instance, data_store_ref,
                          iso_uploaded_path):
        self.assertEqual(iso_uploaded_path, str(iso_path))
        self.cd_attach_called = True

    with test.nested(
            mock.patch.object(self.conn._vmops, '_attach_cdrom_to_vm',
                              side_effect=fake_attach_cdrom),
            mock.patch.object(self.conn._vmops, '_create_config_drive',
                              return_value='fake-config-drive'),
    ) as (fake_attach_cdrom_to_vm, fake_create_config_drive):
        self._create_vm()

    self.assertTrue(self.cd_attach_called)
@mock.patch.object(vmops.VMwareVMOps, 'power_off')
@mock.patch.object(driver.VMwareVCDriver, 'detach_volume')
@mock.patch.object(vmops.VMwareVMOps, 'destroy')
def test_destroy_with_attached_volumes(self,
                                       mock_destroy,
                                       mock_detach_volume,
                                       mock_power_off):
    """destroy() powers off, detaches each volume, then destroys."""
    self._create_vm()
    connection_info = {'data': 'fake-data', 'serial': 'volume-fake-id'}
    bdm = [{'connection_info': connection_info,
            'disk_bus': 'fake-bus',
            'device_name': 'fake-name',
            'mount_device': '/dev/sdb'}]
    bdi = {'block_device_mapping': bdm, 'root_device_name': '/dev/sda'}
    self.assertNotEqual(vm_states.STOPPED, self.instance.vm_state)
    self.conn.destroy(self.context, self.instance, self.network_info,
                      block_device_info=bdi)
    mock_power_off.assert_called_once_with(self.instance)
    # destroy() flips the instance state before detaching volumes.
    self.assertEqual(vm_states.STOPPED, self.instance.vm_state)
    mock_detach_volume.assert_called_once_with(
        connection_info, self.instance, 'fake-name')
    mock_destroy.assert_called_once_with(self.instance, True)
@mock.patch.object(vmops.VMwareVMOps, 'power_off',
                   side_effect=vexc.ManagedObjectNotFoundException())
@mock.patch.object(vmops.VMwareVMOps, 'destroy')
def test_destroy_with_attached_volumes_missing(self,
                                               mock_destroy,
                                               mock_power_off):
    """A vanished VM during power-off does not abort the destroy."""
    self._create_vm()
    connection_info = {'data': 'fake-data', 'serial': 'volume-fake-id'}
    bdm = [{'connection_info': connection_info,
            'disk_bus': 'fake-bus',
            'device_name': 'fake-name',
            'mount_device': '/dev/sdb'}]
    bdi = {'block_device_mapping': bdm, 'root_device_name': '/dev/sda'}
    self.assertNotEqual(vm_states.STOPPED, self.instance.vm_state)
    self.conn.destroy(self.context, self.instance, self.network_info,
                      block_device_info=bdi)
    mock_power_off.assert_called_once_with(self.instance)
    mock_destroy.assert_called_once_with(self.instance, True)
@mock.patch.object(driver.VMwareVCDriver, 'detach_volume',
                   side_effect=exception.NovaException())
@mock.patch.object(vmops.VMwareVMOps, 'destroy')
def test_destroy_with_attached_volumes_with_exception(
        self, mock_destroy, mock_detach_volume):
    """A detach failure propagates and prevents the VM destroy."""
    self._create_vm()
    connection_info = {'data': 'fake-data', 'serial': 'volume-fake-id'}
    bdm = [{'connection_info': connection_info,
            'disk_bus': 'fake-bus',
            'device_name': 'fake-name',
            'mount_device': '/dev/sdb'}]
    bdi = {'block_device_mapping': bdm, 'root_device_name': '/dev/sda'}
    self.assertRaises(exception.NovaException,
                      self.conn.destroy, self.context, self.instance,
                      self.network_info, block_device_info=bdi)
    mock_detach_volume.assert_called_once_with(
        connection_info, self.instance, 'fake-name')
    self.assertFalse(mock_destroy.called)
@mock.patch.object(driver.VMwareVCDriver, 'detach_volume',
side_effect=exception.DiskNotFound(message='oh man'))
@mock.patch.object(vmops.VMwareVMOps, 'destroy')
def test_destroy_with_attached_volumes_with_disk_not_found(
self, mock_destroy, mock_detach_volume):
self._create_vm()
connection_info = {'data': 'fake-data', 'serial': 'volume-fake-id'}
bdm = [{'connection_info': connection_info,
'disk_bus': 'fake-bus',
'device_name': 'fake-name',
'mount_device': '/dev/sdb'}]
bdi = {'block_device_mapping': bdm, 'root_device_name': '/dev/sda'}
self.conn.destroy(self.context, self.instance, self.network_info,
block_device_info=bdi)
mock_detach_volume.assert_called_once_with(
connection_info, self.instance, 'fake-name')
self.assertTrue(mock_destroy.called)
mock_destroy.assert_called_once_with(self.instance, True)
def test_spawn(self):
self._create_vm()
info = self._get_info()
self._check_vm_info(info, power_state.RUNNING)
def test_spawn_vm_ref_cached(self):
uuid = uuidutils.generate_uuid()
self.assertIsNone(vm_util.vm_ref_cache_get(uuid))
self._create_vm(uuid=uuid)
self.assertIsNotNone(vm_util.vm_ref_cache_get(uuid))
def test_spawn_power_on(self):
self._create_vm()
info = self._get_info()
self._check_vm_info(info, power_state.RUNNING)
    def test_spawn_root_size_0(self):
        """With a 0GB root disk flavor, only the base image is cached.

        The plain cached vmdk must exist, while no size-suffixed
        ('.0.vmdk') resized copy is created.
        """
        self._create_vm(instance_type='m1.micro')
        info = self._get_info()
        self._check_vm_info(info, power_state.RUNNING)
        cache = ('[%s] vmware_base/%s/%s.vmdk' %
                 (self.ds, self.fake_image_uuid, self.fake_image_uuid))
        gb_cache = ('[%s] vmware_base/%s/%s.0.vmdk' %
                    (self.ds, self.fake_image_uuid, self.fake_image_uuid))
        vmwareapi_fake.assertPathExists(self, cache)
        vmwareapi_fake.assertPathNotExists(self, gb_cache)
    def _spawn_with_delete_exception(self, fault=None):
        """Spawn while DeleteDatastoreFile_Task fails with *fault*.

        When a specific fault class is given, the driver is expected to
        tolerate the failed delete and the spawn succeeds; with
        fault=None a generic task error is produced and the spawn raises
        VMwareDriverException. Either way self.exception records that
        the failing delete task was actually issued.
        """
        def fake_call_method(module, method, *args, **kwargs):
            # Delegate to the real session call, but turn any datastore
            # file deletion into a failed task carrying *fault*.
            task_ref = self.call_method(module, method, *args, **kwargs)
            if method == "DeleteDatastoreFile_Task":
                self.exception = True
                task_mdo = vmwareapi_fake.create_task(method, "error",
                                                      error_fault=fault)
                return task_mdo.obj
            return task_ref
        with (
            mock.patch.object(self.conn._session, '_call_method',
                              fake_call_method)
        ):
            if fault:
                self._create_vm()
                info = self._get_info()
                self._check_vm_info(info, power_state.RUNNING)
            else:
                self.assertRaises(vexc.VMwareDriverException, self._create_vm)
            self.assertTrue(self.exception)
def test_spawn_with_delete_exception_not_found(self):
self._spawn_with_delete_exception(vmwareapi_fake.FileNotFound())
def test_spawn_with_delete_exception_file_fault(self):
self._spawn_with_delete_exception(vmwareapi_fake.FileFault())
def test_spawn_with_delete_exception_cannot_delete_file(self):
self._spawn_with_delete_exception(vmwareapi_fake.CannotDeleteFile())
def test_spawn_with_delete_exception_file_locked(self):
self._spawn_with_delete_exception(vmwareapi_fake.FileLocked())
def test_spawn_with_delete_exception_general(self):
self._spawn_with_delete_exception()
    def test_spawn_disk_extend(self):
        """Spawn extends the root disk to the 80GB flavor size.

        Uses mox: the expectation is recorded before ReplayAll(), so the
        statement order here is significant.
        """
        self.mox.StubOutWithMock(self.conn._vmops, '_extend_virtual_disk')
        requested_size = 80 * units.Mi
        self.conn._vmops._extend_virtual_disk(mox.IgnoreArg(),
                requested_size, mox.IgnoreArg(), mox.IgnoreArg())
        self.mox.ReplayAll()
        self._create_vm()
        info = self._get_info()
        self._check_vm_info(info, power_state.RUNNING)
def test_spawn_disk_extend_exists(self):
root = ds_obj.DatastorePath(self.ds, 'vmware_base',
self.fake_image_uuid,
'%s.80.vmdk' % self.fake_image_uuid)
def _fake_extend(instance, requested_size, name, dc_ref):
vmwareapi_fake._add_file(str(root))
with test.nested(
mock.patch.object(self.conn._vmops, '_extend_virtual_disk',
side_effect=_fake_extend)
) as (fake_extend_virtual_disk):
self._create_vm()
info = self._get_info()
self._check_vm_info(info, power_state.RUNNING)
vmwareapi_fake.assertPathExists(self, str(root))
self.assertEqual(1, fake_extend_virtual_disk[0].call_count)
    @mock.patch.object(nova.virt.vmwareapi.images.VMwareImage,
                       'from_image')
    def test_spawn_disk_extend_sparse(self, mock_from_image):
        """A small sparse image is extended to the 80GB root size.

        The extend must target the size-suffixed file in the image cache
        directory and use the datacenter ref returned by
        get_datacenter_ref_and_name.
        """
        img_props = images.VMwareImage(
            image_id=self.fake_image_uuid,
            file_size=units.Ki,
            disk_type=constants.DISK_TYPE_SPARSE,
            linked_clone=True)
        mock_from_image.return_value = img_props
        with test.nested(
            mock.patch.object(self.conn._vmops, '_extend_virtual_disk'),
            mock.patch.object(self.conn._vmops, 'get_datacenter_ref_and_name'),
        ) as (mock_extend, mock_get_dc):
            dc_val = mock.Mock()
            dc_val.ref = "fake_dc_ref"
            dc_val.name = "dc1"
            mock_get_dc.return_value = dc_val
            self._create_vm()
            iid = img_props.image_id
            cached_image = ds_obj.DatastorePath(self.ds, 'vmware_base',
                                                iid, '%s.80.vmdk' % iid)
            mock_extend.assert_called_once_with(
                    self.instance, self.instance.root_gb * units.Mi,
                    str(cached_image), "fake_dc_ref")
    def test_spawn_disk_extend_failed_copy(self):
        # Spawn instance
        # copy for extend fails without creating a file
        #
        # Expect the copy error to be raised
        self.flags(use_linked_clone=True, group='vmware')
        CopyError = vexc.FileFaultException

        # Substitute a sentinel task ref for the disk copy and fail the
        # wait on that sentinel, simulating a copy that produced no file.
        def fake_wait_for_task(task_ref):
            if task_ref == 'fake-copy-task':
                raise CopyError('Copy failed!')
            return self.wait_task(task_ref)

        def fake_call_method(module, method, *args, **kwargs):
            if method == "CopyVirtualDisk_Task":
                return 'fake-copy-task'
            return self.call_method(module, method, *args, **kwargs)

        with test.nested(
            mock.patch.object(self.conn._session, '_call_method',
                              new=fake_call_method),
            mock.patch.object(self.conn._session, '_wait_for_task',
                              new=fake_wait_for_task)):
            self.assertRaises(CopyError, self._create_vm)
    def test_spawn_disk_extend_failed_partial_copy(self):
        # Spawn instance
        # Copy for extend fails, leaving a file behind
        #
        # Expect the file to be cleaned up
        # Expect the copy error to be raised
        self.flags(use_linked_clone=True, group='vmware')
        self.task_ref = None
        uuid = self.fake_image_uuid
        cached_image = '[%s] vmware_base/%s/%s.80.vmdk' % (self.ds,
                                                           uuid, uuid)
        CopyError = vexc.FileFaultException

        # The copy task is allowed to run (so the file exists), but the
        # wait on that specific task is failed once.
        def fake_wait_for_task(task_ref):
            if task_ref == self.task_ref:
                self.task_ref = None
                vmwareapi_fake.assertPathExists(self, cached_image)
                # N.B. We don't test for -flat here because real
                # CopyVirtualDisk_Task doesn't actually create it
                raise CopyError('Copy failed!')
            return self.wait_task(task_ref)

        def fake_call_method(module, method, *args, **kwargs):
            task_ref = self.call_method(module, method, *args, **kwargs)
            if method == "CopyVirtualDisk_Task":
                self.task_ref = task_ref
            return task_ref

        with test.nested(
            mock.patch.object(self.conn._session, '_call_method',
                              new=fake_call_method),
            mock.patch.object(self.conn._session, '_wait_for_task',
                              new=fake_wait_for_task)):
            self.assertRaises(CopyError, self._create_vm)
        vmwareapi_fake.assertPathNotExists(self, cached_image)
    def test_spawn_disk_extend_failed_partial_copy_failed_cleanup(self):
        # Spawn instance
        # Copy for extend fails, leaves file behind
        # File cleanup fails
        #
        # Expect file to be left behind
        # Expect file cleanup error to be raised
        self.flags(use_linked_clone=True, group='vmware')
        self.task_ref = None
        uuid = self.fake_image_uuid
        cached_image = '[%s] vmware_base/%s/%s.80.vmdk' % (self.ds,
                                                           uuid, uuid)
        CopyError = vexc.FileFaultException
        DeleteError = vexc.CannotDeleteFileException

        # Fail the wait on the copy task (leaving its file behind) and
        # also fail the wait on the subsequent cleanup delete task.
        def fake_wait_for_task(task_ref):
            if task_ref == self.task_ref:
                self.task_ref = None
                vmwareapi_fake.assertPathExists(self, cached_image)
                # N.B. We don't test for -flat here because real
                # CopyVirtualDisk_Task doesn't actually create it
                raise CopyError('Copy failed!')
            elif task_ref == 'fake-delete-task':
                raise DeleteError('Delete failed!')
            return self.wait_task(task_ref)

        def fake_call_method(module, method, *args, **kwargs):
            if method == "DeleteDatastoreFile_Task":
                return 'fake-delete-task'
            task_ref = self.call_method(module, method, *args, **kwargs)
            if method == "CopyVirtualDisk_Task":
                self.task_ref = task_ref
            return task_ref

        with test.nested(
            mock.patch.object(self.conn._session, '_wait_for_task',
                              new=fake_wait_for_task),
            mock.patch.object(self.conn._session, '_call_method',
                              new=fake_call_method)):
            self.assertRaises(DeleteError, self._create_vm)
        vmwareapi_fake.assertPathExists(self, cached_image)
@mock.patch.object(nova.virt.vmwareapi.images.VMwareImage,
'from_image')
def test_spawn_disk_invalid_disk_size(self, mock_from_image):
img_props = images.VMwareImage(
image_id=self.fake_image_uuid,
file_size=82 * units.Gi,
disk_type=constants.DISK_TYPE_SPARSE,
linked_clone=True)
mock_from_image.return_value = img_props
self.assertRaises(exception.InstanceUnacceptable,
self._create_vm)
    @mock.patch.object(nova.virt.vmwareapi.images.VMwareImage,
                       'from_image')
    def test_spawn_disk_extend_insufficient_disk_space(self, mock_from_image):
        """NoDiskSpace during extend propagates and leaves no residue.

        Both the size-suffixed cache file and the -flat temporary must be
        cleaned up after the failed ExtendVirtualDisk_Task.
        """
        img_props = images.VMwareImage(
            image_id=self.fake_image_uuid,
            file_size=1024,
            disk_type=constants.DISK_TYPE_SPARSE,
            linked_clone=True)
        mock_from_image.return_value = img_props
        cached_image = ds_obj.DatastorePath(self.ds, 'vmware_base',
                                            self.fake_image_uuid,
                                            '%s.80.vmdk' %
                                             self.fake_image_uuid)
        tmp_file = ds_obj.DatastorePath(self.ds, 'vmware_base',
                                        self.fake_image_uuid,
                                        '%s.80-flat.vmdk' %
                                         self.fake_image_uuid)
        NoDiskSpace = vexc.get_fault_class('NoDiskSpace')

        # NOTE(review): unlike the partial-copy tests, self.task_ref is
        # not initialized here before the first wait -- presumably it is
        # set by setUp or a prior helper; confirm.
        def fake_wait_for_task(task_ref):
            if task_ref == self.task_ref:
                self.task_ref = None
                raise NoDiskSpace()
            return self.wait_task(task_ref)

        def fake_call_method(module, method, *args, **kwargs):
            task_ref = self.call_method(module, method, *args, **kwargs)
            if method == 'ExtendVirtualDisk_Task':
                self.task_ref = task_ref
            return task_ref

        with test.nested(
            mock.patch.object(self.conn._session, '_wait_for_task',
                              fake_wait_for_task),
            mock.patch.object(self.conn._session, '_call_method',
                              fake_call_method)
        ) as (mock_wait_for_task, mock_call_method):
            self.assertRaises(NoDiskSpace, self._create_vm)
            vmwareapi_fake.assertPathNotExists(self, str(cached_image))
            vmwareapi_fake.assertPathNotExists(self, str(tmp_file))
    def test_spawn_with_move_file_exists_exception(self):
        # The test will validate that the spawn completes
        # successfully. The "MoveDatastoreFile_Task" will
        # raise an file exists exception. The flag
        # self.exception will be checked to see that
        # the exception has indeed been raised.
        def fake_wait_for_task(task_ref):
            # Fail only the wait on the tracked move task, exactly once.
            if task_ref == self.task_ref:
                self.task_ref = None
                self.exception = True
                raise vexc.FileAlreadyExistsException()
            return self.wait_task(task_ref)

        def fake_call_method(module, method, *args, **kwargs):
            task_ref = self.call_method(module, method, *args, **kwargs)
            if method == "MoveDatastoreFile_Task":
                self.task_ref = task_ref
            return task_ref

        with test.nested(
            mock.patch.object(self.conn._session, '_wait_for_task',
                              fake_wait_for_task),
            mock.patch.object(self.conn._session, '_call_method',
                              fake_call_method)
        ) as (_wait_for_task, _call_method):
            self._create_vm()
            info = self._get_info()
            self._check_vm_info(info, power_state.RUNNING)
            self.assertTrue(self.exception)
    def test_spawn_with_move_general_exception(self):
        # The test will validate that the spawn completes
        # successfully. The "MoveDatastoreFile_Task" will
        # raise a general exception. The flag self.exception
        # will be checked to see that the exception has
        # indeed been raised.
        # NOTE(review): despite the comment above, a general move failure
        # is expected to make the spawn raise (assertRaises below).
        def fake_wait_for_task(task_ref):
            if task_ref == self.task_ref:
                self.task_ref = None
                self.exception = True
                raise vexc.VMwareDriverException('Exception!')
            return self.wait_task(task_ref)

        def fake_call_method(module, method, *args, **kwargs):
            task_ref = self.call_method(module, method, *args, **kwargs)
            if method == "MoveDatastoreFile_Task":
                self.task_ref = task_ref
            return task_ref

        with test.nested(
            mock.patch.object(self.conn._session, '_wait_for_task',
                              fake_wait_for_task),
            mock.patch.object(self.conn._session, '_call_method',
                              fake_call_method)
        ) as (_wait_for_task, _call_method):
            self.assertRaises(vexc.VMwareDriverException,
                              self._create_vm)
            self.assertTrue(self.exception)
    def test_spawn_with_move_poll_exception(self):
        """A move task that completes in the 'error' state fails the spawn."""
        self.call_method = self.conn._session._call_method

        def fake_call_method(module, method, *args, **kwargs):
            task_ref = self.call_method(module, method, *args, **kwargs)
            if method == "MoveDatastoreFile_Task":
                # Replace the real move result with an errored fake task.
                task_mdo = vmwareapi_fake.create_task(method, "error")
                return task_mdo.obj
            return task_ref

        with (
            mock.patch.object(self.conn._session, '_call_method',
                              fake_call_method)
        ):
            self.assertRaises(vexc.VMwareDriverException,
                              self._create_vm)
    def test_spawn_with_move_file_exists_poll_exception(self):
        # The test will validate that the spawn completes
        # successfully. The "MoveDatastoreFile_Task" will
        # raise a file exists exception. The flag self.exception
        # will be checked to see that the exception has
        # indeed been raised.
        def fake_call_method(module, method, *args, **kwargs):
            task_ref = self.call_method(module, method, *args, **kwargs)
            if method == "MoveDatastoreFile_Task":
                self.exception = True
                # An errored task carrying FileAlreadyExists is benign
                # and must not abort the spawn.
                task_mdo = vmwareapi_fake.create_task(method, "error",
                        error_fault=vmwareapi_fake.FileAlreadyExists())
                return task_mdo.obj
            return task_ref

        with (
            mock.patch.object(self.conn._session, '_call_method',
                              fake_call_method)
        ):
            self._create_vm()
            info = self._get_info()
            self._check_vm_info(info, power_state.RUNNING)
            self.assertTrue(self.exception)
    def _spawn_attach_volume_vmdk(self, set_image_ref=True):
        """Spawn from a vmdk volume acting as the boot (root) disk.

        Uses mox record/replay: the expected volumeops calls (resource
        pool lookup, vmdk relocation, attach) are recorded before
        ReplayAll(), so statement order is significant.
        """
        self._create_instance(set_image_ref=set_image_ref)
        self.mox.StubOutWithMock(block_device, 'volume_in_mapping')
        self.mox.StubOutWithMock(v_driver, 'block_device_info_get_mapping')
        connection_info = self._test_vmdk_connection_info('vmdk')
        root_disk = [{'connection_info': connection_info,
                      'boot_index': 0}]
        v_driver.block_device_info_get_mapping(
                mox.IgnoreArg()).AndReturn(root_disk)
        self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
                                 '_get_res_pool_of_vm')
        volumeops.VMwareVolumeOps._get_res_pool_of_vm(
                 mox.IgnoreArg()).AndReturn('fake_res_pool')
        self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
                                 '_relocate_vmdk_volume')
        volumeops.VMwareVolumeOps._relocate_vmdk_volume(mox.IgnoreArg(),
                 'fake_res_pool', mox.IgnoreArg())
        self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
                                 'attach_volume')
        volumeops.VMwareVolumeOps.attach_volume(connection_info,
                self.instance, constants.DEFAULT_ADAPTER_TYPE)
        self.mox.ReplayAll()
        block_device_info = {'block_device_mapping': root_disk}
        self.conn.spawn(self.context, self.instance, self.image,
                        injected_files=[], admin_password=None,
                        network_info=self.network_info,
                        block_device_info=block_device_info)
    def test_spawn_attach_volume_iscsi(self):
        """Spawn with an iscsi root volume attaches it via volumeops.

        Mox record/replay: expectations must be recorded before
        ReplayAll(), so statement order matters.
        """
        self._create_instance()
        self.mox.StubOutWithMock(block_device, 'volume_in_mapping')
        self.mox.StubOutWithMock(v_driver, 'block_device_info_get_mapping')
        connection_info = self._test_vmdk_connection_info('iscsi')
        root_disk = [{'connection_info': connection_info,
                      'boot_index': 0}]
        v_driver.block_device_info_get_mapping(
                mox.IgnoreArg()).AndReturn(root_disk)
        self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
                                 'attach_volume')
        volumeops.VMwareVolumeOps.attach_volume(connection_info,
                self.instance, constants.DEFAULT_ADAPTER_TYPE)
        self.mox.ReplayAll()
        block_device_info = {'mount_device': 'vda'}
        self.conn.spawn(self.context, self.instance, self.image,
                        injected_files=[], admin_password=None,
                        network_info=self.network_info,
                        block_device_info=block_device_info)
def test_spawn_hw_versions(self):
updates = {'extra_specs': {'vmware:hw_version': 'vmx-08'}}
self._create_vm(instance_type_updates=updates)
vm = self._get_vm_record()
version = vm.get("version")
self.assertEqual('vmx-08', version)
    def mock_upload_image(self, context, image, instance, session, **kwargs):
        """Stand-in for images.upload_image_stream_optimized.

        Verifies the snapshot name, instance and vmdk_size the driver
        passes when uploading a snapshot (used by _test_snapshot).
        """
        self.assertEqual('Test-Snapshot', image)
        self.assertEqual(self.instance, instance)
        self.assertEqual(1024, kwargs['vmdk_size'])
    def test_get_vm_ref_using_extra_config(self):
        """VM lookup works via extraConfig uuid, falling back to name."""
        self._create_vm()
        vm_ref = vm_util._get_vm_ref_from_extraconfig(self.conn._session,
                                                      self.instance['uuid'])
        self.assertIsNotNone(vm_ref, 'VM Reference cannot be none')
        # Disrupt the fake Virtual Machine object so that extraConfig
        # cannot be matched.
        fake_vm = self._get_vm_record()
        fake_vm.get('config.extraConfig["nvp.vm-uuid"]').value = ""
        # We should not get a Virtual Machine through extraConfig.
        vm_ref = vm_util._get_vm_ref_from_extraconfig(self.conn._session,
                                                      self.instance['uuid'])
        self.assertIsNone(vm_ref, 'VM Reference should be none')
        # Check if we can find the Virtual Machine using the name.
        vm_ref = vm_util.get_vm_ref(self.conn._session, self.instance)
        self.assertIsNotNone(vm_ref, 'VM Reference cannot be none')
    def test_search_vm_ref_by_identifier(self):
        """search_vm_ref_by_identifier matches uuid, name or extraConfig.

        After rewriting all three identifying fields to "foo", lookup by
        the original uuid fails while lookup by "foo" succeeds.
        """
        self._create_vm()
        vm_ref = vm_util.search_vm_ref_by_identifier(self.conn._session,
                                                     self.instance['uuid'])
        self.assertIsNotNone(vm_ref, 'VM Reference cannot be none')
        fake_vm = self._get_vm_record()
        fake_vm.set("summary.config.instanceUuid", "foo")
        fake_vm.set("name", "foo")
        fake_vm.get('config.extraConfig["nvp.vm-uuid"]').value = "foo"
        self.assertIsNone(vm_util.search_vm_ref_by_identifier(
                                    self.conn._session, self.instance['uuid']),
                          "VM Reference should be none")
        self.assertIsNotNone(
                vm_util.search_vm_ref_by_identifier(self.conn._session, "foo"),
                "VM Reference should not be none")
    def test_get_object_for_optionvalue(self):
        """_get_object_for_optionvalue finds a VM by its extraConfig uuid."""
        self._create_vm()
        vms = self.conn._session._call_method(vim_util, "get_objects",
                "VirtualMachine", ['config.extraConfig["nvp.vm-uuid"]'])
        vm_ref = vm_util._get_object_for_optionvalue(vms,
                                                     self.instance["uuid"])
        self.assertIsNotNone(vm_ref, 'VM Reference cannot be none')
    def _test_snapshot(self):
        """Drive a snapshot of the current instance and verify task states.

        The update-task-state callback must be invoked twice: first with
        IMAGE_PENDING_UPLOAD, then with IMAGE_UPLOADING (expecting the
        former). The VM must be RUNNING before and after.
        """
        expected_calls = [
                {'args': (),
                 'kwargs':
                     {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
                {'args': (),
                 'kwargs':
                     {'task_state': task_states.IMAGE_UPLOADING,
                      'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
        func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
        info = self._get_info()
        self._check_vm_info(info, power_state.RUNNING)
        with mock.patch.object(images, 'upload_image_stream_optimized',
                               self.mock_upload_image):
            self.conn.snapshot(self.context, self.instance, "Test-Snapshot",
                               func_call_matcher.call)
        info = self._get_info()
        self._check_vm_info(info, power_state.RUNNING)
        self.assertIsNone(func_call_matcher.match())
    def test_snapshot(self):
        """Snapshot of a running instance succeeds end to end."""
        self._create_vm()
        self._test_snapshot()
def test_snapshot_no_root_disk(self):
self._iso_disk_type_created(instance_type='m1.micro')
self.assertRaises(error_util.NoRootDiskDefined, self.conn.snapshot,
self.context, self.instance, "Test-Snapshot",
lambda *args, **kwargs: None)
def test_snapshot_non_existent(self):
self._create_instance()
self.assertRaises(exception.InstanceNotFound, self.conn.snapshot,
self.context, self.instance, "Test-Snapshot",
lambda *args, **kwargs: None)
    def test_snapshot_delete_vm_snapshot(self):
        """Snapshot creates and then deletes the intermediate VM snapshot.

        Mox record/replay: _create_vm_snapshot and _delete_vm_snapshot
        expectations are recorded before ReplayAll().
        """
        self._create_vm()
        fake_vm = self._get_vm_record()
        snapshot_ref = vmwareapi_fake.ManagedObjectReference(
                               value="Snapshot-123",
                               name="VirtualMachineSnapshot")
        self.mox.StubOutWithMock(vmops.VMwareVMOps,
                                 '_create_vm_snapshot')
        self.conn._vmops._create_vm_snapshot(
                self.instance, fake_vm.obj).AndReturn(snapshot_ref)
        self.mox.StubOutWithMock(vmops.VMwareVMOps,
                                 '_delete_vm_snapshot')
        self.conn._vmops._delete_vm_snapshot(
                self.instance, fake_vm.obj, snapshot_ref).AndReturn(None)
        self.mox.ReplayAll()
        self._test_snapshot()
    def _snapshot_delete_vm_snapshot_exception(self, exception, call_count=1):
        """Check _delete_vm_snapshot behavior when the wait raises.

        TaskInProgress is retried (sleeping between attempts) until the
        configured retry count; any other exception class propagates on
        the first attempt with no sleeps.
        """
        self._create_vm()
        fake_vm = vmwareapi_fake._get_objects("VirtualMachine").objects[0].obj
        snapshot_ref = vmwareapi_fake.ManagedObjectReference(
                               value="Snapshot-123",
                               name="VirtualMachineSnapshot")
        with test.nested(
            mock.patch.object(self.conn._session, '_wait_for_task',
                              side_effect=exception),
            mock.patch.object(vmops, '_time_sleep_wrapper')
        ) as (_fake_wait, _fake_sleep):
            if exception != vexc.TaskInProgress:
                self.assertRaises(exception,
                                  self.conn._vmops._delete_vm_snapshot,
                                  self.instance, fake_vm, snapshot_ref)
                self.assertEqual(0, _fake_sleep.call_count)
            else:
                self.conn._vmops._delete_vm_snapshot(self.instance, fake_vm,
                                                     snapshot_ref)
                self.assertEqual(call_count - 1, _fake_sleep.call_count)
            self.assertEqual(call_count, _fake_wait.call_count)
def test_snapshot_delete_vm_snapshot_exception(self):
self._snapshot_delete_vm_snapshot_exception(exception.NovaException)
def test_snapshot_delete_vm_snapshot_exception_retry(self):
self.flags(api_retry_count=5, group='vmware')
self._snapshot_delete_vm_snapshot_exception(vexc.TaskInProgress,
5)
def test_reboot(self):
self._create_vm()
info = self._get_info()
self._check_vm_info(info, power_state.RUNNING)
reboot_type = "SOFT"
self.conn.reboot(self.context, self.instance, self.network_info,
reboot_type)
info = self._get_info()
self._check_vm_info(info, power_state.RUNNING)
def test_reboot_hard(self):
self._create_vm()
info = self._get_info()
self._check_vm_info(info, power_state.RUNNING)
reboot_type = "HARD"
self.conn.reboot(self.context, self.instance, self.network_info,
reboot_type)
info = self._get_info()
self._check_vm_info(info, power_state.RUNNING)
def test_reboot_with_uuid(self):
"""Test fall back to use name when can't find by uuid."""
self._create_vm()
info = self._get_info()
self._check_vm_info(info, power_state.RUNNING)
reboot_type = "SOFT"
self.conn.reboot(self.context, self.instance, self.network_info,
reboot_type)
info = self._get_info()
self._check_vm_info(info, power_state.RUNNING)
def test_reboot_non_existent(self):
self._create_instance()
self.assertRaises(exception.InstanceNotFound, self.conn.reboot,
self.context, self.instance, self.network_info,
'SOFT')
    def test_poll_rebooting_instances(self):
        """poll_rebooting_instances issues a compute API reboot per instance.

        Mox record/replay: the compute_api.API.reboot expectation is
        recorded before ReplayAll().
        """
        self.mox.StubOutWithMock(compute_api.API, 'reboot')
        compute_api.API.reboot(mox.IgnoreArg(), mox.IgnoreArg(),
                               mox.IgnoreArg())
        self.mox.ReplayAll()
        self._create_vm()
        instances = [self.instance]
        self.conn.poll_rebooting_instances(60, instances)
def test_reboot_not_poweredon(self):
self._create_vm()
info = self._get_info()
self._check_vm_info(info, power_state.RUNNING)
self.conn.suspend(self.context, self.instance)
info = self._get_info()
self._check_vm_info(info, power_state.SUSPENDED)
self.assertRaises(exception.InstanceRebootFailure, self.conn.reboot,
self.context, self.instance, self.network_info,
'SOFT')
def test_suspend(self):
self._create_vm()
info = self._get_info()
self._check_vm_info(info, power_state.RUNNING)
self.conn.suspend(self.context, self.instance)
info = self._get_info()
self._check_vm_info(info, power_state.SUSPENDED)
def test_suspend_non_existent(self):
self._create_instance()
self.assertRaises(exception.InstanceNotFound, self.conn.suspend,
self.context, self.instance)
def test_resume(self):
self._create_vm()
info = self._get_info()
self._check_vm_info(info, power_state.RUNNING)
self.conn.suspend(self.context, self.instance)
info = self._get_info()
self._check_vm_info(info, power_state.SUSPENDED)
self.conn.resume(self.context, self.instance, self.network_info)
info = self._get_info()
self._check_vm_info(info, power_state.RUNNING)
def test_resume_non_existent(self):
self._create_instance()
self.assertRaises(exception.InstanceNotFound, self.conn.resume,
self.context, self.instance, self.network_info)
def test_resume_not_suspended(self):
self._create_vm()
info = self._get_info()
self._check_vm_info(info, power_state.RUNNING)
self.assertRaises(exception.InstanceResumeFailure, self.conn.resume,
self.context, self.instance, self.network_info)
def test_power_on(self):
self._create_vm()
info = self._get_info()
self._check_vm_info(info, power_state.RUNNING)
self.conn.power_off(self.instance)
info = self._get_info()
self._check_vm_info(info, power_state.SHUTDOWN)
self.conn.power_on(self.context, self.instance, self.network_info)
info = self._get_info()
self._check_vm_info(info, power_state.RUNNING)
def test_power_on_non_existent(self):
self._create_instance()
self.assertRaises(exception.InstanceNotFound, self.conn.power_on,
self.context, self.instance, self.network_info)
def test_power_off(self):
self._create_vm()
info = self._get_info()
self._check_vm_info(info, power_state.RUNNING)
self.conn.power_off(self.instance)
info = self._get_info()
self._check_vm_info(info, power_state.SHUTDOWN)
def test_power_off_non_existent(self):
self._create_instance()
self.assertRaises(exception.InstanceNotFound, self.conn.power_off,
self.instance)
    @mock.patch.object(driver.VMwareVCDriver, 'reboot')
    @mock.patch.object(vm_util, 'get_vm_state',
                       return_value='poweredOff')
    def test_resume_state_on_host_boot(self, mock_get_vm_state,
                                       mock_reboot):
        """A poweredOff VM is hard-rebooted after a host boot."""
        self._create_instance()
        self.conn.resume_state_on_host_boot(self.context, self.instance,
                                            'network_info')
        mock_get_vm_state.assert_called_once_with(self.conn._session,
                                                  self.instance)
        mock_reboot.assert_called_once_with(self.context, self.instance,
                                            'network_info', 'hard', None)
    def test_resume_state_on_host_boot_no_reboot(self):
        """poweredOn and suspended VMs are left alone after a host boot."""
        self._create_instance()
        for state in ['poweredOn', 'suspended']:
            with test.nested(
                mock.patch.object(driver.VMwareVCDriver, 'reboot'),
                mock.patch.object(vm_util, 'get_vm_state',
                                  return_value=state)
            ) as (mock_reboot, mock_get_vm_state):
                self.conn.resume_state_on_host_boot(self.context,
                                                    self.instance,
                                                    'network_info')
                mock_get_vm_state.assert_called_once_with(self.conn._session,
                                                          self.instance)
                self.assertFalse(mock_reboot.called)
@mock.patch('nova.virt.driver.block_device_info_get_mapping')
@mock.patch('nova.virt.vmwareapi.driver.VMwareVCDriver.detach_volume')
def test_detach_instance_volumes(
self, detach_volume, block_device_info_get_mapping):
self._create_vm()
def _mock_bdm(connection_info, device_name):
return {'connection_info': connection_info,
'device_name': device_name}
disk_1 = _mock_bdm(mock.sentinel.connection_info_1, 'dev1')
disk_2 = _mock_bdm(mock.sentinel.connection_info_2, 'dev2')
block_device_info_get_mapping.return_value = [disk_1, disk_2]
detach_volume.side_effect = [None, exception.DiskNotFound("Error")]
with mock.patch.object(self.conn, '_vmops') as vmops:
block_device_info = mock.sentinel.block_device_info
self.conn._detach_instance_volumes(self.instance,
block_device_info)
block_device_info_get_mapping.assert_called_once_with(
block_device_info)
vmops.power_off.assert_called_once_with(self.instance)
self.assertEqual(vm_states.STOPPED, self.instance.vm_state)
exp_detach_calls = [mock.call(mock.sentinel.connection_info_1,
self.instance, 'dev1'),
mock.call(mock.sentinel.connection_info_2,
self.instance, 'dev2')]
self.assertEqual(exp_detach_calls, detach_volume.call_args_list)
def test_destroy(self):
self._create_vm()
info = self._get_info()
self._check_vm_info(info, power_state.RUNNING)
instances = self.conn.list_instances()
self.assertEqual(1, len(instances))
self.conn.destroy(self.context, self.instance, self.network_info)
instances = self.conn.list_instances()
self.assertEqual(0, len(instances))
self.assertIsNone(vm_util.vm_ref_cache_get(self.uuid))
def test_destroy_no_datastore(self):
self._create_vm()
info = self._get_info()
self._check_vm_info(info, power_state.RUNNING)
instances = self.conn.list_instances()
self.assertEqual(1, len(instances))
# Delete the vmPathName
vm = self._get_vm_record()
vm.delete('config.files.vmPathName')
self.conn.destroy(self.context, self.instance, self.network_info)
instances = self.conn.list_instances()
self.assertEqual(0, len(instances))
def test_destroy_non_existent(self):
self.destroy_disks = True
with mock.patch.object(self.conn._vmops,
"destroy") as mock_destroy:
self._create_instance()
self.conn.destroy(self.context, self.instance,
self.network_info,
None, self.destroy_disks)
mock_destroy.assert_called_once_with(self.instance,
self.destroy_disks)
def test_destroy_instance_without_compute(self):
instance = fake_instance.fake_instance_obj(None)
self.destroy_disks = True
with mock.patch.object(self.conn._vmops,
"destroy") as mock_destroy:
self.conn.destroy(self.context, instance,
self.network_info,
None, self.destroy_disks)
self.assertFalse(mock_destroy.called)
    def _destroy_instance_without_vm_ref(self,
                                         task_state=None):
        """Destroy an instance found only by name, not by uuid/vm_ref.

        _destroy_instance must run exactly once, except while the
        instance is in RESIZE_REVERTING, where destroy is skipped; no
        raw session calls may be made in either case.
        """
        def fake_vm_ref_from_name(session, vm_name):
            return 'fake-ref'
        self._create_instance()
        with test.nested(
             mock.patch.object(vm_util, 'get_vm_ref_from_name',
                               fake_vm_ref_from_name),
             mock.patch.object(self.conn._session,
                               '_call_method'),
             mock.patch.object(self.conn._vmops,
                               '_destroy_instance')
        ) as (mock_get, mock_call, mock_destroy):
            self.instance.task_state = task_state
            self.conn.destroy(self.context, self.instance,
                              self.network_info,
                              None, True)
            if task_state == task_states.RESIZE_REVERTING:
                expected = 0
            else:
                expected = 1
            self.assertEqual(expected, mock_destroy.call_count)
            self.assertFalse(mock_call.called)
def test_destroy_instance_without_vm_ref(self):
self._destroy_instance_without_vm_ref()
def test_destroy_instance_without_vm_ref_with_resize_revert(self):
self._destroy_instance_without_vm_ref(
task_state=task_states.RESIZE_REVERTING)
    def _rescue(self, config_drive=False):
        # validate that the power on is only called once
        self._power_on = vm_util.power_on_instance
        self._power_on_called = 0

        # The original VM must already be SHUT DOWN by the time the
        # rescue disk is attached to the new VM.
        def fake_attach_disk_to_vm(vm_ref, instance,
                                   adapter_type, disk_type, vmdk_path=None,
                                   disk_size=None, linked_clone=False,
                                   controller_key=None, unit_number=None,
                                   device_name=None):
            info = self.conn.get_info(instance)
            self._check_vm_info(info, power_state.SHUTDOWN)

        if config_drive:
            def fake_create_config_drive(instance, injected_files, password,
                                         network_info, data_store_name,
                                         folder, instance_uuid, cookies):
                self.assertTrue(uuidutils.is_uuid_like(instance['uuid']))
                return str(ds_obj.DatastorePath(data_store_name,
                                                instance_uuid, 'fake.iso'))

            self.stub_out('nova.virt.vmwareapi.vmops._create_config_drive',
                          fake_create_config_drive)
        self._create_vm()

        # Wrap the real power-on so the call count can be asserted below.
        def fake_power_on_instance(session, instance, vm_ref=None):
            self._power_on_called += 1
            return self._power_on(session, instance, vm_ref=vm_ref)

        info = self._get_info()
        self._check_vm_info(info, power_state.RUNNING)
        self.stub_out('nova.virt.vmwareapi.vm_util.power_on_instance',
                      fake_power_on_instance)
        self.stub_out('nova.virt.vmwareapi.volumeops.'
                      'VMwareVolumeOps.attach_disk_to_vm',
                      fake_attach_disk_to_vm)
        self.conn.rescue(self.context, self.instance, self.network_info,
                         self.image, 'fake-password')
        # The rescue VM keeps the instance identity; the original VM is
        # renamed with an '-orig' suffix and left powered off.
        info = self.conn.get_info({'name': '1',
                                   'uuid': self.uuid,
                                   'node': self.instance_node})
        self._check_vm_info(info, power_state.RUNNING)
        info = self.conn.get_info({'name': '1-orig',
                                   'uuid': '%s-orig' % self.uuid,
                                   'node': self.instance_node})
        self._check_vm_info(info, power_state.SHUTDOWN)
        self.assertIsNotNone(vm_util.vm_ref_cache_get(self.uuid))
        self.assertEqual(1, self._power_on_called)
    def test_get_diagnostics(self):
        """get_diagnostics returns the fake VM's stats, 'vmware:'-prefixed."""
        self._create_vm()
        expected = {'memoryReservation': 0, 'suspendInterval': 0,
                    'maxCpuUsage': 2000, 'toolsInstallerMounted': False,
                    'consumedOverheadMemory': 20, 'numEthernetCards': 1,
                    'numCpu': 1, 'featureRequirement': [{'key': 'cpuid.AES'}],
                    'memoryOverhead': 21417984,
                    'guestMemoryUsage': 0, 'connectionState': 'connected',
                    'memorySizeMB': 512, 'balloonedMemory': 0,
                    'vmPathName': 'fake_path', 'template': False,
                    'overallCpuUsage': 0, 'powerState': 'poweredOn',
                    'cpuReservation': 0, 'overallCpuDemand': 0,
                    'numVirtualDisks': 1, 'hostMemoryUsage': 141}
        expected = {'vmware:' + k: v for k, v in expected.items()}
        instance = fake_instance.fake_instance_obj(None,
                                                   name=1,
                                                   uuid=self.uuid,
                                                   node=self.instance_node)
        self.assertThat(
                self.conn.get_diagnostics(instance),
                matchers.DictMatches(expected))
    def test_get_instance_diagnostics(self):
        """get_instance_diagnostics returns the standardized v1.0 payload."""
        self._create_vm()
        expected = {'uptime': 0,
                    'memory_details': {'used': 0, 'maximum': 512},
                    'nic_details': [],
                    'driver': 'vmwareapi',
                    'state': 'running',
                    'version': '1.0',
                    'cpu_details': [],
                    'disk_details': [],
                    'hypervisor_os': 'esxi',
                    'config_drive': 'False'}
        instance = objects.Instance(uuid=self.uuid,
                                    config_drive=False,
                                    system_metadata={},
                                    node=self.instance_node)
        actual = self.conn.get_instance_diagnostics(instance)
        self.assertThat(actual.serialize(), matchers.DictMatches(expected))
def test_get_console_output(self):
self.assertRaises(NotImplementedError, self.conn.get_console_output,
None, None)
def test_get_vnc_console_non_existent(self):
self._create_instance()
self.assertRaises(exception.InstanceNotFound,
self.conn.get_vnc_console,
self.context,
self.instance)
    def _test_get_vnc_console(self):
        """VNC host/port are read back from the VM's VNC extra config."""
        self._create_vm()
        fake_vm = self._get_vm_record()
        OptionValue = collections.namedtuple('OptionValue', ['key', 'value'])
        opt_val = OptionValue(key='', value=5906)
        fake_vm.set(vm_util.VNC_CONFIG_KEY, opt_val)
        vnc_console = self.conn.get_vnc_console(self.context, self.instance)
        self.assertEqual(self.vnc_host, vnc_console.host)
        self.assertEqual(5906, vnc_console.port)
    def test_get_vnc_console(self):
        """VNC console info is returned when the VM has a VNC port configured."""
        self._test_get_vnc_console()
    def test_get_vnc_console_noport(self):
        """Without a VNC port, get_vnc_console raises ConsoleTypeUnavailable."""
        self._create_vm()
        self.assertRaises(exception.ConsoleTypeUnavailable,
                          self.conn.get_vnc_console,
                          self.context,
                          self.instance)
    def test_get_volume_connector(self):
        """The volume connector reports ip/initiator/host and the VM moref."""
        self._create_vm()
        connector_dict = self.conn.get_volume_connector(self.instance)
        fake_vm = self._get_vm_record()
        fake_vm_id = fake_vm.obj.value
        self.assertEqual('test_url', connector_dict['ip'])
        self.assertEqual('iscsi-name', connector_dict['initiator'])
        self.assertEqual('test_url', connector_dict['host'])
        self.assertEqual(fake_vm_id, connector_dict['instance'])
def _test_vmdk_connection_info(self, type):
return {'driver_volume_type': type,
'serial': 'volume-fake-id',
'data': {'volume': 'vm-10',
'volume_id': 'volume-fake-id'}}
    def test_volume_attach_vmdk(self):
        """attach_volume delegates vmdk volumes to _attach_volume_vmdk."""
        self._create_vm()
        connection_info = self._test_vmdk_connection_info('vmdk')
        mount_point = '/dev/vdc'
        self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
                                 '_attach_volume_vmdk')
        volumeops.VMwareVolumeOps._attach_volume_vmdk(connection_info,
                self.instance, None)
        self.mox.ReplayAll()
        self.conn.attach_volume(None, connection_info, self.instance,
                                mount_point)
    def test_volume_detach_vmdk(self):
        """detach_volume delegates vmdk volumes to _detach_volume_vmdk."""
        self._create_vm()
        connection_info = self._test_vmdk_connection_info('vmdk')
        mount_point = '/dev/vdc'
        self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
                                 '_detach_volume_vmdk')
        volumeops.VMwareVolumeOps._detach_volume_vmdk(connection_info,
                self.instance)
        self.mox.ReplayAll()
        self.conn.detach_volume(connection_info, self.instance, mount_point,
                                encryption=None)
    def test_attach_vmdk_disk_to_vm(self):
        """attach_volume resolves the volume ref and attaches its vmdk disk,
        then records the disk uuid via _update_volume_details."""
        self._create_vm()
        connection_info = self._test_vmdk_connection_info('vmdk')
        adapter_type = constants.DEFAULT_ADAPTER_TYPE
        disk_type = constants.DEFAULT_DISK_TYPE
        disk_uuid = 'e97f357b-331e-4ad1-b726-89be048fb811'
        backing = mock.Mock(uuid=disk_uuid)
        device = mock.Mock(backing=backing)
        vmdk_info = vm_util.VmdkInfo('fake-path', adapter_type, disk_type, 64,
                                     device)
        with test.nested(
            mock.patch.object(vm_util, 'get_vm_ref',
                              return_value=mock.sentinel.vm_ref),
            mock.patch.object(volumeops.VMwareVolumeOps, '_get_volume_ref'),
            mock.patch.object(vm_util, 'get_vmdk_info',
                              return_value=vmdk_info),
            mock.patch.object(volumeops.VMwareVolumeOps, 'attach_disk_to_vm'),
            mock.patch.object(volumeops.VMwareVolumeOps,
                              '_update_volume_details')
        ) as (get_vm_ref, get_volume_ref, get_vmdk_info,
              attach_disk_to_vm, update_volume_details):
            self.conn.attach_volume(None, connection_info, self.instance,
                                    '/dev/vdc')
            get_vm_ref.assert_called_once_with(self.conn._session,
                                               self.instance)
            get_volume_ref.assert_called_once_with(
                connection_info['data']['volume'])
            self.assertTrue(get_vmdk_info.called)
            attach_disk_to_vm.assert_called_once_with(mock.sentinel.vm_ref,
                self.instance, adapter_type, disk_type, vmdk_path='fake-path')
            update_volume_details.assert_called_once_with(
                mock.sentinel.vm_ref, connection_info['data']['volume_id'],
                disk_uuid)
    def test_detach_vmdk_disk_from_vm(self):
        """detach_volume forwards vmdk volumes to VMwareVolumeOps.detach_volume."""
        self._create_vm()
        connection_info = self._test_vmdk_connection_info('vmdk')
        with mock.patch.object(volumeops.VMwareVolumeOps,
                               'detach_volume') as detach_volume:
            self.conn.detach_volume(connection_info, self.instance,
                                    '/dev/vdc', encryption=None)
            detach_volume.assert_called_once_with(connection_info,
                                                  self.instance)
    def test_volume_attach_iscsi(self):
        """attach_volume delegates iscsi volumes to _attach_volume_iscsi."""
        self._create_vm()
        connection_info = self._test_vmdk_connection_info('iscsi')
        mount_point = '/dev/vdc'
        self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
                                 '_attach_volume_iscsi')
        volumeops.VMwareVolumeOps._attach_volume_iscsi(connection_info,
                self.instance, None)
        self.mox.ReplayAll()
        self.conn.attach_volume(None, connection_info, self.instance,
                                mount_point)
    def test_volume_detach_iscsi(self):
        """detach_volume delegates iscsi volumes to _detach_volume_iscsi."""
        self._create_vm()
        connection_info = self._test_vmdk_connection_info('iscsi')
        mount_point = '/dev/vdc'
        self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
                                 '_detach_volume_iscsi')
        volumeops.VMwareVolumeOps._detach_volume_iscsi(connection_info,
                self.instance)
        self.mox.ReplayAll()
        self.conn.detach_volume(connection_info, self.instance, mount_point,
                                encryption=None)
    def test_attach_iscsi_disk_to_vm(self):
        """Attaching an iscsi volume discovers the target (after a rescan)
        and attaches the device as an rdmp disk."""
        self._create_vm()
        connection_info = self._test_vmdk_connection_info('iscsi')
        connection_info['data']['target_portal'] = 'fake_target_host:port'
        connection_info['data']['target_iqn'] = 'fake_target_iqn'
        mount_point = '/dev/vdc'
        discover = ('fake_name', 'fake_uuid')
        self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
                                 '_iscsi_get_target')
        # simulate target not found
        volumeops.VMwareVolumeOps._iscsi_get_target(
            connection_info['data']).AndReturn((None, None))
        self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
                                 '_iscsi_add_send_target_host')
        # rescan gets called with target portal
        self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
                                 '_iscsi_rescan_hba')
        volumeops.VMwareVolumeOps._iscsi_rescan_hba(
            connection_info['data']['target_portal'])
        # simulate target found
        volumeops.VMwareVolumeOps._iscsi_get_target(
            connection_info['data']).AndReturn(discover)
        self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
                                 'attach_disk_to_vm')
        volumeops.VMwareVolumeOps.attach_disk_to_vm(mox.IgnoreArg(),
                self.instance, mox.IgnoreArg(), 'rdmp',
                device_name=mox.IgnoreArg())
        self.mox.ReplayAll()
        self.conn.attach_volume(None, connection_info, self.instance,
                                mount_point)
    def test_iscsi_rescan_hba(self):
        """Rescanning an HBA adds the send target once and is idempotent."""
        fake_target_portal = 'fake_target_host:port'
        host_storage_sys = vmwareapi_fake._get_objects(
            "HostStorageSystem").objects[0]
        iscsi_hba_array = host_storage_sys.get('storageDeviceInfo'
                                               '.hostBusAdapter')
        iscsi_hba = iscsi_hba_array.HostHostBusAdapter[0]
        # Check the host system does not have the send target
        self.assertRaises(AttributeError, getattr, iscsi_hba,
                          'configuredSendTarget')
        # Rescan HBA with the target portal
        vops = volumeops.VMwareVolumeOps(self.conn._session)
        vops._iscsi_rescan_hba(fake_target_portal)
        # Check if HBA has the target portal configured
        self.assertEqual('fake_target_host',
                          iscsi_hba.configuredSendTarget[0].address)
        # Rescan HBA with same portal
        vops._iscsi_rescan_hba(fake_target_portal)
        self.assertEqual(1, len(iscsi_hba.configuredSendTarget))
    def test_iscsi_get_target(self):
        """_iscsi_get_target resolves a registered target to (device, uuid)."""
        data = {'target_portal': 'fake_target_host:port',
                'target_iqn': 'fake_target_iqn'}
        host = vmwareapi_fake._get_objects('HostSystem').objects[0]
        host._add_iscsi_target(data)
        vops = volumeops.VMwareVolumeOps(self.conn._session)
        result = vops._iscsi_get_target(data)
        self.assertEqual(('fake-device', 'fake-uuid'), result)
    def test_detach_iscsi_disk_from_vm(self):
        """Detaching an iscsi volume looks up the RDM disk and detaches it
        with destroy_disk=True."""
        self._create_vm()
        connection_info = self._test_vmdk_connection_info('iscsi')
        connection_info['data']['target_portal'] = 'fake_target_portal'
        connection_info['data']['target_iqn'] = 'fake_target_iqn'
        mount_point = '/dev/vdc'
        find = ('fake_name', 'fake_uuid')
        self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
                                 '_iscsi_get_target')
        volumeops.VMwareVolumeOps._iscsi_get_target(
            connection_info['data']).AndReturn(find)
        self.mox.StubOutWithMock(vm_util, 'get_rdm_disk')
        device = 'fake_device'
        vm_util.get_rdm_disk(mox.IgnoreArg(), 'fake_uuid').AndReturn(device)
        self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
                                 'detach_disk_from_vm')
        volumeops.VMwareVolumeOps.detach_disk_from_vm(mox.IgnoreArg(),
                self.instance, device, destroy_disk=True)
        self.mox.ReplayAll()
        self.conn.detach_volume(connection_info, self.instance, mount_point,
                                encryption=None)
    def test_connection_info_get(self):
        """Connector of a live VM includes the 'instance' moref key."""
        self._create_vm()
        connector = self.conn.get_volume_connector(self.instance)
        self.assertEqual('test_url', connector['ip'])
        self.assertEqual('test_url', connector['host'])
        self.assertEqual('iscsi-name', connector['initiator'])
        self.assertIn('instance', connector)
    def test_connection_info_get_after_destroy(self):
        """After destroy, the connector no longer carries an 'instance' key."""
        self._create_vm()
        self.conn.destroy(self.context, self.instance, self.network_info)
        connector = self.conn.get_volume_connector(self.instance)
        self.assertEqual('test_url', connector['ip'])
        self.assertEqual('test_url', connector['host'])
        self.assertEqual('iscsi-name', connector['initiator'])
        self.assertNotIn('instance', connector)
    def test_refresh_instance_security_rules(self):
        """The driver does not implement refresh_instance_security_rules."""
        self.assertRaises(NotImplementedError,
                          self.conn.refresh_instance_security_rules,
                          instance=None)
    @mock.patch.object(objects.block_device.BlockDeviceMappingList,
                       'get_by_instance_uuid')
    def test_image_aging_image_used(self, mock_get_by_inst):
        """Cached image files survive aging while an instance still uses them."""
        self._create_vm()
        all_instances = [self.instance]
        self.conn.manage_image_cache(self.context, all_instances)
        self._cached_files_exist()
    def _get_timestamp_filename(self):
        """Return the cache timestamp filename built from self.old_time."""
        return '%s%s' % (imagecache.TIMESTAMP_PREFIX,
                         self.old_time.strftime(imagecache.TIMESTAMP_FORMAT))
    def _override_time(self):
        """Pin the image-cache timestamp filename to a fixed old datetime."""
        self.old_time = datetime.datetime(2012, 11, 22, 12, 00, 00)
        def _fake_get_timestamp_filename(fake):
            return self._get_timestamp_filename()
        self.stub_out('nova.virt.vmwareapi.imagecache.'
                      'ImageCacheManager._get_timestamp_filename',
                      _fake_get_timestamp_filename)
    def _timestamp_file_exists(self, exists=True):
        """Assert presence (or absence) of the cache timestamp marker path."""
        timestamp = ds_obj.DatastorePath(self.ds, 'vmware_base',
                                         self.fake_image_uuid,
                                         self._get_timestamp_filename() + '/')
        if exists:
            vmwareapi_fake.assertPathExists(self, str(timestamp))
        else:
            vmwareapi_fake.assertPathNotExists(self, str(timestamp))
    def _image_aging_image_marked_for_deletion(self):
        """Helper: with no instances, aging leaves files but adds a timestamp."""
        self._create_vm(uuid=uuidutils.generate_uuid())
        self._cached_files_exist()
        all_instances = []
        self.conn.manage_image_cache(self.context, all_instances)
        self._cached_files_exist()
        self._timestamp_file_exists()
    def test_image_aging_image_marked_for_deletion(self):
        """An unused cached image gets a deletion timestamp marker."""
        self._override_time()
        self._image_aging_image_marked_for_deletion()
    def _timestamp_file_removed(self):
        """Helper: spawning a new user of the image removes its timestamp."""
        self._override_time()
        self._image_aging_image_marked_for_deletion()
        self._create_vm(num_instances=2,
                        uuid=uuidutils.generate_uuid())
        self._timestamp_file_exists(exists=False)
    def test_timestamp_file_removed_spawn(self):
        """Spawn clears the pending-deletion timestamp for a reused image."""
        self._timestamp_file_removed()
    @mock.patch.object(objects.block_device.BlockDeviceMappingList,
                       'get_by_instance_uuid')
    def test_timestamp_file_removed_aging(self, mock_get_by_inst):
        """Aging removes a re-added timestamp when the image is in use."""
        self._timestamp_file_removed()
        ts = self._get_timestamp_filename()
        ts_path = ds_obj.DatastorePath(self.ds, 'vmware_base',
                                       self.fake_image_uuid, ts + '/')
        vmwareapi_fake._add_file(str(ts_path))
        self._timestamp_file_exists()
        all_instances = [self.instance]
        self.conn.manage_image_cache(self.context, all_instances)
        self._timestamp_file_exists(exists=False)
    @mock.patch.object(objects.block_device.BlockDeviceMappingList,
                       'get_by_instance_uuid')
    def test_image_aging_disabled(self, mock_get_by_inst):
        """With remove_unused_base_images=False no timestamp is written."""
        self._override_time()
        self.flags(remove_unused_base_images=False)
        self._create_vm()
        self._cached_files_exist()
        all_instances = []
        self.conn.manage_image_cache(self.context, all_instances)
        self._cached_files_exist(exists=True)
        self._timestamp_file_exists(exists=False)
    def _image_aging_aged(self, aging_time=100):
        """Helper: advance time 10s past marking and re-run the cache manager.

        :param aging_time: remove_unused_original_minimum_age_seconds value
        """
        self._override_time()
        cur_time = datetime.datetime(2012, 11, 22, 12, 00, 10)
        self.flags(remove_unused_original_minimum_age_seconds=aging_time)
        self._image_aging_image_marked_for_deletion()
        all_instances = []
        self.useFixture(utils_fixture.TimeFixture(cur_time))
        self.conn.manage_image_cache(self.context, all_instances)
    def test_image_aging_aged(self):
        """Files older than the aging threshold are removed."""
        self._image_aging_aged(aging_time=8)
        self._cached_files_exist(exists=False)
    def test_image_aging_not_aged(self):
        """Files younger than the aging threshold are kept."""
        self._image_aging_aged()
        self._cached_files_exist()
    def test_public_api_signatures(self):
        """Driver signatures must match the ComputeDriver base class."""
        self.assertPublicAPISignatures(v_driver.ComputeDriver(None), self.conn)
    def test_register_extension(self):
        """When no extension exists, it is looked up and then registered."""
        with mock.patch.object(self.conn._session, '_call_method',
                               return_value=None) as mock_call_method:
            self.conn._register_openstack_extension()
            mock_call_method.assert_has_calls(
                [mock.call(oslo_vim_util, 'find_extension',
                           constants.EXTENSION_KEY),
                 mock.call(oslo_vim_util, 'register_extension',
                           constants.EXTENSION_KEY,
                           constants.EXTENSION_TYPE_INSTANCE)])
    def test_register_extension_already_exists(self):
        """An existing extension is only looked up, never re-registered."""
        with mock.patch.object(self.conn._session, '_call_method',
                               return_value='fake-extension') as mock_find_ext:
            self.conn._register_openstack_extension()
            mock_find_ext.assert_called_once_with(oslo_vim_util,
                                                  'find_extension',
                                                  constants.EXTENSION_KEY)
    def test_list_instances(self):
        """A fresh driver lists zero instances."""
        instances = self.conn.list_instances()
        self.assertEqual(0, len(instances))
    def _setup_mocks_for_session(self, mock_init):
        """Build a driver whose mocked session gains a vim only after
        _create_session is invoked."""
        mock_init.return_value = None
        vcdriver = driver.VMwareVCDriver(None, False)
        vcdriver._session = mock.Mock()
        vcdriver._session.vim = None
        def side_effect():
            vcdriver._session.vim = mock.Mock()
        vcdriver._session._create_session.side_effect = side_effect
        return vcdriver
    def test_host_power_action(self):
        """The driver does not implement host_power_action."""
        self.assertRaises(NotImplementedError,
                          self.conn.host_power_action, 'action')
    def test_host_maintenance_mode(self):
        """The driver does not implement host_maintenance_mode."""
        self.assertRaises(NotImplementedError,
                          self.conn.host_maintenance_mode, 'host', 'mode')
    def test_set_host_enabled(self):
        """The driver does not implement set_host_enabled."""
        self.assertRaises(NotImplementedError,
                          self.conn.set_host_enabled, 'state')
    def test_datastore_regex_configured(self):
        """The configured datastore regex propagates to vmops and vc_state."""
        self.assertEqual(self.conn._datastore_regex,
                self.conn._vmops._datastore_regex)
        self.assertEqual(self.conn._datastore_regex,
                self.conn._vc_state._datastore_regex)
    @mock.patch('nova.virt.vmwareapi.ds_util.get_datastore')
    def test_datastore_regex_configured_vcstate(self, mock_get_ds_ref):
        """vc_state passes its datastore regex through to get_datastore."""
        vcstate = self.conn._vc_state
        self.conn.get_available_resource(self.node_name)
        mock_get_ds_ref.assert_called_with(
            vcstate._session, vcstate._cluster, vcstate._datastore_regex)
    def test_get_available_resource(self):
        """Resource stats reflect the fake cluster's capacity values."""
        stats = self.conn.get_available_resource(self.node_name)
        self.assertEqual(32, stats['vcpus'])
        self.assertEqual(1024, stats['local_gb'])
        self.assertEqual(1024 - 500, stats['local_gb_used'])
        self.assertEqual(1000, stats['memory_mb'])
        self.assertEqual(500, stats['memory_mb_used'])
        self.assertEqual('VMware vCenter Server', stats['hypervisor_type'])
        self.assertEqual(5001000, stats['hypervisor_version'])
        self.assertEqual(self.node_name, stats['hypervisor_hostname'])
        self.assertIsNone(stats['cpu_info'])
        self.assertEqual(
                [("i686", "vmware", "hvm"), ("x86_64", "vmware", "hvm")],
                stats['supported_instances'])
    def test_invalid_datastore_regex(self):
        """An unparsable vmware.datastore_regex raises InvalidInput."""
        # Tests if we raise an exception for Invalid Regular Expression in
        # vmware_datastore_regex
        self.flags(cluster_name='test_cluster', datastore_regex='fake-ds(01',
                   group='vmware')
        self.assertRaises(exception.InvalidInput, driver.VMwareVCDriver, None)
    def test_get_available_nodes(self):
        """Exactly one node is reported and it is self.node_name."""
        nodelist = self.conn.get_available_nodes()
        self.assertEqual(1, len(nodelist))
        self.assertIn(self.node_name, nodelist)
    @mock.patch.object(nova.virt.vmwareapi.images.VMwareImage,
                       'from_image')
    def test_spawn_with_sparse_image(self, mock_from_image):
        """Spawning from a sparse disk image yields a running VM."""
        img_info = images.VMwareImage(
            image_id=self.fake_image_uuid,
            file_size=1024,
            disk_type=constants.DISK_TYPE_SPARSE,
            linked_clone=False)
        mock_from_image.return_value = img_info
        self._create_vm()
        info = self._get_info()
        self._check_vm_info(info, power_state.RUNNING)
    def test_plug_vifs(self):
        """The driver does not implement plug_vifs."""
        # Check to make sure the method raises NotImplementedError.
        self._create_instance()
        self.assertRaises(NotImplementedError,
                          self.conn.plug_vifs,
                          instance=self.instance, network_info=None)
    def test_unplug_vifs(self):
        """The driver does not implement unplug_vifs."""
        # Check to make sure the method raises NotImplementedError.
        self._create_instance()
        self.assertRaises(NotImplementedError,
                          self.conn.unplug_vifs,
                          instance=self.instance, network_info=None)
    def _create_vif(self):
        """Build a neutron-style VIF model with one IPv4 and one IPv6 subnet."""
        gw_4 = network_model.IP(address='101.168.1.1', type='gateway')
        dns_4 = network_model.IP(address='8.8.8.8', type=None)
        subnet_4 = network_model.Subnet(cidr='101.168.1.0/24',
                                        dns=[dns_4],
                                        gateway=gw_4,
                                        routes=None,
                                        dhcp_server='191.168.1.1')
        gw_6 = network_model.IP(address='101:1db9::1', type='gateway')
        subnet_6 = network_model.Subnet(cidr='101:1db9::/64',
                                        dns=None,
                                        gateway=gw_6,
                                        ips=None,
                                        routes=None)
        network_neutron = network_model.Network(id='network-id-xxx-yyy-zzz',
                                                bridge=None,
                                                label=None,
                                                subnets=[subnet_4,
                                                         subnet_6],
                                                bridge_interface='eth0',
                                                vlan=99)
        vif_bridge_neutron = network_model.VIF(id='new-vif-xxx-yyy-zzz',
                                               address='ca:fe:de:ad:be:ef',
                                               network=network_neutron,
                                               type=None,
                                               devname='tap-xxx-yyy-zzz',
                                               ovs_interfaceid='aaa-bbb-ccc')
        return vif_bridge_neutron
def _validate_interfaces(self, id, index, num_iface_ids):
vm = self._get_vm_record()
found_iface_id = False
extras = vm.get("config.extraConfig")
key = "nvp.iface-id.%s" % index
num_found = 0
for c in extras.OptionValue:
if c.key.startswith("nvp.iface-id."):
num_found += 1
if c.key == key and c.value == id:
found_iface_id = True
self.assertTrue(found_iface_id)
self.assertEqual(num_iface_ids, num_found)
    def _attach_interface(self, vif):
        """Helper: attach a VIF and validate it landed in slot 1 of 2."""
        self.conn.attach_interface(self.instance, self.image, vif)
        self._validate_interfaces(vif['id'], 1, 2)
    def test_attach_interface(self):
        """Attaching a VIF to a live VM records its nvp.iface-id option."""
        self._create_vm()
        vif = self._create_vif()
        self._attach_interface(vif)
    def test_attach_interface_with_exception(self):
        """A task failure during attach surfaces as InterfaceAttachFailed."""
        self._create_vm()
        vif = self._create_vif()
        with mock.patch.object(self.conn._session, '_wait_for_task',
                               side_effect=Exception):
            self.assertRaises(exception.InterfaceAttachFailed,
                              self.conn.attach_interface,
                              self.instance, self.image, vif)
    @mock.patch.object(vif, 'get_network_device',
                       return_value='fake_device')
    def _detach_interface(self, vif, mock_get_device):
        """Helper: attach then detach a VIF; slot 1 should read 'free'."""
        self._create_vm()
        self._attach_interface(vif)
        self.conn.detach_interface(self.instance, vif)
        self._validate_interfaces('free', 1, 2)
    def test_detach_interface(self):
        """Detaching an attached VIF frees its interface slot."""
        vif = self._create_vif()
        self._detach_interface(vif)
    def test_detach_interface_and_attach(self):
        """A freed interface slot can be reused by a subsequent attach."""
        vif = self._create_vif()
        self._detach_interface(vif)
        self.conn.attach_interface(self.instance, self.image, vif)
        self._validate_interfaces(vif['id'], 1, 2)
    def test_detach_interface_no_device(self):
        """Detach without a resolvable network device raises NotFound."""
        self._create_vm()
        vif = self._create_vif()
        self._attach_interface(vif)
        self.assertRaises(exception.NotFound, self.conn.detach_interface,
                          self.instance, vif)
    def test_detach_interface_no_vif_match(self):
        """Detach with a VIF id that was never attached raises NotFound."""
        self._create_vm()
        vif = self._create_vif()
        self._attach_interface(vif)
        vif['id'] = 'bad-id'
        self.assertRaises(exception.NotFound, self.conn.detach_interface,
                          self.instance, vif)
    @mock.patch.object(vif, 'get_network_device',
                       return_value='fake_device')
    def test_detach_interface_with_exception(self, mock_get_device):
        """A task failure during detach surfaces as InterfaceDetachFailed."""
        self._create_vm()
        vif = self._create_vif()
        self._attach_interface(vif)
        with mock.patch.object(self.conn._session, '_wait_for_task',
                               side_effect=Exception):
            self.assertRaises(exception.InterfaceDetachFailed,
                              self.conn.detach_interface,
                              self.instance, vif)
    def test_resize_to_smaller_disk(self):
        """Resizing to a flavor with a smaller disk triggers a fault rollback."""
        self._create_vm(instance_type='m1.large')
        flavor = self._get_instance_type_by_name('m1.small')
        self.assertRaises(exception.InstanceFaultRollback,
                          self.conn.migrate_disk_and_power_off, self.context,
                          self.instance, 'fake_dest', flavor, None)
    def test_spawn_attach_volume_vmdk(self):
        """Spawn with an attached vmdk volume (image ref present)."""
        self._spawn_attach_volume_vmdk()
    def test_spawn_attach_volume_vmdk_no_image_ref(self):
        """Spawn with an attached vmdk volume but no image ref (boot-from-volume)."""
        self._spawn_attach_volume_vmdk(set_image_ref=False)
    def test_pause(self):
        """The VMwareVCDriver does not implement pause."""
        # Tests that the VMwareVCDriver does not implement the pause method.
        self._create_instance()
        self.assertRaises(NotImplementedError, self.conn.pause, self.instance)
    def test_unpause(self):
        """The VMwareVCDriver does not implement unpause."""
        # Tests that the VMwareVCDriver does not implement the unpause method.
        self._create_instance()
        self.assertRaises(NotImplementedError, self.conn.unpause,
                          self.instance)
    def test_datastore_dc_map(self):
        """Spawning populates the datastore->datacenter mapping cache."""
        self.assertEqual({}, ds_util._DS_DC_MAPPING)
        self._create_vm()
        # currently there are 2 data stores
        self.assertEqual(2, len(ds_util._DS_DC_MAPPING))
    def test_pre_live_migration(self):
        """The driver does not implement pre_live_migration."""
        self.assertRaises(NotImplementedError,
                          self.conn.pre_live_migration, self.context,
                          'fake_instance', 'fake_block_device_info',
                          'fake_network_info', 'fake_disk_info')
    def test_live_migration(self):
        """The driver does not implement live_migration."""
        self.assertRaises(NotImplementedError,
                          self.conn.live_migration, self.context,
                          'fake_instance', 'fake_dest', 'fake_post_method',
                          'fake_recover_method')
    def test_rollback_live_migration_at_destination(self):
        """The driver does not implement rollback_live_migration_at_destination."""
        self.assertRaises(NotImplementedError,
                          self.conn.rollback_live_migration_at_destination,
                          self.context, 'fake_instance', 'fake_network_info',
                          'fake_block_device_info')
    def test_post_live_migration(self):
        """post_live_migration is a no-op returning None."""
        self.assertIsNone(self.conn.post_live_migration(self.context,
                                                        'fake_instance',
                                                        'fake_block_device_info'))
    def test_get_instance_disk_info_is_implemented(self):
        """get_instance_disk_info must be implemented (may return None)."""
        # Ensure that the method has been implemented in the driver
        instance = objects.Instance()
        try:
            disk_info = self.conn.get_instance_disk_info(instance)
            self.assertIsNone(disk_info)
        except NotImplementedError:
            self.fail("test_get_instance_disk_info() should not raise "
                      "NotImplementedError")
    def test_get_host_uptime(self):
        """The driver does not implement get_host_uptime."""
        self.assertRaises(NotImplementedError,
                          self.conn.get_host_uptime)
    def test_pbm_wsdl_location(self):
        """_update_pbm_location picks up the configured PBM WSDL path."""
        self.flags(pbm_enabled=True,
                   pbm_wsdl_location='fira',
                   group='vmware')
        self.conn._update_pbm_location()
        self.assertEqual('fira', self.conn._session._pbm_wsdl_loc)
        self.assertIsNone(self.conn._session._pbm)
    def test_nodename(self):
        """Node names are '<cluster moref>.<vCenter uuid>'."""
        test_mor = "domain-26"
        self.assertEqual("%s.%s" % (test_mor,
                                    vmwareapi_fake._FAKE_VCENTER_UUID),
                         self.conn._create_nodename(test_mor),
                         "VC driver failed to create the proper node name")
    @mock.patch.object(driver.LOG, 'warning')
    def test_min_version(self, mock_warning):
        """No warning is logged when vCenter meets the minimum version."""
        self.conn._check_min_version()
        self.assertFalse(mock_warning.called)
    @mock.patch.object(driver.LOG, 'warning')
    @mock.patch.object(oslo_vim_util, 'get_vc_version',
                       return_value='5.0.0')
    def test_invalid_min_version(self, mock_version, mock_warning):
        """A vCenter below MIN_VC_VERSION logs a warning citing that version."""
        self.conn._check_min_version()
        # assert that the min version is in a warning message
        expected_arg = {'version': constants.MIN_VC_VERSION}
        version_arg_found = False
        for call in mock_warning.call_args_list:
            if call[0][1] == expected_arg:
                version_arg_found = True
                break
        self.assertTrue(version_arg_found)
| |
#!/usr/bin/env python
"""
VirFindR. Presence/absence of virulence factors in draft genomes
"""
# Copyright 2013 Mitchell Stanton-Cook Licensed under the
# Educational Community License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may
# obtain a copy of the License at
#
# http://www.osedu.org/licenses/ECL-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS"
# BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
import sys, os, traceback, argparse
import time
import glob
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import numpy as np
from scipy.cluster.hierarchy import linkage, dendrogram
from scipy.spatial.distance import pdist
from Bio.Blast import NCBIXML
# Module metadata; 'epi' is reused as the argparse epilog in __main__.
__author__ = "Mitchell Stanton-Cook"
__licence__ = "ECL"
__version__ = "2.0"
__email__ = "m.stantoncook@gmail.com"
epi = "Licence: "+ __licence__ + " by " + __author__ + " <" + __email__ + ">"
USAGE = "VirFindR -h"
def prepare_db(db_path):
    """
    Given a VF db, extract all possible virulence factor ids and classes.

    A VF db is a mfa file whose headers look like:
        >ident, gene id, annotation, organism [class]
    The gene id (second comma field) and the bracketed class are collected.
    Exits the program (status 1) on the first duplicated gene id found.

    :param db_path: the fullpath to a VF db (mfa file)

    :type db_path: string

    :rtype: two parallel lists: vf ids and their classes
    """
    with open(db_path) as fin:
        lines = fin.readlines()
    vfs_list, vfs_class = [], []
    # Get the IDS and the classes
    for l in lines:
        if l.startswith('>'):
            vfs_list.append(l.split(',')[1].strip())
            vfs_class.append(l.split('[')[-1].split(']')[0].strip())
    # Check for duplicates
    unique = list(set(vfs_list))
    print "Investigating "+str(len(unique))+" features"
    for e in unique:
        if vfs_list.count(e) != 1:
            # NOTE: bails out on the first duplicate, later ones unreported
            print "Duplicates found for: ",e
            print "Fix duplicates"
            sys.exit(1)
    return vfs_list, vfs_class
def order_inputs(order_index_file, dir_listing):
    """
    Given an order index file, maintain this order in the matrix plot

    THIS IMPLIES NO CLUSTERING! Typically used when you already have a
    phylogenetic tree. Each line of the index file is a strain id; input
    paths are matched by the filename segment before the first '_' (or
    before the first '.' when the name has no underscore).

    :param order_index_file: full path to a ordered file (1 entry per line)
    :param dir_listing: a glob.glob dir listing as a list

    :type order_index_file: string
    :type dir_listing: list

    :rtype: list of updated glob.glob dir listing to match order specified
    """
    with open(order_index_file) as fin:
        lines = fin.readlines()
    if len(lines) != len(dir_listing):
        print "In order_inputs(). Length mismatch"
        sys.exit(1)
    ordered = []
    for l in lines:
        cord = l.strip()
        for d in dir_listing:
            # Derive the strain id from the basename of the input path
            tmp = d.strip().split('/')[-1]
            if tmp.find('_') == -1:
                cur = tmp.split('.')[0]
            else:
                cur = tmp.split("_")[0]
            if cur == cord:
                ordered.append(d)
                break
    # Every index entry must have matched exactly one input file
    if len(ordered) != len(dir_listing):
        print "In order_inputs(). Not 1-1 matching. Typo?"
        print ordered
        print dir_listing
        sys.exit(1)
    return ordered
def make_BLASTDB(fasta_file):
    """
    Given a fasta_file, generate a nucleotide BLAST database

    Database files end up in DBs/ of the working directory.

    :param fasta_file: full path to a fasta file

    :type fasta_file: string

    :rtype: the strain id (must be delimited by '_')
    """
    os.system("makeblastdb -dbtype nucl -in %s" % (fasta_file))
    os.system("mv %s.nhr %s.nin %s.nsq DBs" %
                    (fasta_file, fasta_file, fasta_file))
    os.system("cp %s DBs" % (fasta_file))
    # Get the strain ID
    # assumes a path of the form '<dir>/<strain>_<rest>' - TODO confirm
    return fasta_file.split('_')[0].split('/')[1]
def run_BLAST(query, database):
    """
    Search the given database for the query virulence factors with blastn.

    Runs single-threaded with the dust filter off and a single target
    sequence, writing XML output to blast.xml in the working directory.

    :param query: the fullpath to the vf.mfa
    :param database: the full path of the databse to search for the vf in

    :type query: string
    :type database: string
    """
    cmd = ("blastn -query %s -db %s -num_threads 1 "
           "-outfmt 5 -max_target_seqs 1 -dust no -out blast.xml"
           % (query, database))
    os.system(cmd)
def parse_BLAST(blast_results, tol):
    """
    Collect virulence factor names whose BLAST hits pass the cutoff.

    A hit is kept when hsp.identities/float(record.query_length) >= tol.
    One name is appended per qualifying HSP.

    :param blast_results: full path to a blast run output file (in XML format)
    :param tol: the cutoff threshold (see above for explaination)

    :type blast_results: string
    :type tol: string

    :rtype: list of satifying hit names
    """
    vf_hits = []
    with open(blast_results) as handle:
        for record in NCBIXML.parse(handle):
            query_len = float(record.query_length)
            for alignment in record.alignments:
                for hsp in alignment.hsps:
                    vf_name = record.query.split(',')[1].strip()
                    if hsp.identities / query_len >= tol:
                        vf_hits.append(vf_name.strip())
    return vf_hits
def build_matrix_row(all_vfs, accepted_hits, score=None):
    """
    Populate a row given all possible hits, accepted hits and an optional score

    Factors present in accepted_hits are recorded with ``score`` (0.0 when
    score is None); absent factors are recorded as 0.5.

    :param all_vfs: a list of all virulence factor ids
    :param accepted_hits: a list of hits that passed the cutoff
    :param score: the value recorded for a present factor (default = None,
                  which implies 0.0)

    :type all_vfs: list
    :type accepted_hits: list
    :type score: float

    :rtype: a list of floats
    """
    if score is None:
        score = 0.0
    # Set membership is O(1) per lookup vs O(len(accepted_hits)) for a list
    accepted = set(accepted_hits)
    return [score if factor in accepted else 0.5 for factor in all_vfs]
def match_matrix_rows(ass_mat, cons_mat):
    """
    Pair up rows of two matrices by their leading ID element.

    For each row of ass_mat, the first cons_mat row with the same leading
    element is selected; both rows are returned with the ID stripped.

    :param ass_mat: a 2D list of scores
    :param cons_mat: a 2D list scores

    :type ass_mat: list
    :type cons_mat: list

    :rtype: 2 matricies (2D lists)
    """
    paired_ass, paired_cons = [], []
    for ass_row in ass_mat:
        for cons_row in cons_mat:
            if ass_row[0] == cons_row[0]:
                paired_ass.append(ass_row[1:])
                paired_cons.append(cons_row[1:])
                break
    return paired_ass, paired_cons
def strip_id_from_matrix(mat):
    """
    Remove the ID (1st row element) form a matrix

    :param mat: a 2D list

    :rtype: a 2D list with the 1st row elelemnt (ID) removed
    """
    # Comprehension replaces the manual index loop + append
    return [row[1:] for row in mat]
def cluster_matrix(matrix, y_labels):
    """
    From a matrix, generate a distance matrix & perform hierarchical clustering

    Saves the dendrogram to dendrogram.png and reorders the matrix rows to
    follow the dendrogram leaf order.

    :param matrix: a numpy matrix of scores
    :param y_labels: the virulence factor ids for all row elements

    :rtype: the reordered matrix and the reordered labels
    """
    print "Clustering the matrix"
    # Clear any matplotlib formatting
    plt.clf()
    fig = plt.figure()
    ax = fig.add_subplot(111)
    # Hide x labels/ticks
    ax.set_yticklabels([])
    ax.set_yticks([])
    plt.xticks(fontsize=6)
    # Pairwise distances (default metric), then single-linkage clustering
    Y = pdist(matrix)
    Z = linkage(Y)
    dend = dendrogram(Z,labels=y_labels)
    plt.savefig("dendrogram.png", dpi=600)
    #Reshape
    # Reorder matrix rows to match the dendrogram leaf order
    ordered_index = dend['leaves']
    updated_ylabels = dend['ivl']
    tmp = []
    for i in range(0, len(ordered_index)):
        tmp.append(list(matrix[ordered_index[i],:]))
    matrix = np.array(tmp)
    return matrix, updated_ylabels
def plot_matrix(matrix, strain_labels, vfs_classes, gene_labels,
                show_gene_labels, color_index, aspect='auto'):
    """
    Plot the VF hit matrix

    Draws the score matrix with matshow, shades one vertical span per VF
    class, and saves the figure to results.png.

    :param matrix: the numpy matrix of scores
    :param strain_labels: the strain (y labels)
    :param vfs_classes: the VFS class (in mfa header [class])
    :param gene_labels: the gene labels
    :param show_gene_labels: wheter top plot the gene labels
    :param color_index: for a single class, choose a specific color
    """
    # One RGB tuple per VF class, used for the shaded background spans
    colors = [(0/255.0,0/255.0,0/255.0),
              (255/255.0,102/255.0,0/255.0),
              (170/255.0,255/255.0,0/255.0),
              (255/255.0,0/255.0,170/255.0),
              (0/255.0,102/255.0,255/255.0),
              (156/255.0,0/255.0,62/255.0),
              (203/255.0,168/255.0,255/255.0),
              (156/255.0,131/255.0,103/255.0),
              (255/255.0,170/255.0,0/255.0),
              (0/255.0,255/255.0,204/255.0),
              (0/255.0,0/255.0,255/255.0),
              (0/255.0,156/255.0,41/255.0),
              (238/255.0,255/255.0,168/255.0),
              (168/255.0,215/255.0,255/255.0),
              (103/255.0,156/255.0,131/255.0),
              (255/255.0,0/255.0,0/255.0),
              (0/255.0,238/255.0,255/255.0),
              (238/255.0,0/255.0,255/255.0),
              (156/255.0,145/255.0,0/255.0),
              (255/255.0,191/255.0,168/255.0),
              (255/255.0,168/255.0,180/255.0),
              (156/255.0,103/255.0,138/255.0)]
    if color_index != None:
        colors = [colors[int(color_index)]]
    # Build the regions to be shaded differently
    regions, prev = [], 0
    for i in xrange(0, len(vfs_classes)-1):
        if vfs_classes[i] != vfs_classes[i+1]:
            regions.append([prev+0.5, i+0.5])
            prev = i
    regions.append([prev+0.5, len(vfs_classes)-0.5])
    regions[0][0] = regions[0][0]-1.0
    plt.clf()
    fig = plt.figure()
    ax = fig.add_subplot(111)
    # aspect auto to widen
    ax.matshow(matrix, cmap=cm.gray, aspect=aspect)
    # Make sure every strain
    ax.yaxis.set_major_locator(MultipleLocator(1))
    ax.yaxis.set_major_formatter(FormatStrFormatter('%s'))
    ax.set_yticklabels(strain_labels)
    # Per-gene x ticks only when the matrix is narrow enough to read
    if len(gene_labels) < 999:
        ax.xaxis.set_major_locator(MultipleLocator(1))
        ax.xaxis.set_major_formatter(FormatStrFormatter('%s'))
        ax.xaxis.grid(False)
        if show_gene_labels:
            ax.set_xticklabels([''] +gene_labels)#, rotation=90)
    for i in xrange(0, len(regions)):
        plt.axvspan(regions[i][0], regions[i][1], facecolor=colors[i], \
                        alpha=0.1)
    if show_gene_labels:
        ax.tick_params(axis='both', which='both', labelsize=6, direction='out',\
                       labelleft='on', labelright='off', \
                       labelbottom='off', labeltop='on', \
                       left='on', right='off', bottom='off', top='on')
    else:
        ax.tick_params(axis='both', which='both', labelsize=6, direction='out',\
                       labelleft='on', labelright='off', \
                       labelbottom='off', labeltop='off', \
                       left='on', right='off', bottom='off', top='off')
    plt.xticks(rotation=90)
    ax.grid(True)
    fig.set_size_inches(10.0,12.0, dpi=600)
    plt.savefig("results.png", bbox_inches='tight',dpi=600)
def do_run(vf_db, data_path, match_score, order, cutoff, vfs_list):
    """
    Perform a VirFindR run

    BLASTs the VF db against every *.fa genome under data_path and builds
    one score row per genome (row[0] is the strain id).

    :param vf_db: path to the virulence factor mfa file
    :param data_path: directory containing *.fa genome files
    :param match_score: matrix value recorded for an accepted hit
    :param order: optional order index file (disables clustering)
    :param cutoff: identity-fraction cutoff passed to parse_BLAST
    :param vfs_list: all virulence factor ids

    :rtype: the score matrix (2D list) and the strain id labels
    """
    matrix, y_label = [], []
    in_files = glob.glob(data_path+"/*.fa")
    # Reorder if requested
    if order is not None:
        in_files = order_inputs(order, in_files)
    for genome_file in in_files:
        # 'strain_id' was previously named 'id', shadowing the builtin
        strain_id = make_BLASTDB(genome_file)
        y_label.append(strain_id)
        db_loc = genome_file.split('/')[-1]
        run_BLAST(vf_db, "DBs/"+db_loc)
        accepted_hits = parse_BLAST("blast.xml", float(cutoff))
        row = build_matrix_row(vfs_list, accepted_hits, match_score)
        row.insert(0, strain_id)
        matrix.append(row)
    return matrix, y_label
def main():
default_no_hit = 0.5
global args
try:
os.mkdir("DBs")
except:
print "A DBs directory exists. Overwriting"
vfs_list, vfs_class = prepare_db(args.db)
results_a, ylab = do_run(args.db, args.ass, -0.15, args.index, \
args.tol, vfs_list)
if args.cons != None:
results_m, _ = do_run(args.db, args.cons, -0.85, args.index, \
args.tol, vfs_list)
if len(results_m) == len(results_a):
results_a, results_m = match_matrix_rows(results_a, results_m)
default_no_hit = 1.0
matrix = np.array(results_a) + np.array(results_m)
else:
print "Assemblies and mapping consensuses don't match"
sys.exit(1)
else:
args.reshape = False
results_a = strip_id_from_matrix(results_a)
matrix = np.array(results_a)
# cluster if not ordered
if args.index == None:
matrix, ylab = cluster_matrix(matrix, ylab)
np.savetxt("matrix.csv", matrix, delimiter=",")
# Add the buffer
newrow = [default_no_hit] * matrix.shape[1]
matrix = np.vstack([newrow, matrix])
matrix = np.vstack([newrow, matrix])
#Handle new option to only show presence
if args.reshape == True:
for x in np.nditer(matrix, op_flags=['readwrite']):
if x < 0.99:
x[...] = -1.0
ylab = ['', '']+ ylab
plot_matrix(matrix, ylab, vfs_class, vfs_list, args.label_genes, args.color)
# Handle labels here
#print vfs_class
os.system("rm blast.xml")
os.system("rm DBs/*")
if __name__ == '__main__':
    try:
        start_time = time.time()
        # The second paragraph of the module docstring doubles as the CLI summary.
        desc = __doc__.split('\n\n')[1].strip()
        parser = argparse.ArgumentParser(description=desc,epilog=epi)
        parser.add_argument('-v', '--verbose', action='store_true', \
                                default=False, help='verbose output')
        parser.add_argument('-o','--output',action='store', \
                                help='[Required] output prefix')
        parser.add_argument('-d', '--db', action='store', \
                                help='[Required] full path database fasta file')
        parser.add_argument('-a', '--ass', action='store', \
                                help='[Required] full path to dir containing '+\
                                     'assemblies')
        parser.add_argument('-t', '--tol', action='store', default=0.95, \
                                help='Similarity cutoff (default = 0.95)')
        parser.add_argument('-m', '--cons', action='store', default=None, \
                                help=('full path to dir containing consensuses'\
                                        +' (default = None)'))
        parser.add_argument('-i', '--index', action='store', default=None, \
                                help=('maintain order of index (no cluster)' \
                                        +' (default = None)'))
        parser.add_argument('-l', '--label_genes', action='store_true', \
                                default=False, help=('label the x axis'
                                        +' (default = False)'))
        parser.add_argument('-c', '--color', action='store', default=None, \
                                help='color index (default = None)')
        # NOTE: -r is store_false with default True, i.e. passing -r DISABLES
        # differentiation between mapping and assembly hits.
        parser.add_argument('-r', '--reshape', action='store_false', \
                                default=True, help='Differentiate '
                                 'between mapping and assembly hits')
        args = parser.parse_args()
        # "Required" options are enforced manually so one message serves all.
        msg = "Missing required arguments.\nPlease run: SeqFindR -h"
        if args.db == None:
            print msg
            sys.exit(1)
        if args.ass == None:
            print msg
            sys.exit(1)
        if args.output == None:
            print msg
            sys.exit(1)
        if args.verbose: print "Executing @ " + time.asctime()
        main()
        if args.verbose: print "Ended @ " + time.asctime()
        if args.verbose: print 'total time in minutes:',
        if args.verbose: print (time.time() - start_time) / 60.0
        sys.exit(0)
    except KeyboardInterrupt, e: # Ctrl-C
        raise e
    except SystemExit, e: # sys.exit()
        raise e
    except Exception, e:
        # Any unexpected failure: report with traceback and hard-exit.
        print 'ERROR, UNEXPECTED EXCEPTION'
        print str(e)
        traceback.print_exc()
        os._exit(1)
| |
import os
import pyperclip
from AwsProcessor import AwsProcessor
from stdplusAwsHelpers.AwsConnectionFactory import AwsConnectionFactory
from CommandArgumentParser import CommandArgumentParser
from stdplus import *
from pprint import pprint
class WrappedEvent:
    """Uniform view of a CloudFormation stack event.

    Exposes logical_id / resource_status / resource_status_reason so events
    can be printed through the same code path as outputs and parameters.
    """
    def __init__(self,event):
        self.event = event
        self.resource_status_reason = event.resource_status_reason
        # Display the logical resource id together with its status.
        self.resource_status = event.logical_resource_id + ":" + event.resource_status
        self.logical_id = event.id
class WrappedOutput:
    """Uniform view of a CloudFormation stack output.

    Maps OutputKey / OutputValue / Description onto the shared
    logical_id / resource_status / resource_status_reason interface.
    """
    def __init__(self,output):
        self.output = output
        # Description is optional on outputs; fall back to an empty string.
        self.resource_status_reason = defaultifyDict(output,'Description','')
        self.resource_status = output['OutputValue']
        self.logical_id = output['OutputKey']
class WrappedParameter:
    """Uniform view of a CloudFormation stack parameter.

    Maps ParameterKey / ParameterValue / Description onto the shared
    logical_id / resource_status / resource_status_reason interface.
    """
    def __init__(self,parameter):
        self.parameter = parameter
        # Description is optional on parameters; fall back to an empty string.
        self.resource_status_reason = defaultifyDict(parameter,'Description','')
        self.resource_status = parameter['ParameterValue']
        self.logical_id = parameter['ParameterKey']
class AwsStack(AwsProcessor):
    """Interactive shell node scoped to one CloudFormation stack.

    Wraps a boto3 stack resource and indexes its resources, events, outputs
    and parameters both by logical name and by printed integer index, so the
    do_* commands can accept either form.
    """
    def __init__(self,stack,logicalName,parent):
        """Construct an AwsStack command processor"""
        AwsProcessor.__init__(self,parent.raw_prompt + "/stack:" + logicalName,parent)
        self.wrappedStack = self.wrapStack(stack)
        self.printStack(self.wrappedStack)
    def wrapStackEvents(self,stack):
        # Index the stack's events by an integer counter, wrapped for display.
        events = {}
        i = 0;
        if None != stack.events:
            for event in stack.events.all():
                events[i] = WrappedEvent(event)
                i = i + 1
        return events;
    def wrapStackParameters(self,stack):
        # Index the stack's parameters (sorted for a stable order) by counter.
        parameters = {}
        i = 0
        if None != stack.parameters:
            for parameter in iter(sorted(stack.parameters)):
                parameters[i] = WrappedParameter(parameter)
                i = i + 1
        return parameters
    def wrapStackOutputs(self,stack):
        # Index the stack's outputs by an integer counter, wrapped for display.
        outputs = {}
        i = 0
        if None != stack.outputs:
            for output in stack.outputs:
                outputs[i] = WrappedOutput(output)
                i = i + 1
        return outputs
    def wrapStack(self,stack):
        # Build two parallel lookup tables over the stack's contents:
        #   resourcesByTypeName:  type -> logical id -> resource
        #   resourcesByTypeIndex: type -> print index -> resource
        # Events/outputs/parameters are integer-keyed in BOTH tables, since
        # their wrap* helpers already key by counter.
        result = {};
        result['rawStack'] = stack;
        resourcesByType = {};
        for resource in stack.resource_summaries.all():
            if not resource.resource_type in resourcesByType:
                resourcesByType[resource.resource_type] = {}
            resourcesByType[resource.resource_type][resource.logical_id] = resource;
        resourcesByType['events'] = self.wrapStackEvents(stack)
        resourcesByType['outputs'] = self.wrapStackOutputs(stack)
        resourcesByType['parameters'] = self.wrapStackParameters(stack)
        result['resourcesByTypeName'] = resourcesByType;
        resourcesByTypeIndex = {};
        for resourceType, resources in resourcesByType.items():
            resourcesByTypeIndex[resourceType] = {};
            index = 0
            for name,resource in resources.items():
                resourcesByTypeIndex[resourceType][index] = resource
                index += 1
        result['resourcesByTypeIndex'] = resourcesByTypeIndex;
        return result
    def printStack(self,wrappedStack,include=None,filters=["*"]):
        """Prints the stack"""
        # NOTE(review): mutable default for 'filters' is shared across calls;
        # harmless while it is never mutated, but worth confirming.
        rawStack = wrappedStack['rawStack']
        print "==== Stack {} ====".format(rawStack.name)
        print "Status: {} {}".format(rawStack.stack_status,defaultify(rawStack.stack_status_reason,''))
        for resourceType, resources in wrappedStack['resourcesByTypeIndex'].items():
            if resourceType in AwsProcessor.resourceTypeAliases:
                resourceType = AwsProcessor.resourceTypeAliases[resourceType];
            if (None == include or resourceType in include) and len(resources):
                print "== {}:".format(resourceType)
                # First pass: measure column widths (capped at 50) for alignment.
                logicalIdWidth = 1
                resourceStatusWidth = 1
                resourceStatusReasonWidth = 1
                for index, resource in resources.items():
                    logicalIdWidth = max(logicalIdWidth,len(resource.logical_id))
                    resourceStatusWidth = min(50,max(resourceStatusWidth,len(resource.resource_status)))
                    resourceStatusReasonWidth = min(50,max(resourceStatusReasonWidth,len(defaultify(resource.resource_status_reason,''))))
                frm = "  {{0:3d}}: {{1:{0}}} {{2:{1}}} {{3}}".format(logicalIdWidth,resourceStatusWidth)
                # Second pass: print rows whose logical id matches the filters.
                for index, resource in resources.items():
                    if fnmatches(resource.logical_id.lower(),filters):
                        print frm.format(index,resource.logical_id,
                                         elipsifyMiddle(repr(resource.resource_status),50),
                                         elipsifyMiddle(repr(defaultify(resource.resource_status_reason,'')),150))
    def do_browse(self,args):
        """Open the current stack in a browser."""
        rawStack = self.wrappedStack['rawStack']
        # NOTE(review): hard-coded to us-west-2 and macOS Chrome ('open -a').
        os.system("open -a \"Google Chrome\" https://us-west-2.console.aws.amazon.com/cloudformation/home?region=us-west-2#/stack/detail?stackId={}".format(rawStack.stack_id))
    def do_refresh(self,args):
        """Refresh view of the current stack. refresh -h for detailed help"""
        # Re-fetch the stack from CloudFormation and rebuild the lookup tables.
        self.wrappedStack = self.wrapStack(AwsConnectionFactory.instance.getCfResource().Stack(self.wrappedStack['rawStack'].name))
    def do_print(self,args):
        """Print the current stack. print -h for detailed help"""
        parser = CommandArgumentParser("print")
        parser.add_argument('-r','--refresh',dest='refresh',action='store_true',help='refresh view of the current stack')
        parser.add_argument('-i','--include',dest='include',default=None,nargs='+',help='resource types to include')
        parser.add_argument(dest='filters',nargs='*',default=["*"],help='Filter stacks');
        args = vars(parser.parse_args(args))
        if args['refresh']:
            self.do_refresh('')
        self.printStack(self.wrappedStack,args['include'],args['filters'])
    def do_resource(self,args):
        """Go to the specified resource. resource -h for detailed help"""
        parser = CommandArgumentParser("resource")
        parser.add_argument('-i','--logical-id',dest='logical-id',help='logical id of the child resource');
        args = vars(parser.parse_args(args))
        stackName = self.wrappedStack['rawStack'].name
        logicalId = args['logical-id']
        self.stackResource(stackName,logicalId)
    def do_asg(self,args):
        """Go to the specified auto scaling group. asg -h for detailed help"""
        parser = CommandArgumentParser("asg")
        parser.add_argument(dest='asg',help='asg index or name');
        args = vars(parser.parse_args(args))
        print "loading auto scaling group {}".format(args['asg'])
        # Accept either a numeric print-index or a logical name.
        try:
            index = int(args['asg'])
            asgSummary = self.wrappedStack['resourcesByTypeIndex']['AWS::AutoScaling::AutoScalingGroup'][index]
        except:
            asgSummary = self.wrappedStack['resourcesByTypeName']['AWS::AutoScaling::AutoScalingGroup'][args['asg']]
        self.stackResource(asgSummary.stack_name,asgSummary.logical_id)
    def do_eni(self,args):
        """Go to the specified eni. eni -h for detailed help."""
        parser = CommandArgumentParser("eni")
        parser.add_argument(dest='eni',help='eni index or name');
        args = vars(parser.parse_args(args))
        print "loading eni {}".format(args['eni'])
        try:
            index = int(args['eni'])
            eniSummary = self.wrappedStack['resourcesByTypeIndex']['AWS::EC2::NetworkInterface'][index]
        except ValueError:
            eniSummary = self.wrappedStack['resourcesByTypeName']['AWS::EC2::NetworkInterface'][args['eni']]
        pprint(eniSummary)
        self.stackResource(eniSummary.stack_name,eniSummary.logical_id)
    def do_logGroup(self,args):
        """Go to the specified log group. logGroup -h for detailed help"""
        parser = CommandArgumentParser("logGroup")
        parser.add_argument(dest='logGroup',help='logGroup index or name');
        args = vars(parser.parse_args(args))
        print "loading log group {}".format(args['logGroup'])
        try:
            index = int(args['logGroup'])
            logGroup = self.wrappedStack['resourcesByTypeIndex']['AWS::Logs::LogGroup'][index]
        except:
            logGroup = self.wrappedStack['resourcesByTypeName']['AWS::Logs::LogGroup'][args['logGroup']]
        print "logGroup:{}".format(logGroup)
        self.stackResource(logGroup.stack_name,logGroup.logical_id)
    def do_role(self,args):
        """Go to the specified log group. role -h for detailed help"""
        parser = CommandArgumentParser("role")
        parser.add_argument(dest='role',help='role index or name');
        args = vars(parser.parse_args(args))
        print "loading role {}".format(args['role'])
        try:
            index = int(args['role'])
            role = self.wrappedStack['resourcesByTypeIndex']['AWS::IAM::Role'][index]
        except:
            role = self.wrappedStack['resourcesByTypeName']['AWS::IAM::Role'][args['role']]
        print "role:{}".format(role)
        self.stackResource(role.stack_name,role.logical_id)
    def do_stack(self,args):
        """Go to the specified stack. stack -h for detailed help."""
        parser = CommandArgumentParser("stack")
        parser.add_argument(dest='stack',help='stack index or name');
        args = vars(parser.parse_args(args))
        print "loading stack {}".format(args['stack'])
        try:
            index = int(args['stack'])
            stackSummary = self.wrappedStack['resourcesByTypeIndex']['AWS::CloudFormation::Stack'][index]
        except ValueError:
            stackSummary = self.wrappedStack['resourcesByTypeName']['AWS::CloudFormation::Stack'][args['stack']]
        self.stackResource(stackSummary.stack_name,stackSummary.logical_id)
    def do_template(self,args):
        """Print the template for the current stack. template -h for detailed help"""
        parser = CommandArgumentParser("template")
        args = vars(parser.parse_args(args))
        print "reading template for stack."
        rawStack = self.wrappedStack['rawStack']
        template = AwsConnectionFactory.getCfClient().get_template(StackName=rawStack.name)
        print template['TemplateBody']
    def getWrappedItem(self,typeName,IdOrName):
        # Shared index-or-name lookup used by getOutputs (and usable by do_*).
        try:
            index = int(IdOrName)
            return self.wrappedStack['resourcesByTypeIndex'][typeName][index]
        except ValueError:
            return self.wrappedStack['resourcesByTypeName'][typeName][IdOrName]
    def getOutputs(self,outputs):
        # Resolve each requested output (by index or name) to its value.
        values = []
        for output in outputs:
            values.append(self.getWrappedItem('outputs',output).resource_status)
        return values
    def do_copy(self,args):
        """Copy specified id to stack. copy -h for detailed help."""
        parser = CommandArgumentParser("copy")
        parser.add_argument('-a','--asg',dest='asg',nargs='+',required=False,default=[],help='Copy specified ASG info.')
        parser.add_argument('-o','--output',dest='output',nargs='+',required=False,default=[],help='Copy specified output info.')
        args = vars(parser.parse_args(args))
        values = []
        if args['output']:
            values.extend(self.getOutputs(args['output']))
        if args['asg']:
            for asg in args['asg']:
                try:
                    index = int(asg)
                    asgSummary = self.wrappedStack['resourcesByTypeIndex']['AWS::AutoScaling::AutoScalingGroup'][index]
                except:
                    asgSummary = self.wrappedStack['resourcesByTypeName']['AWS::AutoScaling::AutoScalingGroup'][asg]
                values.append(asgSummary.physical_resource_id)
        print("values:{}".format(values))
        # Join all collected values and place them on the system clipboard.
        pyperclip.copy("\n".join(values))
    def do_stacks(self,args):
        """Same as print -r --include stack"""
        self.do_print(args + " -r --include stack" )
    def do_parameter(self,args):
        """Print a parameter"""
        parser = CommandArgumentParser("parameter")
        parser.add_argument(dest="id",help="Parameter to print")
        args = vars(parser.parse_args(args))
        print "printing parameter {}".format(args['id'])
        # NOTE(review): 'parameters' is integer-keyed in resourcesByTypeName
        # too, so the numeric path below works, but the name-based fallback
        # will KeyError for non-numeric ids — confirm intended behavior.
        try:
            index = int(args['id'])
            parameter = self.wrappedStack['resourcesByTypeName']['parameters'][index]
        except ValueError:
            parameter = self.wrappedStack['resourcesByTypeName']['parameters'][args['id']]
        print(parameter.resource_status)
| |
#coding: cp1251
from __future__ import absolute_import, print_function
from pony.py23compat import PY2, imap, basestring, unicode
import re, os, os.path, sys, datetime, types, linecache, warnings, json
from itertools import count as _count
from inspect import isfunction, ismethod, getargspec
from time import strptime
from os import urandom
from codecs import BOM_UTF8, BOM_LE, BOM_BE
from locale import getpreferredencoding
from bisect import bisect
from collections import defaultdict
from copy import deepcopy, _deepcopy_dispatch
from functools import update_wrapper
from xml.etree import cElementTree
# deepcopy instance method patch for Python < 2.7:
# (copy._deepcopy_dispatch gained a MethodType entry in 2.7; register one here
# when it is missing so bound methods can be deep-copied.)
if types.MethodType not in _deepcopy_dispatch:
    assert PY2
    def _deepcopy_method(x, memo):
        # Rebind the original function to a deep copy of the bound instance.
        return type(x)(x.im_func, deepcopy(x.im_self, memo), x.im_class)
    _deepcopy_dispatch[types.MethodType] = _deepcopy_method
import pony
from pony import options
from pony.thirdparty.compiler import ast
from pony.thirdparty.decorator import decorator as _decorator
# Google App Engine forbids the threading module, so classes that would use
# thread-local storage fall back to a plain object base there.
if pony.MODE.startswith('GAE-'): localbase = object
else: from threading import local as localbase
class PonyDeprecationWarning(DeprecationWarning):
    """Warning category for deprecated Pony ORM features."""
    pass
def deprecated(stacklevel, message):
    # Emit a PonyDeprecationWarning attributed to the caller's frame.
    warnings.warn(message, PonyDeprecationWarning, stacklevel)
# Report each distinct deprecation message only once per run.
warnings.simplefilter('once', PonyDeprecationWarning)
def _improved_decorator(caller, func):
if isfunction(func):
return _decorator(caller, func)
def pony_wrapper(*args, **kwargs):
return caller(func, *args, **kwargs)
return pony_wrapper
def decorator(caller, func=None):
    """Turn *caller* into a decorator; apply it immediately when *func* is given."""
    if func is None:
        def new_decorator(f):
            return _improved_decorator(caller, f)
        # Propagate the caller's metadata onto the returned decorator.
        if isfunction(caller):
            update_wrapper(new_decorator, caller)
        return new_decorator
    return _improved_decorator(caller, func)
##def simple_decorator(dec):
## def new_dec(func):
## def pony_wrapper(*args, **kwargs):
## return dec(func, *args, **kwargs)
## return copy_func_attrs(pony_wrapper, func, dec.__name__)
## return copy_func_attrs(new_dec, dec, 'simple_decorator')
##@simple_decorator
##def decorator_with_params(dec, *args, **kwargs):
## if len(args) == 1 and not kwargs:
## func = args[0]
## new_func = dec(func)
## return copy_func_attrs(new_func, func, dec.__name__)
## def parameterized_decorator(old_func):
## new_func = dec(func, *args, **kwargs)
## return copy_func_attrs(new_func, func, dec.__name__)
## return parameterized_decorator
def decorator_with_params(dec):
    """Allow *dec* to be used both bare (@dec) and parameterized (@dec(...))."""
    def parameterized_decorator(*args, **kwargs):
        # Bare usage: the single positional argument is the decorated function.
        bare_usage = len(args) == 1 and not kwargs and isfunction(args[0])
        if bare_usage:
            return decorator(dec(), args[0])
        return decorator(dec(*args, **kwargs))
    return parameterized_decorator
@decorator
def cut_traceback(func, *args, **kwargs):
    # Run func, but in interactive mode trim traceback frames belonging to
    # pony internals so the user sees only frames from their own code.
    if not (pony.MODE == 'INTERACTIVE' and options.CUT_TRACEBACK):
        return func(*args, **kwargs)
    try: return func(*args, **kwargs)
    except AssertionError: raise
    except Exception:
        exc_type, exc, tb = sys.exc_info()
        # Remember the deepest pony-owned frame while walking to the bottom.
        last_pony_tb = None
        try:
            while tb.tb_next:
                module_name = tb.tb_frame.f_globals['__name__']
                if module_name == 'pony' or (module_name is not None # may be None during import
                                             and module_name.startswith('pony.')):
                    last_pony_tb = tb
                tb = tb.tb_next
            if last_pony_tb is None: raise
            # Errors raised via pony.utils.throw() keep the trimmed traceback.
            if tb.tb_frame.f_globals.get('__name__') == 'pony.utils' and tb.tb_frame.f_code.co_name == 'throw':
                reraise(exc_type, exc, last_pony_tb)
            raise exc # Set "pony.options.CUT_TRACEBACK = False" to see full traceback
        finally:
            # Break reference cycles between this frame and the traceback.
            del exc, tb, last_pony_tb
# Re-raise an exception with an explicit traceback. The Python 2 three-arg
# 'raise' form is a syntax error under Python 3, so it must live in exec().
if PY2:
    exec('''def reraise(exc_type, exc, tb):
    try: raise exc_type, exc, tb
    finally: del tb''')
else:
    def reraise(exc_type, exc, tb):
        # 'del' breaks the frame <-> traceback reference cycle.
        try: raise exc.with_traceback(tb)
        finally: del exc, tb
def throw(exc_type, *args, **kwargs):
    """Raise *exc_type*, which may be an exception class or an instance.

    The local reference is deleted in the finally clause so this frame does
    not keep the exception (and its traceback) alive.
    """
    if isinstance(exc_type, Exception):
        assert not args and not kwargs
        exc = exc_type
    else:
        exc = exc_type(*args, **kwargs)
    exc.__cause__ = None
    try:
        if pony.MODE == 'INTERACTIVE' and options.CUT_TRACEBACK:
            raise exc # Set "pony.options.CUT_TRACEBACK = False" to see full traceback
        raise exc
    finally:
        del exc
# Maps function / ast.Lambda objects to their positional argument names.
lambda_args_cache = {}
def get_lambda_args(func):
    # Return the plain positional parameter names of *func*, which may be a
    # real function object or a compiler-AST Lambda node. Query lambdas may
    # not use *args, **kwargs or defaults, so those are rejected.
    names = lambda_args_cache.get(func)
    if names is not None: return names
    if type(func) is types.FunctionType:
        names, argsname, kwname, defaults = getargspec(func)
    elif isinstance(func, ast.Lambda):
        # AST lambdas store kwargs/varargs at the tail of argnames; peel them.
        names = func.argnames
        if func.kwargs: names, kwname = names[:-1], names[-1]
        else: kwname = None
        if func.varargs: names, argsname = names[:-1], names[-1]
        else: argsname = None
        defaults = func.defaults
    else: assert False # pragma: no cover
    if argsname: throw(TypeError, '*%s is not supported' % argsname)
    if kwname: throw(TypeError, '**%s is not supported' % kwname)
    if defaults: throw(TypeError, 'Defaults are not supported')
    lambda_args_cache[func] = names
    return names
# Global memoization table shared by all @cached functions, capped in size.
_cache = {}
MAX_CACHE_SIZE = 1000

@decorator
def cached(f, *args, **kwargs):
    """Memoize f(*args, **kwargs); the whole cache is dropped when full."""
    key = (f, args, tuple(sorted(kwargs.items())))
    hit = _cache.get(key)
    if hit is not None:
        return hit
    # Crude eviction: clear everything rather than tracking LRU order.
    if len(_cache) == MAX_CACHE_SIZE:
        _cache.clear()
    return _cache.setdefault(key, f(*args, **kwargs))
def error_method(*args, **kwargs):
    # Stub assigned in place of unsupported methods; any call raises TypeError.
    raise TypeError()
_ident_re = re.compile(r'^[A-Za-z_]\w*\Z')

# is_ident = ident_re.match

def is_ident(string):
    """is_ident(string) -> bool

    Return True if *string* is a well-formed Python identifier.
    """
    return bool(_ident_re.match(string))

# Splits an identifier into its word parts: ACRONYMs, Capitalized words,
# lowercase runs, digit runs and underscore runs (underscores are dropped).
_name_parts_re = re.compile(r'''
            [A-Z][A-Z0-9]+(?![a-z]) # ACRONYM
        |   [A-Z][a-z]*             # Capitalized or single capital
        |   [a-z]+                  # all-lowercase
        |   [0-9]+                  # numbers
        |   _+                      # underscores
        ''', re.VERBOSE)

def split_name(name):
    "split_name('Some_FUNNYName') -> ['Some', 'FUNNY', 'Name']"
    if not _ident_re.match(name):
        raise ValueError('Name is not correct Python identifier')
    parts = _name_parts_re.findall(name)
    if not (parts[0].strip('_') and parts[-1].strip('_')):
        # Fixed grammar of the original message ("must not starting or ending").
        raise ValueError('Name must not start or end with underscores')
    return [ s for s in parts if s.strip('_') ]

def uppercase_name(name):
    "uppercase_name('Some_FUNNYName') -> 'SOME_FUNNY_NAME'"
    return '_'.join(s.upper() for s in split_name(name))

def lowercase_name(name):
    "lowercase_name('Some_FUNNYName') -> 'some_funny_name'"
    return '_'.join(s.lower() for s in split_name(name))

def camelcase_name(name):
    "camelcase_name('Some_FUNNYName') -> 'SomeFunnyName'"
    return ''.join(s.capitalize() for s in split_name(name))

def mixedcase_name(name):
    "mixedcase_name('Some_FUNNYName') -> 'someFunnyName'"
    parts = split_name(name)
    return parts[0].lower() + ''.join(s.capitalize() for s in parts[1:])
def import_module(name):
    """import_module('a.b.c') -> <module a.b.c>

    Returns the already-imported module when possible; otherwise imports it
    and walks down to the leaf submodule (plain __import__ returns the root).
    """
    cached_mod = sys.modules.get(name)
    if cached_mod is not None:
        return cached_mod
    mod = __import__(name)
    for part in name.split('.')[1:]:
        mod = getattr(mod, part)
    return mod
# Windows absolute paths may start with an optional drive letter; elsewhere
# an absolute path simply starts with '/'.
if sys.platform == 'win32':
    _absolute_re = re.compile(r'^(?:[A-Za-z]:)?[\\/]')
else:
    _absolute_re = re.compile(r'^/')

def is_absolute_path(filename):
    """Return True when *filename* is absolute on the current platform."""
    return _absolute_re.match(filename) is not None
def absolutize_path(filename, frame_depth):
    # Resolve *filename* relative to the source file of the caller
    # *frame_depth* levels up the stack, so libraries can accept paths
    # relative to user code instead of the current working directory.
    if is_absolute_path(filename): return filename
    code_filename = sys._getframe(frame_depth+1).f_code.co_filename
    if not is_absolute_path(code_filename):
        # '<stdin>'-style pseudo-filenames cannot anchor a relative path.
        if code_filename.startswith('<') and code_filename.endswith('>'):
            if pony.MODE == 'INTERACTIVE': raise ValueError(
                'When in interactive mode, please provide absolute file path. Got: %r' % filename)
            raise EnvironmentError('Unexpected module filename, which is not absolute file path: %r' % code_filename)
    code_path = os.path.dirname(code_filename)
    return os.path.join(code_path, filename)
def shortened_filename(filename):
    """Strip the pony.MAIN_DIR prefix from *filename* when present."""
    if pony.MAIN_DIR is not None:
        prefix = pony.MAIN_DIR + os.sep
        if filename.startswith(prefix):
            return filename[len(prefix):]
    return filename
def get_mtime(filename):
    """Return the file's modification time.

    On win32 the creation time is subtracted, yielding the age of the latest
    modification relative to file creation rather than the raw timestamp.
    """
    st = os.stat(filename)
    result = st.st_mtime
    if sys.platform == "win32":
        result -= st.st_ctime
    return result
# PEP 263 magic-comment pattern, e.g. "# -*- coding: utf-8 -*-".
coding_re = re.compile(r'coding[:=]\s*([-\w.]+)')
def detect_source_encoding(filename):
    # Look for a UTF-8 BOM on the first line or a PEP 263 coding declaration
    # in leading comment lines; the for-else falls back to the configured
    # source encoding (or the locale's preferred encoding).
    for i, line in enumerate(linecache.getlines(filename)):
        if i == 0 and line.startswith(BOM_UTF8): return 'utf-8'
        if not line.lstrip().startswith('#'): continue
        match = coding_re.search(line)
        if match is not None: return match.group(1)
    else: return options.SOURCE_ENCODING or getpreferredencoding()
escape_re = re.compile(r'''
    (?<!\\)\\ # single backslash
    (?:
        x[0-9a-f]{2} # byte escaping
        | u[0-9a-f]{4} # unicode escaping
        | U[0-9a-f]{8} # long unicode escaping
    )
    ''', re.VERBOSE)
def restore_escapes(s, console_encoding=None, source_encoding=None):
    # Rewrite \xNN / \uNNNN / \UNNNNNNNN escapes in repr()-style text as real
    # characters re-encoded for the console, so messages show readable text
    # instead of escape sequences (Python 2 str/unicode semantics).
    if not options.RESTORE_ESCAPES: return s
    if source_encoding is None:
        source_encoding = options.SOURCE_ENCODING or getpreferredencoding()
    if console_encoding is None:
        try: console_encoding = getattr(sys.stderr, 'encoding', None)
        except: console_encoding = None # workaround for PythonWin win32ui.error "The MFC object has died."
        console_encoding = console_encoding or options.CONSOLE_ENCODING
        console_encoding = console_encoding or getpreferredencoding()
    # Best-effort transcoding of the whole string first.
    try: s = s.decode(source_encoding).encode(console_encoding)
    except (UnicodeDecodeError, UnicodeEncodeError): pass
    def f(match):
        esc = match.group()
        code = int(esc[2:], 16)
        if esc.startswith('\\x'):
            # Keep control characters escaped; they are not printable anyway.
            if code < 32: return esc
            try: return chr(code).decode(source_encoding).encode(console_encoding)
            except (UnicodeDecodeError, UnicodeEncodeError): return esc
        char = unichr(code)
        try: return char.encode(console_encoding)
        except UnicodeEncodeError: return esc
    return escape_re.sub(f, s)
def current_timestamp():
    """Current local time formatted as 'YYYY-MM-DD HH:MM:SS.ffffff'."""
    return datetime2timestamp(datetime.datetime.now())

def datetime2timestamp(d):
    """Format *d* as 'YYYY-MM-DD HH:MM:SS.ffffff', always with microseconds."""
    text = d.isoformat(' ')
    # isoformat() omits '.ffffff' when microsecond == 0; normalize the width.
    if len(text) == 19:
        text += '.000000'
    return text

def timestamp2datetime(t):
    """Parse 'YYYY-MM-DD HH:MM:SS[.ffffff]' back into a datetime."""
    fields = strptime(t[:19], '%Y-%m-%d %H:%M:%S')
    # Pad the fractional part so short/missing fractions still parse.
    microseconds = int((t[20:26] + '000000')[:6])
    return datetime.datetime(*(fields[:6] + (microseconds,)))
def read_text_file(fname, encoding=None):
    # Read a whole file (Python 2 'file' builtin), strip any BOM, and decode
    # in preference order: BOM-implied encoding > UTF-8 > given/locale
    # encoding > ASCII with replacement characters.
    text = file(fname).read()
    for bom, enc in [ (BOM_UTF8, 'utf8'), (BOM_LE, 'utf-16le'), (BOM_BE, 'utf-16be') ]:
        if text[:len(bom)] == bom: return text[len(bom):].decode(enc)
    try: return text.decode('utf8')
    except UnicodeDecodeError:
        try: return text.decode(encoding or getpreferredencoding())
        except UnicodeDecodeError:
            return text.decode('ascii', 'replace')
def compress(s):
    # Zlib-compress via the Python 2 'zip' codec. A one-byte prefix records
    # whether the payload was compressed ('Z') or kept as-is ('N') because
    # compression would not have saved space.
    zipped = s.encode('zip')
    if len(zipped) < len(s): return 'Z' + zipped
    return 'N' + s
def decompress(s):
    # Inverse of compress(): dispatch on the one-byte prefix.
    first = s[0]
    if first == 'N': return s[1:]
    elif first == 'Z': return s[1:].decode('zip')
    raise ValueError('Incorrect data')
# Unicode subclass that can carry HTTP response metadata (media_type, charset).
class JsonString(unicode): pass
def json_result(obj, **kwargs):
    # Serialize obj to JSON and tag the result for use as an HTTP response.
    result = JsonString(json.dumps(obj, **kwargs))
    result.media_type = 'application/json'
    if 'encoding' in kwargs: result.charset = kwargs['encoding']
    return result
expr1_re = re.compile(r'''
([A-Za-z_]\w*) # identifier (group 1)
| ([(]) # open parenthesis (group 2)
''', re.VERBOSE)
expr2_re = re.compile(r'''
\s*(?:
(;) # semicolon (group 1)
| (\.\s*[A-Za-z_]\w*) # dot + identifier (group 2)
| ([([]) # open parenthesis or braces (group 3)
)
''', re.VERBOSE)
expr3_re = re.compile(r"""
[()[\]] # parenthesis or braces (group 1)
| '''(?:[^\\]|\\.)*?''' # '''triple-quoted string'''
| \"""(?:[^\\]|\\.)*?\""" # \"""triple-quoted string\"""
| '(?:[^'\\]|\\.)*?' # 'string'
| "(?:[^"\\]|\\.)*?" # "string"
""", re.VERBOSE)
def parse_expr(s, pos=0):
    # Scan one Python expression starting at *pos* in source text *s* and
    # return (expression_text, flag). The flag is derived from the internal
    # counter z; NOTE(review): its exact meaning (z == 1) is not evident from
    # this chunk — confirm against the callers.
    z = 0
    match = expr1_re.match(s, pos)
    if match is None: raise ValueError()
    start = pos
    i = match.lastindex
    if i == 1: pos = match.end() # identifier
    elif i == 2: z = 2 # "("
    else: assert False # pragma: no cover
    while True:
        match = expr2_re.match(s, pos)
        if match is None: return s[start:pos], z==1
        pos = match.end()
        i = match.lastindex
        if i == 1: return s[start:pos], False # ";" - explicit end of expression
        elif i == 2: z = 2 # .identifier
        elif i == 3: # "(" or "["
            pos = match.end()
            counter = 1
            open = match.group(i)
            if open == '(': close = ')'
            elif open == '[': close = ']'; z = 2
            else: assert False # pragma: no cover
            # Skip to the matching close bracket; expr3_re consumes string
            # literals whole so brackets inside strings are never counted.
            while True:
                match = expr3_re.search(s, pos)
                if match is None: raise ValueError()
                pos = match.end()
                x = match.group()
                if x == open: counter += 1
                elif x == close:
                    counter -= 1
                    if not counter: z += 1; break
        else: assert False # pragma: no cover
def tostring(x):
    """Best-effort conversion of any object to a string.

    Tries, in order: pass strings through, __unicode__, ElementTree
    serialization (objects with makeelement), str(), repr(), and finally a
    generic placeholder built from the class name and the object's id.
    """
    if isinstance(x, basestring): return x
    if hasattr(x, '__unicode__'):
        try: return unicode(x)
        except: pass
    if hasattr(x, 'makeelement'): return cElementTree.tostring(x)
    try: return str(x)
    except: pass
    try: return repr(x)
    except: pass
    # Bug fix: the '%X' conversions below previously had no id(x) argument,
    # so these fallbacks raised TypeError instead of returning a placeholder.
    if type(x) == types.InstanceType: return '<%s instance at 0x%X>' % (x.__class__.__name__, id(x))
    return '<%s object at 0x%X>' % (x.__class__.__name__, id(x))
def strjoin(sep, strings, source_encoding='ascii', dest_encoding=None):
"Can join mix of unicode and byte strings in different encodings"
strings = list(strings)
try: return sep.join(strings)
except UnicodeDecodeError: pass
for i, s in enumerate(strings):
if isinstance(s, str):
strings[i] = s.decode(source_encoding, 'replace').replace(u'\ufffd', '?')
result = sep.join(strings)
if dest_encoding is None: return result
return result.encode(dest_encoding, replace)
def make_offsets(s):
    """Return the character offsets of each line start in *s*.

    The list begins with 0 and ends with len(s), so line i (1-based) spans
    s[offsets[i-1]:offsets[i]].
    """
    offsets = [0]
    newline_pos = s.find('\n')
    while newline_pos != -1:
        offsets.append(newline_pos + 1)
        newline_pos = s.find('\n', newline_pos + 1)
    offsets.append(len(s))
    return offsets
def pos2lineno(pos, offsets):
    """Map character position *pos* to (1-based line number, column offset)."""
    line = bisect(offsets, pos, 0, len(offsets)-1)
    offset = pos if line == 1 else pos - offsets[line - 1]
    return line, offset
def getline(text, offsets, lineno):
    """Return line *lineno* (1-based) of *text* using precomputed offsets."""
    start, end = offsets[lineno-1], offsets[lineno]
    return text[start:end]
def getlines(text, offsets, lineno, context=1):
    """Return up to *context* lines centered around 1-based *lineno*.

    Returns (lines, index) where index is the position of the requested line
    within the returned list, or ([], None) when context is non-positive.
    """
    if context <= 0:
        return [], None
    start = max(0, lineno - 1 - context//2)
    end = min(len(offsets)-1, start + context)
    # Re-anchor near the end of the text so a full window is still returned.
    start = max(0, end - context)
    lines = [text[offsets[i]:offsets[i+1]] for i in range(start, end)]
    return lines, lineno - 1 - start
def getlines2(filename, lineno, context=1):
    """Like getlines(), but reads the lines of *filename* via linecache."""
    if context <= 0:
        return [], None
    all_lines = linecache.getlines(filename)
    if not all_lines:
        return [], None
    start = max(0, lineno - 1 - context//2)
    end = min(len(all_lines), start + context)
    # Re-anchor near the end of the file so a full window is still returned.
    start = max(0, end - context)
    return all_lines[start:start+context], lineno - 1 - start
def count(*args, **kwargs):
    """Hybrid of itertools.count and element counting.

    With zero or several arguments (or keywords) behaves as itertools.count.
    A single argument is counted: via its own .count() method when present,
    as the number of distinct elements when iterable, otherwise it seeds
    itertools.count.
    """
    if kwargs:
        return _count(*args, **kwargs)
    if len(args) != 1:
        return _count(*args)
    arg = args[0]
    if hasattr(arg, 'count'):
        return arg.count()
    try:
        iterator = iter(arg)
    except TypeError:
        return _count(arg)
    return len(set(iterator))
def avg(iter):
    """Return the arithmetic mean of the non-None elements of *iter*.

    None elements are skipped; returns None when no countable element exists.
    (Locals renamed from 'sum'/'count', which shadowed builtins.)
    """
    total = 0.0
    n = 0
    for elem in iter:
        if elem is None: continue
        total += elem
        n += 1
    if not n: return None
    return total / n
def distinct(iter):
    """Return a defaultdict(int) mapping each item to its occurrence count."""
    counts = defaultdict(int)
    for item in iter:
        counts[item] += 1
    return counts
def concat(*args):
    """Concatenate the string forms of all arguments (see tostring)."""
    return ''.join(map(tostring, args))
def is_utf8(encoding):
    """Return True when *encoding* names UTF-8 under any common spelling."""
    normalized = encoding.upper().replace('_', '').replace('-', '')
    return normalized in ('UTF8', 'UTF', 'U8')
| |
from machine import Timer
import time
import gc
import binascii
class L76GNSS:
GPS_I2CADDR = const(0x10)
def __init__(self, pytrack=None, sda='P22', scl='P21', timeout=None):
if pytrack is not None:
self.i2c = pytrack.i2c
else:
from machine import I2C
self.i2c = I2C(0, mode=I2C.MASTER, pins=(sda, scl))
self.chrono = Timer.Chrono()
self.timeout = timeout
self.timeout_status = True
self.reg = bytearray(1)
self.i2c.writeto(GPS_I2CADDR, self.reg)
def _read(self):
self.reg = self.i2c.readfrom(GPS_I2CADDR, 128) #Changed from 64 to 128 - I2C L76 says it can read till 255 bytes
return self.reg
def _convert_coords(self, gngll_s):
lat = gngll_s[1]
lat_d = (float(lat) // 100) + ((float(lat) % 100) / 60)
lon = gngll_s[3]
lon_d = (float(lon) // 100) + ((float(lon) % 100) / 60)
if gngll_s[2] == 'S':
lat_d *= -1
if gngll_s[4] == 'W':
lon_d *= -1
return(lat_d, lon_d)
#diff indexes from original - Using GGA sentence
def _convert_coords1(self, gngga_s):
lat = gngga_s[2]
lat_d = (float(lat) // 100) + ((float(lat) % 100) / 60)
lon = gngga_s[4]
lon_d = (float(lon) // 100) + ((float(lon) % 100) / 60)
if gngga_s[3] == 'S':
lat_d *= -1
if gngga_s[5] == 'W':
lon_d *= -1
return(lat_d, lon_d)
def _get_time(self, gngga_s):
gps_time = gngga_s[1]
return(gps_time)
def _get_altitude(self, gngga_s):
gps_altitude = gngga_s[9]
return(gps_altitude)
def _get_satellites(self, gngga_s):
num_satellites = gngga_s[7]
return(num_satellites)
def _fix_quality(self, gngga_s):
valid = gngga_s[6]
if valid == '0':
return False
else:
return True
#Using RMC sentence
def _get_time_rmc(self, gnrmc_s):
gps_time = gnrmc_s[1]
return(gps_time)
def _data_valid_rmc(self, gnrmc_s):
valid = gnrmc_s[2]
if valid == 'A':
return True
else:
return False
def _get_date_rmc(self, gnrmc_s):
gps_date = gnrmc_s[9]
return(gps_date)
def coordinates(self, debug=False):
lat_d, lon_d, debug_timeout = None, None, False
if self.timeout is not None:
self.chrono.reset()
self.chrono.start()
nmea = b''
while True:
if self.timeout is not None and self.chrono.read() >= self.timeout:
self.chrono.stop()
chrono_timeout = self.chrono.read()
self.chrono.reset()
self.timeout_status = False
debug_timeout = True
if not self.timeout_status:
gc.collect()
break
nmea += self._read().lstrip(b'\n\n').rstrip(b'\n\n')
gngll_idx = nmea.find(b'GNGLL')
if gngll_idx >= 0:
gngll = nmea[gngll_idx:]
e_idx = gngll.find(b'\r\n')
if e_idx >= 0:
try:
gngll = gngll[:e_idx].decode('ascii')
gngll_s = gngll.split(',')
lat_d, lon_d = self._convert_coords(gngll_s)
except Exception:
pass
finally:
nmea = nmea[(gngll_idx + e_idx):]
gc.collect()
break
else:
gc.collect()
if len(nmea) > 410: # i suppose it can be safely changed to 82, which is longest NMEA frame
nmea = nmea[-5:] # $GNGL without last L
time.sleep(0.1)
self.timeout_status = True
if debug and debug_timeout:
print('GPS timed out after %f seconds' % (chrono_timeout))
return(None, None)
else:
return(lat_d, lon_d)
#TEST functions
#Parser for GPGGA
def coordinates1(self, debug=False):
lat_d, lon_d, gps_time, valid, gps_altitude, num_satellites, debug_timeout = None, None, None, None, None, False, False
if self.timeout is not None:
self.chrono.reset()
self.chrono.start()
nmea = b''
while True:
if self.timeout is not None and self.chrono.read() >= self.timeout:
self.chrono.stop()
chrono_timeout = self.chrono.read()
self.chrono.reset()
self.timeout_status = False
debug_timeout = True
if not self.timeout_status:
gc.collect()
break
nmea += self._read().lstrip(b'\n\n').rstrip(b'\n\n')
gpgga_idx = nmea.find(b'GPGGA')
if gpgga_idx >= 0:
gpgga = nmea[gpgga_idx:]
gpgga_e_idx = gpgga.find(b'\r\n')
if gpgga_e_idx >= 0:
try:
gpgga = gpgga[:gpgga_e_idx].decode('ascii')
gpgga_s = gpgga.split(',')
lat_d, lon_d = self._convert_coords1(gpgga_s)
gps_time = self._get_time(gpgga_s)
valid = self._fix_quality(gpgga_s)
gps_altitude = self._get_altitude(gpgga_s)
num_satellites = self._get_satellites(gpgga_s)
except Exception:
pass
finally:
nmea = nmea[(gpgga_idx + gpgga_e_idx):]
gc.collect()
break
else:
gc.collect()
if len(nmea) > 410: # i suppose it can be safely changed to 82, which is longest NMEA frame
nmea = nmea[-5:] # $GNGL without last L
time.sleep(0.1)
self.timeout_status = True
if debug and debug_timeout:
print('GPS timed out after %f seconds' % (chrono_timeout))
return(None, None, None, None, False, None)
else:
return(lat_d, lon_d, gps_time, gps_altitude, valid, num_satellites)
#parser for UTC time and date >> Reads GPRMC
def get_datetime(self, debug=False):
    """Parse UTC time, date, validity and position from an RMC sentence.

    Reads the stream until a ``GPRMC`` or ``GNRMC`` frame is complete, or
    the optional ``self.timeout`` expires.  When both sentence types are
    present, the GLONASS/combined ``GNRMC`` one wins (it is checked last).

    :param debug: if True, print a message when the read times out.
    :return: ``(lat, lon, gps_time, valid, gps_date)``;
        ``(None, None, None, False, None)`` on timeout with debug.
    """
    lat_d, lon_d, gps_time, valid, gps_date, rmc_idx, debug_timeout = None, None, None, None, None, -1, False
    if self.timeout is not None:
        self.chrono.reset()
        self.chrono.start()
    nmea = b''
    while True:
        # Timeout handling: clearing self.timeout_status makes the guard
        # below break out of the loop without a result.
        if self.timeout is not None and self.chrono.read() >= self.timeout:
            self.chrono.stop()
            chrono_timeout = self.chrono.read()
            self.chrono.reset()
            self.timeout_status = False
            debug_timeout = True
        if not self.timeout_status:
            gc.collect()
            break
        nmea += self._read().lstrip(b'\n\n').rstrip(b'\n\n')
        # Since either gps or glonass could give the date, see which one is
        # present - SEE page 10 GNSS protocol:
        #   GPS only     - GPRMC GPGGA
        #   Glonass only - GNRMC GPGGA
        #   GPS+GLON     - GNRMC GPGGA
        #   No station   - GPRMC GPGGA
        gprmc_idx = nmea.find(b'GPRMC')
        gnrmc_idx = nmea.find(b'GNRMC')
        if gprmc_idx >= 0:
            rmc_idx = gprmc_idx
        if gnrmc_idx >= 0:
            rmc_idx = gnrmc_idx
        if rmc_idx >= 0:
            rmc = nmea[rmc_idx:]
            rmc_e_idx = rmc.find(b'\r\n')
            if rmc_e_idx >= 0:
                try:
                    rmc = rmc[:rmc_e_idx].decode('ascii')
                    rmc_s = rmc.split(',')
                    # RMC fields are offset by one relative to GGA, hence
                    # the [1:] slice before the shared coordinate parser.
                    lat_d, lon_d = self._convert_coords1(rmc_s[1:])
                    gps_time = self._get_time_rmc(rmc_s)
                    valid = self._data_valid_rmc(rmc_s)
                    gps_date = self._get_date_rmc(rmc_s)
                except Exception:
                    pass  # malformed frame: keep whatever fields parsed
                finally:
                    nmea = nmea[(rmc_idx + rmc_e_idx):]
                    gc.collect()
                    break
        else:
            gc.collect()
            if len(nmea) > 512:  # changed to 512 (82 is the longest NMEA frame)
                nmea = nmea[-5:]  # keep the tail so a split sentence prefix survives
        time.sleep(0.1)
    self.timeout_status = True
    if debug and debug_timeout:
        print('GPS timed out after %f seconds' % (chrono_timeout))
        return(None, None, None, False, None)
    else:
        return(lat_d, lon_d, gps_time, valid, gps_date)
| |
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
import datetime
from Models import Candidate
from Models import Recruiter
from Models import Client
from Models import Position
from Models import Interview
from Models import init_database
class DataProviderService:
    """Data access layer over the recruiting database (SQLAlchemy)."""

    def __init__(self, engine):
        """
        :param engine: The engine route and login details
        :return: a new instance of DAL class
        :type engine: string
        """
        if not engine:
            raise ValueError('The values specified in engine parameter has to be supported by SQLAlchemy')
        self.engine = engine
        db_engine = create_engine(engine)
        db_session = sessionmaker(bind=db_engine)
        self.session = db_session()

    def init_database(self):
        """
        Initializes the database tables and relationships
        :return: None
        """
        init_database(self.engine)

    def add_candidate(self, first_name, last_name, email, birthday=None, phone=None, languages="", skills=""):
        """
        Creates and saves a new candidate to the database.
        :param first_name: First Name of the candidate
        :param last_name: Last Name of the candidate
        :param email: Email address of the candidate
        :param birthday: Birthday of the candidate
        :param phone: Telephone number of the candidate
        :param languages: Language skills of the candidate
        :param skills: Skills which the candidate has
        :return: The id of the new Candidate
        """
        new_candidate = Candidate(first_name=first_name,
                                  last_name=last_name,
                                  email=email,
                                  birthday=birthday,
                                  phone=phone,
                                  languages=languages,
                                  skills=skills)
        self.session.add(new_candidate)
        self.session.commit()
        return new_candidate.id

    def get_candidate(self, id=None, serialize=False):
        """
        If the id parameter is defined then it looks up the candidate with
        the given id, otherwise it loads all the candidates.
        :param id: The id of the candidate which needs to be loaded (default value is None)
        :param serialize: when True, return dicts via each candidate's serialize()
        :return: The candidate or candidates.
        """
        if id is None:
            all_candidates = self.session.query(Candidate).order_by(Candidate.last_name).all()
        else:
            all_candidates = self.session.query(Candidate).filter(Candidate.id == id).all()
        if serialize:
            return [cand.serialize() for cand in all_candidates]
        else:
            return all_candidates

    def update_candidate(self, id, new_candidate):
        """
        Updates the contact fields of an existing candidate.
        :param id: id of the candidate to update
        :param new_candidate: dict with "email", "phone", "first_name", "last_name"
        :return: the serialized updated candidate, or None when not found
        """
        candidates = self.get_candidate(id)
        # BUG FIX: was `len(candidates) is not 1` -- identity comparison with
        # an int literal (SyntaxWarning on CPython >= 3.8, and not guaranteed
        # to work outside the small-int cache). Use equality instead.
        if len(candidates) != 1:
            return None
        candidate = candidates[0]
        candidate.email = new_candidate["email"]
        candidate.phone = new_candidate["phone"]
        candidate.first_name = new_candidate["first_name"]
        candidate.last_name = new_candidate["last_name"]
        self.session.add(candidate)
        self.session.commit()
        # Re-read so the returned dict reflects the persisted state.
        updated_candidate = self.get_candidate(id)[0]
        return updated_candidate.serialize()

    def delete_candidate(self, id):
        """
        Deletes the candidate with the given id.
        :param id: id of the candidate to delete
        :return: True when at least one row was removed, False otherwise
        """
        if id:
            items_deleted = self.session.query(Candidate).filter(Candidate.id == id).delete()
            # BUG FIX: the bulk delete was never committed, so the row came
            # back on the next session even though True was returned.
            self.session.commit()
            return items_deleted > 0
        return False

    def fill_database(self):
        """Populates the database with a fixed set of sample records."""
        #
        # Candidates
        #
        cand1 = Candidate(first_name="John",
                          last_name="Doe",
                          email="john@example.com",
                          birthday=datetime.date(1979, 3, 4),
                          phone="1-233-332",
                          languages='{ "English": "mother tongue", '
                                    ' "French" : "beginner" }',
                          skills=".NET JavaScript Python Node.js MySQL")
        cand2 = Candidate(first_name="Jane",
                          last_name="Doe",
                          email="jane@example.com",
                          birthday=datetime.date(1984, 7, 9),
                          phone="1-737-372",
                          languages='{ "English": "mother tongue", '
                                    ' "French" : "beginner",'
                                    ' "German" : "intermediate" }',
                          skills="Ruby Java PHP CakePHP")
        cand3 = Candidate(first_name="Bob",
                          last_name="Coder",
                          email="bc@bobthecoder.com",
                          birthday=datetime.date(1988, 11, 3),
                          phone="1-113-333",
                          languages='{ "English": "mother tongue", '
                                    ' "Japanese" : "beginner",'
                                    ' "Swedish" : "intermediate" }',
                          skills="Electrical Engineering High Voltage Ruby Java JavaScript MongoDB Oracle PHP")
        self.session.add(cand1)
        self.session.add(cand2)
        self.session.add(cand3)
        self.session.commit()
        #
        # Recruiters
        #
        recr1 = Recruiter(first_name="Bill",
                          last_name="Oak",
                          phone="1-454-998")
        recr2 = Recruiter(first_name="Vanessa",
                          last_name="Albright",
                          phone="1-119-238")
        recr3 = Recruiter(first_name="Kate",
                          last_name="Mingley",
                          phone="2-542-977")
        self.session.add(recr1)
        self.session.add(recr2)
        self.session.add(recr3)
        self.session.commit()
        #
        # Clients
        #
        client1 = Client(name="Capital Inc.",
                         phone="326-554-975",
                         email="admin@capital.inc")
        client2 = Client(name="Red Black Tree Inc",
                         phone="121-554-775",
                         email="info@redblacktreeinc.com")
        client3 = Client(name="My House Builder Company",
                         phone="663-514-075",
                         email="hr@myhouseb.com")
        self.session.add(client1)
        self.session.add(client2)
        self.session.add(client3)
        self.session.commit()
        #
        # Positions
        #
        cl1_pos1 = Position(name="Python developer",
                            description="Our company needs a highly experienced senior Python developer with "
                                        "skills in large Python code handling.",
                            tech_skills="Python SQLAlchemy GIT SVN OOP",
                            years_of_experience=7,
                            salary=65000,
                            client=client1.id,
                            recruiter=recr1.id)
        cl1_pos2 = Position(name="Ruby developer",
                            description="Our company needs an experienced Ruby web developer with "
                                        "skills in CSS and JavaScript.",
                            tech_skills="Ruby CSS JavaScript",
                            years_of_experience=3,
                            salary=58000,
                            client=client1.id,
                            recruiter=recr1.id)
        # Client 2
        cl2_pos1 = Position(name="Electrical Engineer",
                            description="Our company needs an expert on Electrical Engineering.",
                            tech_skills="Physics Electricity Engineering Planning ",
                            years_of_experience=10,
                            salary=85000,
                            client=client2.id,
                            recruiter=recr2.id)
        cl2_pos2 = Position(name="Carpenter",
                            description="We are looking for a carpenter with experience in Alaska.",
                            tech_skills="Carpenter Wood-Structure Scaffold Shelving",
                            years_of_experience=6,
                            salary=61000,
                            client=client2.id,
                            recruiter=recr2.id)
        # Client 3
        # BUG FIX: position name was "Txi driver"; the description refers to
        # "Taxi Drivers", so the name was a typo.
        cl3_pos1 = Position(name="Taxi driver",
                            description="Our company needs Taxi Drivers in Boston.",
                            tech_skills="Taxi-license Car Driver-license",
                            years_of_experience=2,
                            salary=45000,
                            client=client3.id,
                            recruiter=recr3.id)
        cl3_pos2 = Position(name="Mason",
                            description="We are looking for a mason who has experience working with clay",
                            tech_skills="Masonary Clay Building Planning",
                            years_of_experience=3,
                            salary=43000,
                            client=client3.id,
                            recruiter=recr3.id)
        self.session.add(cl1_pos1)
        self.session.add(cl1_pos2)
        self.session.add(cl2_pos1)
        self.session.add(cl2_pos2)
        self.session.add(cl3_pos1)
        self.session.add(cl3_pos2)
        self.session.commit()
        #
        # Interviews
        #
        int1 = Interview(date=datetime.date(2015, 4, 3),
                         feedback="The candidate is perfect fit for the position.",
                         position=cl1_pos1.id,
                         recruiter_id=recr1.id,
                         candidate=cand1.id)
        int2 = Interview(date=datetime.date(2015, 6, 13),
                         feedback="The candidate is not good for the position.",
                         position=cl1_pos2.id,
                         recruiter_id=recr1.id,
                         candidate=cand2.id)
        int3 = Interview(date=datetime.date(2015, 7, 22),
                         feedback="The candidate is good for the position.",
                         position=cl2_pos1.id,
                         recruiter_id=recr2.id,
                         candidate=cand3.id)
        self.session.add(int1)
        self.session.add(int2)
        self.session.add(int3)
        self.session.commit()
| |
"""
Shogun demo, based on PyQT Demo by Eli Bendersky
# This software is distributed under BSD 3-clause license (see LICENSE file).
#
# Authors: Christian Widmer, Soeren Sonnenburg
"""
import numpy
import sys, os, csv
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import matplotlib
from matplotlib.colorbar import make_axes, Colorbar
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT as NavigationToolbar
from matplotlib.figure import Figure
from shogun import *
from shogun import *
from shogun import *
import util
class Form(QMainWindow):
    """Main window of the interactive SHOGUN KMeans demo.

    Left side: a matplotlib canvas where mouse clicks add labeled example
    points (left button = +1, any other button = -1).  Right side: a list
    of example counts and the clustering controls.
    """
    def __init__(self, parent=None):
        super(Form, self).__init__(parent)
        self.setWindowTitle('SHOGUN interactive demo')
        self.data = DataHolder()
        self.series_list_model = QStandardItemModel()
        self.create_menu()
        self.create_main_frame()
        self.create_status_bar()
        self.on_show()
    def load_file(self, filename=None):
        """Prompt for a CSV file and load it into the data holder."""
        # NOTE(review): the filename parameter is immediately overwritten by
        # the dialog result, so passing it in has no effect.
        filename = QFileDialog.getOpenFileName(self,
            'Open a data file', '.', 'CSV files (*.csv);;All Files (*.*)')
        if filename:
            self.data.load_from_file(filename)
            self.fill_series_list(self.data.series_names())
            self.status_text.setText("Loaded " + filename)
    def on_show(self):
        """Redraw the scatter plot of all current examples, refresh stats."""
        self.axes.clear()
        self.axes.grid(True)
        # Positive and negative examples in two shades of grey.
        self.axes.plot(self.data.x1_pos, self.data.x2_pos, 'o', color='0.7')
        self.axes.plot(self.data.x1_neg, self.data.x2_neg, 'o', color='0.5')
        self.axes.set_xlim((-5,5))
        self.axes.set_ylim((-5,5))
        self.canvas.draw()
        self.fill_series_list(self.data.get_stats())
    def on_about(self):
        """Show the module docstring in an about box."""
        msg = __doc__
        QMessageBox.about(self, "About the demo", msg.strip())
    def fill_series_list(self, names):
        """Replace the list-view contents with the given strings."""
        self.series_list_model.clear()
        for name in names:
            item = QStandardItem(name)
            item.setCheckState(Qt.Unchecked)
            item.setCheckable(False)
            self.series_list_model.appendRow(item)
    def onclick(self, event):
        """Matplotlib button-press handler: add one labeled example."""
        print 'button=%d, x=%d, y=%d, xdata=%f, ydata=%f'%(event.button, event.x, event.y, event.xdata, event.ydata)
        # Left mouse button adds a positive example, anything else negative.
        if event.button==1:
            label = 1.0
        else:
            label = -1.0
        self.data.add_example(event.xdata, event.ydata, label)
        self.on_show()
    def clear(self):
        """Remove all examples and redraw the empty plot."""
        self.data.clear()
        self.on_show()
    def enable_widgets(self):
        # Re-enable the cluster-count field (hooked to combo box changes).
        self.k.setEnabled(True)
    def train_svm(self):
        """Run KMeans on the current examples and plot centers and radii.

        (Name is historical - this demo variant clusters with KMeans rather
        than training an SVM.)
        """
        k = int(self.k.text())
        self.axes.clear()
        self.axes.grid(True)
        self.axes.plot(self.data.x1_pos, self.data.x2_pos, 'ko')
        self.axes.plot(self.data.x1_neg, self.data.x2_neg, 'ko')
        # train svm
        labels = self.data.get_labels()
        print type(labels)
        lab = BinaryLabels(labels)
        features = self.data.get_examples()
        train = RealFeatures(features)
        # Distance measure chosen from the combo box.
        distance_name = self.distance_combo.currentText()
        if distance_name == "EuclideanDistance":
            distance=EuclideanDistance(train, train)
        elif distance_name == "ManhattanMetric":
            distance=ManhattanMetric(train, train)
        elif distance_name == "JensenMetric":
            distance=JensenMetric(train, train)
        kmeans=KMeans(k, distance)
        kmeans.train()
        centers = kmeans.get_cluster_centers()
        radi=kmeans.get_radiuses()
        # Re-plot examples colored by label, then overlay each cluster
        # center ('x') and a circle of its radius.
        self.axes.plot(features[0,labels==+1], features[1,labels==+1],'ro')
        self.axes.plot(features[0,labels==-1], features[1,labels==-1],'bo')
        for i in xrange(k):
            self.axes.plot(centers[0,i],centers[1,i],'kx', markersize=20, linewidth=5)
            t = numpy.linspace(0, 2*numpy.pi, 100)
            self.axes.plot(radi[i]*numpy.cos(t)+centers[0,i],radi[i]*numpy.sin(t)+centers[1,i],'k-')
        self.axes.set_xlim((-5,5))
        self.axes.set_ylim((-5,5))
        # ColorbarBase derives from ScalarMappable and puts a colorbar
        # in a specified axes, so it has everything needed for a
        # standalone colorbar. There are many more kwargs, but the
        # following gives a basic continuous colorbar with ticks
        # and labels.
        self.canvas.draw()
    def create_main_frame(self):
        """Build the central widget: canvas on the left, controls right."""
        self.main_frame = QWidget()
        plot_frame = QWidget()
        self.dpi = 100
        self.fig = Figure((6.0, 6.0), dpi=self.dpi)
        self.canvas = FigureCanvas(self.fig)
        self.canvas.setParent(self.main_frame)
        cid = self.canvas.mpl_connect('button_press_event', self.onclick)
        self.axes = self.fig.add_subplot(111)
        self.cax = None
        #self.mpl_toolbar = NavigationToolbar(self.canvas, self.main_frame)
        log_label = QLabel("Number of examples:")
        self.series_list_view = QListView()
        self.series_list_view.setModel(self.series_list_model)
        k_label = QLabel('Number of Clusters')
        self.k = QLineEdit()
        self.k.setText("2")
        spins_hbox = QHBoxLayout()
        spins_hbox.addWidget(k_label)
        spins_hbox.addWidget(self.k)
        spins_hbox.addStretch(1)
        self.legend_cb = QCheckBox("Show Support Vectors")
        self.legend_cb.setChecked(False)
        self.show_button = QPushButton("&Cluster!")
        self.connect(self.show_button, SIGNAL('clicked()'), self.train_svm)
        self.clear_button = QPushButton("&Clear")
        self.connect(self.clear_button, SIGNAL('clicked()'), self.clear)
        self.distance_combo = QComboBox()
        self.distance_combo.insertItem(-1, "EuclideanDistance")
        self.distance_combo.insertItem(-1, "ManhattanMetric")
        self.distance_combo.insertItem(-1, "JensenMetric")
        self.distance_combo.maximumSize = QSize(300, 50)
        self.connect(self.distance_combo, SIGNAL("currentIndexChanged(QString)"), self.enable_widgets)
        left_vbox = QVBoxLayout()
        left_vbox.addWidget(self.canvas)
        #left_vbox.addWidget(self.mpl_toolbar)
        right0_vbox = QVBoxLayout()
        right0_vbox.addWidget(log_label)
        right0_vbox.addWidget(self.series_list_view)
        #right0_vbox.addWidget(self.legend_cb)
        right0_vbox.addStretch(1)
        right2_vbox = QVBoxLayout()
        right2_label = QLabel("Settings")
        right2_vbox.addWidget(right2_label)
        right2_vbox.addWidget(self.show_button)
        right2_vbox.addWidget(self.distance_combo)
        right2_vbox.addLayout(spins_hbox)
        right2_clearlabel = QLabel("Remove Data")
        right2_vbox.addWidget(right2_clearlabel)
        right2_vbox.addWidget(self.clear_button)
        right2_vbox.addStretch(1)
        right_vbox = QHBoxLayout()
        right_vbox.addLayout(right0_vbox)
        right_vbox.addLayout(right2_vbox)
        hbox = QVBoxLayout()
        hbox.addLayout(left_vbox)
        hbox.addLayout(right_vbox)
        self.main_frame.setLayout(hbox)
        self.setCentralWidget(self.main_frame)
        self.enable_widgets()
    def create_status_bar(self):
        """Attach a single status label to the window's status bar."""
        self.status_text = QLabel("")
        self.statusBar().addWidget(self.status_text, 1)
    def create_menu(self):
        """Create the File (load/quit) and Help (about) menus."""
        self.file_menu = self.menuBar().addMenu("&File")
        load_action = self.create_action("&Load file",
            shortcut="Ctrl+L", slot=self.load_file, tip="Load a file")
        quit_action = self.create_action("&Quit", slot=self.close,
            shortcut="Ctrl+Q", tip="Close the application")
        self.add_actions(self.file_menu,
            (load_action, None, quit_action))
        self.help_menu = self.menuBar().addMenu("&Help")
        about_action = self.create_action("&About",
            shortcut='F1', slot=self.on_about,
            tip='About the demo')
        self.add_actions(self.help_menu, (about_action,))
    def add_actions(self, target, actions):
        """Append actions to a menu; None inserts a separator."""
        for action in actions:
            if action is None:
                target.addSeparator()
            else:
                target.addAction(action)
    def create_action( self, text, slot=None, shortcut=None,
                        icon=None, tip=None, checkable=False,
                        signal="triggered()"):
        """Convenience factory for a QAction with optional icon/shortcut/
        tooltip/slot/checkable state."""
        action = QAction(text, self)
        if icon is not None:
            action.setIcon(QIcon(":/%s.png" % icon))
        if shortcut is not None:
            action.setShortcut(shortcut)
        if tip is not None:
            action.setToolTip(tip)
            action.setStatusTip(tip)
        if slot is not None:
            self.connect(action, SIGNAL(signal), slot)
        if checkable:
            action.setCheckable(True)
        return action
class DataHolder(object):
    """ Just a thin wrapper over a dictionary that holds integer
    data series. Each series has a name and a list of numbers
    as its data. The length of all series is assumed to be
    the same.
    The series can be read from a CSV file, where each line
    is a separate series. In each series, the first item in
    the line is the name, and the rest are data numbers.

    It also stores the 2D example points added interactively by the GUI,
    split by class label into the x1_pos/x2_pos and x1_neg/x2_neg lists.
    """
    def __init__(self, filename=None):
        self.clear()
        self.load_from_file(filename)
    def clear(self):
        """Drop all interactively-added example points."""
        self.x1_pos = []
        self.x2_pos = []
        self.x1_neg = []
        self.x2_neg = []
    def get_stats(self):
        """Return two display strings with the per-class example counts."""
        num_neg = len(self.x1_neg)
        num_pos = len(self.x1_pos)
        str_neg = "num negative examples: %i" % num_neg
        str_pos = "num positive examples: %i" % num_pos
        return (str_neg, str_pos)
    def get_labels(self):
        """Return a float64 array of +1/-1 labels, positives first."""
        return numpy.array([1]*len(self.x1_pos) + [-1]*len(self.x1_neg), dtype=numpy.float64)
    def get_examples(self):
        """Return all examples as a 2 x N array (positives first),
        matching the label order from get_labels()."""
        num_pos = len(self.x1_pos)
        num_neg = len(self.x1_neg)
        examples = numpy.zeros((2,num_pos+num_neg))
        for i in xrange(num_pos):
            examples[0,i] = self.x1_pos[i]
            examples[1,i] = self.x2_pos[i]
        for i in xrange(num_neg):
            examples[0,i+num_pos] = self.x1_neg[i]
            examples[1,i+num_pos] = self.x2_neg[i]
        return examples
    def add_example(self, x1, x2, label):
        """Store one 2D example under the given class label (+1 or other)."""
        if label==1:
            self.x1_pos.append(x1)
            self.x2_pos.append(x2)
        else:
            self.x1_neg.append(x1)
            self.x2_neg.append(x2)
    def load_from_file(self, filename=None):
        """Load integer CSV series: first column is the series name, the
        remaining columns its data values."""
        self.data = {}
        self.names = []
        if filename:
            for line in csv.reader(open(filename, 'rb')):
                self.names.append(line[0])
                self.data[line[0]] = map(int, line[1:])
                self.datalen = len(line[1:])
    def series_names(self):
        """ Names of the data series
        """
        return self.names
    def series_len(self):
        """ Length of a data series
        """
        return self.datalen
    def series_count(self):
        """Number of loaded series."""
        return len(self.data)
    def get_series_data(self, name):
        """Data values of the series with the given name."""
        return self.data[name]
def main():
    """Create the Qt application, show the demo window, run the loop."""
    application = QApplication(sys.argv)
    window = Form()
    window.show()
    application.exec_()
# Run the demo only when executed as a script, not when imported.
if __name__ == "__main__":
    main()
#~ dh = DataHolder('qt_mpl_data.csv')
#~ print dh.data
#~ print dh.get_series_data('1991 Sales')
#~ print dh.series_names()
#~ print dh.series_count()
| |
import sys
import os
import unittest
import maya.cmds as cmds
import maya.OpenMaya as om
import maya.OpenMayaAnim as oma
import maya.OpenMayaFX as omfx
import pymel.versions
from pymel.util.testing import TestCaseExtended
# When running outside an interactive Maya session (cmds.about is missing),
# bootstrap maya.standalone so the cmds module becomes functional.
if not hasattr(cmds, 'about'):
    import maya.standalone
    maya.standalone.initialize()
#===============================================================================
# Current Bugs
#===============================================================================
# For CURRENT bugs, we PASS if the bug is still present, and FAIL if it goes
# away... this may be counter-intuitive, but it acts as an alert if a bug is
# fixed (so we can possibly get rid of yucky work-around code...)
# Bug report 378211
class TestConstraintAngleOffsetQuery(TestCaseExtended):
    """Bug report 378211: querying the offset of aim/orient constraints
    returns values that differ from what was set.  This test PASSES while
    the bug is present and FAILS once it is fixed."""

    def setUp(self):
        # Fresh, empty scene for every run.
        cmds.file(new=1, f=1)

    def runTest(self):
        expected = (12, 8, 7)
        for constraintName in ('aimConstraint', 'orientConstraint'):
            driver = cmds.polyCube()[0]
            driven = cmds.polyCube()[0]
            constraintCmd = getattr(cmds, constraintName)
            node = constraintCmd(driver, driven)[0]
            constraintCmd(node, e=1, offset=expected)
            queried = tuple(constraintCmd(node, q=1, offset=1))
            # While the bug exists, set and queried offsets must differ.
            try:
                self.assertVectorsEqual(expected, queried)
            except AssertionError:
                pass
            else:
                self.fail("TestConstraintAngleOffsetQuery was fixed! Huzzah!")
# Bug report 378192
class TestEmptyMFnNurbsCurve(unittest.TestCase):
    """Bug report 378192: MFnNurbsCurve.setObject fails on an empty
    nurbsCurve node even though hasObj reports the node as compatible.
    PASSES while the bug is present."""

    def setUp(self):
        cmds.file(new=1, f=1)

    def runTest(self):
        curveName = cmds.createNode('nurbsCurve', n="RigWorldShape")
        sel = om.MSelectionList()
        sel.add(curveName)
        mobj = om.MObject()
        sel.getDependNode(0, mobj)
        curveFn = om.MFnNurbsCurve()
        # The function set claims it can handle the (empty) curve...
        self.assertTrue(curveFn.hasObj(mobj))
        # ...yet attaching it must still raise while the bug exists.
        try:
            curveFn.setObject(mobj)
        except Exception:
            pass
        else:
            self.fail("MFnNurbs curve now works with empty curve objects! Yay!")
# Bug report 344037
class TestSurfaceRangeDomain(unittest.TestCase):
    """Bug report 344037: selecting nurbs-surface parameter ranges with an
    open second dimension collapses the range to [0:1].  PASSES while the
    bug is present; FAILS once fixed."""

    def setUp(self):
        cmds.file(new=1, f=1)

    def runTest(self):
        try:
            # A default nurbs sphere has u/v parameter ranges of 0:4 / 0:8.
            cmds.sphere()
            # Acceptable expansions of a u-range selection...
            uAcceptable = ('nurbsSphere1.u[2:3][0:8]',
                           'nurbsSphere1.u[2:3][*]',
                           'nurbsSphere1.u[2:3]',
                           'nurbsSphere1.uv[2:3][0:8]',
                           'nurbsSphere1.uv[2:3][*]',
                           'nurbsSphere1.uv[2:3]',
                           'nurbsSphere1.v[0:8][2:3]',
                           'nurbsSphere1.v[*][2:3]')
            # ...and of a v-range selection.
            vAcceptable = ('nurbsSphere1.u[0:4][2:3]',
                           'nurbsSphere1.u[*][2:3]',
                           'nurbsSphere1.uv[0:4][2:3]',
                           'nurbsSphere1.uv[*][2:3]',
                           'nurbsSphere1.v[2:3][0:4]',
                           'nurbsSphere1.v[2:3][*]',
                           'nurbsSphere1.v[2:3]')
            # (selection, acceptable results); the third/fourth entries of
            # each group historically come back as ...[0:1] and fail.
            checks = (
                ('nurbsSphere1.u[2:3][*]', uAcceptable),   # passes
                ('nurbsSphere1.v[*][2:3]', uAcceptable),   # passes
                ('nurbsSphere1.u[2:3]', uAcceptable),      # fails while broken
                ('nurbsSphere1.uv[2:3][*]', uAcceptable),  # fails while broken
                ('nurbsSphere1.u[*][2:3]', vAcceptable),   # passes
                ('nurbsSphere1.v[2:3][*]', vAcceptable),   # passes
                ('nurbsSphere1.v[2:3]', vAcceptable),      # fails while broken
                ('nurbsSphere1.uv[*][2:3]', vAcceptable),  # fails while broken
            )
            for selection, acceptable in checks:
                cmds.select(selection)
                self.assertTrue(cmds.ls(sl=1)[0] in acceptable)
        except AssertionError:
            pass
        else:
            # check that things are BAD!
            self.fail("Nurbs surface range domain bug fixed!")
# Bug report 345384
# This bug only seems to affect windows (or at least, Win x64 -
# haven't tried on 32-bit).
class TestMMatrixSetAttr(unittest.TestCase):
    """Bug report 345384 (Windows x64 only): om.MMatrix.__setattr__ bypasses
    properties defined on mixed-in base classes and writes straight into the
    instance __dict__.  PASSES while the bug is present on Windows."""
    def setUp(self):
        # pymel essentially fixes this bug by wrapping
        # the api's __setattr__... so undo this before testing
        if 'pymel.internal.factories' in sys.modules:
            factories = sys.modules['pymel.internal.factories']
            self.origSetAttr = factories.MetaMayaTypeWrapper._originalApiSetAttrs.get(om.MMatrix, None)
        else:
            self.origSetAttr = None
        if self.origSetAttr:
            # Temporarily restore Maya's original __setattr__.
            self.fixedSetAttr = om.MMatrix.__setattr__
            om.MMatrix.__setattr__ = self.origSetAttr
        cmds.file(new=1, f=1)
    def runTest(self):
        # We expect it to fail on windows, and pass on other operating systems...
        shouldPass = os.name != 'nt'
        try:
            class MyClass1(object):
                def __init__(self):
                    self._bar = 'not set'
                def _setBar(self, val):
                    print "setting bar to:", val
                    self._bar = val
                def _getBar(self):
                    print "getting bar..."
                    return self._bar
                bar = property(_getBar, _setBar)
                # These two are just so we can trace what's going on...
                def __getattribute__(self, name):
                    # don't just use 'normal' repr, as that will
                    # call __getattribute__!
                    print "__getattribute__(%s, %r)" % (object.__repr__(self), name)
                    return super(MyClass1, self).__getattribute__(name)
                def __setattr__(self, name, val):
                    print "__setattr__(%r, %r, %r)" % (self, name, val)
                    return super(MyClass1, self).__setattr__(name, val)
            foo1 = MyClass1()
            # works like we expect...
            foo1.bar = 7
            print "foo1.bar:", foo1.bar
            self.assertTrue(foo1.bar == 7)
            class MyClass2(MyClass1, om.MMatrix): pass
            foo2 = MyClass2()
            foo2.bar = 7
            # Here, on windows, MMatrix's __setattr__ takes over, and
            # (after presumabably determining it didn't need to do
            # whatever special case thing it was designed to do)
            # instead of calling the super's __setattr__, which would
            # use the property, inserts it into the object's __dict__
            # manually
            print "foo2.bar:", foo2.bar
            self.assertTrue(foo2.bar == 7)
        except Exception:
            if shouldPass:
                raise
        else:
            if not shouldPass:
                self.fail("MMatrix setattr bug seems to have been fixed!")
    def tearDown(self):
        # Restore the 'fixed' __setattr__'s
        if self.origSetAttr:
            om.MMatrix.__setattr__ = self.fixedSetAttr
# Introduced in maya 2014
# Change request #: BSPR-12597
if pymel.versions.current() >= pymel.versions.v2014:
    class TestShapeParentInstance(unittest.TestCase):
        """Change request BSPR-12597 (introduced in Maya 2014): shape
        re-parenting an instanced shape after duplicating its transform
        raises a RuntimeError about unwritable initialShadingGroup
        connections.  PASSES while the bug is present."""
        def setUp(self):
            cmds.file(new=1, f=1)
        def runTest(self):
            try:
                import maya.cmds as cmds
                def getShape(trans):
                    # First shape child of the given transform.
                    return cmds.listRelatives(trans, children=True, shapes=True)[0]
                cmds.file(new=1, f=1)
                shapeTransform = cmds.polyCube(name='singleShapePoly')[0]
                origShape = getShape(shapeTransform)
                dupeTransform1 = cmds.duplicate(origShape, parentOnly=1)[0]
                cmds.parent(origShape, dupeTransform1, shape=True, addObject=True, relative=True)
                dupeTransform2 = cmds.duplicate(dupeTransform1)[0]
                cmds.delete(dupeTransform1)
                dupeShape = getShape(dupeTransform2)
                # In maya 2014, the following raises:
                #   RuntimeError: Connection not made:
                #   'singleShapePolyShape2.instObjGroups[1]' ->
                #   'initialShadingGroup.dagSetMembers[2]'. Source is not
                #   connected. / Destination attribute must be writable.
                cmds.parent(dupeShape, shapeTransform, shape=True, addObject=True, relative=True)
            except Exception:
                pass
            else:
                self.fail("ShapeParentInstance bug fixed!")
#===============================================================================
# Current bugs that will cause Maya to CRASH (and so are commented out!)
#===============================================================================
# This is commented out as it will cause a CRASH - uncomment out (or just
# copy/ paste the relevant code into the script editor) to test if it's still
# causing a crash...
# If you're copy / pasting into a script editor, in order for a crash to occur,
# all lines must be executed at once - if you execute one at a time, there will
# be no crash
# Also, I'm making the code in each of the test functions self-contained (ie,
# has all imports, etc) for easy copy-paste testing...
#class TestSubdivSelectCrash(unittest.TestCas):
# def testCmds(self):
# import maya.cmds as cmds
# cmds.file(new=1, f=1)
# polyCube = cmds.polyCube()[0]
# subd = cmds.polyToSubdiv(polyCube)[0]
# cmds.select(subd + '.sme[*][*]')
#
# def testApi(self):
# import maya.cmds as cmds
# import maya.OpenMaya as om
#
# polyCube = cmds.polyCube()[0]
# subd = cmds.polyToSubdiv(polyCube)[0]
# selList = om.MSelectionList()
# selList.add(subd + '.sme[*][*]')
#===============================================================================
# FIXED (Former) Bugs
#===============================================================================
# Fixed in Maya 2009! yay!
class TestConstraintVectorQuery(unittest.TestCase):
    """Checks that aim/normal/tangent constraints report their default
    worldUp/up/aim vectors correctly.  (Fixed in Maya 2009.)"""

    def setUp(self):
        cmds.file(new=1, f=1)

    def _doTestForConstraintType(self, constraintType):
        constraintCmd = getattr(cmds, constraintType)
        # tangentConstraint needs a curve target; the others use a mesh.
        target = cmds.circle()[0] if constraintType == 'tangentConstraint' else cmds.polyCube()[0]
        driven = cmds.polyCube()[0]
        node = constraintCmd(target, driven)[0]
        self.assertEqual(constraintCmd(node, q=1, worldUpVector=1), [0, 1, 0])
        self.assertEqual(constraintCmd(node, q=1, upVector=1), [0, 1, 0])
        self.assertEqual(constraintCmd(node, q=1, aimVector=1), [1, 0, 0])

    def test_aimConstraint(self):
        self._doTestForConstraintType('aimConstraint')

    def test_normalConstraint(self):
        self._doTestForConstraintType('normalConstraint')

    def test_tangentConstraint(self):
        self._doTestForConstraintType('tangentConstraint')
# Fixed ! Yay! (...though I've only check on win64...)
# (not sure when... was fixed by time of 2011 Hotfix 1 - api 201101,
# and still broken in 2009 SP1a - api 200906)
class TestMatrixSetAttr(unittest.TestCase):
    """setAttr with 16 floats on a dynamic matrix attribute used to error;
    fixed by the time of 2011 Hotfix 1."""

    def setUp(self):
        cmds.file(new=1, f=1)
        cmds.sphere(n='node')
        cmds.addAttr(ln='matrixAttr', dt="matrix")

    def runTest(self):
        # Identity matrix, row-major, as 16 separate float arguments.
        identity = (1.0, 0.0, 0.0, 0.0,
                    0.0, 1.0, 0.0, 0.0,
                    0.0, 0.0, 1.0, 0.0,
                    0.0, 0.0, 0.0, 1.0)
        cmds.setAttr('node.matrixAttr', *identity, type='matrix')
# Bug report 345382
# Fixed ! Yay! (...though I've only check on win64...)
# (not sure when... was fixed by time of 2011 Hotfix 1 - api 201101,
# and still broken in 2009 SP1a - api 200906)
class TestFluidMFnCreation(unittest.TestCase):
    """Bug report 345382: constructing MFnFluid from a fluidShape dag path
    used to fail; fixed by the time of 2011 Hotfix 1."""

    def setUp(self):
        cmds.file(new=1, f=1)

    def runTest(self):
        fluidShape = cmds.createNode('fluidShape')
        sel = om.MSelectionList()
        sel.add(fluidShape)
        dagPath = om.MDagPath()
        sel.getDagPath(0, dagPath)
        # Must construct without raising now that the bug is fixed.
        omfx.MFnFluid(dagPath)
# nucleus node fixed in 2014
# symmetryConstraint fixed in 2015
class TestMFnCompatibility(unittest.TestCase):
    """Checks that nodes reporting inheritance via ``nodeType -inherited``
    can actually be wrapped by the corresponding MFn function set.
    (nucleus fixed in 2014, symmetryConstraint fixed in 2015.)"""

    def setUp(self):
        cmds.file(new=1, f=1)

    def _assertInheritMFnConistency(self, nodeType, parentNodeType, mfnType):
        """Create nodeType, assert it inherits parentNodeType, then verify
        mfnType can be constructed from the node's MObject."""
        nodeInstName = cmds.createNode(nodeType)
        selList = om.MSelectionList()
        selList.add(nodeInstName)
        mobj = om.MObject()
        selList.getDependNode(0, mobj)
        self.assertTrue(parentNodeType in cmds.nodeType(nodeInstName, inherited=True))
        try:
            mfnType(mobj)
        except Exception as e:
            # BUG FIX: was `raise self.fail(...)`. TestCase.fail() already
            # raises the failure exception and never returns, so the extra
            # `raise` was dead code (and would have been `raise None` -- a
            # TypeError -- had fail() ever returned). Also modernized
            # `except Exception, e` to the `as e` form (valid since 2.6).
            self.fail("Error creating %s even though %s inherits from %s: %s" %
                      (mfnType.__name__, nodeType, parentNodeType, e))

    def test_nucleus_MFnDagNode(self):
        self._assertInheritMFnConistency('nucleus', 'dagNode', om.MFnDagNode)

    def test_nucleus_MFnTransform(self):
        self._assertInheritMFnConistency('nucleus', 'transform', om.MFnTransform)

    def test_symmetryConstraint_test_nucleus_MFnDagNode(self):
        self._assertInheritMFnConistency('symmetryConstraint', 'dagNode', om.MFnDagNode)

    def test_symmetryConstraint_MFnTransform(self):
        self._assertInheritMFnConistency('symmetryConstraint', 'transform',
                                         om.MFnTransform)
# These probably aren't strictly considered "bugs" by autodesk, though I
# think they should be...
# def test_hikHandle_MFnIkHandle(self):
# self._assertInheritMFnConistency('hikHandle', 'ikHandle', oma.MFnIkHandle)
#
# def test_jointFfd_MFnLatticeDeformer(self):
# self._assertInheritMFnConistency('jointFfd', 'ffd', oma.MFnLatticeDeformer)
#
# def test_transferAttributes_MFnWeightGeometryFilter(self):
# self._assertInheritMFnConistency('transferAttributes', 'weightGeometryFilter', oma.MFnWeightGeometryFilter)
#
# def test_transferAttributes_MFnGeometryFilter(self):
# self._assertInheritMFnConistency('transferAttributes', 'geometryFilter', oma.MFnGeometryFilter)
# Fixed in 2014! yay!
class TestGroupUniqueness(unittest.TestCase):
    """Verifies that cmds.group returns a unique node name when the
    requested name already exists.  (Fixed in Maya 2014.)"""

    def setUp(self):
        cmds.file(new=1, f=1)

    def runTest(self):
        # Create 'foo' nested under 'bar', then ask for 'foo' again at the
        # top level; the returned name must resolve to exactly one node.
        cmds.select(cl=1)
        cmds.group(n='foo', empty=1)
        cmds.group(n='bar')
        cmds.select(cl=1)
        returnedName = cmds.group(n='foo', empty=1)
        matches = cmds.ls(returnedName)
        if len(matches) < 1:
            self.fail('cmds.group did not return a valid name')
        elif len(matches) > 1:
            self.fail('cmds.group did not return a unique name')
| |
"""conf.py."""
# -*- coding: utf-8 -*-
#
# napalm documentation build configuration file, created by
# sphinx-quickstart on Tue Dec 16 13:17:14 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from collections import defaultdict
import json
import os
import re
import subprocess
import sys
from glob import glob
from napalm.base import NetworkDriver
from jinja2 import Environment, FileSystemLoader
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('../'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
autoclass_content = "both"
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ["sphinx.ext.autodoc", "sphinx.ext.napoleon"]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix of source filenames.
source_suffix = ".rst"
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "NAPALM"
copyright = "2021, David Barroso/Mircea Ulinic/Kirk Byers"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "3"
# The full version, including alpha/beta/rc tags.
release = "3"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build", "napalm_ansible_repo"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
on_rtd = os.environ.get("READTHEDOCS", None) == "True"
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
else:
html_theme = "default"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = "napalmdoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
("index", "napalm.tex", u"NAPALM Documentation", u"David Barroso", "manual")
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [("index", "napalm", u"NAPALM Documentation", [u"David Barroso"], 1)]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
"index",
"napalm",
u"NAPALM Documentation",
u"David Barroso",
"napalm",
"One line description of project.",
"Miscellaneous",
)
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# Disable pdf and epub generation
enable_pdf_build = False
enable_epub_build = False
# NetworkDriver methods that are not "getters"; they are filtered out of the
# rows of the support matrix built in build_getters_support_matrix().
EXCLUDE_METHODS = (
    "cli",
    "close",
    "commit_config",
    "confirm_commit",
    "has_pending_commit",
    "compare_config",
    "discard_config",
    "load_merge_candidate",
    "load_replace_candidate",
    "load_template",
    "open",
    "rollback",
    "compliance_report",
    "connection_tests",
    "post_connection_tests",
    "pre_connection_tests",
)
# Test report entries to skip.  NOTE: the check in
# build_getters_support_matrix() is a substring test -- the captured getter
# name "method_signatures" is matched inside this string.
EXCLUDE_IN_REPORT = "test_method_signatures"
# Variant test names mapped onto the canonical getter they exercise, so their
# results are merged into a single matrix row.
METHOD_ALIASES = {
    "get_config_filtered": "get_config",
    "get_arp_table_with_vrf": "get_arp_table",
    "get_route_to_longer": "get_route_to",
    "get_config_sanitized": "get_config",
}
def _merge_results(last, intermediate):
if intermediate == "failed":
return "failed"
elif intermediate == "skipped":
return "failed" if last == "failed" else "skipped"
elif intermediate == "passed":
return "ok" if last == "ok" else last
else:
return last
def build_napalm_ansible_module_docs(app):
    """Create documentation for Ansible modules.

    Fetches the napalm-ansible module sources via a helper shell script, then
    renders one ``index.rst`` per module from its JSON description file.

    Args:
        app: The Sphinx application (unused; required by the event signature).
    """
    # Add script to clone napalm-ansible repo
    status = subprocess.call(
        "./build-ansible-module-docs.sh", stdout=sys.stdout, stderr=sys.stderr
    )
    if status != 0:
        print("Something bad happened when processing the Ansible modules.")
        sys.exit(-1)

    env = Environment(loader=FileSystemLoader("."))
    # The template is loop-invariant; fetch it once instead of per module.
    template_file = env.get_template("ansible-module.j2")

    modules_dir = "./integrations/ansible/modules/source"
    module_files = glob("{0}/*.json".format(modules_dir))
    for module_file in module_files:
        # Module name is the JSON file's basename without extension.
        module = module_file.split("/")[-1].split(".")[0]
        with open(module_file, "r") as f:
            data = json.load(f)
        data["name"] = module

        module_dir = "./integrations/ansible/modules/{0}".format(module)
        # Replaces the old os.stat()/except/os.mkdir() dance.
        os.makedirs(module_dir, exist_ok=True)

        rendered_template = template_file.render(**data)
        with open("{0}/index.rst".format(module_dir), "w") as f:
            f.write(rendered_template)
def build_getters_support_matrix(app):
    """Build the getters support matrix.

    Runs the driver test suites (``./test.sh``), parses the resulting pytest
    JSON report, and renders ``support/matrix.rst`` showing, per driver,
    whether each public getter passed, failed or was skipped.

    Args:
        app: The Sphinx application (unused; required by the event signature).
    """
    status = subprocess.call("./test.sh", stdout=sys.stdout, stderr=sys.stderr)
    if status != 0:
        print("Something bad happened when processing the test reports.")
        sys.exit(-1)

    drivers = set()
    # One row per public NetworkDriver getter, mapping driver -> merged state.
    matrix = {
        m: defaultdict(dict)
        for m in dir(NetworkDriver)
        if not (m.startswith("_") or m in EXCLUDE_METHODS)
    }
    # Test names look like "<driver>/...::test_<getter>".
    regex_name = re.compile(r"(?P<driver>\w+)\/.*::test_(?P<getter>\w+)")

    filename = "./support/tests/report.json"
    with open(filename, "r") as f:
        data = json.load(f)

    for test in data["report"]["tests"]:
        match = regex_name.search(test["name"])
        if not match:
            continue
        driver = match.group("driver")
        drivers.add(driver)
        method = match.group("getter")
        # NOTE: deliberate substring test -- the captured getter
        # "method_signatures" is contained in "test_method_signatures".
        if method in EXCLUDE_IN_REPORT:
            continue
        result = test["outcome"]
        # Collapse aliased tests (e.g. get_config_filtered) onto the
        # canonical getter before merging.
        method = METHOD_ALIASES.get(method, method)
        intermediate_result = matrix[method].get(driver)
        matrix[method][driver] = _merge_results(result, intermediate_result)

    sorted_methods = sorted(matrix.keys())
    drivers = sorted(drivers)
    env = Environment(loader=FileSystemLoader("."))
    template_file = env.get_template("matrix.j2")
    rendered_template = template_file.render(
        matrix=matrix, drivers=drivers, sorted_methods=sorted_methods
    )
    with open("support/matrix.rst", "w") as f:
        f.write(rendered_template)
def setup(app):
    """Map methods to states of the documentation build."""
    # Rebuild the support matrix and the Ansible module pages whenever a
    # Sphinx builder is initialised.
    app.connect("builder-inited", build_getters_support_matrix)
    app.connect("builder-inited", build_napalm_ansible_module_docs)
    # NOTE(review): both builders are also invoked eagerly here, so they run
    # at extension-setup time AND again on "builder-inited" -- presumably
    # intentional (ensures artifacts exist early), but worth confirming.
    build_getters_support_matrix(None)
    build_napalm_ansible_module_docs(None)
| |
# -*- coding: utf-8 -*-
"""
Test pylti/test_common.py module
"""
import unittest
import semantic_version
import httpretty
import oauthlib.oauth1
from urlparse import urlparse, parse_qs
import urllib
import pylti
from pylti.common import (
LTIOAuthDataStore,
verify_request_common,
LTIException,
post_message,
post_message2,
generate_request_xml
)
from pylti.tests.util import TEST_CLIENT_CERT
class TestCommon(unittest.TestCase):
    """
    Tests for common.py

    NOTE(review): this module uses Python 2-only APIs (``urlparse``,
    ``urllib.urlencode``, ``dict.iteritems``); it will not run on Python 3.
    """
    # pylint: disable=too-many-public-methods

    # Valid XML response from LTI 1.0 consumer; used to mock the consumer's
    # reply to a replaceResult grade POST in the post_message tests below.
    expected_response = """<?xml version="1.0" encoding="UTF-8"?>
<imsx_POXEnvelopeResponse xmlns = "http://www.imsglobal.org/services/ltiv1p1\
/xsd/imsoms_v1p0">
<imsx_POXHeader>
<imsx_POXResponseHeaderInfo>
<imsx_version>V1.0</imsx_version>
<imsx_messageIdentifier>edX_fix</imsx_messageIdentifier>
<imsx_statusInfo>
<imsx_codeMajor>success</imsx_codeMajor>
<imsx_severity>status</imsx_severity>
<imsx_description>Score for StarX/StarX_DEMO/201X_StarX:\
edge.edx.org-i4x-StarX-StarX_DEMO-lti-40559041895b4065b2818c23b9cd9da8\
:18b71d3c46cb4dbe66a7c950d88e78ec is now 0.0</imsx_description>
<imsx_messageRefIdentifier>
</imsx_messageRefIdentifier>
</imsx_statusInfo>
</imsx_POXResponseHeaderInfo>
</imsx_POXHeader>
<imsx_POXBody><replaceResultResponse/></imsx_POXBody>
</imsx_POXEnvelopeResponse>
"""

    @staticmethod
    def test_version():
        """
        Will raise ValueError if not a semantic version
        """
        semantic_version.Version(pylti.VERSION)

    def test_lti_oauth_data_store(self):
        """
        Tests that LTIOAuthDataStore works
        """
        # Mix of well-formed consumers, one without a secret (keyNS) and one
        # that also carries a client certificate (keyWCert).
        consumers = {
            "key1": {"secret": "secret1"},
            "key2": {"secret": "secret2"},
            "key3": {"secret": "secret3"},
            "keyNS": {"test": "test"},
            "keyWCert": {"secret": "secret", "cert": "cert"},
        }
        store = LTIOAuthDataStore(consumers)
        self.assertEqual(store.lookup_consumer("key1").secret, "secret1")
        self.assertEqual(store.lookup_consumer("key2").secret, "secret2")
        self.assertEqual(store.lookup_consumer("key3").secret, "secret3")
        self.assertEqual(store.lookup_cert("keyWCert"), "cert")
        # Unknown keys and secret-less consumers must resolve to None.
        self.assertIsNone(store.lookup_consumer("key4"))
        self.assertIsNone(store.lookup_cert("key4"))
        self.assertIsNone(store.lookup_consumer("keyNS"))
        self.assertIsNone(store.lookup_cert("keyNS"))

    def test_lti_oauth_data_store_no_consumers(self):
        """
        If consumers are not given it there are no consumer to return.
        """
        store = LTIOAuthDataStore(None)
        self.assertIsNone(store.lookup_consumer("key1"))
        self.assertIsNone(store.lookup_cert("key1"))

    def test_verify_request_common(self):
        """
        verify_request_common succeeds on valid request
        """
        headers = dict()
        consumers, method, url, verify_params, _ = (
            self.generate_oauth_request()
        )
        ret = verify_request_common(consumers, url, method,
                                    headers, verify_params)
        self.assertTrue(ret)

    def test_verify_request_common_via_proxy(self):
        """
        verify_request_common succeeds on valid request via proxy
        """
        headers = dict()
        # Simulate TLS termination at a proxy: the request was signed for
        # https but arrives over http with X-Forwarded-Proto set.
        headers['X-Forwarded-Proto'] = 'https'
        orig_url = 'https://localhost:5000/?'
        consumers, method, url, verify_params, _ = (
            self.generate_oauth_request(url_to_sign=orig_url)
        )
        ret = verify_request_common(consumers, url, method,
                                    headers, verify_params)
        self.assertTrue(ret)

    def test_verify_request_common_no_oauth_fields(self):
        """
        verify_request_common fails on missing authentication
        """
        headers = dict()
        # `params` here are the raw launch params WITHOUT oauth_* fields.
        consumers, method, url, _, params = (
            self.generate_oauth_request()
        )
        with self.assertRaises(LTIException):
            verify_request_common(consumers, url, method, headers, params)

    def test_verify_request_common_no_params(self):
        """
        verify_request_common fails on missing parameters
        """
        consumers = {
            "__consumer_key__": {"secret": "__lti_secret__"}
        }
        url = 'http://localhost:5000/?'
        method = 'GET'
        headers = dict()
        params = dict()
        with self.assertRaises(LTIException):
            verify_request_common(consumers, url, method, headers, params)

    @httpretty.activate
    def test_post_response_invalid_xml(self):
        """
        Test post message with invalid XML response
        """
        uri = (u'https://edge.edx.org/courses/MITx/ODL_ENG/2014_T1/xblock/'
               u'i4x:;_;_MITx;_ODL_ENG;_lti;_94173d3e79d145fd8ec2e83f15836ac8/'
               u'handler_noauth/grade_handler')

        def request_callback(request, cburi, headers):
            # pylint: disable=unused-argument
            """
            Mock success response.
            """
            # "success" is not valid POX XML, so post_message must report
            # failure even though the HTTP status is 200.
            return 200, headers, "success"

        httpretty.register_uri(httpretty.POST, uri, body=request_callback)
        consumers = {
            "__consumer_key__": {"secret": "__lti_secret__"}
        }
        body = '<xml></xml>'
        ret = post_message(consumers, "__consumer_key__", uri, body)
        self.assertFalse(ret)

    @httpretty.activate
    def test_post_response_valid_xml(self):
        """
        Test post grade with valid XML response
        """
        uri = 'https://localhost:8000/dev_stack'

        def request_callback(request, cburi, headers):
            # pylint: disable=unused-argument,
            """
            Mock expected response.
            """
            return 200, headers, self.expected_response

        httpretty.register_uri(httpretty.POST, uri, body=request_callback)
        # Consumer carries a client certificate so post_message2 (cert-based
        # posting) is exercised as well.
        consumers = {
            "__consumer_key__": {
                "secret": "__lti_secret__",
                "cert": TEST_CLIENT_CERT,
            },
        }
        body = generate_request_xml('message_identifier_id', 'operation',
                                    'lis_result_sourcedid', '1.0')
        ret = post_message(consumers, "__consumer_key__", uri, body)
        self.assertTrue(ret)
        ret = post_message2(consumers, "__consumer_key__", uri, body)
        self.assertTrue(ret)

    def test_generate_xml(self):
        """
        Generated post XML is valid
        """
        # With a score: expect a <result> element carrying the textString.
        xml = generate_request_xml('message_identifier_id', 'operation',
                                   'lis_result_sourcedid', 'score')
        self.assertEqual(xml, """<?xml version='1.0' encoding='utf-8'?>
<imsx_POXEnvelopeRequest xmlns="http://www.imsglobal.org/services/ltiv1p1/xsd/\
imsoms_v1p0"><imsx_POXHeader><imsx_POXRequestHeaderInfo><imsx_version>V1.0\
</imsx_version><imsx_messageIdentifier>message_identifier_id\
</imsx_messageIdentifier></imsx_POXRequestHeaderInfo></imsx_POXHeader>\
<imsx_POXBody><operationRequest><resultRecord><sourcedGUID><sourcedId>\
lis_result_sourcedid</sourcedId></sourcedGUID><result><resultScore>\
<language>en</language><textString>score</textString></resultScore>\
</result></resultRecord></operationRequest></imsx_POXBody>\
</imsx_POXEnvelopeRequest>""")
        # With score=None: the <result> element must be omitted entirely.
        xml = generate_request_xml('message_identifier_id', 'operation',
                                   'lis_result_sourcedid', None)
        self.assertEqual(xml, """<?xml version='1.0' encoding='utf-8'?>
<imsx_POXEnvelopeRequest xmlns="http://www.imsglobal.org/services/ltiv1p1/xsd/\
imsoms_v1p0"><imsx_POXHeader><imsx_POXRequestHeaderInfo><imsx_version>V1.0\
</imsx_version><imsx_messageIdentifier>message_identifier_id\
</imsx_messageIdentifier></imsx_POXRequestHeaderInfo></imsx_POXHeader>\
<imsx_POXBody><operationRequest><resultRecord><sourcedGUID><sourcedId>\
lis_result_sourcedid</sourcedId></sourcedGUID></resultRecord></operationRequest>\
</imsx_POXBody></imsx_POXEnvelopeRequest>""")

    @staticmethod
    def generate_oauth_request(url_to_sign=None):
        """
        This code generated valid LTI 1.0 basic-lti-launch-request request

        Returns (consumers, method, url, verify_params, params) where
        verify_params includes the oauth_* signature fields and params are
        the raw, unsigned launch parameters.
        """
        consumers = {
            "__consumer_key__": {"secret": "__lti_secret__"}
        }
        url = url_to_sign or 'http://localhost:5000/?'
        method = 'GET'
        # Minimal but realistic basic-lti-launch-request parameter set.
        params = {'resource_link_id': u'edge.edx.org-i4x-MITx-ODL_ENG-'
                                      u'lti-94173d3e79d145fd8ec2e83f15836ac8',
                  'user_id': u'008437924c9852377e8994829aaac7a1',
                  'roles': u'Instructor',
                  'lis_result_sourcedid': u'MITx/ODL_ENG/2014_T1:edge.edx.org-'
                                          u'i4x-MITx-ODL_ENG-lti-'
                                          u'94173d3e79d145fd8ec2e83f15836ac8'
                                          u':008437924c9852377e8994829aaac7a1',
                  'context_id': u'MITx/ODL_ENG/2014_T1',
                  'lti_version': u'LTI-1p0',
                  'launch_presentation_return_url': u'',
                  'lis_outcome_service_url': u'https://edge.edx.org/courses/'
                                             u'MITx/ODL_ENG/2014_T1/xblock/'
                                             u'i4x:;_;_MITx;_ODL_ENG;_lti;_'
                                             u'94173d3e79d145fd8ec2e83f1583'
                                             u'6ac8/handler_noauth'
                                             u'/grade_handler',
                  'lti_message_type': u'basic-lti-launch-request'}
        urlparams = urllib.urlencode(params)
        # Sign the full URL (query string included) with HMAC-SHA1, placing
        # the oauth_* fields in the query string.
        client = oauthlib.oauth1.Client('__consumer_key__',
                                        client_secret='__lti_secret__',
                                        signature_method=oauthlib.oauth1.
                                        SIGNATURE_HMAC,
                                        signature_type=oauthlib.oauth1.
                                        SIGNATURE_TYPE_QUERY)
        signature = client.sign("{}{}".format(url, urlparams))
        url_parts = urlparse(signature[0])
        query_string = parse_qs(url_parts.query, keep_blank_values=True)
        # Flatten parse_qs' single-element value lists into plain strings.
        verify_params = dict()
        for key, value in query_string.iteritems():
            verify_params[key] = value[0]
        return consumers, method, url, verify_params, params
| |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""## Loss operations for use in neural networks.
Note: By default all the losses are collected into the `GraphKeys.LOSSES`
collection.
All of the loss functions take a pair of predictions and ground truth labels,
from which the loss is computed. It is assumed that the shape of both these
tensors is of the form [batch_size, d1, ... dN] where `batch_size` is the number
of samples in the batch and `d1` ... `dN` are the remaining dimensions.
It is common, when training with multiple loss functions, to adjust the relative
strengths of individual losses. This is performed by rescaling the losses via
a `weight` parameter passed to the loss functions. For example, if we were
training with both log_loss and sum_of_squares_loss, and we wished that the
log_loss penalty be twice as severe as the sum_of_squares_loss, we would
implement this as:
  # Explicitly set the weight.
tf.contrib.losses.log(predictions, targets, weight=2.0)
# Uses default weight of 1.0
tf.contrib.losses.sum_of_squares(predictions, targets)
# All the losses are collected into the `GraphKeys.LOSSES` collection.
losses = tf.get_collection(tf.GraphKeys.LOSSES)
While specifying a scalar loss rescales the loss over the entire batch,
we sometimes want to rescale the loss per batch sample. For example, if we have
certain examples that matter more to us to get correctly, we might want to have
a higher loss than other samples whose mistakes matter less. In this case, we
can provide a weight vector of length `batch_size` which results in the loss
for each sample in the batch being scaled by the corresponding weight element.
For example, consider the case of a classification problem where we want to
maximize our accuracy but are especially interested in obtaining high accuracy
for a specific class:
inputs, labels = LoadData(batch_size=3)
logits = MyModelPredictions(inputs)
# Ensures that the loss for examples whose ground truth class is `3` is 5x
# higher than the loss for all other examples.
weight = tf.mul(4, tf.cast(tf.equal(labels, 3), tf.float32)) + 1
onehot_labels = tf.one_hot(labels, num_classes=5)
tf.contrib.losses.softmax_cross_entropy(logits, onehot_labels, weight=weight)
Finally, in certain cases, we may want to specify a different loss for every
single measurable value. For example, if we are performing per-pixel depth
prediction, or per-pixel denoising, a single batch sample has P values where P
is the number of pixels in the image. For many losses, the number of measurable
values matches the number of elements in the predictions and targets tensors.
For others, such as softmax_cross_entropy and cosine_distance, the
loss functions reduce the dimensions of the inputs to produce a tensor of
losses for each measurable value. For example, softmax_cross_entropy takes as
input predictions and labels of dimension [batch_size, num_classes] but the
number of measurable values is [batch_size]. Consequently, when passing a weight
tensor to specify a different loss for every measurable value, the dimension of
the tensor will depend on the loss being used.
For a concrete example, consider the case of per-pixel depth prediction where
certain ground truth depth values are missing (due to sensor noise in the
capture process). In this case, we want to assign zero weight to losses for
these predictions.
# 'depths' that are missing have a value of 0:
images, depths = LoadData(...)
predictions = MyModelPredictions(images)
weight = tf.cast(tf.greater(depths, 0), tf.float32)
loss = tf.contrib.losses.sum_of_squares(predictions, depths, weight)
Note that when using weights for the losses, the final average is computed
by rescaling the losses by the weights and then dividing by the total number of
non-zero samples. For an arbitrary set of weights, this may not necessarily
produce a weighted average. Instead, it simply and transparently rescales the
per-element losses before averaging over the number of observations. For example
if the losses computed by the loss function is an array [4, 1, 2, 3] and the
weights are an array [1, 0.5, 3, 9], then the average loss is:
(4*1 + 1*0.5 + 2*3 + 3*9) / 4
However, with a single loss function and an arbitrary set of weights, one can
still easily create a loss function such that the resulting loss is a
weighted average over the individual prediction errors:
images, labels = LoadData(...)
predictions = MyModelPredictions(images)
weight = MyComplicatedWeightingFunction(labels)
weight = tf.div(weight, tf.size(weight))
loss = tf.contrib.losses.sum_of_squares(predictions, depths, weight)
@@absolute_difference
@@add_loss
@@cosine_distance
@@get_losses
@@get_total_loss
@@log
@@sigmoid_cross_entropy
@@softmax_cross_entropy
@@sum_of_pairwise_squares
@@sum_of_squares
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.util.all_util import make_all
def _scale_losses(losses, weight):
  """Computes the scaled loss.

  Args:
    losses: A `Tensor` of size [batch_size, d1, ... dN].
    weight: A `Tensor` of size [1], [batch_size] or [batch_size, d1, ... dN].
      The trailing dimensions of `losses` are summed away until its rank
      matches that of `weight`; the reduced sums are then multiplied
      element-wise by `weight` and summed to a scalar.  Conceptually this is
      the same as broadcasting (tiling) `weight` to the shape of `losses`,
      multiplying element-wise, and summing the result.

  Returns:
    A scalar tf.float32 `Tensor` whose value represents the sum of the scaled
    `losses`.
  """
  weight_rank = max(0, weight.get_shape().ndims)
  losses_rank = losses.get_shape().ndims
  # Axes of `losses` that `weight` does not cover.
  trailing_axes = list(range(weight_rank, losses_rank))
  partial_sums = math_ops.reduce_sum(losses,
                                     reduction_indices=trailing_axes)
  return math_ops.reduce_sum(math_ops.mul(partial_sums, weight))
def _safe_mean(losses, num_present):
  """Computes a safe mean of the losses.

  Args:
    losses: A tensor whose elements contain individual loss measurements.
    num_present: The number of measurable losses in the tensor.

  Returns:
    A scalar representing the mean of the losses. If `num_present` is zero,
    then zero is returned.
  """
  total_loss = math_ops.reduce_sum(losses)
  # Substitute 1.0 when the divisor is zero so the division itself can never
  # produce NaN/Inf; the outer select then discards that branch anyway.
  safe_divisor = math_ops.select(
      math_ops.equal(num_present, 0), 1.0, num_present)
  mean = math_ops.div(total_loss, safe_divisor)
  return math_ops.select(
      math_ops.greater(num_present, 0),
      mean,
      array_ops.zeros_like(total_loss),
      name="value")
def _compute_weighted_loss(losses, weight):
  """Computes the weighted loss.

  Args:
    losses: A tensor of size [batch_size, d1, ... dN].
    weight: A tensor of size [1] or [batch_size, d1, ... dK] where K < N.

  Returns:
    A scalar `Tensor` that returns the weighted loss.

  Raises:
    ValueError: If the weight shape is not compatible with the losses shape or
      if the number of dimensions (rank) of either losses or weight is missing.
  """
  losses = math_ops.to_float(losses)
  weight = math_ops.to_float(ops.convert_to_tensor(weight))

  # Both static ranks must be known for the reduction logic downstream.
  if losses.get_shape().ndims is None:
    raise ValueError("losses.get_shape().ndims cannot be None")
  if weight.get_shape().ndims is None:
    raise ValueError("weight.get_shape().ndims cannot be None")

  # Mean = (sum of weighted losses) / (number of non-zero-weighted elements).
  mean_loss = _safe_mean(_scale_losses(losses, weight),
                         _num_present(losses, weight))
  ops.add_to_collection(ops.GraphKeys.LOSSES, mean_loss)
  return mean_loss
def _num_present(losses, weight, per_batch=False):
  """Computes the number of elements in the loss function induced by `weight`.

  A given weight tensor induces different numbers of usable elements in the
  `losses` tensor. The `weight` tensor is broadcast across `losses` for all
  possible dimensions. For example, if `losses` is a tensor of dimension
  [4, 5, 6, 3] and weight is a tensor of size [4, 5], then weight is, in effect,
  tiled to match the size of `losses`. Following this effective tile, the total
  number of present elements is the number of non-zero weights.

  Args:
    losses: A tensor of size [batch_size, d1, ... dN].
    weight: A tensor of size [1] or [batch_size, d1, ... dK] where K < N.
    per_batch: Whether to return the number of elements per batch or as a sum
      total.

  Returns:
    The number of present (non-zero) elements in the losses tensor. If
    `per_batch` is True, the value is returned as a tensor of size
    [batch_size]. Otherwise, a single scalar tensor is returned.
  """
  # To ensure that dims of [2, 1] gets mapped to [2,]
  weight = array_ops.squeeze(weight)

  # If the weight is a scalar, its easy to compute:
  if weight.get_shape().ndims == 0:
    # Dynamic batch size: first entry of the shape of `losses`.
    batch_size = array_ops.reshape(array_ops.slice(array_ops.shape(losses),
                                                   [0], [1]), [])
    # Elements per batch entry = total element count / batch size.
    num_per_batch = math_ops.div(math_ops.to_float(array_ops.size(losses)),
                                 math_ops.to_float(batch_size))
    # A zero scalar weight masks out every element.
    num_per_batch = math_ops.select(math_ops.equal(weight, 0),
                                    0.0, num_per_batch)
    # Tile the per-entry count into a [batch_size] vector.
    num_per_batch = math_ops.mul(array_ops.ones(
        array_ops.reshape(batch_size, [1])), num_per_batch)
    return num_per_batch if per_batch else math_ops.reduce_sum(num_per_batch)

  # First, count the number of nonzero weights:
  if weight.get_shape().ndims >= 1:
    # Reduce over every weight axis except the leading (batch) axis.
    reduction_indices = list(range(1, weight.get_shape().ndims))
    num_nonzero_per_batch = math_ops.reduce_sum(
        math_ops.to_float(math_ops.not_equal(weight, 0)),
        reduction_indices=reduction_indices)

  # Next, determine the number of elements that weight would broadcast to:
  # i.e. the product of the trailing `losses` dimensions not covered by
  # `weight`.
  broadcast_dims = array_ops.slice(array_ops.shape(losses),
                                   [weight.get_shape().ndims], [-1])
  num_to_broadcast = math_ops.to_float(math_ops.reduce_prod(broadcast_dims))

  num_per_batch = math_ops.mul(num_nonzero_per_batch, num_to_broadcast)
  return num_per_batch if per_batch else math_ops.reduce_sum(num_per_batch)
def add_loss(loss):
  """Adds an externally defined loss to collection of losses.

  The loss is appended to the `GraphKeys.LOSSES` collection, making it
  visible to `get_losses()` and `get_total_loss()`.

  Args:
    loss: A loss `Tensor`.
  """
  ops.add_to_collection(ops.GraphKeys.LOSSES, loss)
def get_losses(scope=None):
  """Gets the list of loss variables.

  Args:
    scope: an optional scope for filtering the losses to return.

  Returns:
    a list of loss variables.
  """
  # All losses registered via add_loss()/_compute_weighted_loss() live in
  # the LOSSES graph collection.
  collection_key = ops.GraphKeys.LOSSES
  return ops.get_collection(collection_key, scope)
def get_regularization_losses(scope=None):
  """Gets the regularization losses.

  Args:
    scope: an optional scope for filtering the losses to return.

  Returns:
    A list of loss variables.
  """
  # Regularizers register themselves in a separate graph collection.
  collection_key = ops.GraphKeys.REGULARIZATION_LOSSES
  return ops.get_collection(collection_key, scope)
def get_total_loss(add_regularization_losses=True, name="total_loss"):
  """Returns a tensor whose value represents the total loss.

  Notice that the function adds the given losses to the regularization losses.

  Args:
    add_regularization_losses: A boolean indicating whether or not to use the
      regularization losses in the sum.
    name: The name of the returned tensor.

  Returns:
    A `Tensor` whose value represents the total loss.

  Raises:
    ValueError: if `losses` is not iterable.
  """
  all_losses = list(get_losses())
  if add_regularization_losses:
    all_losses.extend(get_regularization_losses())
  return math_ops.add_n(all_losses, name=name)
def absolute_difference(predictions, targets, weight=1.0, scope=None):
  """Adds an Absolute Difference loss to the training procedure.

  `weight` acts as a coefficient for the loss. If a scalar is provided, then
  the loss is simply scaled by the given value. If `weight` is a tensor of
  size [batch_size], then the total loss for each sample of the batch is
  rescaled by the corresponding element in the `weight` vector. If the shape
  of `weight` matches the shape of `predictions`, then the loss of each
  measurable element of `predictions` is scaled by the corresponding value of
  `weight`.

  Args:
    predictions: The predicted outputs.
    targets: The ground truth output tensor, same dimensions as 'predictions'.
    weight: Coefficients for the loss a scalar, a tensor of shape
      [batch_size] or a tensor whose shape matches `predictions`.
    scope: The scope for the operations performed in computing the loss.

  Returns:
    A scalar `Tensor` representing the loss value.

  Raises:
    ValueError: If the shape of `predictions` doesn't match that of `targets`
      or if the shape of `weight` is invalid.
  """
  # BUGFIX: the op_scope name previously read "sum_of_squares_loss"
  # (copy-pasted from sum_of_squares); it now reflects the actual op.
  with ops.op_scope([predictions, targets],
                    scope, "absolute_difference") as scope:
    predictions.get_shape().assert_is_compatible_with(targets.get_shape())
    if weight is None:
      raise ValueError("`weight` cannot be None")
    predictions = math_ops.to_float(predictions)
    targets = math_ops.to_float(targets)
    losses = math_ops.abs(math_ops.sub(predictions, targets))
    return _compute_weighted_loss(losses, weight)
def sigmoid_cross_entropy(logits, multi_class_labels, weight=1.0,
                          label_smoothing=0, scope=None):
  """Creates a cross-entropy loss via tf.nn.sigmoid_cross_entropy_with_logits.

  Args:
    logits: [batch_size, num_classes] logits outputs of the network .
    multi_class_labels: [batch_size, num_classes] target labels in (0, 1).
    weight: Coefficients for the loss. The tensor must be a scalar, a tensor
      of shape [batch_size] or shape [batch_size, num_classes].
    label_smoothing: If greater than 0 then smooth the labels.
    scope: The scope for the operations performed in computing the loss.

  Returns:
    A scalar `Tensor` representing the loss value.
  """
  with ops.op_scope([logits, multi_class_labels],
                    scope, "sigmoid_cross_entropy_loss"):
    # delegate to the shared cross-entropy helper with a sigmoid activation
    return _cross_entropy(
        logits,
        multi_class_labels,
        weight,
        label_smoothing,
        activation_fn=nn.sigmoid_cross_entropy_with_logits)
def softmax_cross_entropy(logits, onehot_labels, weight=1.0,
                          label_smoothing=0, scope=None):
  """Creates a cross-entropy loss via tf.nn.softmax_cross_entropy_with_logits.

  It can scale the loss by a weight factor, and smooth the labels.

  Args:
    logits: [batch_size, num_classes] logits outputs of the network .
    onehot_labels: [batch_size, num_classes] target one_hot_encoded labels.
    weight: Coefficients for the loss. The tensor must be a scalar or a tensor
      of shape [batch_size].
    label_smoothing: If greater than 0 then smooth the labels.
    scope: the scope for the operations performed in computing the loss.

  Returns:
    A scalar `Tensor` representing the loss value.
  """
  with ops.op_scope([logits, onehot_labels],
                    scope, "softmax_cross_entropy_loss"):
    # delegate to the shared cross-entropy helper with a softmax activation
    return _cross_entropy(
        logits,
        onehot_labels,
        weight,
        label_smoothing,
        activation_fn=nn.softmax_cross_entropy_with_logits)
def _cross_entropy(logits, onehot_labels, weight, label_smoothing,
                   activation_fn):
  """Adds a CrossEntropyLoss to the losses collection.

  `weight` acts as a coefficient for the loss. If a scalar is provided,
  then the loss is simply scaled by the given value. If `weight` is a
  tensor of size [`batch_size`], then the loss weights apply to each
  corresponding sample.

  Args:
    logits: [batch_size, num_classes] logits outputs of the network .
    onehot_labels: [batch_size, num_classes] target one_hot_encoded labels.
    weight: Coefficients for the loss. If the activation is SIGMOID, then the
      weight shape must be one of [1], [batch_size] or logits.shape().
      Otherwise, the weight shape must be either [1] or [batch_size].
    label_smoothing: If greater than 0 then smooth the labels.
    activation_fn: The activation function to use. The method must take three
      arguments, the logits, the labels, and an operation name.

  Returns:
    A scalar `Tensor` representing the loss value.

  Raises:
    ValueError: If the shape of `predictions` doesn't match that of `targets`
      or if the shape of `weight` is invalid or if `weight` is None.
  """
  logits.get_shape().assert_is_compatible_with(onehot_labels.get_shape())
  if weight is None:
    raise ValueError("`weight` cannot be None")
  onehot_labels = math_ops.cast(onehot_labels, logits.dtype)
  if label_smoothing > 0:
    # redistribute label_smoothing mass uniformly over all classes
    num_classes = onehot_labels.get_shape()[1].value
    onehot_labels = (onehot_labels * (1.0 - label_smoothing)
                     + label_smoothing / num_classes)
  losses = activation_fn(logits, onehot_labels, name="xentropy")
  return _compute_weighted_loss(losses, weight)
def log(predictions, targets, weight=1.0, epsilon=1e-7, scope=None):
  """Adds a Log Loss term to the training procedure.

  `weight` acts as a coefficient for the loss. If a scalar is provided, then
  the loss is simply scaled by the given value. If `weight` is a tensor of
  size [batch_size], then the total loss for each sample of the batch is
  rescaled by the corresponding element in the `weight` vector. If the shape
  of `weight` matches the shape of `predictions`, then the loss of each
  measurable element of `predictions` is scaled by the corresponding value of
  `weight`.

  Args:
    predictions: The predicted outputs.
    targets: The ground truth output tensor, same dimensions as 'predictions'.
    weight: Coefficients for the loss a scalar, a tensor of shape
      [batch_size] or a tensor whose shape matches `predictions`.
    epsilon: A small increment to add to avoid taking a log of zero.
    scope: The scope for the operations performed in computing the loss.

  Returns:
    A scalar `Tensor` representing the loss value.

  Raises:
    ValueError: If the shape of `predictions` doesn't match that of `targets`
      or if the shape of `weight` is invalid.
  """
  with ops.op_scope([predictions, targets],
                    scope, "log_loss") as scope:
    predictions.get_shape().assert_is_compatible_with(targets.get_shape())
    if weight is None:
      raise ValueError("`weight` cannot be None")
    predictions = math_ops.to_float(predictions)
    targets = math_ops.to_float(targets)
    # binary cross entropy: -t*log(p) - (1-t)*log(1-p), epsilon-stabilized
    pos_term = math_ops.mul(targets,
                            math_ops.log(predictions + epsilon))
    neg_term = math_ops.mul((1 - targets),
                            math_ops.log(1 - predictions + epsilon))
    losses = -pos_term - neg_term
    return _compute_weighted_loss(losses, weight)
def sum_of_squares(predictions, targets, weight=1.0, scope=None):
  """Adds a Sum-of-Squares loss to the training procedure.

  `weight` acts as a coefficient for the loss. If a scalar is provided, then
  the loss is simply scaled by the given value. If `weight` is a tensor of
  size [batch_size], then the total loss for each sample of the batch is
  rescaled by the corresponding element in the `weight` vector. If the shape
  of `weight` matches the shape of `predictions`, then the loss of each
  measurable element of `predictions` is scaled by the corresponding value of
  `weight`.

  Args:
    predictions: The predicted outputs.
    targets: The ground truth output tensor, same dimensions as 'predictions'.
    weight: Coefficients for the loss a scalar, a tensor of shape
      [batch_size] or a tensor whose shape matches `predictions`.
    scope: The scope for the operations performed in computing the loss.

  Returns:
    A scalar `Tensor` representing the loss value.

  Raises:
    ValueError: If the shape of `predictions` doesn't match that of `targets`
      or if the shape of `weight` is invalid.
  """
  with ops.op_scope([predictions, targets],
                    scope, "sum_of_squares_loss") as scope:
    predictions.get_shape().assert_is_compatible_with(targets.get_shape())
    if weight is None:
      raise ValueError("`weight` cannot be None")
    predictions = math_ops.to_float(predictions)
    targets = math_ops.to_float(targets)
    diffs = math_ops.sub(predictions, targets)
    losses = math_ops.square(diffs)
    return _compute_weighted_loss(losses, weight)
def sum_of_pairwise_squares(predictions, targets, weight=1.0, scope=None):
  """Adds a pairwise-errors-squared loss to the training procedure.
  Unlike the sum_of_squares loss, which is a measure of the differences between
  corresponding elements of `predictions` and `targets`, sum_of_pairwise_squares
  is a measure of the differences between pairs of corresponding elements of
  `predictions` and `targets`.
  For example, if `targets`=[a, b, c] and `predictions`=[x, y, z], there are
  three pairs of differences are summed to compute the loss:
    loss = [ ((a-b) - (x-y)).^2 + ((a-c) - (x-z)).^2 + ((b-c) - (y-z)).^2 ] / 3
  Note that since the inputs are of size [batch_size, d0, ... dN], the
  corresponding pairs are computed within each batch sample but not across
  samples within a batch. For example, if `predictions` represents a batch of
  16 grayscale images of dimenion [batch_size, 100, 200], then the set of pairs
  is drawn from each image, but not across images.
  `weight` acts as a coefficient for the loss. If a scalar is provided, then the
  loss is simply scaled by the given value. If `weight` is a tensor of size
  [batch_size], then the total loss for each sample of the batch is rescaled
  by the corresponding element in the `weight` vector.
  Args:
    predictions: The predicted outputs, a tensor of size [batch_size, d0, .. dN]
      where N+1 is the total number of dimensions in `predictions`.
    targets: The ground truth output tensor, whose shape must match the shape of
      the `predictions` tensor.
    weight: Coefficients for the loss a scalar, a tensor of shape [batch_size]
      or a tensor whose shape matches `predictions`.
    scope: The scope for the operations performed in computing the loss.
  Returns:
    A scalar `Tensor` representing the loss value.
  Raises:
    ValueError: If the shape of `predictions` doesn't match that of `targets` or
      if the shape of `weight` is invalid.
  """
  with ops.op_scope([predictions, targets],
                    scope, "sum_of_pairwise_squares_loss") as scope:
    predictions.get_shape().assert_is_compatible_with(targets.get_shape())
    if weight is None:
      raise ValueError("`weight` cannot be None")
    predictions = math_ops.to_float(predictions)
    targets = math_ops.to_float(targets)
    weight = math_ops.to_float(ops.convert_to_tensor(weight))
    # per-element differences d = predictions - targets
    diffs = math_ops.sub(predictions, targets)
    # Need to verify here since the function doesn't use _compute_weighted_loss
    if diffs.get_shape().ndims is None:
      raise ValueError("diffs.get_shape().ndims cannot be None")
    if weight.get_shape().ndims is None:
      raise ValueError("weight.get_shape().ndims cannot be None")
    # reduce over every non-batch axis so each term below is per-sample
    reduction_indices = list(range(1, diffs.get_shape().ndims))
    sum_squares_diff_per_batch = math_ops.reduce_sum(
        math_ops.square(diffs),
        reduction_indices=reduction_indices)
    num_present_per_batch = _num_present(diffs, weight, per_batch=True)
    # the pairwise sum in the docstring formula expands into
    # 2*mean(d^2) - 2*mean(d)^2; term1 and term2 are those two pieces
    term1 = 2.0 * math_ops.div(sum_squares_diff_per_batch,
                               num_present_per_batch)
    sum_diff = math_ops.reduce_sum(diffs, reduction_indices=reduction_indices)
    term2 = 2.0 * math_ops.div(math_ops.square(sum_diff),
                               math_ops.square(num_present_per_batch))
    loss = _scale_losses(term1 - term2, weight)
    # guard against division artifacts: emit exactly 0 when nothing is present
    mean_loss = math_ops.select(math_ops.reduce_sum(num_present_per_batch) > 0,
                                loss,
                                array_ops.zeros_like(loss),
                                name="value")
    ops.add_to_collection(ops.GraphKeys.LOSSES, mean_loss)
    return mean_loss
def cosine_distance(predictions, targets, dim, weight=1.0, scope=None):
  """Adds a cosine-distance loss to the training procedure.

  Note that the function assumes that the predictions and targets are already
  unit-normalized.

  Args:
    predictions: An arbitrary matrix.
    targets: A `Tensor` whose shape matches 'predictions'
    dim: The dimension along which the cosine distance is computed.
    weight: Coefficients for the loss a scalar, a tensor of shape
      [batch_size] or a tensor whose shape matches `predictions`.
    scope: The scope for the operations performed in computing the loss.

  Returns:
    A scalar `Tensor` representing the loss value.

  Raises:
    ValueError: If predictions.shape doesn't match targets.shape, if the ignore
      mask is provided and its shape doesn't match targets.shape or if
      the ignore mask is not boolean valued.
  """
  with ops.op_scope([predictions, targets],
                    scope, "cosine_distance_loss") as scope:
    predictions.get_shape().assert_is_compatible_with(targets.get_shape())
    if weight is None:
      raise ValueError("`weight` cannot be None")
    predictions = math_ops.to_float(predictions)
    targets = math_ops.to_float(targets)
    # cosine distance = 1 - <predictions, targets> along `dim`
    # (inputs are assumed unit-normalized, see docstring)
    cos_similarity = math_ops.reduce_sum(
        math_ops.mul(predictions, targets), reduction_indices=[dim])
    losses = 1 - cos_similarity
    return _compute_weighted_loss(losses, weight)
__all__ = make_all(__name__)
| |
"""Definition of BRDF functions"""
import numpy as np
import sympy as sp
from functools import partial, update_wrapper, lru_cache
from .scatter import Scatter
from .rtplots import polarplot, hemreflect
class Surface(Scatter):
    """
    Base class for surface scattering (BRDF) definitions.

    Subclasses provide a sympy expression `_func` (and `legcoefs` /
    `ncoefs` for the Legendre-expansion); this class supplies numeric
    evaluation, plotting shortcuts and the geometry-aware expansions.
    """

    def __init__(self, **kwargs):
        # set scattering angle generalization-matrix to [1,1,1] if it is not
        # explicitly provided by the chosen class.
        # this results in a peak in specular-direction which is suitable
        # for describing surface BRDF's
        self.a = getattr(self, "a", [1.0, 1.0, 1.0])
        # normalization factor used to scale the BRDF
        self.NormBRDF = kwargs.pop("NormBRDF", 1.0)
        # quick way for visualizing the functions as polarplot
        self.polarplot = partial(polarplot, X=self)
        update_wrapper(self.polarplot, polarplot)
        # quick way for visualizing the associated hemispherical reflectance
        self.hemreflect = partial(hemreflect, SRF=self)
        update_wrapper(self.hemreflect, hemreflect)

    # NOTE(review): lru_cache on an instance method keeps `self` alive for
    # the lifetime of the cache (flake8-bugbear B019); left unchanged here.
    @lru_cache()
    def _lambda_func(self, *args):
        """Lambdify `self._func` with the given extra parameter names."""
        # define sympy objects
        theta_0 = sp.Symbol("theta_0")
        theta_ex = sp.Symbol("theta_ex")
        phi_0 = sp.Symbol("phi_0")
        phi_ex = sp.Symbol("phi_ex")
        # replace arguments and evaluate expression
        # sp.lambdify is used to allow array-inputs
        args = (theta_0, theta_ex, phi_0, phi_ex) + tuple(args)
        pfunc = sp.lambdify(args, self._func, modules=["numpy", "sympy"])
        return pfunc

    def brdf(self, t_0, t_ex, p_0, p_ex, param_dict=None):
        """
        Calculate numerical value of the BRDF for chosen
        incidence- and exit angles.

        Parameters
        ----------
        t_0 : array_like(float)
            array of incident zenith-angles in radians
        p_0 : array_like(float)
            array of incident azimuth-angles in radians
        t_ex : array_like(float)
            array of exit zenith-angles in radians
        p_ex : array_like(float)
            array of exit azimuth-angles in radians
        param_dict : dict, optional
            additional parameter-values passed to the lambdified function

        Returns
        -------
        array_like(float)
            Numerical value of the BRDF
        """
        # avoid a shared mutable default argument
        if param_dict is None:
            param_dict = {}
        # if an explicit numeric function is provided, use it, otherwise
        # lambdify the available sympy-function
        if hasattr(self, "_func_numeric"):
            brdffunc = self._func_numeric
        else:
            brdffunc = self._lambda_func(*param_dict.keys())
        # in case _func is a constant, lambdify will produce a function with
        # scalar output which is not suitable for further processing
        # (this happens e.g. for the Isotropic brdf).
        # The following query is implemented to ensure correct array-output:
        # TODO this is not a proper test !
        if not isinstance(
            brdffunc(
                np.array([0.1, 0.2, 0.3]),
                0.1,
                0.1,
                0.1,
                **{key: 0.12 for key in param_dict.keys()}
            ),
            np.ndarray,
        ):
            brdffunc = np.vectorize(brdffunc)
        return brdffunc(t_0, t_ex, p_0, p_ex, **param_dict)

    def legexpansion(self, t_0, t_ex, p_0, p_ex, geometry):
        """
        Definition of the legendre-expansion of the BRDF

        .. note::
            The output represents the legendre-expansion as needed to
            compute the fn-coefficients for the chosen geometry!
            (http://rt1.readthedocs.io/en/latest/theory.html#equation-fn_coef_definition)

            The incidence-angle argument of the legexpansion() is different
            to the documentation due to the direct definition of the argument
            as the zenith-angle (t_0) instead of the incidence-angle
            defined in a spherical coordinate system (t_i).
            They are related via: t_i = pi - t_0

        Parameters
        ----------
        t_0 : array_like(float)
            array of incident zenith-angles in radians
        p_0 : array_like(float)
            array of incident azimuth-angles in radians
        t_ex : array_like(float)
            array of exit zenith-angles in radians
        p_ex : array_like(float)
            array of exit azimuth-angles in radians
        geometry : str
            4 character string specifying which components of the angles should
            be fixed or variable. This is done to significantly speed up the
            evaluation-process of the fn-coefficient generation

            The 4 characters represent in order the properties of:
            t_0, t_ex, p_0, p_ex

            - 'f' indicates that the angle is treated 'fixed'
              (i.e. as a numerical constant)
            - 'v' indicates that the angle is treated 'variable'
              (i.e. as a sympy-variable)
            - Passing geometry = 'mono' indicates a monstatic geometry
              (i.e.: t_ex = t_0, p_ex = p_0 + pi)
              If monostatic geometry is used, the input-values of t_ex and p_ex
              have no effect on the calculations!

            For detailed information on the specification of the
            geometry-parameter, please have a look at the
            "Evaluation Geometries" section of the documentation
            (http://rt1.readthedocs.io/en/latest/model_specification.html#evaluation-geometries)

        Returns
        -------
        sympy - expression
            The legendre - expansion of the BRDF for the chosen geometry
        """
        assert self.ncoefs > 0

        theta_s = sp.Symbol("theta_s")
        phi_s = sp.Symbol("phi_s")
        NBRDF = self.ncoefs
        n = sp.Symbol("n")
        # define sympy variables based on chosen geometry
        if geometry == "mono":
            assert len(np.unique(p_0)) == 1, (
                "p_0 must contain only a "
                + "single unique value for monostatic geometry"
            )
            theta_0 = sp.Symbol("theta_0")
            theta_ex = theta_0
            phi_0 = np.unique(p_0)[0]
            phi_ex = np.unique(p_0)[0] + sp.pi
        else:
            if geometry[0] == "v":
                theta_0 = sp.Symbol("theta_0")
            elif geometry[0] == "f":
                assert len(np.unique(t_0)) == 1, (
                    "t_0 must contain only a "
                    + "single unique value for geometry[0] == f"
                )
                theta_0 = np.unique(t_0)[0]
            else:
                raise AssertionError("wrong choice of theta_0 geometry")

            if geometry[1] == "v":
                theta_ex = sp.Symbol("theta_ex")
            elif geometry[1] == "f":
                assert len(np.unique(t_ex)) == 1, (
                    "t_ex must contain only"
                    + " a single unique value for geometry[1] == f"
                )
                theta_ex = np.unique(t_ex)[0]
            else:
                raise AssertionError("wrong choice of theta_ex geometry")

            if geometry[2] == "v":
                phi_0 = sp.Symbol("phi_0")
            elif geometry[2] == "f":
                assert len(np.unique(p_0)) == 1, (
                    "p_0 must contain only"
                    + " a single unique value for geometry[2] == f"
                )
                phi_0 = np.unique(p_0)[0]
            else:
                raise AssertionError("wrong choice of phi_0 geometry")

            if geometry[3] == "v":
                phi_ex = sp.Symbol("phi_ex")
            elif geometry[3] == "f":
                # BUGFIX: previously asserted on p_0 although p_ex is the
                # quantity that must be unique (and is used below)
                assert len(np.unique(p_ex)) == 1, (
                    "p_ex must contain only"
                    + " a single unique value for geometry[3] == f"
                )
                phi_ex = np.unique(p_ex)[0]
            else:
                raise AssertionError("wrong choice of phi_ex geometry")

        return sp.Sum(
            self.legcoefs
            * sp.legendre(n, self.scat_angle(theta_s, theta_ex, phi_s, phi_ex, self.a)),
            (n, 0, NBRDF - 1),
        )

    def brdf_theta_diff(
        self,
        t_0,
        t_ex,
        p_0,
        p_ex,
        geometry,
        param_dict=None,
        return_symbolic=False,
        n=1,
    ):
        """
        Calculation of the derivative of the BRDF with respect to
        the scattering-angles t_ex

        Parameters
        ----------
        t_0 : array_like(float)
            array of incident zenith-angles in radians
        p_0 : array_like(float)
            array of incident azimuth-angles in radians
        t_ex : array_like(float)
            array of exit zenith-angles in radians
        p_ex : array_like(float)
            array of exit azimuth-angles in radians
        geometry : str
            4 character string specifying which components of the angles should
            be fixed or variable ('f'/'v' per angle, or 'mono'); see
            legexpansion() for the full description.
        param_dict : dict, optional
            additional parameter-values passed to the lambdified function
        return_symbolic : bool (default = False)
            indicator if symbolic result
            should be returned
        n : int (default = 1)
            order of derivatives (d^n / d_theta^n)

        Returns
        -------
        sympy - expression
            The derivative of the BRDF with respect to the excident angle
            t_ex for the chosen geometry
        """
        # avoid a shared mutable default argument
        if param_dict is None:
            param_dict = {}
        # define sympy variables based on chosen geometry
        if geometry == "mono":
            assert len(np.unique(p_0)) == 1, (
                "p_0 must contain only a "
                + "single unique value for monostatic geometry"
            )
            theta_0 = sp.Symbol("theta_0")
            theta_ex = theta_0
            phi_0 = np.unique(p_0)[0]
            phi_ex = np.unique(p_0)[0] + sp.pi
            t_ex = t_0
            p_ex = p_0 + np.pi
        else:
            if geometry[0] == "v":
                theta_0 = sp.Symbol("theta_0")
            elif geometry[0] == "f":
                assert len(np.unique(t_0)) == 1, (
                    "t_0 must contain only a "
                    + "single unique value for geometry[0] == f"
                )
                theta_0 = np.unique(t_0)[0]
            else:
                raise AssertionError("wrong choice of theta_0 geometry")

            if geometry[1] == "v":
                theta_ex = sp.Symbol("theta_ex")
            elif geometry[1] == "f":
                assert len(np.unique(t_ex)) == 1, (
                    "t_ex must contain only"
                    + " a single unique value for geometry[1] == f"
                )
                theta_ex = np.unique(t_ex)[0]
            else:
                raise AssertionError("wrong choice of theta_ex geometry")

            if geometry[2] == "v":
                phi_0 = sp.Symbol("phi_0")
            elif geometry[2] == "f":
                assert len(np.unique(p_0)) == 1, (
                    "p_0 must contain only"
                    + " a single unique value for geometry[2] == f"
                )
                phi_0 = np.unique(p_0)[0]
            else:
                raise AssertionError("wrong choice of phi_0 geometry")

            if geometry[3] == "v":
                phi_ex = sp.Symbol("phi_ex")
            elif geometry[3] == "f":
                # BUGFIX: previously asserted on p_0 although p_ex is the
                # quantity that must be unique (and is used below)
                assert len(np.unique(p_ex)) == 1, (
                    "p_ex must contain only"
                    + " a single unique value for geometry[3] == f"
                )
                phi_ex = np.unique(p_ex)[0]
            else:
                raise AssertionError("wrong choice of phi_ex geometry")

        if geometry[1] == "f":
            # theta_ex is a fixed number -> derivative w.r.t. it is zero
            dfunc_dtheta_0 = 0.0
        else:
            func = self._func.xreplace(
                {
                    sp.Symbol("theta_0"): theta_0,
                    sp.Symbol("theta_ex"): theta_ex,
                    sp.Symbol("phi_0"): phi_0,
                    sp.Symbol("phi_ex"): phi_ex,
                }
            )
            dfunc_dtheta_0 = sp.diff(func, theta_ex, n)

        if return_symbolic is True:
            return dfunc_dtheta_0
        else:
            args = (
                sp.Symbol("theta_0"),
                sp.Symbol("theta_ex"),
                sp.Symbol("phi_0"),
                sp.Symbol("phi_ex"),
            ) + tuple(param_dict.keys())
            brdffunc = sp.lambdify(args, dfunc_dtheta_0, modules=["numpy", "sympy"])
            # in case _func is a constant, lambdify will produce a function
            # with scalar output which is not suitable for further processing
            # (this happens e.g. for the Isotropic brdf).
            # The following query is implemented to ensure correct array-output
            # TODO this is not a proper test !
            if not isinstance(
                brdffunc(
                    np.array([0.1, 0.2, 0.3]),
                    0.1,
                    0.1,
                    0.1,
                    **{key: 0.12 for key in param_dict.keys()}
                ),
                np.ndarray,
            ):
                brdffunc = np.vectorize(brdffunc)
            return brdffunc(t_0, t_ex, p_0, p_ex, **param_dict)
class LinCombSRF(Surface):
    """
    Class to generate linear-combinations of surface-class elements

    For details please look at the documentation
    (http://rt1.readthedocs.io/en/latest/model_specification.html#linear-combination-of-scattering-distributions)

    Parameters
    ----------
    SRFchoices : [ [float, Surface] , [float, Surface] , ...]
        A list that contains the individual BRDF's
        (Surface-objects) and the associated weighting-factors
        (floats) for the linear-combination.
    NormBRDF : scalar(float)
        Hemispherical reflectance of the combined BRDF
        ATTENTION: NormBRDF-values provided within the SRFchoices-list
        will not be considered!
    """

    name = "LinCombSRF"

    def __init__(self, SRFchoices=None, **kwargs):
        super(LinCombSRF, self).__init__(**kwargs)
        self.SRFchoices = SRFchoices
        self._set_legexpansion()
        # build a descriptive name from the chosen weights and components
        name = "LinCombSRF"
        for c in SRFchoices:
            name += f"_({c[0]}, {c[1].name})"
        self.name = name

    @property
    @lru_cache()
    def _func(self):
        """define phase function as sympy object for later evaluation"""
        return self._SRFcombiner()._func

    def _set_legexpansion(self):
        """set legexpansion to the combined legexpansion"""
        self.ncoefs = self._SRFcombiner().ncoefs
        self.legexpansion = self._SRFcombiner().legexpansion

    def _SRFcombiner(self):
        """
        Returns a Surface-class element based on an input-array of
        Surface-class elements.

        The array must be shaped in the form:
            SRFchoices = [ [ weighting-factor , Surface-class element ],
                           [ weighting-factor , Surface-class element ],
                           ...]

        ATTENTION: the .legexpansion()-function of the combined surface-class
        element is no longer related to its legcoefs (which are set to 0.)
        since the individual legexpansions of the combined surface-
        class elements are possibly evaluated with a different
        a-parameter of the generalized scattering angle! This does
        not affect any calculations, since the evaluation is
        only based on the use of the .legexpansion()-function.
        """

        class BRDFfunction(Surface):
            """
            dummy-Surface-class object used to generate
            linear-combinations of BRDF-functions
            """

            def __init__(self, **kwargs):
                super(BRDFfunction, self).__init__(**kwargs)
                self._func = 0.0
                self.legcoefs = 0.0

        # initialize a combined phase-function class element
        # BUGFIX: the keyword was previously misspelled "NormBRDf", which
        # Surface.__init__ silently ignored (NormBRDF stayed at 1.0)
        SRFcomb = BRDFfunction(NormBRDF=self.NormBRDF)
        # set ncoefs of the combined volume-class element to the maximum
        # number of coefficients within the chosen functions.
        # (this is necessary for correct evaluation of fn-coefficients)
        SRFcomb.ncoefs = max([SRF[1].ncoefs for SRF in self.SRFchoices])

        # evaluate index of BRDF-functions that have equal a parameter
        equals = [
            np.where(
                (np.array([VV[1].a for VV in self.SRFchoices]) == tuple(V[1].a)).all(
                    axis=1
                )
            )[0]
            for V in self.SRFchoices
        ]
        # find phase functions where a-parameter is equal
        equal_a = list({tuple(row) for row in equals})

        # evaluation of combined expansion in legendre-polynomials
        dummylegexpansion = []
        for i in range(0, len(equal_a)):
            SRFdummy = BRDFfunction()
            # select SRF choices where a parameter is equal
            SRFequal = np.take(self.SRFchoices, equal_a[i], axis=0)
            # set ncoefs to the maximum number within the choices
            # with equal a-parameter
            SRFdummy.ncoefs = max([SRF[1].ncoefs for SRF in SRFequal])
            # loop over phase-functions with equal a-parameter
            for SRF in SRFequal:
                # set parameters based on chosen phase-functions and evaluate
                # combined legendre-expansion
                SRFdummy.a = SRF[1].a
                SRFdummy.NormBRDF = SRF[1].NormBRDF
                SRFdummy._func = SRFdummy._func + SRF[1]._func * SRF[0]
                SRFdummy.legcoefs += SRF[1].legcoefs * SRF[0]
            dummylegexpansion = dummylegexpansion + [SRFdummy.legexpansion]

        # combine legendre-expansions for each a-parameter based on given
        # combined legendre-coefficients
        SRFcomb.legexpansion = lambda t_0, t_ex, p_0, p_ex, geometry: np.sum(
            [lexp(t_0, t_ex, p_0, p_ex, geometry) for lexp in dummylegexpansion]
        )

        for SRF in self.SRFchoices:
            # set parameters based on chosen classes to define analytic
            # function representation
            SRFcomb._func = SRFcomb._func + SRF[1]._func * SRF[0]
        return SRFcomb
class Isotropic(Surface):
    """
    Define an isotropic surface brdf

    Parameters
    ----------
    NormBRDF : float, optional (default = 1.)
        Normalization-factor used to scale the BRDF,
        i.e. BRDF = NormBRDF * f(t_0,p_0,t_ex,p_ex)
    """
    name = "Isotropic"
    def __init__(self, **kwargs):
        super(Isotropic, self).__init__(**kwargs)
    @property
    def ncoefs(self):
        # make ncoefs a property since it is fixed and should not be changed
        # only 1 coefficient is needed to correctly represent
        # the Isotropic scattering function
        return 1
    # NOTE(review): lru_cache on a method holds a reference to `self`
    # for the cache lifetime (flake8-bugbear B019)
    @property
    @lru_cache()
    def legcoefs(self):
        """Legendre-coefficients: only the n=0 term is non-zero (1/pi)."""
        n = sp.Symbol("n")
        return (1.0 / sp.pi) * sp.KroneckerDelta(0, n)
    @property
    @lru_cache()
    def _func(self):
        """define phase function as sympy object for later evaluation"""
        # constant BRDF value of 1/pi (angle-independent)
        return 1.0 / sp.pi
class CosineLobe(Surface):
    """
    Define a (possibly generalized) cosine-lobe of power i.

    Parameters
    ----------
    i : scalar(int)
        Power of the cosine lobe, i.e. cos(x)^i
    ncoefs : scalar(int)
        Number of coefficients used within the Legendre-approximation
    a : [ float , float , float ] , optional (default = [1.,1.,1.])
        generalized scattering angle parameters used for defining the
        scat_angle() of the BRDF
        (http://rt1.readthedocs.io/en/latest/theory.html#equation-general_scat_angle)
    NormBRDF : float, optional (default = 1.)
        Normalization-factor used to scale the BRDF,
        i.e. BRDF = NormBRDF * f(t_0,p_0,t_ex,p_ex)
    """

    name = "CosineLobe"

    def __init__(self, ncoefs=None, i=None, a=None, **kwargs):
        # avoid a shared mutable default argument for `a`
        if a is None:
            a = [1.0, 1.0, 1.0]
        assert ncoefs is not None, (
            "Error: number of coefficients " + "needs to be provided!"
        )
        assert i is not None, "Error: Cosine lobe power needs to be specified!"
        super(CosineLobe, self).__init__(**kwargs)
        assert ncoefs > 0
        self.i = i
        assert isinstance(self.i, int), (
            "Error: Cosine lobe power needs " + "to be an integer!"
        )
        # NOTE: i == 0 is allowed, hence "greater than or equal to"
        assert i >= 0, "ERROR: Power of Cosine-Lobe needs to be greater than or equal to 0"
        self.a = a
        assert isinstance(self.a, list), (
            "Error: Generalization-parameter " + "needs to be a list"
        )
        assert len(a) == 3, (
            "Error: Generalization-parameter list must " + "contain 3 values"
        )
        # isinstance (instead of type(x) == float) also accepts
        # float subclasses such as np.float64
        assert all(isinstance(x, float) for x in a), (
            "Error: Generalization-"
            + "parameter array must "
            + "contain only floating-"
            + "point values!"
        )
        self.ncoefs = int(ncoefs)

    @property
    @lru_cache()
    def legcoefs(self):
        """Legendre-coefficients of the cosine-lobe (analytic, eq. A13)."""
        n = sp.Symbol("n")
        # A13 The Rational(is needed as otherwise a Gamma function
        # Pole error is issued)
        return (
            1.0
            / sp.pi
            * (
                (
                    2 ** (-2 - self.i)
                    * (1 + 2 * n)
                    * sp.sqrt(sp.pi)
                    * sp.gamma(1 + self.i)
                )
                / (
                    sp.gamma((2 - n + self.i) * sp.Rational(1, 2))
                    * sp.gamma((3 + n + self.i) * sp.Rational(1, 2))
                )
            )
        )

    @property
    @lru_cache()
    def _func(self):
        """define phase function as sympy object for later evaluation"""
        theta_0 = sp.Symbol("theta_0")
        theta_ex = sp.Symbol("theta_ex")
        phi_0 = sp.Symbol("phi_0")
        phi_ex = sp.Symbol("phi_ex")
        # self._func = sp.Max(self.scat_angle(theta_i,
        #                                     theta_s,
        #                                     phi_i,
        #                                     phi_s,
        #                                     a=self.a), 0.)**self.i  # eq. A13
        # alternative formulation avoiding the use of sp.Max()
        # (this is done because sp.lambdify('x',sp.Max(x), "numpy")
        # generates a function that can not interpret array inputs.)
        x = self.scat_angle(theta_0, theta_ex, phi_0, phi_ex, a=self.a)
        return 1.0 / sp.pi * (x * (1.0 + sp.sign(x)) / 2.0) ** self.i
class HenyeyGreenstein(Surface):
    """
    Define a HenyeyGreenstein scattering function for use as BRDF
    approximation function.

    Parameters
    ----------
    t : scalar(float)
        Asymmetry parameter of the Henyey-Greenstein function
    ncoefs : scalar(int)
        Number of coefficients used within the Legendre-approximation
    a : [ float , float , float ] , optional (default = [1.,1.,1.])
        generalized scattering angle parameters used for defining the
        scat_angle() of the BRDF
        (http://rt1.readthedocs.io/en/latest/theory.html#equation-general_scat_angle)
    NormBRDF : float, optional (default = 1.)
        Normalization-factor used to scale the BRDF,
        i.e. BRDF = NormBRDF * f(t_0,p_0,t_ex,p_ex)
    """
    name = "HenyeyGreenstein"
    # NOTE(review): mutable default argument `a=[1.0, 1.0, 1.0]` is shared
    # across calls; it is only assigned (not mutated) here
    def __init__(self, t=None, ncoefs=None, a=[1.0, 1.0, 1.0], **kwargs):
        assert t is not None, "t parameter needs to be provided!"
        assert ncoefs is not None, "Number of coeff. needs to be specified"
        super(HenyeyGreenstein, self).__init__(**kwargs)
        self.t = t
        self.ncoefs = ncoefs
        assert self.ncoefs > 0
        self.a = a
        assert isinstance(self.a, list), (
            "Error: Generalization-parameter " + "needs to be a list"
        )
        assert len(a) == 3, (
            "Error: Generalization-parameter list must " + "contain 3 values"
        )
    @property
    @lru_cache()
    def _func(self):
        """define phase function as sympy object for later evaluation"""
        theta_0 = sp.Symbol("theta_0")
        theta_ex = sp.Symbol("theta_ex")
        phi_0 = sp.Symbol("phi_0")
        phi_ex = sp.Symbol("phi_ex")
        # Henyey-Greenstein form evaluated at the generalized scattering angle
        x = self.scat_angle(theta_0, theta_ex, phi_0, phi_ex, a=self.a)
        return (
            1.0
            * (1.0 - self.t ** 2.0)
            / ((sp.pi) * (1.0 + self.t ** 2.0 - 2.0 * self.t * x) ** 1.5)
        )
    @property
    @lru_cache()
    def legcoefs(self):
        """Legendre-coefficients of the Henyey-Greenstein function."""
        n = sp.Symbol("n")
        # closed-form coefficients: (2n+1) * t^n / pi
        return 1.0 * (1.0 / (sp.pi)) * (2.0 * n + 1) * self.t ** n
class HG_nadirnorm(Surface):
    """
    Define a HenyeyGreenstein scattering function for use as BRDF
    approximation function, normalized via the nadir hemispherical
    reflectance.

    Parameters
    ----------
    t : scalar(float)
        Asymmetry parameter of the Henyey-Greenstein function
    ncoefs : scalar(int)
        Number of coefficients used within the Legendre-approximation
    a : [ float , float , float ] , optional (default = [1.,1.,1.])
        generalized scattering angle parameters used for defining the
        scat_angle() of the BRDF
        (http://rt1.readthedocs.io/en/latest/theory.html#equation-general_scat_angle)
    NormBRDF : float, optional (default = 1.)
        Normalization-factor used to scale the BRDF,
        i.e. BRDF = NormBRDF * f(t_0,p_0,t_ex,p_ex)
    """

    name = "HG_nadirnorm"

    def __init__(self, t=None, ncoefs=None, a=None, **kwargs):
        assert t is not None, "t parameter needs to be provided!"
        assert ncoefs is not None, "Number of coeffs needs to be specified"
        super(HG_nadirnorm, self).__init__(**kwargs)
        self.t = t
        self.ncoefs = ncoefs
        assert self.ncoefs > 0
        # BUGFIX: default to None instead of a shared mutable list --
        # a list default is evaluated once and shared across all calls.
        self.a = [1.0, 1.0, 1.0] if a is None else a
        assert isinstance(self.a, list), (
            "Error: Generalization-parameter " + "needs to be a list"
        )
        assert len(self.a) == 3, (
            "Error: Generalization-parameter list must " + "contain 3 values"
        )

    @property
    @lru_cache()
    def _nadir_hemreflect(self):
        """Symbolic nadir hemispherical reflectance used as the
        normalization factor (shared by _func and legcoefs; previously
        duplicated in both)."""
        return 4 * (
            (1.0 - self.t ** 2.0)
            * (
                1.0
                - self.t * (-self.t + self.a[0])
                - sp.sqrt(
                    (1 + self.t ** 2 - 2 * self.a[0] * self.t)
                    * (1 + self.t ** 2)
                )
            )
            / (
                2.0
                * self.a[0] ** 2.0
                * self.t ** 2.0
                * sp.sqrt(1.0 + self.t ** 2.0 - 2.0 * self.a[0] * self.t)
            )
        )

    @property
    @lru_cache()
    def _func(self):
        """define phase function as sympy object for later evaluation"""
        theta_0 = sp.Symbol("theta_0")
        theta_ex = sp.Symbol("theta_ex")
        phi_0 = sp.Symbol("phi_0")
        phi_ex = sp.Symbol("phi_ex")
        x = self.scat_angle(theta_0, theta_ex, phi_0, phi_ex, a=self.a)
        func = (1.0 / self._nadir_hemreflect) * (
            (1.0 - self.t ** 2.0)
            / ((sp.pi) * (1.0 + self.t ** 2.0 - 2.0 * self.t * x) ** 1.5)
        )
        return func

    def _func_numeric(self, theta_0, theta_ex, phi_0, phi_ex, **kwargs):
        """direct numeric version of _func (numpy instead of sympy)"""
        if isinstance(self.t, sp.Symbol):
            # symbolic t: the concrete value must be supplied by keyword
            t = kwargs[str(self.t)]
        else:
            t = self.t
        x = self._scat_angle_numeric(theta_0, theta_ex, phi_0, phi_ex, a=self.a)
        nadir_hemreflect = 4 * (
            (1.0 - t ** 2.0)
            * (
                1.0
                - t * (-t + self.a[0])
                - np.sqrt(
                    (1 + t ** 2 - 2 * self.a[0] * t) * (1 + t ** 2)
                )
            )
            / (
                2.0
                * self.a[0] ** 2.0
                * t ** 2.0
                * np.sqrt(1.0 + t ** 2.0 - 2.0 * self.a[0] * t)
            )
        )
        func = (1.0 / nadir_hemreflect) * (
            (1.0 - t ** 2.0)
            / ((np.pi) * (1.0 + t ** 2.0 - 2.0 * t * x) ** 1.5)
        )
        return func

    @property
    @lru_cache()
    def legcoefs(self):
        """Legendre-expansion coefficients, normalized the same way as
        _func."""
        n = sp.Symbol("n")
        legcoefs = (1.0 / self._nadir_hemreflect) * (
            (1.0 / (sp.pi)) * (2.0 * n + 1) * self.t ** n
        )
        return legcoefs
| |
#Rafael P Andrade
#Ficha 11: Programação com objetos (classes)
#9/XII/16
#1)
class estacionamento:
    """Parking lot with a fixed number of spots."""

    def __init__(self, lugares):
        # total capacity and number of currently free spots
        self.lug_total = lugares
        self.lug_vagos = lugares

    def lugares(self):
        """Return how many spots are still free."""
        return self.lug_vagos

    def entra(self):
        """A car enters; raise ValueError when the lot is full."""
        if self.lug_vagos <= 0:
            raise ValueError('Sem lugares')
        self.lug_vagos -= 1

    def sai(self):
        """A car leaves; raise ValueError when the lot is empty."""
        if self.lug_vagos >= self.lug_total:
            raise ValueError('Sem carros')
        self.lug_vagos += 1
#2)
class racional:
    """Rational number num/den stored as two ints (den != 0)."""

    #a)
    def __init__(self, num, den):
        """Validate and store numerator and denominator."""
        if not ( isinstance(num, int) and isinstance(den, int) ):
            raise TypeError('Argumentos invalidos')
        elif den == 0:
            raise ValueError('Erro: denominador nao pode ser nulo')
        self.num = num
        self.den = den

    def __repr__(self):
        """Render the fraction in lowest terms; fractions that reduce to
        a whole number are shown without the '/den' part."""
        # Euclid's algorithm for the greatest common divisor
        def gcd(a, b):
            while b:
                a, b = b, a % b
            return a
        # BUGFIX: the gcd must be computed *before* it is used (the old
        # code raised NameError), and a fraction reducing to a whole
        # number must show its reduced numerator, not the literal "1".
        maior_div_comum = gcd(self.num, self.den)
        if self.den // maior_div_comum == 1:
            return str(self.num // maior_div_comum)
        else:
            return str(self.num // maior_div_comum) + '/' + \
                   str(self.den // maior_div_comum)

    def iguais(self, r):
        """True when self and r represent the same rational value."""
        if not isinstance(r, racional):
            raise TypeError('Evite comparar alhos e bugalhos...')
        # cross-multiplication avoids reducing either fraction
        return self.num * r.den == r.num * self.den

    def nume(self):
        """Numerator as stored (not reduced)."""
        return self.num

    def deno(self):
        """Denominator as stored (not reduced)."""
        return self.den

    #b)
    def __add__(self, r):
        """Sum of two rationals (result is not reduced)."""
        if not isinstance(r, racional):
            raise TypeError('Soma com alhos e bugalhos')
        return racional(self.num*r.den + r.num*self.den, self.den*r.den )

    def __mul__(self, r):
        """Product of two rationals (result is not reduced)."""
        # O enunciado tem um erro aqui...
        if not isinstance(r, racional):
            raise TypeError('Soma com alhos e bugalhos')
        return racional(self.num*r.num, self.den*r.den)
#3)
class automovel:
    """Car with a fuel tank; `consumo` is litres per 100 km."""

    def __init__(self, deposito, combustivel, consumo):
        numeric = (int, float)
        if not ( isinstance(deposito, numeric) and \
                 isinstance(combustivel, numeric) and \
                 isinstance(consumo, numeric) ):
            raise TypeError('Reveja argumentos')
        if combustivel > deposito:
            raise ValueError('Nao pode levar garrafoes')
        if consumo <= 0:
            raise ValueError('Carro nao pode ser eletrico')
        if deposito < 0 or combustivel < 0:
            raise ValueError('Numeros positivos,sff')
        self.deposit = deposito
        self.fuel = combustivel
        self.consumo = consumo

    def combustivel(self):
        """Current fuel level in litres."""
        return self.fuel

    def autonomia(self):
        """Range in km (floor division, as in the original)."""
        return (self.fuel * 100) // self.consumo

    def abastece(self, litros):
        """Add fuel; raise ValueError if the tank would overflow."""
        if litros + self.fuel > self.deposit:
            raise ValueError('Impossivel, overflow')
        self.fuel += litros

    def percorre(self, km):
        """Drive `km` kilometres, consuming fuel per *complete* block of
        100 km (NOTE(review): partial hundreds consume nothing -- kept
        as-is to preserve the original behavior)."""
        if km > self.autonomia():
            raise ValueError('Impossivel, muito longe')
        self.fuel = self.fuel - ((km // 100) * self.consumo)
        print('Viagem feita')
#4)
#E para usar a notacao de class? nao parece, pela especificacao
class conjunto:
    """Mathematical set implemented over a list of unique elements."""
    #usando lista para guardar os elementos

    def __repr__(self):
        return str(self.elementos)

    #Construtores
    def __init__(self):
        self.elementos = []

    def insere(self, el):
        """Insert el if absent; return self so calls can be chained."""
        if el not in self.elementos:
            self.elementos += [el]
        return self

    #Seletores
    def el_conj(self):
        """Return a random element (None when the set is empty)."""
        if len(self.elementos) > 0:
            from random import random
            return self.elementos[ int( random()* len(self.elementos) )]

    def retira_conj(self, el):
        """Remove el (all occurrences) when present; return self."""
        if el in self.elementos:
            # iterate backwards so deletions don't shift pending indices
            for i in range(len(self.elementos)-1, -1, -1):
                if el == self.elementos[i]:
                    del(self.elementos[i])
        return self

    def cardinal(self):
        """Number of elements in the set."""
        return len(self.elementos)

    #Reconhecedores
    def e_conj_vazio(self):
        """True when the set has no elements."""
        return self.cardinal() == 0

    #Testes
    def pertence(self, el):
        """True when el belongs to the set."""
        return el in self.elementos

    #Adicionais
    def subconjunto(self, c):
        """True when every element of self belongs to c."""
        if not isinstance(c, conjunto):
            raise ValueError('Bugalhos nao pode ser subconjunto de alhos')
        for el in self.elementos:
            if not c.pertence(el):
                return False
        return True

    def uniao(self, c):
        """Union of self and c (may return one of the operands when one
        contains the other)."""
        if not isinstance(c, conjunto):
            raise ValueError('Bugalhos nao pode ser unido com alhos')
        if self.subconjunto(c):
            return c
        if c.subconjunto(self):
            return self
        res = conjunto()
        for el in self.elementos:
            res.insere(el)
        for el in c.elementos:
            res.insere(el)
        return res

    def intersecao(self, c):
        """Intersection of self and c."""
        if not isinstance(c, conjunto):
            raise ValueError('Bugalhos nao pode ser intersetado com alhos')
        if self.subconjunto(c):
            return self
        if c.subconjunto(self):
            return c
        res = conjunto()
        for el in self.elementos:
            if c.pertence(el):
                res.insere(el)
        # BUGFIX: the result was built but never returned, so partially
        # overlapping sets silently produced None
        return res

    def diferenca(self, c):
        """Elements of self that are not in c (empty when self is a
        subset of c)."""
        if not isinstance(c, conjunto):
            raise ValueError('Bugalhos nao pode ser diferenciado de alhos')
        res = conjunto()
        if not self.subconjunto(c):
            for el in self.elementos:
                if not c.pertence(el):
                    res.insere(el)
        return res
#5)
class mem_A:
    """Ackermann function with memoization of every computed pair."""

    def __init__(self):
        # cache mapping (m, n) -> A(m, n)
        self.values = {}

    def val(self, m, n):
        """Return Ackermann A(m, n), filling the cache as it recurses."""
        try:
            return self.values[(m, n)]
        except KeyError:
            pass
        if m == 0:
            result = n + 1
        elif n == 0:
            result = self.val(m - 1, 1)
        else:
            result = self.val(m - 1, self.val(m, n - 1))
        self.values[(m, n)] = result
        return result

    def mem(self):
        """Expose the internal memo table."""
        return self.values
| |
"""
SoftLayer.tests.managers.firewall_tests
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:license: MIT, see LICENSE for more details.
"""
import SoftLayer
from SoftLayer import exceptions
from SoftLayer import fixtures
from SoftLayer import testing
class FirewallTests(testing.TestCase):
    """Unit tests for SoftLayer.FirewallManager.

    All API traffic is stubbed through the testing.TestCase helpers
    (set_mock / assert_called_with); expected return payloads and the
    billing-item ids asserted below come from SoftLayer.fixtures.
    """

    def set_up(self):
        # fresh manager wired to the mocked client before every test
        self.firewall = SoftLayer.FirewallManager(self.client)

    def test_get_firewalls(self):
        firewall_vlan = {
            'id': 1,
            'firewallNetworkComponents': [{'id': 1234}],
            'networkVlanFirewall': {'id': 1234},
            'dedicatedFirewallFlag': True,
            'firewallGuestNetworkComponents': [{'id': 1234}],
            'firewallInterfaces': [{'id': 1234}],
            'firewallRules': [{'id': 1234}],
            'highAvailabilityFirewallFlag': True,
        }
        mock = self.set_mock('SoftLayer_Account', 'getNetworkVlans')
        mock.return_value = [firewall_vlan]
        firewalls = self.firewall.get_firewalls()
        self.assertEqual(firewalls, [firewall_vlan])
        self.assert_called_with('SoftLayer_Account', 'getNetworkVlans')

    def test_get_standard_fwl_rules(self):
        rules = self.firewall.get_standard_fwl_rules(1234)
        self.assertEqual(
            rules,
            fixtures.SoftLayer_Network_Component_Firewall.getRules)
        self.assert_called_with('SoftLayer_Network_Component_Firewall',
                                'getRules',
                                identifier=1234)

    def test_get_dedicated_fwl_rules(self):
        rules = self.firewall.get_dedicated_fwl_rules(1234)
        self.assertEqual(rules,
                         fixtures.SoftLayer_Network_Vlan_Firewall.getRules)
        self.assert_called_with('SoftLayer_Network_Vlan_Firewall', 'getRules',
                                identifier=1234)

    def test_get_standard_package_virtual_server(self):
        # test standard firewalls
        self.firewall.get_standard_package(server_id=1234, is_virt=True)
        self.assert_called_with('SoftLayer_Virtual_Guest', 'getObject',
                                identifier=1234,
                                mask='mask[primaryNetworkComponent[maxSpeed]]')
        _filter = {
            'items': {
                'description': {
                    'operation': '_= 100Mbps Hardware Firewall'
                }
            }
        }
        self.assert_called_with('SoftLayer_Product_Package', 'getItems',
                                identifier=0,
                                filter=_filter)

    def test_get_standard_package_bare_metal(self):
        self.firewall.get_standard_package(server_id=1234, is_virt=False)
        # we should ask for the frontEndNetworkComponents to get
        # the firewall port speed
        mask = 'mask[id,maxSpeed,networkComponentGroup.networkComponents]'
        self.assert_called_with('SoftLayer_Hardware_Server',
                                'getFrontendNetworkComponents',
                                identifier=1234,
                                mask=mask)
        # should call the product package for a 2000Mbps firewall
        _filter = {
            'items': {
                'description': {
                    'operation': '_= 2000Mbps Hardware Firewall'
                }
            }
        }
        self.assert_called_with('SoftLayer_Product_Package', 'getItems',
                                identifier=0,
                                filter=_filter)

    def test_get_dedicated_package_ha(self):
        # test dedicated HA firewalls
        self.firewall.get_dedicated_package(ha_enabled=True)
        _filter = {
            'items': {
                'description': {
                    'operation': '_= Hardware Firewall (High Availability)'
                }
            }
        }
        self.assert_called_with('SoftLayer_Product_Package', 'getItems',
                                identifier=0,
                                filter=_filter)

    def test_get_dedicated_package_pkg(self):
        # test dedicated (non-HA) firewalls
        self.firewall.get_dedicated_package(ha_enabled=False)
        _filter = {
            'items': {
                'description': {
                    'operation': '_= Hardware Firewall (Dedicated)'
                }
            }
        }
        self.assert_called_with('SoftLayer_Product_Package', 'getItems',
                                identifier=0,
                                filter=_filter)

    def test_cancel_firewall(self):
        # test standard firewalls
        result = self.firewall.cancel_firewall(6327, dedicated=False)
        self.assertEqual(result, fixtures.SoftLayer_Billing_Item.cancelService)
        self.assert_called_with('SoftLayer_Network_Component_Firewall',
                                'getObject',
                                identifier=6327,
                                mask='mask[id,billingItem[id]]')
        # billing item id 21370814 comes from the fixture payload
        self.assert_called_with('SoftLayer_Billing_Item', 'cancelService',
                                identifier=21370814)

    def test_cancel_firewall_no_firewall(self):
        mock = self.set_mock('SoftLayer_Network_Component_Firewall', 'getObject')
        mock.return_value = None
        self.assertRaises(exceptions.SoftLayerError,
                          self.firewall.cancel_firewall, 6327, dedicated=False)

    def test_cancel_firewall_no_billing(self):
        mock = self.set_mock('SoftLayer_Network_Component_Firewall', 'getObject')
        mock.return_value = {
            'id': 6327,
            'billingItem': None
        }
        self.assertRaises(exceptions.SoftLayerError,
                          self.firewall.cancel_firewall, 6327, dedicated=False)

    def test_cancel_dedicated_firewall(self):
        # test dedicated firewalls
        result = self.firewall.cancel_firewall(6327, dedicated=True)
        self.assertEqual(result, fixtures.SoftLayer_Billing_Item.cancelService)
        self.assert_called_with('SoftLayer_Network_Vlan_Firewall',
                                'getObject',
                                identifier=6327,
                                mask='mask[id,billingItem[id]]')
        # billing item id 21370815 comes from the fixture payload
        self.assert_called_with('SoftLayer_Billing_Item', 'cancelService',
                                identifier=21370815)

    def test_cancel_dedicated_firewall_no_firewall(self):
        mock = self.set_mock('SoftLayer_Network_Vlan_Firewall', 'getObject')
        mock.return_value = None
        self.assertRaises(exceptions.SoftLayerError,
                          self.firewall.cancel_firewall, 6327, dedicated=True)

    def test_cancel_dedicated_firewall_no_billing(self):
        mock = self.set_mock('SoftLayer_Network_Vlan_Firewall', 'getObject')
        mock.return_value = {
            'id': 6327,
            'billingItem': None
        }
        self.assertRaises(exceptions.SoftLayerError,
                          self.firewall.cancel_firewall, 6327, dedicated=True)

    def test_add_standard_firewall_virtual_server(self):
        # test standard firewalls for virtual servers
        self.firewall.add_standard_firewall(6327, is_virt=True)
        self.assert_called_with('SoftLayer_Virtual_Guest', 'getObject',
                                mask='mask[primaryNetworkComponent[maxSpeed]]',
                                identifier=6327)
        _filter = {
            'items': {
                'description': {
                    'operation': '_= 100Mbps Hardware Firewall'
                }
            }
        }
        self.assert_called_with('SoftLayer_Product_Package', 'getItems',
                                filter=_filter,
                                identifier=0)
        args = ({'prices': [{'id': 1122}],
                 'quantity': 1,
                 'virtualGuests': [{'id': 6327}],
                 'packageId': 0,
                 'complexType': 'SoftLayer_Container_Product_Order_Network_'
                                'Protection_Firewall'},)
        self.assert_called_with('SoftLayer_Product_Order', 'placeOrder',
                                args=args)

    def test_add_standard_firewall_server(self):
        # test dedicated firewall for Servers
        self.firewall.add_standard_firewall(6327, is_virt=False)
        # We should query the product package for a 2000Mbps firewall
        _filter = {
            'items': {
                'description': {
                    'operation': '_= 2000Mbps Hardware Firewall'
                }
            }
        }
        self.assert_called_with('SoftLayer_Product_Package', 'getItems',
                                identifier=0,
                                filter=_filter)
        # we should ask for the frontEndNetworkComponents to get
        # the firewall port speed
        mask = 'mask[id,maxSpeed,networkComponentGroup.networkComponents]'
        self.assert_called_with('SoftLayer_Hardware_Server',
                                'getFrontendNetworkComponents',
                                mask=mask,
                                identifier=6327)
        args = ({'hardware': [{'id': 6327}],
                 'prices': [{'id': 1122}],
                 'quantity': 1,
                 'packageId': 0,
                 'complexType': 'SoftLayer_Container_Product_Order_Network_'
                                'Protection_Firewall'},)
        self.assert_called_with('SoftLayer_Product_Order', 'placeOrder',
                                args=args)

    def test__get_fwl_port_speed_server(self):
        # Test the routine that calculates the speed of firewall
        # required for a server
        port_speed = self.firewall._get_fwl_port_speed(186908, False)
        self.assertEqual(port_speed, 2000)

    def test_add_vlan_firewall(self):
        # test dedicated firewall for Vlan
        self.firewall.add_vlan_firewall(6327, ha_enabled=False)
        _filter = {
            'items': {
                'description': {
                    'operation': '_= Hardware Firewall (Dedicated)'
                }
            }
        }
        self.assert_called_with('SoftLayer_Product_Package', 'getItems',
                                identifier=0,
                                filter=_filter)
        args = ({'prices': [{'id': 1122}],
                 'quantity': 1,
                 'vlanId': 6327,
                 'packageId': 0,
                 'complexType': 'SoftLayer_Container_Product_Order_Network_'
                                'Protection_Firewall_Dedicated'},)
        self.assert_called_with('SoftLayer_Product_Order', 'placeOrder',
                                args=args)

    def test_add_vlan_firewall_ha(self):
        # test dedicated firewall for Vlan
        self.firewall.add_vlan_firewall(6327, ha_enabled=True)
        _filter = {
            'items': {
                'description': {
                    'operation': '_= Hardware Firewall (High Availability)'
                }
            }
        }
        self.assert_called_with('SoftLayer_Product_Package', 'getItems',
                                identifier=0,
                                filter=_filter)
        args = ({'prices': [{'id': 1122}],
                 'quantity': 1,
                 'vlanId': 6327,
                 'packageId': 0,
                 'complexType': 'SoftLayer_Container_Product_Order_Network_'
                                'Protection_Firewall_Dedicated'},)
        self.assert_called_with('SoftLayer_Product_Order', 'placeOrder',
                                args=args)

    def test_edit_dedicated_fwl_rules(self):
        # test standard firewalls
        rules = fixtures.SoftLayer_Network_Vlan_Firewall.getRules
        self.firewall.edit_dedicated_fwl_rules(firewall_id=1234,
                                               rules=rules)
        args = ({'firewallContextAccessControlListId': 3142,
                 'rules': rules},)
        self.assert_called_with('SoftLayer_Network_Firewall_Update_Request',
                                'createObject',
                                args=args)

    def test_edit_standard_fwl_rules(self):
        # test standard firewalls
        rules = fixtures.SoftLayer_Network_Component_Firewall.getRules
        self.firewall.edit_standard_fwl_rules(firewall_id=1234,
                                              rules=rules)
        args = ({"networkComponentFirewallId": 1234,
                 "rules": rules},)
        self.assert_called_with('SoftLayer_Network_Firewall_Update_Request',
                                'createObject',
                                args=args)
| |
"""nlmmedline_xml_format.py
A Martel format to parse the NLM's XML format for Medline.
http://www.nlm.nih.gov/databases/dtd/nlmmedline_011101.dtd
http://www.nlm.nih.gov/databases/dtd/nlmmedlinecitation_011101.dtd
http://www.nlm.nih.gov/databases/dtd/nlmcommon_011101.dtd
Formats:
citation_format Format for one MedlineCitation.
format Format for a whole file.
"""
# This module is deprecated -- warn loudly at import time.
import warnings
warnings.warn("Bio.Medline.NLMMedlineXML was deprecated, as it does not seem to be able to parse recent Medline XML files. If you want to continue to use this module, please get in contact with the Biopython developers at biopython-dev@biopython.org to avoid permanent removal of this module from Biopython", DeprecationWarning)
import sys
from Martel import *
from Martel import RecordReader
# Handle to this module itself: the element-builder helpers below use
# setattr(self, ...) to install generated Martel expressions as
# module-level names (e.g. simple_elem("PMID") creates a global PMID).
self = sys.modules[__name__]
def _start_elem(element, *attrs):
    """Return a Martel expression matching the opening tag of *element*.

    Every name in *attrs* becomes an optional attribute whose quoted
    value is captured in a Martel group of the same name.
    """
    if not attrs:
        return Str("<%s>" % element)
    attr_groups = []
    for attr in attrs:
        value = Group(attr, Re(r'[^<&"]+'))
        attr_groups.append(
            Str(attr) + Str("=") + Str('"') + value + Str('"'))
    return (Str("<") + Str(element) +
            Rep(Str(" ") + Alt(*attr_groups)) +
            Str(">"))
def _end_elem(element):
    """Return a Martel expression matching *element*'s closing tag."""
    return Str("</" + element + ">")
def simple_elem(element, *attrs):
    """simple_elem(element, *attrs)

    Create a Martel Expression in this module's namespace that will
    recognize a one-line XML element of the form:

    <element>data</element>

    The Expression is installed in the module's namespace under the
    same name as the element.
    """
    start = _start_elem(element, *attrs)
    end = _end_elem(element)
    data = Group(element, Re(r"[^<]+"))
    expr = start + data + end + AnyEol()
    setattr(self, element, expr)
# Group expressions. A group consists of the start and end elements
# with an expression in-between. The Expression for the group will be
# called "NAME".
def group_elem(element, expr, *attrs):
    """Install a group expression for *element* in the module namespace.

    A group consists of the element's start and end tags (each on its
    own line) wrapped around *expr*; the NAME_start / NAME_end
    sub-expressions are created on first use and reused afterwards.
    """
    start_name = "%s_start" % element
    end_name = "%s_end" % element
    if getattr(self, start_name, None) is None:
        setattr(self, start_name, _start_elem(element, *attrs) + AnyEol())
    if getattr(self, end_name, None) is None:
        setattr(self, end_name, _end_elem(element) + AnyEol())
    combined = getattr(self, start_name) + expr + getattr(self, end_name)
    setattr(self, element, Group(element, combined))
######################################################################
# Implement Martel expressions that recognize:                       #
# http://www.nlm.nih.gov/databases/dtd/nlmcommon_011101.dtd          #
######################################################################
########################################
# Personal and Author names
elements = [
    "FirstName", "ForeName", "MiddleName", "LastName",
    "Initials", "Suffix",
    "CollectiveName"
]
# the list comprehension is used purely for its setattr side effects:
# it installs one module-level expression per element name
[simple_elem(e) for e in elements]
personal_name = LastName + \
                Opt(Alt(ForeName, FirstName + Opt(MiddleName))) + \
                Opt(Initials) + \
                Opt(Suffix)
author_name = Alt(personal_name, CollectiveName)
########################################
# Dates
elements = [
    "Year", "Month", "Day",
    "Season", "MedlineDate",
    "Hour", "Minute", "Second"
]
[simple_elem(e) for e in elements]
normal_date = Year + Month + Day + \
              Opt(Hour + Opt(Minute + Opt(Second)))
pub_date = Alt((Year + Opt(Alt((Month + Opt(Day)), Season))), MedlineDate)
simple_elem("CopyrightInformation")
simple_elem("AbstractText")
group_elem("Abstract", AbstractText + Opt(CopyrightInformation))
########################################
# NCBIArticle
simple_elem("NlmUniqueID")
simple_elem("PMID")
simple_elem("SubHeading", "MajorTopicYN")
simple_elem("QualifierName", "MajorTopicYN")
simple_elem("Descriptor", "MajorTopicYN")
simple_elem("DescriptorName", "MajorTopicYN")
group_elem("MeshHeading",
           Alt(DescriptorName, Descriptor) + \
           Alt(Rep(QualifierName), Rep(SubHeading)))
group_elem("MeshHeadingList", Rep1(MeshHeading))
simple_elem("MedlinePgn")
simple_elem("EndPage")
simple_elem("StartPage")
group_elem("Pagination",
           Alt(StartPage + Opt(EndPage) + Opt(MedlinePgn), MedlinePgn))
simple_elem("Affiliation")
group_elem("Author", author_name + Opt(Affiliation))
group_elem("AuthorList", Rep1(Author), "CompleteYN")
simple_elem("Language")
simple_elem("PublicationType")
group_elem("PublicationTypeList", Rep1(PublicationType))
simple_elem("Title")   # These were moved up, so that the definitions
simple_elem("Volume")  # will be before Book.
simple_elem("VernacularTitle")
simple_elem("CollectionTitle")
simple_elem("ArticleTitle")
simple_elem("Publisher")
group_elem("PubDate", pub_date)
group_elem("Book", PubDate + Publisher + Title +
           Opt(AuthorList) + Opt(CollectionTitle) + Opt(Volume))
simple_elem("Country")
simple_elem("MedlineTA")
simple_elem("MedlineCode")
group_elem("MedlineJournalInfo",
           Opt(Country) + MedlineTA + Opt(MedlineCode) + Opt(NlmUniqueID))
simple_elem("DateOfElectronicPublication")
simple_elem("ISOAbbreviation")
simple_elem("Coden")
simple_elem("Issue")
group_elem("JournalIssue", Opt(Volume) + Opt(Issue) + PubDate)
simple_elem("ISSN")
group_elem("Journal",
           Opt(ISSN) + \
           JournalIssue + \
           Opt(Coden) + \
           Opt(Title) + \
           Opt(ISOAbbreviation)
           )
simple_elem("GrantID")
simple_elem("Acronym")
simple_elem("Agency")
group_elem("Grant", Opt(GrantID) + Opt(Acronym) + Opt(Agency))
group_elem("GrantList", Rep1(Grant), "CompleteYN")
simple_elem("AccessionNumber")
group_elem("AccessionNumberList", Rep1(AccessionNumber))
simple_elem("DataBankName")
group_elem("DataBank", DataBankName + Opt(AccessionNumberList))
group_elem("DataBankList", Rep1(DataBank), "CompleteYN")
group_elem("Article",
           Alt(Journal, Book) + \
           ArticleTitle + \
           Pagination + \
           Opt(Abstract) + \
           Opt(Affiliation) + \
           Opt(AuthorList) + \
           Rep1(Language) + \
           Opt(DataBankList) + \
           Opt(GrantList) + \
           PublicationTypeList + \
           Opt(VernacularTitle) + \
           Opt(DateOfElectronicPublication)
           )
group_elem("NCBIArticle", PMID + Article + Opt(MedlineJournalInfo))
######################################################################
# Implement Martel expressions that recognize:                       #
# http://www.nlm.nih.gov/databases/dtd/nlmmedlinecitation_011101.dtd #
######################################################################
simple_elem("MedlineID")
simple_elem("Note")
simple_elem("RefSource")
# common shape shared by all of the cross-reference elements below
Ref_template = RefSource + Opt(Alt(PMID, MedlineID)) + Opt(Note)
########################################
# MedlineCitation
group_elem("OriginalReportIn", Ref_template)
group_elem("SummaryForPatientsIn", Ref_template)
group_elem("CommentOn", Ref_template)
group_elem("CommentIn", Ref_template)
group_elem("ErratumIn", Ref_template)
group_elem("RepublishedFrom", Ref_template)
group_elem("RepublishedIn", Ref_template)
group_elem("RetractionOf", Ref_template)
group_elem("RetractionIn", Ref_template)
group_elem("UpdateIn", Ref_template)
group_elem("UpdateOf", Ref_template)
group_elem("CommentsCorrections",
           Rep(CommentOn) + Rep(CommentIn) + \
           Rep(ErratumIn) + \
           Rep(RepublishedFrom) + Rep(RepublishedIn) + \
           Rep(RetractionOf) + Rep(RetractionIn) + \
           Rep(UpdateIn) + Rep(UpdateOf) + \
           Rep(SummaryForPatientsIn) + Rep(OriginalReportIn)
           )
simple_elem("NumberOfReferences")
group_elem("PersonalNameSubject", personal_name)
group_elem("PersonalNameSubjectList", Rep1(PersonalNameSubject))
simple_elem("GeneSymbol")
group_elem("GeneSymbolList", Rep1(GeneSymbol))
simple_elem("NameOfSubstance")
simple_elem("CASRegistryNumber")
simple_elem("RegistryNumber")
group_elem("Chemical", Alt(CASRegistryNumber, RegistryNumber) + \
           NameOfSubstance)
group_elem("ChemicalList", Rep1(Chemical))
simple_elem("CitationSubset")
simple_elem("GeneralNote", "Owner")
group_elem("Investigator", personal_name + Opt(Affiliation))
group_elem("InvestigatorList", Rep1(Investigator))
simple_elem("OtherID", "Source")
simple_elem("SpaceFlightMission")
simple_elem("Keyword", "MajorTopicYN")
group_elem("KeywordList", Rep1(Keyword), "Owner")
group_elem("OtherAbstract",
           AbstractText + Opt(CopyrightInformation),
           "Type")
group_elem("DateRevised", normal_date)
group_elem("DateCompleted", normal_date)
group_elem("DateCreated", normal_date)
group_elem("MedlineCitation",
           Opt(MedlineID) + \
           Opt(PMID) + \
           DateCreated + \
           Opt(DateCompleted) + \
           Opt(DateRevised) + \
           Article + \
           MedlineJournalInfo + \
           Opt(ChemicalList) + \
           Rep(CitationSubset) + \
           Opt(CommentsCorrections) + \
           Opt(GeneSymbolList) + \
           Opt(MeshHeadingList) + \
           Opt(NumberOfReferences) + \
           Opt(PersonalNameSubjectList) + \
           Rep(OtherID) + \
           Rep(OtherAbstract) + \
           Rep(KeywordList) + \
           Rep(SpaceFlightMission) + \
           Opt(InvestigatorList) + \
           Rep(GeneralNote),
           "Owner", "Status"
           )
######################################################################
# Implement Martel expressions that recognize:                       #
# http://www.nlm.nih.gov/databases/dtd/nlmmedline_011101.dtd         #
######################################################################
# The DeleteCitation tags start with spaces, so I have to make a
# special case for it.
space = Any(" \t")
DeleteCitation_start = Rep(space) + Str("<DeleteCitation>") + AnyEol()
DeleteCitation_end = Rep(space) + Str("</DeleteCitation>") + AnyEol()
# The file doesn't always end in a newline, so make MedlineCitationSet
# end in an optional Eol.
MedlineCitationSet_end = Str("</MedlineCitationSet>") + Opt(AnyEol())
group_elem("DeleteCitation", Alt(Rep1(MedlineID), Rep1(PMID)))
group_elem("MedlineCitationSet", Rep(MedlineCitation) + Opt(DeleteCitation))
######################################################################
# Other stuff                                                        #
#                                                                    #
######################################################################
# Should match the proper dtd in here...
DOCTYPE = Str("<!DOCTYPE") + Re(r"[^>]+") + Str(">") + AnyEol()
# Format for one MedlineCitation record.
citation_format = MedlineCitation
# I'm going to use a RecordReader so that I can parse one record at a
# time, instead of sucking the whole XML file into memory.  Each
# citation is going to be a record.  Thus, the header is everything
# before the first citation and the footer is everything after the
# last citation.
header_format = Group("header", DOCTYPE + MedlineCitationSet_start)
footer_format = Opt(DeleteCitation) + MedlineCitationSet_end
# Format for a whole file (header + repeated citations + footer).
format = HeaderFooter(
    None, {},
    # Unfortunately, RecordReader.Until doesn't work because some
    # MedlineCitations have attributes are in the form
    # <MedlineCitation Owner="NLM">.  "<MedlineCitation" by itself
    # won't differentiate between the beginning of a
    # MedlineCitationSet or the beginning of a MedlineCitation.  Thus,
    # I'm just going to read the first 4 lines and hope that's the
    # whole header.
    #header_format, RecordReader.Until, ("<MedlineCitation>",),
    header_format, RecordReader.CountLines, (4,),
    citation_format, RecordReader.EndsWith, ("</MedlineCitation>",),
    footer_format, RecordReader.Everything, (),
)
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for templates module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import imp
import gast
from tensorflow.python.autograph.pyct import compiler
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.autograph.pyct import templates
from tensorflow.python.platform import test
class TemplatesTest(test.TestCase):
  """Tests for autograph.pyct.templates.replace / replace_as_expression.

  Note: every ``assertEquals`` call was replaced by ``assertEqual`` --
  ``assertEquals`` is a deprecated alias that was removed in Python 3.12.
  """

  def test_replace_tuple(self):
    template = """
      def test_fn(a, c):
        return b,
    """
    node = templates.replace(template, b=('a', 'c'))[0]
    result, _ = compiler.ast_to_object(node)
    self.assertEqual((2, 3), result.test_fn(2, 3))

  def test_replace_variable(self):
    template = """
      def test_fn(a):
        a += 1
        a = 2 * a + 1
        return b
    """
    node = templates.replace(template, a='b')[0]
    result, _ = compiler.ast_to_object(node)
    self.assertEqual(7, result.test_fn(2))

  def test_replace_function_name(self):
    template = """
      def fname(a):
        a += 1
        a = 2 * a + 1
        return a
    """
    node = templates.replace(template, fname='test_fn')[0]
    result, _ = compiler.ast_to_object(node)
    self.assertEqual(7, result.test_fn(2))

  def test_replace_code_block(self):
    template = """
      def test_fn(a):
        block
        return a
    """
    # `block` expands to the same statement twice: a = a + 1
    node = templates.replace(
        template,
        block=[
            gast.Assign([
                gast.Name('a', None, None)
            ], gast.BinOp(gast.Name('a', None, None), gast.Add(), gast.Num(1))),
        ] * 2)[0]
    result, _ = compiler.ast_to_object(node)
    self.assertEqual(3, result.test_fn(1))

  def test_replace_attribute(self):
    template = """
      def test_fn(a):
        return a.foo
    """
    node = templates.replace(template, foo='b')[0]
    result, _ = compiler.ast_to_object(node)
    mod = imp.new_module('test')
    mod.b = 3
    self.assertEqual(3, result.test_fn(mod))
    with self.assertRaises(ValueError):
      templates.replace(template, foo=1)

  def test_replace_attribute_context(self):
    template = """
      def test_fn(foo):
        foo = 0
    """
    node = templates.replace(
        template,
        foo=parser.parse_expression('a.b.c'))[0]
    self.assertIsInstance(node.body[0].targets[0].ctx, gast.Store)
    self.assertIsInstance(node.body[0].targets[0].value.ctx, gast.Load)
    self.assertIsInstance(node.body[0].targets[0].value.value.ctx, gast.Load)

  def test_replace_list_context(self):
    template = """
      def test_fn(foo):
        foo = 0
    """
    node = templates.replace(template, foo=parser.parse_expression('[a, b]'))[0]
    self.assertIsInstance(node.body[0].targets[0].ctx, gast.Store)
    self.assertIsInstance(node.body[0].targets[0].elts[0].ctx, gast.Store)
    self.assertIsInstance(node.body[0].targets[0].elts[1].ctx, gast.Store)

  def test_replace_tuple_context(self):
    template = """
      def test_fn(foo):
        foo = 0
    """
    node = templates.replace(template, foo=parser.parse_expression('(a, b)'))[0]
    self.assertIsInstance(node.body[0].targets[0].ctx, gast.Store)
    self.assertIsInstance(node.body[0].targets[0].elts[0].ctx, gast.Store)
    self.assertIsInstance(node.body[0].targets[0].elts[1].ctx, gast.Store)

  def test_replace_expression_context(self):
    template = """
      def test_fn(foo):
        foo
    """
    node = templates.replace(
        template, foo=parser.parse_expression('a + 2 * b / -c'))[0]
    self.assertIsInstance(node.body[0].ctx, gast.Load)
    self.assertIsInstance(node.body[0].left.ctx, gast.Load)
    self.assertIsInstance(node.body[0].right.left.right.ctx, gast.Load)

  def test_replace_complex_context(self):
    template = """
      def test_fn(foo):
        foo = 0
    """
    node = templates.replace(
        template, foo=parser.parse_expression('bar(([a, b],)).baz'))[0]
    self.assertIsInstance(node.body[0].targets[0].ctx, gast.Store)
    function_call_arg = node.body[0].targets[0].value.args[0]
    self.assertIsInstance(function_call_arg.elts[0].ctx, gast.Load)
    self.assertIsInstance(function_call_arg.elts[0].elts[0].ctx, gast.Load)
    self.assertIsInstance(function_call_arg.elts[0].elts[1].ctx, gast.Load)

  def test_replace_index(self):
    template = """
      def test_fn(foo):
        foo = 0
    """
    node = templates.replace(
        template, foo=parser.parse_expression('foo(a[b]).bar'))[0]
    function_call_arg = node.body[0].targets[0].value.args[0]
    self.assertIsInstance(function_call_arg.ctx, gast.Load)
    self.assertIsInstance(function_call_arg.slice.value.ctx, gast.Load)

  def test_replace_call_keyword(self):
    template = """
      def test_fn():
        def f(a, d, f):
          return a + d + f
        return f(1, kws=None)
    """
    source = parser.parse_expression('f(d=3, f=5)')
    node = templates.replace(template, kws=source.keywords)[0]
    result, _ = compiler.ast_to_object(node)
    self.assertEqual(9, result.test_fn())
    with self.assertRaises(ValueError):
      templates.replace(template, kws=[])
      templates.replace(template, kws=1)

  def test_replace_name_with_call(self):
    template = """
      def test_fn():
        b = 5
        def g(a):
          return 3 * a
        def f():
          return g
        return foo
    """
    source = parser.parse_expression('f()(b)')
    node = templates.replace(template, foo=source)[0]
    result, _ = compiler.ast_to_object(node)
    self.assertEqual(15, result.test_fn())

  def test_replace_name_with_dict(self):
    template = """
      def test_fn():
        return foo['bar']
    """
    source = parser.parse_expression('{\'bar\': 3}')
    node = templates.replace(template, foo=source)[0]
    result, _ = compiler.ast_to_object(node)
    self.assertEqual(3, result.test_fn())

  def test_replace_as_expression(self):
    template = """
      foo(a)
    """
    node = templates.replace_as_expression(template, foo='bar', a='baz')
    self.assertIsInstance(node, gast.Call)
    self.assertEqual(node.func.id, 'bar')
    self.assertEqual(node.args[0].id, 'baz')

  def test_replace_as_expression_restrictions(self):
    template = """
      foo(a)
      bar(b)
    """
    # multi-statement templates are not valid as a single expression
    with self.assertRaises(ValueError):
      templates.replace_as_expression(template)

  def test_function_call_in_list(self):
    template = """
        foo(bar)
    """
    source = parser.parse_expression('[a(b(1))]')
    templates.replace_as_expression(template, bar=source)
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
  test.main()
| |
from django.conf import settings
from django.contrib.contenttypes.fields import GenericRelation
from django.core.exceptions import ValidationError
from django.db import models
from django.urls import reverse
from taggit.managers import TaggableManager
from dcim.models import BaseInterface, Device
from extras.models import ChangeLoggedModel, ConfigContextModel, CustomFieldModel, ObjectChange, TaggedItem
from extras.querysets import ConfigContextModelQuerySet
from extras.utils import extras_features
from utilities.fields import NaturalOrderingField
from utilities.ordering import naturalize_interface
from utilities.query_functions import CollateAsChar
from utilities.querysets import RestrictedQuerySet
from utilities.utils import serialize_object
from .choices import *
# Public API of this module (wildcard-import surface).
__all__ = (
    'Cluster',
    'ClusterGroup',
    'ClusterType',
    'VirtualMachine',
    'VMInterface',
)
#
# Cluster types
#
class ClusterType(ChangeLoggedModel):
    """
    A type of Cluster.
    """
    # Unique display name.
    name = models.CharField(
        max_length=100,
        unique=True
    )
    # URL-friendly identifier, used in list-view filter links.
    slug = models.SlugField(
        max_length=100,
        unique=True
    )
    description = models.CharField(
        max_length=200,
        blank=True
    )
    objects = RestrictedQuerySet.as_manager()
    # Column order for CSV export; must match to_csv().
    csv_headers = ['name', 'slug', 'description']
    class Meta:
        ordering = ['name']
    def __str__(self):
        return self.name
    def get_absolute_url(self):
        # No dedicated detail view; link to the cluster list filtered by this type.
        return "{}?type={}".format(reverse('virtualization:cluster_list'), self.slug)
    def to_csv(self):
        """Return this object's values in csv_headers order."""
        return (
            self.name,
            self.slug,
            self.description,
        )
#
# Cluster groups
#
class ClusterGroup(ChangeLoggedModel):
    """
    An organizational group of Clusters.
    """
    # Unique display name.
    name = models.CharField(
        max_length=100,
        unique=True
    )
    # URL-friendly identifier, used in list-view filter links.
    slug = models.SlugField(
        max_length=100,
        unique=True
    )
    description = models.CharField(
        max_length=200,
        blank=True
    )
    objects = RestrictedQuerySet.as_manager()
    # Column order for CSV export; must match to_csv().
    csv_headers = ['name', 'slug', 'description']
    class Meta:
        ordering = ['name']
    def __str__(self):
        return self.name
    def get_absolute_url(self):
        # No dedicated detail view; link to the cluster list filtered by this group.
        return "{}?group={}".format(reverse('virtualization:cluster_list'), self.slug)
    def to_csv(self):
        """Return this object's values in csv_headers order."""
        return (
            self.name,
            self.slug,
            self.description,
        )
#
# Clusters
#
@extras_features('custom_fields', 'custom_links', 'export_templates', 'webhooks')
class Cluster(ChangeLoggedModel, CustomFieldModel):
    """
    A cluster of VirtualMachines. Each Cluster may optionally be associated with one or more Devices.
    """
    name = models.CharField(
        max_length=100,
        unique=True
    )
    # PROTECT: a type/group in use cannot be deleted.
    type = models.ForeignKey(
        to=ClusterType,
        on_delete=models.PROTECT,
        related_name='clusters'
    )
    group = models.ForeignKey(
        to=ClusterGroup,
        on_delete=models.PROTECT,
        related_name='clusters',
        blank=True,
        null=True
    )
    tenant = models.ForeignKey(
        to='tenancy.Tenant',
        on_delete=models.PROTECT,
        related_name='clusters',
        blank=True,
        null=True
    )
    site = models.ForeignKey(
        to='dcim.Site',
        on_delete=models.PROTECT,
        related_name='clusters',
        blank=True,
        null=True
    )
    comments = models.TextField(
        blank=True
    )
    tags = TaggableManager(through=TaggedItem)
    objects = RestrictedQuerySet.as_manager()
    # Column order for CSV export; must stay in sync with to_csv().
    # Fix: 'tenant' was missing even though to_csv() exports it, which
    # misaligned every column after 'site' in exported CSV data.
    csv_headers = ['name', 'type', 'group', 'site', 'tenant', 'comments']
    clone_fields = [
        'type', 'group', 'tenant', 'site',
    ]
    class Meta:
        ordering = ['name']
    def __str__(self):
        return self.name
    def get_absolute_url(self):
        return reverse('virtualization:cluster', args=[self.pk])
    def clean(self):
        """Validate that all host Devices belong to the assigned Site, if any."""
        super().clean()
        # If the Cluster is assigned to a Site, verify that all host Devices belong to that Site.
        if self.pk and self.site:
            nonsite_devices = Device.objects.filter(cluster=self).exclude(site=self.site).count()
            if nonsite_devices:
                raise ValidationError({
                    'site': "{} devices are assigned as hosts for this cluster but are not in site {}".format(
                        nonsite_devices, self.site
                    )
                })
    def to_csv(self):
        """Return this object's values in csv_headers order."""
        return (
            self.name,
            self.type.name,
            self.group.name if self.group else None,
            self.site.name if self.site else None,
            self.tenant.name if self.tenant else None,
            self.comments,
        )
#
# Virtual machines
#
@extras_features('custom_fields', 'custom_links', 'export_templates', 'webhooks')
class VirtualMachine(ChangeLoggedModel, ConfigContextModel, CustomFieldModel):
    """
    A virtual machine which runs inside a Cluster.
    """
    # PROTECT: a Cluster hosting VMs cannot be deleted.
    cluster = models.ForeignKey(
        to='virtualization.Cluster',
        on_delete=models.PROTECT,
        related_name='virtual_machines'
    )
    tenant = models.ForeignKey(
        to='tenancy.Tenant',
        on_delete=models.PROTECT,
        related_name='virtual_machines',
        blank=True,
        null=True
    )
    platform = models.ForeignKey(
        to='dcim.Platform',
        on_delete=models.SET_NULL,
        related_name='virtual_machines',
        blank=True,
        null=True
    )
    # Not globally unique; uniqueness is enforced per (cluster, tenant) below.
    name = models.CharField(
        max_length=64
    )
    status = models.CharField(
        max_length=50,
        choices=VirtualMachineStatusChoices,
        default=VirtualMachineStatusChoices.STATUS_ACTIVE,
        verbose_name='Status'
    )
    # Only DeviceRoles flagged vm_role=True may be assigned to a VM.
    role = models.ForeignKey(
        to='dcim.DeviceRole',
        on_delete=models.PROTECT,
        related_name='virtual_machines',
        limit_choices_to={'vm_role': True},
        blank=True,
        null=True
    )
    # related_name='+' disables the reverse relation from IPAddress.
    primary_ip4 = models.OneToOneField(
        to='ipam.IPAddress',
        on_delete=models.SET_NULL,
        related_name='+',
        blank=True,
        null=True,
        verbose_name='Primary IPv4'
    )
    primary_ip6 = models.OneToOneField(
        to='ipam.IPAddress',
        on_delete=models.SET_NULL,
        related_name='+',
        blank=True,
        null=True,
        verbose_name='Primary IPv6'
    )
    vcpus = models.PositiveSmallIntegerField(
        blank=True,
        null=True,
        verbose_name='vCPUs'
    )
    memory = models.PositiveIntegerField(
        blank=True,
        null=True,
        verbose_name='Memory (MB)'
    )
    disk = models.PositiveIntegerField(
        blank=True,
        null=True,
        verbose_name='Disk (GB)'
    )
    comments = models.TextField(
        blank=True
    )
    secrets = GenericRelation(
        to='secrets.Secret',
        content_type_field='assigned_object_type',
        object_id_field='assigned_object_id',
        related_query_name='virtual_machine'
    )
    tags = TaggableManager(through=TaggedItem)
    objects = ConfigContextModelQuerySet.as_manager()
    # Column order for CSV export; must match to_csv().
    csv_headers = [
        'name', 'status', 'role', 'cluster', 'tenant', 'platform', 'vcpus', 'memory', 'disk', 'comments',
    ]
    clone_fields = [
        'cluster', 'tenant', 'platform', 'status', 'role', 'vcpus', 'memory', 'disk',
    ]
    class Meta:
        ordering = ('name', 'pk')  # Name may be non-unique
        unique_together = [
            ['cluster', 'tenant', 'name']
        ]
    def __str__(self):
        return self.name
    def get_absolute_url(self):
        return reverse('virtualization:virtualmachine', args=[self.pk])
    def validate_unique(self, exclude=None):
        """Enforce name uniqueness within a cluster when no tenant is set."""
        # Check for a duplicate name on a VM assigned to the same Cluster and no Tenant. This is necessary
        # because Django does not consider two NULL fields to be equal, and thus will not trigger a violation
        # of the uniqueness constraint without manual intervention.
        if self.tenant is None and VirtualMachine.objects.exclude(pk=self.pk).filter(
                name=self.name, cluster=self.cluster, tenant__isnull=True
        ):
            raise ValidationError({
                'name': 'A virtual machine with this name already exists in the assigned cluster.'
            })
        super().validate_unique(exclude)
    def clean(self):
        """Validate that each primary IP is assigned to one of this VM's interfaces."""
        super().clean()
        # Validate primary IP addresses
        interfaces = self.interfaces.all()
        for field in ['primary_ip4', 'primary_ip6']:
            ip = getattr(self, field)
            if ip is not None:
                if ip.assigned_object in interfaces:
                    pass
                # A NAT inside address whose interface belongs to this VM is also acceptable.
                elif ip.nat_inside is not None and ip.nat_inside.assigned_object in interfaces:
                    pass
                else:
                    raise ValidationError({
                        field: f"The specified IP address ({ip}) is not assigned to this VM.",
                    })
    def to_csv(self):
        """Return this object's values in csv_headers order."""
        return (
            self.name,
            self.get_status_display(),
            self.role.name if self.role else None,
            self.cluster.name,
            self.tenant.name if self.tenant else None,
            self.platform.name if self.platform else None,
            self.vcpus,
            self.memory,
            self.disk,
            self.comments,
        )
    def get_status_class(self):
        # CSS class used to render the status label in templates.
        return VirtualMachineStatusChoices.CSS_CLASSES.get(self.status)
    @property
    def primary_ip(self):
        """Return the preferred primary IP (IPv4 first if PREFER_IPV4 is set)."""
        if settings.PREFER_IPV4 and self.primary_ip4:
            return self.primary_ip4
        elif self.primary_ip6:
            return self.primary_ip6
        elif self.primary_ip4:
            return self.primary_ip4
        else:
            return None
    @property
    def site(self):
        # A VM's site is inherited from its cluster.
        return self.cluster.site
#
# Interfaces
#
@extras_features('export_templates', 'webhooks')
class VMInterface(BaseInterface):
    """A network interface belonging to a VirtualMachine."""
    virtual_machine = models.ForeignKey(
        to='virtualization.VirtualMachine',
        on_delete=models.CASCADE,
        related_name='interfaces'
    )
    name = models.CharField(
        max_length=64
    )
    # Shadow field holding a naturalized copy of `name` for correct ordering
    # (e.g. eth2 before eth10).
    _name = NaturalOrderingField(
        target_field='name',
        naturalize_function=naturalize_interface,
        max_length=100,
        blank=True
    )
    description = models.CharField(
        max_length=200,
        blank=True
    )
    untagged_vlan = models.ForeignKey(
        to='ipam.VLAN',
        on_delete=models.SET_NULL,
        related_name='vminterfaces_as_untagged',
        null=True,
        blank=True,
        verbose_name='Untagged VLAN'
    )
    tagged_vlans = models.ManyToManyField(
        to='ipam.VLAN',
        related_name='vminterfaces_as_tagged',
        blank=True,
        verbose_name='Tagged VLANs'
    )
    ip_addresses = GenericRelation(
        to='ipam.IPAddress',
        content_type_field='assigned_object_type',
        object_id_field='assigned_object_id',
        related_query_name='vminterface'
    )
    tags = TaggableManager(
        through=TaggedItem,
        related_name='vminterface'
    )
    objects = RestrictedQuerySet.as_manager()
    # Column order for CSV export; must match to_csv().
    csv_headers = [
        'virtual_machine', 'name', 'enabled', 'mac_address', 'mtu', 'description', 'mode',
    ]
    class Meta:
        verbose_name = 'interface'
        ordering = ('virtual_machine', CollateAsChar('_name'))
        unique_together = ('virtual_machine', 'name')
    def __str__(self):
        return self.name
    def get_absolute_url(self):
        return reverse('virtualization:vminterface', kwargs={'pk': self.pk})
    def to_csv(self):
        """Return this object's values in csv_headers order."""
        return (
            self.virtual_machine.name,
            self.name,
            self.enabled,
            self.mac_address,
            self.mtu,
            self.description,
            self.get_mode_display(),
        )
    def clean(self):
        """Validate that the untagged VLAN is global or local to the VM's site."""
        super().clean()
        # Validate untagged VLAN
        if self.untagged_vlan and self.untagged_vlan.site not in [self.virtual_machine.site, None]:
            raise ValidationError({
                'untagged_vlan': f"The untagged VLAN ({self.untagged_vlan}) must belong to the same site as the "
                                 f"interface's parent virtual machine, or it must be global"
            })
    def to_objectchange(self, action):
        """Record changes against the parent VirtualMachine in the changelog."""
        # Annotate the parent VirtualMachine
        return ObjectChange(
            changed_object=self,
            object_repr=str(self),
            action=action,
            related_object=self.virtual_machine,
            object_data=serialize_object(self)
        )
    @property
    def parent(self):
        # Generic accessor used by shared interface templates/views.
        return self.virtual_machine
    @property
    def count_ipaddresses(self):
        return self.ip_addresses.count()
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# HotC Server
# https://github.com/vesche/HotC
#
import battle_server
import json
import socket
import sys
import threading
import time
from datetime import datetime
class Server(threading.Thread):
    """Accept loop for the HotC game server.

    State is intentionally class-level: every ClientConnection reaches the
    same clients/queue registries through the module-global `server`.
    """
    host = '0.0.0.0'
    port = 1337
    clients = {}
    client_count = 1
    queue = []

    def __init__(self):
        super(Server, self).__init__()
        self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # Allow quick restarts without waiting for TIME_WAIT to expire.
        self.s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.s.bind((self.host, self.port))
        self.s.listen(5)

    def listen(self):
        """Accept clients forever, handing each one its own login thread."""
        while True:
            connection, address = self.s.accept()
            client_id = self.client_count
            new_client = ClientConnection(connection, address, client_id)
            self.clients[client_id] = new_client
            self.client_count += 1
            # one dedicated thread per connected client
            threading.Thread(target=new_client.login).start()

    def get_clients(self):
        """Return all currently registered client connections."""
        return list(self.clients.values())

    def remove_client(self, k):
        """Remove and return client `k`, or None if it is not registered."""
        return self.clients.pop(k, None)
class ClientConnection():
    """Handles one connected client: login/registration, lobby commands,
    matchmaking queue, and game lifecycle.

    NOTE(review): relies on the module-global `server` created in __main__;
    this is Python 2 code (print statements, str-based socket I/O).
    """
    def __init__(self, conn, addr, uid):
        self.conn = conn
        self.addr = addr
        self.uid = uid
        # '?' marks status/user as unknown until login completes.
        self.status = '?'
        self.user = '?'
        self.opp = None
        self.hero_id = False
        self.move = False
        self.hp = 5
    def disconnect(self):
        """Unregister this client from the server and close its socket."""
        server.remove_client(self.uid)
        self.log_message('disconnected')
        self.conn.close()
    def load_users(self):
        """Load the persisted user database (list of dicts) from disk."""
        with open('users.json') as f:
            return json.load(f)
    def log_message(self, msg):
        """Print a timestamped log line for this client."""
        dt = str(datetime.now())[:19]
        print '{} - {} {}.'.format(dt, self.user, msg)
    def recv_data(self):
        """Receive one message and split it into (command, arguments)."""
        # NOTE(review): assumes each recv() yields exactly one whole command;
        # no framing and no empty-read (client hang-up) handling — confirm.
        data = self.conn.recv(1024)
        command, _, arguments = data.partition(' ')
        return command, arguments
    def login(self):
        """Authenticate or register the client, then drop into the lobby."""
        self.log_message('connected')
        logged_in = False
        users = self.load_users()
        while not logged_in:
            command, arguments = self.recv_data()
            if command == 'login':
                try:
                    username, password = arguments.split()
                except ValueError:
                    self.conn.send('login_failure')
                    continue
                # check if username and password are correct
                for u in users:
                    if (u['username'] == username) and (u['password'] == password):
                        logged_in = True
                        self.conn.send('login_success')
                        break
                else:
                    # for-else: no matching credentials found
                    self.conn.send('login_failure')
            elif command == 'register':
                try:
                    username, password = arguments.split()
                except ValueError:
                    self.conn.send('register_failure')
                    continue
                # check if username already exists
                for u in users:
                    if u['username'] == username:
                        self.conn.send('register_failure')
                        break
                else:
                    # register a new user
                    new_user = { "username": username, "password": password,
                        "wins": 0, "loses": 0 }
                    users.append(new_user)
                    with open('users.json', 'w') as f:
                        json.dump(users, f)
                    logged_in = True
                    self.conn.send('register_success')
            elif command == 'quit':
                self.disconnect()
                return
        # drop logged in user into lobby
        self.user = username
        self.log_message('logged in')
        self.status = 'lobby'
        self.lobby()
    def lobby(self):
        """Serve lobby commands until the client queues for a game or quits."""
        while True:
            command, _ = self.recv_data()
            if command == 'queue':
                self.log_message('entered the queue')
                # add client to the queue
                self.status = 'queue'
                my_client = server.clients[self.uid]
                server.queue.append(my_client)
                # sit in the queue
                while True:
                    try:
                        # ensure clients match up to each other properly:
                        # even indexes pair with the next entry, odd with the previous
                        queue_index = server.queue.index(my_client)
                        if queue_index % 2 == 0:
                            opp_client = server.queue[queue_index+1]
                        elif queue_index % 2 == 1:
                            opp_client = server.queue[queue_index-1]
                        break
                    except IndexError:
                        # if no opponent found wait a bit and look again
                        time.sleep(.3)
                my_client.status = 'game'
                # wait for both of the clients to leave the queue
                time.sleep(1)
                # remove clients from queue (the opponent's thread may have
                # already removed them, hence the ValueError guard)
                try:
                    server.queue.remove(my_client)
                    server.queue.remove(opp_client)
                except ValueError:
                    pass
                # send client opponent info
                self.opp = opp_client.user
                self.conn.send('match {}'.format(self.opp))
                # start game
                players = (my_client, opp_client)
                self.game(players)
            elif command == 'who_online':
                clients_online = [(c.user, c.status) for c in server.get_clients()]
                self.conn.send(str(clients_online))
            elif command == 'highscores':
                users = self.load_users()
                stats = []
                for u in users:
                    stats.append([u['username'], u['wins'], u['loses']])
                self.conn.send(str(stats))
            elif command == 'quit':
                self.disconnect()
                return
    def game(self, players):
        """Run a battle between the two matched players, then reset them."""
        self.log_message('started a game with {}'.format(players[1].user))
        # start battle server
        battle_server.run(players)
        # reset after match
        for p in players:
            p.status = 'lobby'
            p.opp = None
        self.log_message('finished a game with {}'.format(players[1].user))
# Entry point: run the accept loop on the main thread; the daemon flag lets
# Ctrl-C terminate the process even with client threads still alive.
if __name__ == '__main__':
    server = Server()
    server.setDaemon(True)
    print 'HotC server is listening on port 1337...\n'
    try:
        server.listen()
    except KeyboardInterrupt:
        print 'HotC server shutting down!'
        sys.exit(1)
| |
import boto3
import json
import pytest
from botocore.exceptions import ClientError
from moto import mock_iot, mock_cognitoidentity
@mock_iot
def test_attach_policy():
    """Attaching a policy to a certificate makes it listable on that target."""
    iot = boto3.client("iot", region_name="ap-northeast-1")
    policy_name = "my-policy"
    policy_doc = "{}"
    certificate = iot.create_keys_and_certificate(setAsActive=True)
    target_arn = certificate["certificateArn"]
    iot.create_policy(policyName=policy_name, policyDocument=policy_doc)
    iot.attach_policy(policyName=policy_name, target=target_arn)
    attached = iot.list_attached_policies(target=target_arn)
    attached.should.have.key("policies").which.should.have.length_of(1)
    attached["policies"][0]["policyName"].should.equal("my-policy")
@mock_iot
@mock_cognitoidentity
def test_attach_policy_to_identity():
    """IoT policies can also be attached to a Cognito identity id."""
    region = "ap-northeast-1"
    cognito = boto3.client("cognito-identity", region_name=region)
    identity_pool = cognito.create_identity_pool(
        IdentityPoolName="test_identity_pool", AllowUnauthenticatedIdentities=True
    )
    identity = cognito.get_id(
        AccountId="test", IdentityPoolId=identity_pool["IdentityPoolId"]
    )
    iot = boto3.client("iot", region_name=region)
    policy_name = "my-policy"
    iot.create_policy(policyName=policy_name, policyDocument="{}")
    iot.attach_policy(policyName=policy_name, target=identity["IdentityId"])
    attached = iot.list_attached_policies(target=identity["IdentityId"])
    attached.should.have.key("policies").which.should.have.length_of(1)
    attached["policies"][0]["policyName"].should.equal(policy_name)
@mock_iot
def test_detach_policy():
    """Detaching a policy removes it from the certificate's attached list."""
    iot = boto3.client("iot", region_name="ap-northeast-1")
    policy_name = "my-policy"
    certificate = iot.create_keys_and_certificate(setAsActive=True)
    target_arn = certificate["certificateArn"]
    iot.create_policy(policyName=policy_name, policyDocument="{}")
    iot.attach_policy(policyName=policy_name, target=target_arn)
    attached = iot.list_attached_policies(target=target_arn)
    attached.should.have.key("policies").which.should.have.length_of(1)
    attached["policies"][0]["policyName"].should.equal("my-policy")
    iot.detach_policy(policyName=policy_name, target=target_arn)
    attached = iot.list_attached_policies(target=target_arn)
    attached.should.have.key("policies").which.should.be.empty
@mock_iot
def test_list_attached_policies():
    """A freshly created certificate has no attached policies."""
    iot = boto3.client("iot", region_name="ap-northeast-1")
    certificate = iot.create_keys_and_certificate(setAsActive=True)
    listing = iot.list_attached_policies(target=certificate["certificateArn"])
    listing["policies"].should.be.empty
@mock_iot
def test_policy_versions():
    """Exercise the policy-version lifecycle: creation, default selection,
    the five-version cap, and deletion rules."""
    client = boto3.client("iot", region_name="ap-northeast-1")
    policy_name = "my-policy"
    doc = "{}"
    # Initial creation yields version "1".
    policy = client.create_policy(policyName=policy_name, policyDocument=doc)
    policy.should.have.key("policyName").which.should.equal(policy_name)
    policy.should.have.key("policyArn").which.should_not.be.none
    policy.should.have.key("policyDocument").which.should.equal(json.dumps({}))
    policy.should.have.key("policyVersionId").which.should.equal("1")
    policy = client.get_policy(policyName=policy_name)
    policy.should.have.key("policyName").which.should.equal(policy_name)
    policy.should.have.key("policyArn").which.should_not.be.none
    policy.should.have.key("policyDocument").which.should.equal(json.dumps({}))
    policy.should.have.key("defaultVersionId").which.should.equal(
        policy["defaultVersionId"]
    )
    # Version 2, promoted to default via setAsDefault=True.
    policy1 = client.create_policy_version(
        policyName=policy_name,
        policyDocument=json.dumps({"version": "version_1"}),
        setAsDefault=True,
    )
    policy1.should.have.key("policyArn").which.should_not.be.none
    policy1.should.have.key("policyDocument").which.should.equal(
        json.dumps({"version": "version_1"})
    )
    policy1.should.have.key("policyVersionId").which.should.equal("2")
    policy1.should.have.key("isDefaultVersion").which.should.equal(True)
    # Version 3, not default.
    policy2 = client.create_policy_version(
        policyName=policy_name,
        policyDocument=json.dumps({"version": "version_2"}),
        setAsDefault=False,
    )
    policy2.should.have.key("policyArn").which.should_not.be.none
    policy2.should.have.key("policyDocument").which.should.equal(
        json.dumps({"version": "version_2"})
    )
    policy2.should.have.key("policyVersionId").which.should.equal("3")
    policy2.should.have.key("isDefaultVersion").which.should.equal(False)
    # get_policy still reflects version 2 (policy1) as the default.
    policy = client.get_policy(policyName=policy_name)
    policy.should.have.key("policyName").which.should.equal(policy_name)
    policy.should.have.key("policyArn").which.should_not.be.none
    policy.should.have.key("policyDocument").which.should.equal(
        json.dumps({"version": "version_1"})
    )
    policy.should.have.key("defaultVersionId").which.should.equal(
        policy1["policyVersionId"]
    )
    # Versions 4 and 5, neither default.
    policy3 = client.create_policy_version(
        policyName=policy_name,
        policyDocument=json.dumps({"version": "version_3"}),
        setAsDefault=False,
    )
    policy3.should.have.key("policyArn").which.should_not.be.none
    policy3.should.have.key("policyDocument").which.should.equal(
        json.dumps({"version": "version_3"})
    )
    policy3.should.have.key("policyVersionId").which.should.equal("4")
    policy3.should.have.key("isDefaultVersion").which.should.equal(False)
    policy4 = client.create_policy_version(
        policyName=policy_name,
        policyDocument=json.dumps({"version": "version_4"}),
        setAsDefault=False,
    )
    policy4.should.have.key("policyArn").which.should_not.be.none
    policy4.should.have.key("policyDocument").which.should.equal(
        json.dumps({"version": "version_4"})
    )
    policy4.should.have.key("policyVersionId").which.should.equal("5")
    policy4.should.have.key("isDefaultVersion").which.should.equal(False)
    # Exactly one of the five versions may be flagged as default.
    policy_versions = client.list_policy_versions(policyName=policy_name)
    policy_versions.should.have.key("policyVersions").which.should.have.length_of(5)
    list(
        map(lambda item: item["isDefaultVersion"], policy_versions["policyVersions"])
    ).count(True).should.equal(1)
    default_policy = list(
        filter(lambda item: item["isDefaultVersion"], policy_versions["policyVersions"])
    )
    default_policy[0].should.have.key("versionId").should.equal(
        policy1["policyVersionId"]
    )
    policy = client.get_policy(policyName=policy_name)
    policy.should.have.key("policyName").which.should.equal(policy_name)
    policy.should.have.key("policyArn").which.should_not.be.none
    policy.should.have.key("policyDocument").which.should.equal(
        json.dumps({"version": "version_1"})
    )
    policy.should.have.key("defaultVersionId").which.should.equal(
        policy1["policyVersionId"]
    )
    # Switch the default to version 5 and verify the flip is reflected everywhere.
    client.set_default_policy_version(
        policyName=policy_name, policyVersionId=policy4["policyVersionId"]
    )
    policy_versions = client.list_policy_versions(policyName=policy_name)
    policy_versions.should.have.key("policyVersions").which.should.have.length_of(5)
    list(
        map(lambda item: item["isDefaultVersion"], policy_versions["policyVersions"])
    ).count(True).should.equal(1)
    default_policy = list(
        filter(lambda item: item["isDefaultVersion"], policy_versions["policyVersions"])
    )
    default_policy[0].should.have.key("versionId").should.equal(
        policy4["policyVersionId"]
    )
    policy = client.get_policy(policyName=policy_name)
    policy.should.have.key("policyName").which.should.equal(policy_name)
    policy.should.have.key("policyArn").which.should_not.be.none
    policy.should.have.key("policyDocument").which.should.equal(
        json.dumps({"version": "version_4"})
    )
    policy.should.have.key("defaultVersionId").which.should.equal(
        policy4["policyVersionId"]
    )
    # A sixth version exceeds the five-version cap and must be rejected.
    with pytest.raises(ClientError) as exc:
        client.create_policy_version(
            policyName=policy_name,
            policyDocument=json.dumps({"version": "version_5"}),
            setAsDefault=False,
        )
    err = exc.value.response["Error"]
    err["Message"].should.equal(
        "The policy %s already has the maximum number of versions (5)" % policy_name
    )
    # Non-default versions can be deleted one by one.
    client.delete_policy_version(policyName=policy_name, policyVersionId="1")
    policy_versions = client.list_policy_versions(policyName=policy_name)
    policy_versions.should.have.key("policyVersions").which.should.have.length_of(4)
    client.delete_policy_version(
        policyName=policy_name, policyVersionId=policy1["policyVersionId"]
    )
    policy_versions = client.list_policy_versions(policyName=policy_name)
    policy_versions.should.have.key("policyVersions").which.should.have.length_of(3)
    client.delete_policy_version(
        policyName=policy_name, policyVersionId=policy2["policyVersionId"]
    )
    policy_versions = client.list_policy_versions(policyName=policy_name)
    policy_versions.should.have.key("policyVersions").which.should.have.length_of(2)
    client.delete_policy_version(
        policyName=policy_name, policyVersionId=policy3["policyVersionId"]
    )
    policy_versions = client.list_policy_versions(policyName=policy_name)
    policy_versions.should.have.key("policyVersions").which.should.have.length_of(1)
    # should fail as it"s the default policy. Should use delete_policy instead
    with pytest.raises(ClientError) as exc:
        client.delete_policy_version(
            policyName=policy_name, policyVersionId=policy4["policyVersionId"]
        )
    err = exc.value.response["Error"]
    err["Message"].should.equal("Cannot delete the default version of a policy")
@mock_iot
def test_delete_policy_validation():
    """A policy attached to a principal cannot be deleted until detached."""
    doc = """{
    "Version": "2012-10-17",
    "Statement":[
        {
            "Effect":"Allow",
            "Action":[
                "iot: *"
            ],
            "Resource":"*"
        }
    ]
  }
  """
    client = boto3.client("iot", region_name="ap-northeast-1")
    cert = client.create_keys_and_certificate(setAsActive=True)
    cert_arn = cert["certificateArn"]
    policy_name = "my-policy"
    client.create_policy(policyName=policy_name, policyDocument=doc)
    client.attach_principal_policy(policyName=policy_name, principal=cert_arn)
    # Deletion while attached must fail and leave the policy in place.
    with pytest.raises(ClientError) as e:
        client.delete_policy(policyName=policy_name)
    e.value.response["Error"]["Message"].should.contain(
        "The policy cannot be deleted as the policy is attached to one or more principals (name=%s)"
        % policy_name
    )
    res = client.list_policies()
    res.should.have.key("policies").which.should.have.length_of(1)
    # After detaching, deletion succeeds.
    client.detach_principal_policy(policyName=policy_name, principal=cert_arn)
    client.delete_policy(policyName=policy_name)
    res = client.list_policies()
    res.should.have.key("policies").which.should.have.length_of(0)
@mock_iot
def test_policy():
    """Basic create/get/list/delete lifecycle of an IoT policy."""
    iot = boto3.client("iot", region_name="ap-northeast-1")
    policy_name = "my-policy"
    document = "{}"
    created = iot.create_policy(policyName=policy_name, policyDocument=document)
    created.should.have.key("policyName").which.should.equal(policy_name)
    created.should.have.key("policyArn").which.should_not.be.none
    created.should.have.key("policyDocument").which.should.equal(document)
    created.should.have.key("policyVersionId").which.should.equal("1")
    fetched = iot.get_policy(policyName=policy_name)
    fetched.should.have.key("policyName").which.should.equal(policy_name)
    fetched.should.have.key("policyArn").which.should_not.be.none
    fetched.should.have.key("policyDocument").which.should.equal(document)
    fetched.should.have.key("defaultVersionId").which.should.equal("1")
    listing = iot.list_policies()
    listing.should.have.key("policies").which.should.have.length_of(1)
    for entry in listing["policies"]:
        entry.should.have.key("policyName").which.should_not.be.none
        entry.should.have.key("policyArn").which.should_not.be.none
    iot.delete_policy(policyName=policy_name)
    listing = iot.list_policies()
    listing.should.have.key("policies").which.should.have.length_of(0)
| |
import random
from pandac.PandaModules import *
from direct.directnotify.DirectNotifyGlobal import *
from toontown.toonbase import TTLocalizer
import random
from direct.distributed.PyDatagram import PyDatagram
from direct.distributed.PyDatagramIterator import PyDatagramIterator
from otp.avatar import AvatarDNA
notify = directNotify.newCategory('SuitDNA')
# All suit head type codes, grouped by department (8 per department, in
# suitDepts order: c, l, m, s) and by level within each department.
suitHeadTypes = ['f',
 'p',
 'ym',
 'mm',
 'ds',
 'hh',
 'cr',
 'tbc',
 'bf',
 'b',
 'dt',
 'ac',
 'bs',
 'sd',
 'le',
 'bw',
 'sc',
 'pp',
 'tw',
 'bc',
 'nc',
 'mb',
 'ls',
 'rb',
 'cc',
 'tm',
 'nd',
 'gh',
 'ms',
 'tf',
 'm',
 'mh']
# Head types that use body model 'a'.
suitATypes = ['ym',
 'hh',
 'tbc',
 'dt',
 'bs',
 'le',
 'bw',
 'pp',
 'nc',
 'rb',
 'nd',
 'tf',
 'm',
 'mh']
# Head types that use body model 'b'.
suitBTypes = ['p',
 'ds',
 'b',
 'ac',
 'sd',
 'bc',
 'ls',
 'tm',
 'ms']
# Head types that use body model 'c'.
suitCTypes = ['f',
 'mm',
 'cr',
 'bf',
 'sc',
 'tw',
 'mb',
 'cc',
 'gh']
# Department codes: Bossbot, Lawbot, Cashbot, Sellbot.
suitDepts = ['c',
 'l',
 'm',
 's']
suitDeptFullnames = {'c': TTLocalizer.Bossbot,
 'l': TTLocalizer.Lawbot,
 'm': TTLocalizer.Cashbot,
 's': TTLocalizer.Sellbot}
# Plural/possessive variants of the department names.
suitDeptFullnamesP = {'c': TTLocalizer.BossbotP,
 'l': TTLocalizer.LawbotP,
 'm': TTLocalizer.CashbotP,
 's': TTLocalizer.SellbotP}
# Department tint colors (RGBA) applied to suit models.
corpPolyColor = VBase4(0.95, 0.75, 0.75, 1.0)
legalPolyColor = VBase4(0.75, 0.75, 0.95, 1.0)
moneyPolyColor = VBase4(0.65, 0.95, 0.85, 1.0)
salesPolyColor = VBase4(0.95, 0.75, 0.95, 1.0)
# Number of distinct suit types available per level slot (one each here).
suitsPerLevel = [1,
 1,
 1,
 1,
 1,
 1,
 1,
 1]
suitsPerDept = 8
# Goon variants: patrol goon and security goon.
goonTypes = ['pg', 'sg']
def getSuitBodyType(name):
    """Return the body model code ('a', 'b' or 'c') for a suit head type.

    NOTE(review): falls through and implicitly returns None (after printing
    a warning) when the name is unknown.
    """
    if name in suitATypes:
        return 'a'
    elif name in suitBTypes:
        return 'b'
    elif name in suitCTypes:
        return 'c'
    else:
        print 'Unknown body type for suit name: ', name
def getSuitDept(name):
    """Return the department code for a suit head type, derived from its
    position in suitHeadTypes (8 types per department), or None if unknown."""
    index = suitHeadTypes.index(name)
    if index < suitsPerDept:
        return suitDepts[0]
    elif index < suitsPerDept * 2:
        return suitDepts[1]
    elif index < suitsPerDept * 3:
        return suitDepts[2]
    elif index < suitsPerDept * 4:
        return suitDepts[3]
    else:
        print 'Unknown dept for suit name: ', name
        return None
    # NOTE(review): this trailing return is unreachable — every branch
    # above already returns.
    return None
def getDeptFullname(dept):
    """Return the localized display name for a department code."""
    fullname = suitDeptFullnames[dept]
    return fullname
def getDeptFullnameP(dept):
    """Return the localized plural display name for a department code."""
    plural_name = suitDeptFullnamesP[dept]
    return plural_name
def getSuitDeptFullname(name):
    """Return the localized department name for a suit head type."""
    dept_code = getSuitDept(name)
    return suitDeptFullnames[dept_code]
def getSuitType(name):
    """Return the suit's 1-based level slot within its department (1-8)."""
    position = suitHeadTypes.index(name)
    return position % suitsPerDept + 1
def getRandomSuitType(level, rng = random):
    """Return a random suit type near `level`, clamped to the range [1, 8].

    Fix: use the supplied `rng` instead of the module-level `random`, so
    callers can pass a seeded generator for reproducible results. The
    default (`rng = random`) preserves the old behavior.
    """
    return rng.randint(max(level - 4, 1), min(level, 8))
def getRandomSuitByDept(dept):
    """Pick a uniformly random suit head type from the given department."""
    base = suitsPerDept * suitDepts.index(dept)
    return suitHeadTypes[base + random.randint(0, 7)]
class SuitDNA(AvatarDNA.AvatarDNA):
    """DNA record describing a suit ('s'), boss cog ('b'), goon ('g'),
    or undefined ('u') avatar."""

    def __init__(self, str = None, type = None, dna = None, r = None, b = None, g = None):
        # `str` is a serialized net string; `type` selects a default DNA kind.
        # (Parameter names shadow builtins, but are kept for caller compatibility.)
        if str != None:
            self.makeFromNetString(str)
        elif type != None:
            if type == 's':
                self.newSuit()
        else:
            self.type = 'u'
        return

    def __str__(self):
        if self.type == 's':
            return 'type = %s\nbody = %s, dept = %s, name = %s' % ('suit',
             self.body,
             self.dept,
             self.name)
        elif self.type == 'b':
            return 'type = boss cog\ndept = %s' % self.dept
        else:
            return 'type undefined'

    def makeNetString(self):
        """Serialize this DNA into a fixed-width datagram string."""
        dg = PyDatagram()
        dg.addFixedString(self.type, 1)
        if self.type == 's':
            dg.addFixedString(self.name, 3)
            dg.addFixedString(self.dept, 1)
        elif self.type == 'b':
            dg.addFixedString(self.dept, 1)
        elif self.type == 'u':
            notify.error('undefined avatar')
        else:
            notify.error('unknown avatar type: ', self.type)
        return dg.getMessage()

    def makeFromNetString(self, string):
        """Populate this DNA from a serialized net string."""
        dg = PyDatagram(string)
        dgi = PyDatagramIterator(dg)
        self.type = dgi.getFixedString(1)
        if self.type == 's':
            self.name = dgi.getFixedString(3)
            self.dept = dgi.getFixedString(1)
            self.body = getSuitBodyType(self.name)
        elif self.type == 'b':
            self.dept = dgi.getFixedString(1)
        else:
            notify.error('unknown avatar type: ', self.type)
        return None

    def __defaultGoon(self):
        # Default goon: first entry in goonTypes ('pg').
        self.type = 'g'
        self.name = goonTypes[0]

    def __defaultSuit(self):
        # Default suit: 'ds' (level-5 Lawbot head type).
        self.type = 's'
        self.name = 'ds'
        self.dept = getSuitDept(self.name)
        self.body = getSuitBodyType(self.name)

    def newSuit(self, name = None):
        """Configure this DNA as a suit, defaulting when no name is given."""
        if name == None:
            self.__defaultSuit()
        else:
            self.type = 's'
            self.name = name
            self.dept = getSuitDept(self.name)
            self.body = getSuitBodyType(self.name)
        return

    def newBossCog(self, dept):
        """Configure this DNA as a boss cog of the given department."""
        self.type = 'b'
        self.dept = dept

    def newSuitRandom(self, level = None, dept = None):
        """Configure this DNA as a random suit, optionally constrained to a
        level (1-8) and/or a department code."""
        self.type = 's'
        if level == None:
            level = random.choice(range(1, len(suitsPerLevel)))
        elif level < 0 or level > len(suitsPerLevel):
            notify.error('Invalid suit level: %d' % level)
        if dept == None:
            dept = random.choice(suitDepts)
        self.dept = dept
        index = suitDepts.index(dept)
        base = index * suitsPerDept
        # Sum the per-level counts below `level` to find this level's slot range.
        offset = 0
        if level > 1:
            for i in range(1, level):
                offset = offset + suitsPerLevel[i - 1]
        bottom = base + offset
        top = bottom + suitsPerLevel[level - 1]
        self.name = suitHeadTypes[random.choice(range(bottom, top))]
        self.body = getSuitBodyType(self.name)
        return

    def newGoon(self, name = None):
        """Configure this DNA as a goon, defaulting when no name is given.

        Fix: the guard previously tested the builtin `type` (never None),
        so the default-goon branch was unreachable; it now tests `name`.
        """
        if name == None:
            self.__defaultGoon()
        else:
            self.type = 'g'
            if name in goonTypes:
                self.name = name
            else:
                notify.error('unknown goon type: ', name)
        return

    def getType(self):
        """Return a human-readable kind for this DNA ('suit' or 'boss')."""
        if self.type == 's':
            type = 'suit'
        elif self.type == 'b':
            type = 'boss'
        else:
            notify.error('Invalid DNA type: ', self.type)
        return type
| |
"""Module for interactive demos using IPython.
This module implements a few classes for running Python scripts interactively
in IPython for demonstrations. With very simple markup (a few tags in
comments), you can control points where the script stops executing and returns
control to IPython.
Provided classes
================
The classes are (see their docstrings for further details):
- Demo: pure python demos
- IPythonDemo: demos with input to be processed by IPython as if it had been
typed interactively (so magics work, as well as any other special syntax you
may have added via input prefilters).
- LineDemo: single-line version of the Demo class. These demos are executed
one line at a time, and require no markup.
- IPythonLineDemo: IPython version of the LineDemo class (the demo is
executed a line at a time, but processed via IPython).
- ClearMixin: mixin to make Demo classes with less visual clutter. It
declares an empty marquee and a pre_cmd that clears the screen before each
block (see Subclassing below).
- ClearDemo, ClearIPDemo: mixin-enabled versions of the Demo and IPythonDemo
classes.
Subclassing
===========
The classes here all include a few methods meant to make customization by
subclassing more convenient. Their docstrings below have some more details:
- marquee(): generates a marquee to provide visible on-screen markers at each
block start and end.
- pre_cmd(): run right before the execution of each block.
- post_cmd(): run right after the execution of each block. If the block
raises an exception, this is NOT called.
Operation
=========
The file is run in its own empty namespace (though you can pass it a string of
arguments as if in a command line environment, and it will see those as
sys.argv). But at each stop, the global IPython namespace is updated with the
current internal demo namespace, so you can work interactively with the data
accumulated so far.
By default, each block of code is printed (with syntax highlighting) before
executing it and you have to confirm execution. This is intended to show the
code to an audience first so you can discuss it, and only proceed with
execution once you agree. There are a few tags which allow you to modify this
behavior.
The supported tags are:
# <demo> stop
Defines block boundaries, the points where IPython stops execution of the
file and returns to the interactive prompt.
You can optionally mark the stop tag with extra dashes before and after the
word 'stop', to help visually distinguish the blocks in a text editor:
# <demo> --- stop ---
# <demo> silent
Make a block execute silently (and hence automatically). Typically used in
cases where you have some boilerplate or initialization code which you need
executed but do not want to be seen in the demo.
# <demo> auto
Make a block execute automatically, but still being printed. Useful for
simple code which does not warrant discussion, since it avoids the extra
manual confirmation.
# <demo> auto_all
This tag can _only_ be in the first block, and if given it overrides the
individual auto tags to make the whole demo fully automatic (no block asks
for confirmation). It can also be given at creation time (or the attribute
set later) to override what's in the file.
While _any_ python file can be run as a Demo instance, if there are no stop
tags the whole file will run in a single block (no different that calling
first %pycat and then %run). The minimal markup to make this useful is to
place a set of stop tags; the other tags are only there to let you fine-tune
the execution.
This is probably best explained with the simple example file below. You can
copy this into a file named ex_demo.py, and try running it via:
from IPython.demo import Demo
d = Demo('ex_demo.py')
d() <--- Call the d object (omit the parens if you have autocall set to 2).
Each time you call the demo object, it runs the next block. The demo object
has a few useful methods for navigation, like again(), edit(), jump(), seek()
and back(). It can be reset for a new run via reset() or reloaded from disk
(in case you've edited the source) via reload(). See their docstrings below.
Example
=======
The following is a very simple example of a valid demo file.
#################### EXAMPLE DEMO <ex_demo.py> ###############################
'''A simple interactive demo to illustrate the use of IPython's Demo class.'''
print 'Hello, welcome to an interactive IPython demo.'
# The mark below defines a block boundary, which is a point where IPython will
# stop execution and return to the interactive prompt. The dashes are actually
# optional and used only as a visual aid to clearly separate blocks while
# editing the demo code.
# <demo> stop
x = 1
y = 2
# <demo> stop
# the mark below makes this block as silent
# <demo> silent
print 'This is a silent block, which gets executed but not printed.'
# <demo> stop
# <demo> auto
print 'This is an automatic block.'
print 'It is executed without asking for confirmation, but printed.'
z = x+y
print 'z=',z
# <demo> stop
# This is just another normal block.
print 'z is now:', z
print 'bye!'
################### END EXAMPLE DEMO <ex_demo.py> ############################
"""
#*****************************************************************************
# Copyright (C) 2005-2006 Fernando Perez. <Fernando.Perez@colorado.edu>
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#
#*****************************************************************************
import exceptions
import os
import re
import shlex
import sys
from IPython.PyColorize import Parser
from IPython.genutils import marquee, file_read, file_readlines
__all__ = ['Demo','IPythonDemo','LineDemo','IPythonLineDemo','DemoError']
class DemoError(exceptions.Exception): pass
def re_mark(mark):
    """Compile a MULTILINE regex matching a '# <demo> <mark>' tag line."""
    pattern = r'^\s*#\s+<demo>\s+%s\s*$' % mark
    return re.compile(pattern, re.MULTILINE)
class Demo(object):
    """Interactive demo runner for a marked-up Python script.

    The file is split at '# <demo> stop' tags; each call to the object
    executes one block, honoring the 'silent', 'auto' and 'auto_all' tags
    (see the module docstring for the full markup description).
    """

    # Tag regexes; re_stop also matches the dashed '--- stop ---' form.
    re_stop = re_mark('-?\s?stop\s?-?')
    re_silent = re_mark('silent')
    re_auto = re_mark('auto')
    re_auto_all = re_mark('auto_all')

    def __init__(self,fname,arg_str='',auto_all=None):
        """Make a new demo object. To run the demo, simply call the object.

        See the module docstring for full details and an example (you can use
        IPython.Demo? in IPython to see it).

        Inputs:

          - fname = filename.

        Optional inputs:

          - arg_str(''): a string of arguments, internally converted to a list
            just like sys.argv, so the demo script can see a similar
            environment.

          - auto_all(None): global flag to run all blocks automatically without
            confirmation. This attribute overrides the block-level tags and
            applies to the whole demo. It is an attribute of the object, and
            can be changed at runtime simply by reassigning it to a boolean
            value.
        """
        self.fname = fname
        self.sys_argv = [fname] + shlex.split(arg_str)
        self.auto_all = auto_all

        # get a few things from ipython. While it's a bit ugly design-wise,
        # it ensures that things like color scheme and the like are always in
        # sync with the ipython mode being used. This class is only meant to
        # be used inside ipython anyways, so it's OK.
        # NOTE(review): __IPYTHON__ is injected by IPython at startup; this
        # class cannot run outside an IPython session.
        self.ip_ns = __IPYTHON__.user_ns
        self.ip_colorize = __IPYTHON__.pycolorize
        self.ip_showtb = __IPYTHON__.showtraceback
        self.ip_runlines = __IPYTHON__.runlines
        self.shell = __IPYTHON__

        # load user data and initialize data structures
        self.reload()

    def reload(self):
        """Reload source from disk and initialize state."""
        # read data and parse into blocks
        self.src = file_read(self.fname)
        src_b = [b.strip() for b in self.re_stop.split(self.src) if b]
        # per-block flags, parallel to src_blocks
        self._silent = [bool(self.re_silent.findall(b)) for b in src_b]
        self._auto = [bool(self.re_auto.findall(b)) for b in src_b]

        # if auto_all is not given (def. None), we read it from the file
        # (the auto_all tag is only honored in the first block)
        if self.auto_all is None:
            self.auto_all = bool(self.re_auto_all.findall(src_b[0]))
        else:
            self.auto_all = bool(self.auto_all)

        # Clean the sources from all markup so it doesn't get displayed when
        # running the demo
        src_blocks = []
        auto_strip = lambda s: self.re_auto.sub('',s)
        for i,b in enumerate(src_b):
            if self._auto[i]:
                src_blocks.append(auto_strip(b))
            else:
                src_blocks.append(b)
        # remove the auto_all marker
        src_blocks[0] = self.re_auto_all.sub('',src_blocks[0])

        self.nblocks = len(src_blocks)
        self.src_blocks = src_blocks
        # also build syntax-highlighted source
        self.src_blocks_colored = map(self.ip_colorize,self.src_blocks)
        # ensure clean namespace and seek offset
        self.reset()

    def reset(self):
        """Reset the namespace and seek pointer to restart the demo"""
        self.user_ns = {}
        self.finished = False
        self.block_index = 0

    def _validate_index(self,index):
        # Valid block indices are 0 .. nblocks-1.
        if index<0 or index>=self.nblocks:
            raise ValueError('invalid block index %s' % index)

    def _get_index(self,index):
        """Get the current block index, validating and checking status.

        Returns None if the demo is finished"""
        if index is None:
            if self.finished:
                print 'Demo finished. Use reset() if you want to rerun it.'
                return None
            index = self.block_index
        else:
            self._validate_index(index)
        return index

    def seek(self,index):
        """Move the current seek pointer to the given block.

        You can use negative indices to seek from the end, with identical
        semantics to those of Python lists."""
        if index<0:
            index = self.nblocks + index
        self._validate_index(index)
        self.block_index = index
        # seeking backwards makes the demo runnable again
        self.finished = False

    def back(self,num=1):
        """Move the seek pointer back num blocks (default is 1)."""
        self.seek(self.block_index-num)

    def jump(self,num=1):
        """Jump a given number of blocks relative to the current one.

        The offset can be positive or negative, defaults to 1."""
        self.seek(self.block_index+num)

    def again(self):
        """Move the seek pointer back one block and re-execute."""
        self.back(1)
        self()

    def edit(self,index=None):
        """Edit a block.

        If no number is given, use the last block executed.

        This edits the in-memory copy of the demo, it does NOT modify the
        original source file. If you want to do that, simply open the file in
        an editor and use reload() when you make changes to the file. This
        method is meant to let you change a block during a demonstration for
        explanatory purposes, without damaging your original script."""

        index = self._get_index(index)
        if index is None:
            return
        # decrease the index by one (unless we're at the very beginning), so
        # that the default demo.edit() call opens up the block we've last run
        if index>0:
            index -= 1
        filename = self.shell.mktempfile(self.src_blocks[index])
        self.shell.hooks.editor(filename,1)
        new_block = file_read(filename)
        # update the source and colored block
        self.src_blocks[index] = new_block
        self.src_blocks_colored[index] = self.ip_colorize(new_block)
        self.block_index = index
        # call to run with the newly edited index
        self()

    def show(self,index=None):
        """Show a single block on screen"""
        index = self._get_index(index)
        if index is None:
            return
        print self.marquee('<%s> block # %s (%s remaining)' %
                           (self.fname,index,self.nblocks-index-1))
        sys.stdout.write(self.src_blocks_colored[index])
        sys.stdout.flush()

    def show_all(self):
        """Show entire demo on screen, block by block"""
        fname = self.fname
        nblocks = self.nblocks
        silent = self._silent
        marquee = self.marquee
        for index,block in enumerate(self.src_blocks_colored):
            if silent[index]:
                print marquee('<%s> SILENT block # %s (%s remaining)' %
                              (fname,index,nblocks-index-1))
            else:
                print marquee('<%s> block # %s (%s remaining)' %
                              (fname,index,nblocks-index-1))
            # trailing comma: the colored block already ends with a newline
            print block,
        sys.stdout.flush()

    def runlines(self,source):
        """Execute a string with one or more lines of code"""
        # plain exec into the demo's private namespace (overridden in
        # IPythonDemo to route through IPython instead)
        exec source in self.user_ns

    def __call__(self,index=None):
        """run a block of the demo.

        If index is given, it should be an integer >=1 and <= nblocks. This
        means that the calling convention is one off from typical Python
        lists. The reason for the inconsistency is that the demo always
        prints 'Block n/N, and N is the total, so it would be very odd to use
        zero-indexing here."""

        index = self._get_index(index)
        if index is None:
            return
        try:
            marquee = self.marquee
            next_block = self.src_blocks[index]
            self.block_index += 1
            if self._silent[index]:
                print marquee('Executing silent block # %s (%s remaining)' %
                              (index,self.nblocks-index-1))
            else:
                self.pre_cmd()
                self.show(index)
                if self.auto_all or self._auto[index]:
                    print marquee('output:')
                else:
                    # trailing comma keeps the prompt on the same line (py2)
                    print marquee('Press <q> to quit, <Enter> to execute...'),
                    ans = raw_input().strip()
                    # any non-empty answer (e.g. 'q') skips execution
                    if ans:
                        print marquee('Block NOT executed')
                        return
            try:
                # run the block with the demo's own argv, then restore
                save_argv = sys.argv
                sys.argv = self.sys_argv
                self.runlines(next_block)
                self.post_cmd()
            finally:
                sys.argv = save_argv
        except:
            self.ip_showtb(filename=self.fname)
        else:
            # expose the demo's accumulated namespace interactively
            self.ip_ns.update(self.user_ns)

        if self.block_index == self.nblocks:
            mq1 = self.marquee('END OF DEMO')
            if mq1:
                # avoid spurious prints if empty marquees are used
                print
                print mq1
                print self.marquee('Use reset() if you want to rerun it.')
            self.finished = True

    # These methods are meant to be overridden by subclasses who may wish to
    # customize the behavior of their demos.
    def marquee(self,txt='',width=78,mark='*'):
        """Return the input string centered in a 'marquee'."""
        return marquee(txt,width,mark)

    def pre_cmd(self):
        """Method called before executing each block."""
        pass

    def post_cmd(self):
        """Method called after executing each block."""
        pass
class IPythonDemo(Demo):
    """Class for interactive demos with IPython's input processing applied.

    This subclasses Demo, but instead of executing each block by the Python
    interpreter (via exec), it actually calls IPython on it, so that any input
    filters which may be in place are applied to the input block.

    If you have an interactive environment which exposes special input
    processing, you can use this class instead to write demo scripts which
    operate exactly as if you had typed them interactively. The default Demo
    class requires the input to be valid, pure Python code.
    """

    def runlines(self,source):
        """Execute a string with one or more lines of code"""
        # route through IPython so prefilters/magics apply, instead of exec
        self.shell.runlines(source)
class LineDemo(Demo):
    """Demo where each line is executed as a separate block.

    The input script should be valid Python code.

    This class doesn't require any markup at all, and it's meant for simple
    scripts (with no nesting or any kind of indentation) which consist of
    multiple lines of input to be executed, one at a time, as if they had been
    typed in the interactive prompt."""

    def reload(self):
        """Reload source from disk and initialize state."""
        # read data and parse into blocks: each non-blank line is one block
        src_b = [l for l in file_readlines(self.fname) if l.strip()]
        nblocks = len(src_b)
        # NOTE(review): readlines() keeps each line's trailing newline, so
        # joining on os.linesep may double line breaks on some platforms —
        # confirm whether that is intended.
        self.src = os.linesep.join(file_readlines(self.fname))
        self._silent = [False]*nblocks
        # line demos run every block automatically, no confirmation
        self._auto = [True]*nblocks
        self.auto_all = True
        self.nblocks = nblocks
        self.src_blocks = src_b
        # also build syntax-highlighted source
        self.src_blocks_colored = map(self.ip_colorize,self.src_blocks)
        # ensure clean namespace and seek offset
        self.reset()
# Combines LineDemo's line-at-a-time parsing with IPythonDemo's runlines.
class IPythonLineDemo(IPythonDemo,LineDemo):
    """Variant of the LineDemo class whose input is processed by IPython."""
    pass
class ClearMixin(object):
    """Mixin that strips visual clutter from Demo subclasses.

    Mixed-in demos draw blank marquees and clear the terminal before every
    block.  For its overrides to win in the MRO, the mixin must come first
    in the inheritance list, e.g.::

        class ClearIPDemo(ClearMixin,IPythonDemo): pass

    which yields an IPythonDemo with this mixin's behavior.
    """

    def marquee(self,txt='',width=78,mark='*'):
        """Return an empty marquee regardless of the arguments."""
        return ''

    def pre_cmd(self):
        """Clear the terminal right before each block is shown/executed."""
        os.system('clear')
# Demo variant that clears the screen and uses blank marquees.
class ClearDemo(ClearMixin,Demo):
    pass
# IPythonDemo variant that clears the screen and uses blank marquees.
class ClearIPDemo(ClearMixin,IPythonDemo):
    pass
| |
#!/usr/bin/env python
#Pupil_ZMQ_ROS Publisher
#ZMQ subscriber to receive gaze and world
#Start ROS node to publish gaze_positions, and world image
#Using customized ROS msg: gaze_positions, gaze, pupil, pupil_positions and surface_position
#Standard imports
import zmq
import sys
import roslib
import rospy
import numpy as np
import cv2
#Specific imports
from msgpack import loads
from cv_bridge import CvBridge
from sensor_msgs.msg import Image
from pupil_ros.msg import gaze_positions, gaze, pupil, pupil_positions, surface_position
from std_msgs.msg import Header
from geometry_msgs.msg import Point
roslib.load_manifest('pupil_ros')
#Convert the first three elements of a tuple to ROS Geometry Point Message
def tupleToPoint(tup):
    """Copy up to the first three elements of tup into a geometry_msgs Point.

    A falsy or short tuple leaves the remaining coordinates at the Point
    default (zero).
    """
    point = Point()
    if tup:
        # zip truncates at three axes, so longer tuples are safely ignored
        for axis, value in zip(('x', 'y', 'z'), tup):
            setattr(point, axis, value)
    return point
#Pupil_ZMQ_ROS: a standalone class to interface Pupil ZMQ messages and ROS environment.
class Pupil_ZMQ_ROS(object):
    """Standalone bridge between Pupil Capture's ZMQ pub/sub API and ROS.

    Subscribes to pupil/gaze/frame/surface messages over ZMQ and republishes
    them as ROS messages (gaze_positions, pupil_positions, Image and
    surface_position).
    """

    def __init__(self, addr='localhost', req_port='50010'): #Port can change
        # Initialize Pupil_ZMQ_ROS object, init ZMQ and ROS
        self.zmq_req = None
        self.zmq_sub = None
        self.ros_gaze_publisher = None
        self.ros_pupil_publisher = None
        self.ros_world_img_publisher = None
        self.cv_bridge = CvBridge()
        # NOTE(review): set True here and again in init_ros(); init_ros()
        # flips it to False on failure, so this default is a placeholder.
        self.ros_started = True
        self.seq = 0
        self.init_zmq(addr, req_port)
        self.init_ros()

    #Initialize ZMQ subscriber
    def init_zmq(self, addr, req_port):
        context = zmq.Context()
        # REQ socket is used once: ask Pupil's remote API for its SUB port.
        self.zmq_req = context.socket(zmq.REQ)
        self.zmq_req.connect("tcp://%s:%s" %(addr,req_port))
        # ask for the sub port
        self.zmq_req.send('SUB_PORT')
        sub_port = self.zmq_req.recv()
        # open a sub port to listen to pupil
        self.zmq_sub = context.socket(zmq.SUB)
        self.zmq_sub.connect("tcp://%s:%s" %(addr,sub_port))
        # set zmq subscriptions to topics
        self.zmq_sub.setsockopt(zmq.SUBSCRIBE, 'pupil.')
        self.zmq_sub.setsockopt(zmq.SUBSCRIBE, 'gaze')
        self.zmq_sub.setsockopt(zmq.SUBSCRIBE, 'frame')
        self.zmq_sub.setsockopt(zmq.SUBSCRIBE, 'surface')
        print 'Pupil_ZMQ_ROS: zmq environment initialized'

    #Initialize ROS node, four publishers on four topics: gaze, pupil, world and surface
    def init_ros(self):
        try:
            rospy.init_node('Pupil_ZMQ_ROS', anonymous=True)
            self.ros_gaze_publisher = rospy.Publisher('/pupil_capture/gaze', gaze_positions, queue_size=10)
            self.ros_pupil_publisher = rospy.Publisher('/pupil_capture/pupil', pupil_positions, queue_size=10)
            self.ros_world_img_publisher = rospy.Publisher('/pupil_capture/world', Image, queue_size=2)
            self.ros_surface_publisher = rospy.Publisher('/pupil_capture/surface', surface_position, queue_size=10)
            self.ros_started = True
            self.seq = 0
            print 'Pupil_ZMQ_ROS: ros environment initialized'
        #ROS except
        except rospy.ROSInterruptException as e:
            self.ros_started = False
            self.seq = 0
            print 'Pupil_ZMQ_ROS: unable to start ros node:', e

    #Spinning loop: receive ZMQ data and publish to ROS topics
    def spin(self):
        if not self.ros_started:
            print 'Pupil_ZMQ_ROS: ros not started'
            return
        # rospy.is_shutdown check inside while loop to enable Ctrl-C termination
        while True:
            if rospy.is_shutdown():
                break
            # receive message from ZMQ subscriber
            zmq_multipart = self.zmq_sub.recv_multipart()
            zmq_topic, zmq_raw_msg = zmq_multipart[0], zmq_multipart[1]
            # ROS header message shared by all messages built from this frame
            # NOTE(review): self.seq is never incremented, so header.seq
            # stays 0 for every message — confirm whether that is intended.
            header = Header()
            header.seq = self.seq
            header.stamp = rospy.get_rostime()
            header.frame_id = "Pupil_ZMQ_ROS"
            zmq_msg = loads(zmq_raw_msg)
            #Scan for topic name:surface
            if 'surface' in zmq_topic:
                gaze_position = loads(zmq_raw_msg, encoding='utf-8')
                gaze_on_screen = gaze_position['gaze_on_srf']
                if len(gaze_on_screen) > 0:
                    # NOTE(review): norm_pos is taken from the LAST entry but
                    # on_srf from the FIRST — confirm this asymmetry.
                    raw_x, raw_y = gaze_on_screen[-1]['norm_pos']
                    on_srf = gaze_on_screen[0]['on_srf']
                    surface_info = surface_position()
                    surface_info.x = raw_x
                    surface_info.y = raw_y
                    surface_info.onsrf = on_srf
                    self.ros_surface_publisher.publish(surface_info)
            #Scan for topic name:pupil
            if 'pupil' in zmq_topic:
                # copy the 3D pupil-detector fields into a pupil_positions msg
                pupil_msg = pupil_positions()
                pupil_msg.header = header
                pupil_info_list = []
                pupil_info = pupil()
                pupil_info.diameter = zmq_msg['diameter']
                pupil_info.confidence = zmq_msg['confidence']
                pupil_info.projected_sphere_axes = tupleToPoint(zmq_msg['projected_sphere'].get('axes'))
                pupil_info.projected_sphere_angle = zmq_msg['projected_sphere'].get('angle')
                pupil_info.projected_sphere_center = tupleToPoint(zmq_msg['projected_sphere'].get('center'))
                pupil_info.model_id = zmq_msg['model_id']
                pupil_info.model_confidence = zmq_msg['model_confidence']
                pupil_info.pupil_timestamp = zmq_msg['timestamp']
                pupil_info.model_birth_timestamp = zmq_msg['model_birth_timestamp']
                pupil_info.topic = zmq_msg['topic']
                pupil_info.sphere_radius = zmq_msg['sphere'].get('radius')
                pupil_info.sphere_center = tupleToPoint(zmq_msg['sphere'].get('center'))
                pupil_info.diameter_3d = zmq_msg['diameter_3d']
                pupil_info.ellipse_axes = tupleToPoint(zmq_msg['ellipse'].get('axes'))
                pupil_info.ellipse_angle = zmq_msg['ellipse'].get('angle')
                pupil_info.ellipse_center = tupleToPoint(zmq_msg['ellipse'].get('center'))
                pupil_info.norm_pos = tupleToPoint(zmq_msg['norm_pos'])
                pupil_info.phi = zmq_msg['phi']
                pupil_info.theta = zmq_msg['theta']
                pupil_info.circle_3d_radius = zmq_msg['circle_3d'].get('radius')
                pupil_info.circle_3d_center = tupleToPoint(zmq_msg['circle_3d'].get('center'))
                pupil_info.circle_3d_normal = tupleToPoint(zmq_msg['circle_3d'].get('normal'))
                pupil_info.id = zmq_msg['id']
                pupil_info_list.append(pupil_info)
                pupil_msg.pupils = pupil_info_list
                self.ros_pupil_publisher.publish(pupil_msg)
            #Gaze data after combining pupil data and gaze mapping plugin
            if zmq_topic == 'gaze':
                gaze_msg = gaze_positions()
                gaze_info_list = []
                gaze_info = gaze()
                gaze_info.confidence = zmq_msg['confidence']
                gaze_info.norm_pos = tupleToPoint(zmq_msg.get('norm_pos'))
                gaze_info.gaze_point_3d = tupleToPoint(zmq_msg.get('gaze_point_3d'))
                gaze_info.gaze_normal_3d = tupleToPoint(zmq_msg.get('gaze_normal_3d'))
                gaze_info.eye_center_3d = tupleToPoint(zmq_msg.get('eye_center_3d'))
                gaze_info.pupil_timestamp = zmq_msg['timestamp']
                gaze_info_list.append(gaze_info)
                gaze_msg.gazes = gaze_info_list
                gaze_msg.header = header
                self.ros_gaze_publisher.publish(gaze_msg)
            #Scan for topic name:frame.world
            if 'frame.world' in zmq_topic:
                # raw bgr frame travels in the third multipart segment
                if zmq_msg['format'] == 'bgr':
                    cv_img = np.frombuffer(zmq_multipart[2], dtype=np.uint8).reshape( zmq_msg['height'], zmq_msg['width'], 3)
                    world_image_msg = self.cv_bridge.cv2_to_imgmsg(cv_img, encoding="bgr8")
                    world_image_msg.header = header
                    self.ros_world_img_publisher.publish(world_image_msg)
        # Disable ROS interface
        self.ros_started = False
#Main spin
# Entry point: optional CLI arguments are "addr [req_port]"; then block in
# spin() until ROS shuts down or the user hits Ctrl-C.
if __name__ == "__main__":
    zmq_ros_pub = None
    if len(sys.argv) >= 3:
        zmq_ros_pub = Pupil_ZMQ_ROS(sys.argv[1], sys.argv[2])
    elif len(sys.argv) == 2:
        zmq_ros_pub = Pupil_ZMQ_ROS(sys.argv[1])
    else:
        zmq_ros_pub = Pupil_ZMQ_ROS()
    # Spinning on ZMQ messages, terminated by Ctrl-C
    zmq_ros_pub.spin()
| |
# %load ../../src/feature/feature_utils.py
# %%writefile ../../src/features/feature_utils.py
"""
Author: Jim Clauwaert
Created in the scope of my PhD
"""
import pandas as pd
import numpy as np
def _pad_align(sequences, start35Box, start10Box, refMax):
    """Align one batch of sequences on a common 35-box start position.

    Each sequence is left-padded with '-' until its 35-box start sits at
    refMax (or trimmed when it starts past refMax), then right-padded with
    '-' to a common length.  Returns (sequences, start35Box, start10Box)
    with the box starts shifted by the applied padding.
    """
    n = len(sequences)
    difLength = refMax - start35Box
    aligned = ['-' * difLength[u] + sequences[u] if difLength[u] >= 0
               else sequences[u][abs(difLength[u]):] for u in range(n)]
    start35Box = np.array([start35Box[u] + difLength[u] for u in range(n)])
    start10Box = np.array([start10Box[u] + difLength[u] for u in range(n)])
    # right-pad so every sequence has the same total length
    maxLength = max(len(s) for s in aligned)
    padded = [s + '-' * (maxLength - len(s)) for s in aligned]
    return padded, start35Box, start10Box


def AlignSequences(dfSequences, pw=False):
    """Align sequences with each other in the dataframe, adding '-' for
    missing nucleotides before and after shorter sequences, and shifting the
    35- and 10-box reference positions accordingly.

    Refactor: the original duplicated the alignment logic between the
    pairwise and single-sequence branches; it is factored into _pad_align.

    Parameters
    -----------
    dfSequences : DataFrame
        Dataframe containing sequence and reference start columns:
        ['sequence', '35boxstart', '10boxstart'] (with '_1'/'_2' suffixes
        when pw is True).
    pw : bool
        When True, align the two paired sequence sets against a shared
        maximum 35-box start.

    Returns
    --------
    dfAlignedSequences : DataFrame
        Copy of dfSequences with aligned sequences and updated box starts.
    """
    dfAlignedSequences = dfSequences.copy()
    if pw is True:
        # both halves of a pair align against one shared reference position
        refMax = max(dfSequences['35boxstart_1'].values.max(),
                     dfSequences['35boxstart_2'].values.max())
        for i in range(1, 3):
            seqs, s35, s10 = _pad_align(
                dfSequences['sequence_{}'.format(i)].values,
                dfSequences['35boxstart_{}'.format(i)].values,
                dfSequences['10boxstart_{}'.format(i)].values,
                refMax)
            dfAlignedSequences['sequence_{}'.format(i)] = seqs
            dfAlignedSequences['35boxstart_{}'.format(i)] = s35
            dfAlignedSequences['10boxstart_{}'.format(i)] = s10
    else:
        seqs, s35, s10 = _pad_align(
            dfSequences['sequence'].values,
            dfSequences['35boxstart'].values,
            dfSequences['10boxstart'].values,
            dfSequences['35boxstart'].values.max())
        dfAlignedSequences['sequence'] = seqs
        dfAlignedSequences['35boxstart'] = s35
        dfAlignedSequences['10boxstart'] = s10
    return dfAlignedSequences
def CreateDummyNucleotideFeatures(sequences, posRange):
    """One-hot encode per-position nucleotides of equal-length sequences.

    Parameters
    -----------
    sequences : 1-dimensional numpy array
        Array of equal-length sequences (str).
    posRange : tuple, 2-element array
        Positional range used purely to label columns; dummy columns come
        out named "<position>_<nucleotide>".

    Returns
    --------
    DataFrame of dummy (0/1) nucleotide features.
    """
    # label each character column with its sequence position
    columns = [str(pos) for pos in range(posRange[0], posRange[1])]
    dfNucleotides = pd.DataFrame(ChopStringVector(sequences), columns=columns)
    return pd.get_dummies(dfNucleotides)
def ChopStringVector(strings):
    """Chop a vector of equal-length strings into a character matrix, one
    row per string.

    Bug fix: the original allocated the matrix with ``np.dtype(str, 1)``,
    which passes ``1`` into the ``align`` flag and yields an unsized
    unicode dtype on modern NumPy, so assigned characters can be truncated
    to ''.  An explicit one-character dtype ('<U1') is used instead.

    Parameters
    -----------
    strings : 1-dimensional numpy array
        numpy array (or list) of equal-length strings

    Returns
    --------
    charMatrix : 2-dimensional numpy array
        Matrix containing the chopped-up strings
    """
    strings = np.array(strings)
    charMatrix = np.empty([len(strings), len(strings[0])], dtype='<U1')
    for i, s in enumerate(strings):
        charMatrix[i] = list(s)
    return charMatrix
def CreateFeaturesFromData(data, seqRegions, pw, shuffle=True):
    """Load one or more CSV datasets and build positional features from them.

    Parameters
    -----------
    data : string or list of strings
        PATH or filename of dataset(s)
    seqRegions : tuple,list
        Two positional ranges from which features are derived, starting at
        the first nucleotide of the 35-box and 10-box respectively, e.g.
        [[0,6],[0,6]].
    pw : bool
        Treat datasets as pairwise (uses PositionalFeaturesPW).
    shuffle : bool
        Randomly permute each dataset's rows before feature creation.

    Returns
    --------
    (dfDataset, featureBox) : concatenated datasets and their features.
    """
    datasets = data if type(data) is list else [data]
    featureFrames = []
    dataFrames = []
    for path in datasets:
        dfCurrent = AlignSequences(pd.read_csv(path), pw)
        if shuffle is True:
            dfCurrent = dfCurrent.reindex(np.random.permutation(dfCurrent.index))
        # pairwise datasets go through the PW feature builder
        builder = PositionalFeaturesPW if pw is True else PositionalFeatures
        featureFrames.append(builder(dfCurrent, seqRegions))
        dataFrames.append(dfCurrent)
    return pd.concat(dataFrames), pd.concat(featureFrames)
def CreateFullDummyDataFrame(posRange):
    """Create the full template of positional nucleotide dummy features.

    Builds five synthetic "sequences" (all-A, all-T, all-C, all-G, and a
    padded one carrying '-') so that pd.get_dummies emits every column a
    real dataset could produce; '-' is only possible at positions < -35 or
    >= -3 relative to the TSS.

    Bug fix: the original allocated arrays with ``np.dtype(str, 1)``, which
    passes ``1`` into the ``align`` flag and yields an unsized unicode
    dtype on modern NumPy, so the fills could be truncated to ''.  Explicit
    '<U1' arrays are used instead.

    Parameters
    -----------
    posRange : tuple
        Range over which the full nucleotide dummy dataframe is created

    Returns
    --------
    fullDummyDataFrame : DataFrame
        Dataframe containing all possible dummy features for positional
        nucleotides
    """
    posRangeCol = [str(x) for x in range(posRange[0], posRange[1])]
    length = len(posRangeCol)
    a = np.full(length, 'A', dtype='<U1')
    t = np.full(length, 'T', dtype='<U1')
    c = np.full(length, 'C', dtype='<U1')
    g = np.full(length, 'G', dtype='<U1')
    # dash row: 'A' placeholders, overwritten with '-' outside [-35, -3)
    dash = np.full(length, 'A', dtype='<U1')
    dash[:(-posRange[0] - 35)] = '-'
    dash[(-posRange[0] - 3):] = '-'
    dataframe = pd.DataFrame(np.vstack((a, t, c, g, dash)), columns=posRangeCol)
    fullDummyDataFrame = pd.get_dummies(dataframe)
    return fullDummyDataFrame
def PositionalFeatures(dfDataset, seqRegions):
    """Create positional dummy features for a promoter dataset.

    Parameters
    -----------
    dfDataset : DataFrame
        Dataframe containing columns sequence and reference start regions
        columnNames = (sequence, 35boxstart, 10boxstart).  NOTE: the
        'sequence' column is uppercased in place (mutates the caller's frame).
    seqRegions : tuple,list
        List containing two positional ranges from which features are derived,
        respectively starting from first nucleotide of 35-box and 10-box
        Example: [[0,6],[0,6]] returns positional features of the range of the
        -35box and -10box respectively.

    Returns
    --------
    dfPositionBox : DataFrame
        Dataframe containing dummy nucleotide features projected onto the
        full template column set, plus spacer_more/spacer_less columns
    """
    # Selecting regions
    dfDataset['sequence'] = dfDataset['sequence'].str.upper()
    sequences = dfDataset['sequence'].values
    start35Box = dfDataset['35boxstart'].values
    start10Box = dfDataset['10boxstart'].values
    seqRegions = np.array(seqRegions)
    # absolute positions: the 35-box anchors at -35, the 10-box at -12
    posRange = [seqRegions[0,0]-35,seqRegions[1,1]-12]
    reg35 = np.array(seqRegions[0])
    reg10 = np.array(seqRegions[1])
    box35 = SelectRegions(sequences, reg35, start35Box)
    box10 = SelectRegions(sequences, reg10, start10Box)
    # spacer deviation from the canonical 17 nt between the boxes, split
    # into "longer than 17" and "shorter than 17" magnitudes
    spacer = start10Box-start35Box-6
    spacerM = [u-17 if u-17>0 else 0 for u in spacer]
    spacerL = [abs(u-17) if u-17<0 else 0 for u in spacer]
    spacerBox = pd.DataFrame({'spacer_more':spacerM,'spacer_less':spacerL})
    # Creating features
    positionBox35 = CreateDummyNucleotideFeatures(box35, reg35-35)
    positionBox10 = CreateDummyNucleotideFeatures(box10, reg10-12)
    positionBoxSS = pd.concat([positionBox35,positionBox10], axis=1)
    # project observed dummies onto the full template so every dataset
    # exposes an identical, fixed column set (missing dummies stay 0)
    dfTemplateDummyBox = CreateFullDummyDataFrame(posRange)
    dfFinalBox = pd.DataFrame(0, range(len(sequences)),columns=dfTemplateDummyBox.columns)
    colNamesResult = positionBoxSS.columns
    for i in colNamesResult:
        if i in dfFinalBox.columns:
            dfFinalBox[i] = positionBoxSS[i]
    dfPositionBox = pd.concat([dfFinalBox, spacerBox], axis=1)
    return dfPositionBox
def PositionalFeaturesPW(dfDataset, seqRegions, subtract=True):
    """Create positional features for a pairwise promoter dataset.

    Parameters
    -----------
    dfDataset : DataFrame
        Pairwise promoter data with columns (sequence_1, 35boxstart_1,
        10boxstart_1, sequence_2, 35boxstart_2, 10boxstart_2, ...); the two
        sequence columns are uppercased in place.
    seqRegions : tuple,list
        Two positional ranges from which features are derived, starting at
        the first nucleotide of the 35-box and 10-box respectively, e.g.
        [[0,6],[0,6]].
    subtract : bool
        When True, return the elementwise difference of the two halves'
        features (halving the number of columns); otherwise concatenate
        both feature sets side by side.

    Returns
    --------
    dfPositionBox : DataFrame of dummy features.
    """
    for col in ('sequence_1', 'sequence_2'):
        dfDataset[col] = dfDataset[col].str.upper()
    # build one single-sequence feature frame per half of the pair
    halves = []
    for suffix in ('1', '2'):
        cols = ['ID_' + suffix, 'sequence_' + suffix, 'mean_score_' + suffix,
                '35boxstart_' + suffix, '10boxstart_' + suffix]
        dfHalf = pd.DataFrame(
            dfDataset[cols].values,
            columns=['ID', 'sequence', 'mean_score', '35boxstart', '10boxstart'])
        halves.append(PositionalFeatures(dfHalf, seqRegions))
    if subtract is True:
        return pd.DataFrame(np.subtract(halves[0].values, halves[1].values),
                            columns=halves[0].columns)
    return pd.concat(halves, axis=1)
def SelectRegions(sequences, ntRange, referencePoint=None):
    """Selects substring of sequence nucleotides

    Parameters
    -----------
    sequences : 1-dimensional numpy array or list
        array containing sequences (str)
    ntRange : tuple
        [start, stop) range of nucleotides with respect to a reference point
    referencePoint : 1-dimensional numpy array or list, optional
        positional reference point for each sequence given in `sequences`.
        If None, `ntRange` is applied directly to every sequence.

    Returns
    --------
    selectedNucleotides : list of str
        nucleotide fragments from each region; positions falling outside a
        sequence are padded with '-' so every fragment has the window length
    """
    # BUG FIX: the original tested `referencePoint.all == None`, which compares
    # a bound ndarray method to None (always False) and raises AttributeError
    # when referencePoint actually is None. Test identity against None instead.
    if referencePoint is None:
        return [seq[ntRange[0]:ntRange[1]] for seq in sequences]
    selectedNucleotides = []
    for u in range(len(sequences)):
        seq = sequences[u]
        start = ntRange[0] + referencePoint[u]
        end = ntRange[1] + referencePoint[u]
        # Pad with '-' wherever the requested window extends beyond the sequence;
        # this collapses the original four pre/diff branches into one expression.
        leftPad = '-' * -start if start < 0 else ''
        overhang = end - len(seq)
        rightPad = '-' * overhang if overhang > 0 else ''
        selectedNucleotides.append(leftPad + seq[max(start, 0):min(end, len(seq))] + rightPad)
    return selectedNucleotides
| |
import datetime
from copy import deepcopy
from django.core.exceptions import FieldError, MultipleObjectsReturned
from django.db import models, transaction
from django.db.utils import IntegrityError
from django.test import TestCase
from django.utils.translation import gettext_lazy
from .models import (
Article, Category, Child, City, District, First, Parent, Record, Relation,
Reporter, School, Student, Third, ToFieldChild,
)
class ManyToOneTests(TestCase):
    """Tests for ForeignKey (many-to-one) relations: forward and reverse
    accessors, related managers (add/set/create), FK assignment and the
    related-object cache, and queries that span relationships."""
    def setUp(self):
        # Create a few Reporters.
        self.r = Reporter(first_name='John', last_name='Smith', email='john@example.com')
        self.r.save()
        self.r2 = Reporter(first_name='Paul', last_name='Jones', email='paul@example.com')
        self.r2.save()
        # Create an Article.
        self.a = Article(headline="This is a test", pub_date=datetime.date(2005, 7, 27), reporter=self.r)
        self.a.save()
    def test_get(self):
        # Article objects have access to their related Reporter objects.
        r = self.a.reporter
        self.assertEqual(r.id, self.r.id)
        self.assertEqual((r.first_name, self.r.last_name), ('John', 'Smith'))
    def test_create(self):
        # You can also instantiate an Article by passing the Reporter's ID
        # instead of a Reporter object.
        a3 = Article(headline="Third article", pub_date=datetime.date(2005, 7, 27), reporter_id=self.r.id)
        a3.save()
        self.assertEqual(a3.reporter.id, self.r.id)
        # Similarly, the reporter ID can be a string.
        a4 = Article(headline="Fourth article", pub_date=datetime.date(2005, 7, 27), reporter_id=str(self.r.id))
        a4.save()
        self.assertEqual(repr(a4.reporter), "<Reporter: John Smith>")
    def test_add(self):
        # Create an Article via the Reporter object.
        new_article = self.r.article_set.create(headline="John's second story", pub_date=datetime.date(2005, 7, 29))
        self.assertEqual(repr(new_article), "<Article: John's second story>")
        self.assertEqual(new_article.reporter.id, self.r.id)
        # Create a new article, and add it to the article set.
        new_article2 = Article(headline="Paul's story", pub_date=datetime.date(2006, 1, 17))
        msg = "<Article: Paul's story> instance isn't saved. Use bulk=False or save the object first."
        with self.assertRaisesMessage(ValueError, msg):
            self.r.article_set.add(new_article2)
        self.r.article_set.add(new_article2, bulk=False)
        self.assertEqual(new_article2.reporter.id, self.r.id)
        self.assertQuerysetEqual(
            self.r.article_set.all(),
            ["<Article: John's second story>", "<Article: Paul's story>", "<Article: This is a test>"]
        )
        # Add the same article to a different article set - check that it moves.
        self.r2.article_set.add(new_article2)
        self.assertEqual(new_article2.reporter.id, self.r2.id)
        self.assertQuerysetEqual(self.r2.article_set.all(), ["<Article: Paul's story>"])
        # Adding an object of the wrong type raises TypeError.
        with transaction.atomic():
            with self.assertRaisesMessage(TypeError, "'Article' instance expected, got <Reporter:"):
                self.r.article_set.add(self.r2)
        self.assertQuerysetEqual(
            self.r.article_set.all(),
            ["<Article: John's second story>", "<Article: This is a test>"]
        )
    def test_set(self):
        new_article = self.r.article_set.create(headline="John's second story", pub_date=datetime.date(2005, 7, 29))
        new_article2 = self.r2.article_set.create(headline="Paul's story", pub_date=datetime.date(2006, 1, 17))
        # Assign the article to the reporter.
        new_article2.reporter = self.r
        new_article2.save()
        self.assertEqual(repr(new_article2.reporter), "<Reporter: John Smith>")
        self.assertEqual(new_article2.reporter.id, self.r.id)
        self.assertQuerysetEqual(self.r.article_set.all(), [
            "<Article: John's second story>",
            "<Article: Paul's story>",
            "<Article: This is a test>",
        ])
        self.assertQuerysetEqual(self.r2.article_set.all(), [])
        # Set the article back again.
        self.r2.article_set.set([new_article, new_article2])
        self.assertQuerysetEqual(self.r.article_set.all(), ["<Article: This is a test>"])
        self.assertQuerysetEqual(
            self.r2.article_set.all(),
            ["<Article: John's second story>", "<Article: Paul's story>"]
        )
        # Funny case - because the ForeignKey cannot be null,
        # existing members of the set must remain.
        self.r.article_set.set([new_article])
        self.assertQuerysetEqual(
            self.r.article_set.all(),
            ["<Article: John's second story>", "<Article: This is a test>"]
        )
        self.assertQuerysetEqual(self.r2.article_set.all(), ["<Article: Paul's story>"])
    def test_reverse_assignment_deprecation(self):
        msg = (
            "Direct assignment to the reverse side of a related set is "
            "prohibited. Use article_set.set() instead."
        )
        with self.assertRaisesMessage(TypeError, msg):
            self.r2.article_set = []
    def test_assign(self):
        new_article = self.r.article_set.create(headline="John's second story", pub_date=datetime.date(2005, 7, 29))
        new_article2 = self.r2.article_set.create(headline="Paul's story", pub_date=datetime.date(2006, 1, 17))
        # Assign the article to the reporter directly using the descriptor.
        new_article2.reporter = self.r
        new_article2.save()
        self.assertEqual(repr(new_article2.reporter), "<Reporter: John Smith>")
        self.assertEqual(new_article2.reporter.id, self.r.id)
        self.assertQuerysetEqual(self.r.article_set.all(), [
            "<Article: John's second story>",
            "<Article: Paul's story>",
            "<Article: This is a test>",
        ])
        self.assertQuerysetEqual(self.r2.article_set.all(), [])
        # Set the article back again using set() method.
        self.r2.article_set.set([new_article, new_article2])
        self.assertQuerysetEqual(self.r.article_set.all(), ["<Article: This is a test>"])
        self.assertQuerysetEqual(
            self.r2.article_set.all(),
            ["<Article: John's second story>", "<Article: Paul's story>"]
        )
        # Because the ForeignKey cannot be null, existing members of the set
        # must remain.
        self.r.article_set.set([new_article])
        self.assertQuerysetEqual(
            self.r.article_set.all(),
            ["<Article: John's second story>", "<Article: This is a test>"]
        )
        self.assertQuerysetEqual(self.r2.article_set.all(), ["<Article: Paul's story>"])
        # Reporter cannot be null - there should not be a clear or remove method
        self.assertFalse(hasattr(self.r2.article_set, 'remove'))
        self.assertFalse(hasattr(self.r2.article_set, 'clear'))
    def test_selects(self):
        self.r.article_set.create(headline="John's second story", pub_date=datetime.date(2005, 7, 29))
        self.r2.article_set.create(headline="Paul's story", pub_date=datetime.date(2006, 1, 17))
        # Reporter objects have access to their related Article objects.
        self.assertQuerysetEqual(self.r.article_set.all(), [
            "<Article: John's second story>",
            "<Article: This is a test>",
        ])
        self.assertQuerysetEqual(self.r.article_set.filter(headline__startswith='This'), ["<Article: This is a test>"])
        self.assertEqual(self.r.article_set.count(), 2)
        self.assertEqual(self.r2.article_set.count(), 1)
        # Get articles by id
        self.assertQuerysetEqual(Article.objects.filter(id__exact=self.a.id), ["<Article: This is a test>"])
        self.assertQuerysetEqual(Article.objects.filter(pk=self.a.id), ["<Article: This is a test>"])
        # Query on an article property
        self.assertQuerysetEqual(Article.objects.filter(headline__startswith='This'), ["<Article: This is a test>"])
        # The API automatically follows relationships as far as you need.
        # Use double underscores to separate relationships.
        # This works as many levels deep as you want. There's no limit.
        # Find all Articles for any Reporter whose first name is "John".
        self.assertQuerysetEqual(
            Article.objects.filter(reporter__first_name__exact='John'),
            ["<Article: John's second story>", "<Article: This is a test>"]
        )
        # Implied __exact also works
        self.assertQuerysetEqual(
            Article.objects.filter(reporter__first_name='John'),
            ["<Article: John's second story>", "<Article: This is a test>"]
        )
        # Query twice over the related field.
        self.assertQuerysetEqual(
            Article.objects.filter(reporter__first_name__exact='John', reporter__last_name__exact='Smith'),
            ["<Article: John's second story>", "<Article: This is a test>"]
        )
        # The underlying query only makes one join when a related table is referenced twice.
        queryset = Article.objects.filter(reporter__first_name__exact='John', reporter__last_name__exact='Smith')
        self.assertNumQueries(1, list, queryset)
        self.assertEqual(queryset.query.get_compiler(queryset.db).as_sql()[0].count('INNER JOIN'), 1)
        # The automatically joined table has a predictable name.
        self.assertQuerysetEqual(
            Article.objects.filter(reporter__first_name__exact='John').extra(
                where=["many_to_one_reporter.last_name='Smith'"]),
            ["<Article: John's second story>", "<Article: This is a test>"]
        )
        # ... and should work fine with the string that comes out of forms.Form.cleaned_data
        self.assertQuerysetEqual(
            (Article.objects
                .filter(reporter__first_name__exact='John')
                .extra(where=["many_to_one_reporter.last_name='%s'" % 'Smith'])),
            ["<Article: John's second story>", "<Article: This is a test>"]
        )
        # Find all Articles for a Reporter.
        # Use direct ID check, pk check, and object comparison
        self.assertQuerysetEqual(
            Article.objects.filter(reporter__id__exact=self.r.id),
            [
                "<Article: John's second story>",
                "<Article: This is a test>",
            ])
        self.assertQuerysetEqual(
            Article.objects.filter(reporter__pk=self.r.id),
            [
                "<Article: John's second story>",
                "<Article: This is a test>",
            ])
        self.assertQuerysetEqual(
            Article.objects.filter(reporter=self.r.id),
            [
                "<Article: John's second story>",
                "<Article: This is a test>",
            ])
        self.assertQuerysetEqual(
            Article.objects.filter(reporter=self.r),
            [
                "<Article: John's second story>",
                "<Article: This is a test>",
            ])
        self.assertQuerysetEqual(
            Article.objects.filter(reporter__in=[self.r.id, self.r2.id]).distinct(),
            [
                "<Article: John's second story>",
                "<Article: Paul's story>",
                "<Article: This is a test>",
            ])
        self.assertQuerysetEqual(
            Article.objects.filter(reporter__in=[self.r, self.r2]).distinct(),
            [
                "<Article: John's second story>",
                "<Article: Paul's story>",
                "<Article: This is a test>",
            ])
        # You can also use a queryset instead of a literal list of instances.
        # The queryset must be reduced to a list of values using values(),
        # then converted into a query
        self.assertQuerysetEqual(
            Article.objects.filter(
                reporter__in=Reporter.objects.filter(first_name='John').values('pk').query
            ).distinct(),
            [
                "<Article: John's second story>",
                "<Article: This is a test>",
            ])
    def test_reverse_selects(self):
        a3 = Article.objects.create(
            headline="Third article",
            pub_date=datetime.date(2005, 7, 27),
            reporter_id=self.r.id,
        )
        Article.objects.create(
            headline="Fourth article",
            pub_date=datetime.date(2005, 7, 27),
            reporter_id=self.r.id,
        )
        john_smith = ["<Reporter: John Smith>"]
        # Reporters can be queried
        self.assertQuerysetEqual(Reporter.objects.filter(id__exact=self.r.id), john_smith)
        self.assertQuerysetEqual(Reporter.objects.filter(pk=self.r.id), john_smith)
        self.assertQuerysetEqual(Reporter.objects.filter(first_name__startswith='John'), john_smith)
        # Reporters can query in opposite direction of ForeignKey definition
        self.assertQuerysetEqual(Reporter.objects.filter(article__id__exact=self.a.id), john_smith)
        self.assertQuerysetEqual(Reporter.objects.filter(article__pk=self.a.id), john_smith)
        self.assertQuerysetEqual(Reporter.objects.filter(article=self.a.id), john_smith)
        self.assertQuerysetEqual(Reporter.objects.filter(article=self.a), john_smith)
        self.assertQuerysetEqual(Reporter.objects.filter(article__in=[self.a.id, a3.id]).distinct(), john_smith)
        self.assertQuerysetEqual(Reporter.objects.filter(article__in=[self.a.id, a3]).distinct(), john_smith)
        self.assertQuerysetEqual(Reporter.objects.filter(article__in=[self.a, a3]).distinct(), john_smith)
        self.assertQuerysetEqual(
            Reporter.objects.filter(article__headline__startswith='T'),
            ["<Reporter: John Smith>", "<Reporter: John Smith>"],
            ordered=False
        )
        self.assertQuerysetEqual(Reporter.objects.filter(article__headline__startswith='T').distinct(), john_smith)
        # Counting in the opposite direction works in conjunction with distinct()
        self.assertEqual(Reporter.objects.filter(article__headline__startswith='T').count(), 2)
        self.assertEqual(Reporter.objects.filter(article__headline__startswith='T').distinct().count(), 1)
        # Queries can go round in circles.
        self.assertQuerysetEqual(
            Reporter.objects.filter(article__reporter__first_name__startswith='John'),
            [
                "<Reporter: John Smith>",
                "<Reporter: John Smith>",
                "<Reporter: John Smith>",
            ],
            ordered=False
        )
        self.assertQuerysetEqual(
            Reporter.objects.filter(article__reporter__first_name__startswith='John').distinct(),
            john_smith
        )
        self.assertQuerysetEqual(Reporter.objects.filter(article__reporter__exact=self.r).distinct(), john_smith)
        # Implied __exact also works.
        self.assertQuerysetEqual(Reporter.objects.filter(article__reporter=self.r).distinct(), john_smith)
        # It's possible to use values() calls across many-to-one relations.
        # (Note, too, that we clear the ordering here so as not to drag the
        # 'headline' field into the columns being used to determine uniqueness)
        d = {'reporter__first_name': 'John', 'reporter__last_name': 'Smith'}
        qs = Article.objects.filter(
            reporter=self.r,
        ).distinct().order_by().values('reporter__first_name', 'reporter__last_name')
        self.assertEqual([d], list(qs))
    def test_select_related(self):
        # Article.objects.select_related().dates() works properly when there
        # are multiple Articles with the same date but different foreign-key
        # objects (Reporters).
        r1 = Reporter.objects.create(first_name='Mike', last_name='Royko', email='royko@suntimes.com')
        r2 = Reporter.objects.create(first_name='John', last_name='Kass', email='jkass@tribune.com')
        Article.objects.create(headline='First', pub_date=datetime.date(1980, 4, 23), reporter=r1)
        Article.objects.create(headline='Second', pub_date=datetime.date(1980, 4, 23), reporter=r2)
        self.assertEqual(
            list(Article.objects.select_related().dates('pub_date', 'day')),
            [datetime.date(1980, 4, 23), datetime.date(2005, 7, 27)]
        )
        self.assertEqual(
            list(Article.objects.select_related().dates('pub_date', 'month')),
            [datetime.date(1980, 4, 1), datetime.date(2005, 7, 1)]
        )
        self.assertEqual(
            list(Article.objects.select_related().dates('pub_date', 'year')),
            [datetime.date(1980, 1, 1), datetime.date(2005, 1, 1)]
        )
    def test_delete(self):
        self.r.article_set.create(headline="John's second story", pub_date=datetime.date(2005, 7, 29))
        self.r2.article_set.create(headline="Paul's story", pub_date=datetime.date(2006, 1, 17))
        Article.objects.create(headline="Third article", pub_date=datetime.date(2005, 7, 27), reporter_id=self.r.id)
        Article.objects.create(
            headline="Fourth article",
            pub_date=datetime.date(2005, 7, 27),
            reporter_id=str(self.r.id),
        )
        # If you delete a reporter, his articles will be deleted.
        self.assertQuerysetEqual(
            Article.objects.all(),
            [
                "<Article: Fourth article>",
                "<Article: John's second story>",
                "<Article: Paul's story>",
                "<Article: Third article>",
                "<Article: This is a test>",
            ]
        )
        self.assertQuerysetEqual(
            Reporter.objects.order_by('first_name'),
            ["<Reporter: John Smith>", "<Reporter: Paul Jones>"]
        )
        self.r2.delete()
        self.assertQuerysetEqual(
            Article.objects.all(),
            [
                "<Article: Fourth article>",
                "<Article: John's second story>",
                "<Article: Third article>",
                "<Article: This is a test>",
            ]
        )
        self.assertQuerysetEqual(Reporter.objects.order_by('first_name'), ["<Reporter: John Smith>"])
        # You can delete using a JOIN in the query.
        Reporter.objects.filter(article__headline__startswith='This').delete()
        self.assertQuerysetEqual(Reporter.objects.all(), [])
        self.assertQuerysetEqual(Article.objects.all(), [])
    def test_explicit_fk(self):
        # Create a new Article with get_or_create using an explicit value
        # for a ForeignKey.
        a2, created = Article.objects.get_or_create(
            headline="John's second test",
            pub_date=datetime.date(2011, 5, 7),
            reporter_id=self.r.id,
        )
        self.assertTrue(created)
        self.assertEqual(a2.reporter.id, self.r.id)
        # You can specify filters containing the explicit FK value.
        self.assertQuerysetEqual(
            Article.objects.filter(reporter_id__exact=self.r.id),
            ["<Article: John's second test>", "<Article: This is a test>"]
        )
        # Create an Article by Paul for the same date.
        a3 = Article.objects.create(
            headline="Paul's commentary",
            pub_date=datetime.date(2011, 5, 7),
            reporter_id=self.r2.id,
        )
        self.assertEqual(a3.reporter.id, self.r2.id)
        # Get should respect explicit foreign keys as well.
        msg = 'get() returned more than one Article -- it returned 2!'
        with self.assertRaisesMessage(MultipleObjectsReturned, msg):
            Article.objects.get(reporter_id=self.r.id)
        self.assertEqual(
            repr(a3),
            repr(Article.objects.get(reporter_id=self.r2.id, pub_date=datetime.date(2011, 5, 7)))
        )
    def test_deepcopy_and_circular_references(self):
        # Regression for #12876 -- Model methods that include queries that
        # recurse don't cause recursion depth problems under deepcopy.
        self.r.cached_query = Article.objects.filter(reporter=self.r)
        self.assertEqual(repr(deepcopy(self.r)), "<Reporter: John Smith>")
    def test_manager_class_caching(self):
        # The dynamically-built related manager class is cached and reused.
        r1 = Reporter.objects.create(first_name='Mike')
        r2 = Reporter.objects.create(first_name='John')
        # Same twice
        self.assertIs(r1.article_set.__class__, r1.article_set.__class__)
        # Same as each other
        self.assertIs(r1.article_set.__class__, r2.article_set.__class__)
    def test_create_relation_with_gettext_lazy(self):
        # A lazy translation object is resolved to str when saved to the database.
        reporter = Reporter.objects.create(first_name='John', last_name='Smith', email='john.smith@example.com')
        lazy = gettext_lazy('test')
        reporter.article_set.create(headline=lazy, pub_date=datetime.date(2011, 6, 10))
        notlazy = str(lazy)
        article = reporter.article_set.get()
        self.assertEqual(article.headline, notlazy)
    def test_values_list_exception(self):
        # Invalid field names in values_list() raise FieldError listing the choices.
        expected_message = "Cannot resolve keyword 'notafield' into field. Choices are: %s"
        reporter_fields = ', '.join(sorted(f.name for f in Reporter._meta.get_fields()))
        with self.assertRaisesMessage(FieldError, expected_message % reporter_fields):
            Article.objects.values_list('reporter__notafield')
        article_fields = ', '.join(['EXTRA'] + sorted(f.name for f in Article._meta.get_fields()))
        with self.assertRaisesMessage(FieldError, expected_message % article_fields):
            Article.objects.extra(select={'EXTRA': 'EXTRA_SELECT'}).values_list('notafield')
    def test_fk_assignment_and_related_object_cache(self):
        # Tests of ForeignKey assignment and the related-object cache (see #6886).
        p = Parent.objects.create(name="Parent")
        c = Child.objects.create(name="Child", parent=p)
        # Look up the object again so that we get a "fresh" object.
        c = Child.objects.get(name="Child")
        p = c.parent
        # Accessing the related object again returns the exactly same object.
        self.assertIs(c.parent, p)
        # But if we kill the cache, we get a new object.
        del c._parent_cache
        self.assertIsNot(c.parent, p)
        # Assigning a new object results in that object getting cached immediately.
        p2 = Parent.objects.create(name="Parent 2")
        c.parent = p2
        self.assertIs(c.parent, p2)
        # Assigning None succeeds if field is null=True.
        p.bestchild = None
        self.assertIsNone(p.bestchild)
        # bestchild should still be None after saving.
        p.save()
        self.assertIsNone(p.bestchild)
        # bestchild should still be None after fetching the object again.
        p = Parent.objects.get(name="Parent")
        self.assertIsNone(p.bestchild)
        # Assigning None will not fail: Child.parent is null=False.
        setattr(c, "parent", None)
        # You also can't assign an object of the wrong type here
        msg = (
            'Cannot assign "<First: First object (1)>": "Child.parent" must '
            'be a "Parent" instance.'
        )
        with self.assertRaisesMessage(ValueError, msg):
            setattr(c, "parent", First(id=1, second=1))
        # You can assign None to Child.parent during object creation.
        Child(name='xyzzy', parent=None)
        # But when trying to save a Child with parent=None, the database will
        # raise IntegrityError.
        with self.assertRaises(IntegrityError), transaction.atomic():
            Child.objects.create(name='xyzzy', parent=None)
        # Creation using keyword argument should cache the related object.
        p = Parent.objects.get(name="Parent")
        c = Child(parent=p)
        self.assertIs(c.parent, p)
        # Creation using keyword argument and unsaved related instance (#8070).
        p = Parent()
        msg = "save() prohibited to prevent data loss due to unsaved related object 'parent'."
        with self.assertRaisesMessage(ValueError, msg):
            Child.objects.create(parent=p)
        msg = "save() prohibited to prevent data loss due to unsaved related object 'parent'."
        with self.assertRaisesMessage(ValueError, msg):
            ToFieldChild.objects.create(parent=p)
        # Creation using attname keyword argument and an id will cause the
        # related object to be fetched.
        p = Parent.objects.get(name="Parent")
        c = Child(parent_id=p.id)
        self.assertIsNot(c.parent, p)
        self.assertEqual(c.parent, p)
    def test_fk_to_bigautofield(self):
        # FKs to a BigAutoField target handle ids beyond the 32-bit range.
        ch = City.objects.create(name='Chicago')
        District.objects.create(city=ch, name='Far South')
        District.objects.create(city=ch, name='North')
        ny = City.objects.create(name='New York', id=2 ** 33)
        District.objects.create(city=ny, name='Brooklyn')
        District.objects.create(city=ny, name='Manhattan')
    def test_multiple_foreignkeys(self):
        # Test of multiple ForeignKeys to the same model (bug #7125).
        c1 = Category.objects.create(name='First')
        c2 = Category.objects.create(name='Second')
        c3 = Category.objects.create(name='Third')
        r1 = Record.objects.create(category=c1)
        r2 = Record.objects.create(category=c1)
        r3 = Record.objects.create(category=c2)
        r4 = Record.objects.create(category=c2)
        r5 = Record.objects.create(category=c3)
        Relation.objects.create(left=r1, right=r2)
        Relation.objects.create(left=r3, right=r4)
        Relation.objects.create(left=r1, right=r3)
        Relation.objects.create(left=r5, right=r2)
        Relation.objects.create(left=r3, right=r2)
        q1 = Relation.objects.filter(left__category__name__in=['First'], right__category__name__in=['Second'])
        self.assertQuerysetEqual(q1, ["<Relation: First - Second>"])
        q2 = Category.objects.filter(record__left_set__right__category__name='Second').order_by('name')
        self.assertQuerysetEqual(q2, ["<Category: First>", "<Category: Second>"])
        p = Parent.objects.create(name="Parent")
        c = Child.objects.create(name="Child", parent=p)
        msg = 'Cannot assign "%r": "Child.parent" must be a "Parent" instance.' % c
        with self.assertRaisesMessage(ValueError, msg):
            Child.objects.create(name="Grandchild", parent=c)
    def test_fk_instantiation_outside_model(self):
        # Regression for #12190 -- Should be able to instantiate a FK outside
        # of a model, and interrogate its related field.
        cat = models.ForeignKey(Category, models.CASCADE)
        self.assertEqual('id', cat.remote_field.get_related_field().name)
    def test_relation_unsaved(self):
        # The <field>_set manager does not join on Null value fields (#17541)
        Third.objects.create(name='Third 1')
        Third.objects.create(name='Third 2')
        th = Third(name="testing")
        # The object isn't saved and thus the relation field is null - we won't even
        # execute a query in this case.
        with self.assertNumQueries(0):
            self.assertEqual(th.child_set.count(), 0)
        th.save()
        # Now the model is saved, so we will need to execute a query.
        with self.assertNumQueries(1):
            self.assertEqual(th.child_set.count(), 0)
    def test_related_object(self):
        public_school = School.objects.create(is_public=True)
        public_student = Student.objects.create(school=public_school)
        private_school = School.objects.create(is_public=False)
        private_student = Student.objects.create(school=private_school)
        # Only one school is available via all() due to the custom default manager.
        self.assertSequenceEqual(School.objects.all(), [public_school])
        self.assertEqual(public_student.school, public_school)
        # Make sure the base manager is used so that a student can still access
        # its related school even if the default manager doesn't normally
        # allow it.
        self.assertEqual(private_student.school, private_school)
        School._meta.base_manager_name = 'objects'
        School._meta._expire_cache()
        try:
            private_student = Student.objects.get(pk=private_student.pk)
            with self.assertRaises(School.DoesNotExist):
                private_student.school
        finally:
            School._meta.base_manager_name = None
            School._meta._expire_cache()
    def test_hasattr_related_object(self):
        # The exception raised on attribute access when a related object
        # doesn't exist should be an instance of a subclass of `AttributeError`
        # refs #21563
        self.assertFalse(hasattr(Article(), 'reporter'))
    def test_clear_after_prefetch(self):
        # Mutating a prefetched related manager must not be fooled by the cache.
        c = City.objects.create(name='Musical City')
        District.objects.create(name='Ladida', city=c)
        city = City.objects.prefetch_related('districts').get(id=c.id)
        self.assertQuerysetEqual(city.districts.all(), ['<District: Ladida>'])
        city.districts.clear()
        self.assertQuerysetEqual(city.districts.all(), [])
    def test_remove_after_prefetch(self):
        c = City.objects.create(name='Musical City')
        d = District.objects.create(name='Ladida', city=c)
        city = City.objects.prefetch_related('districts').get(id=c.id)
        self.assertQuerysetEqual(city.districts.all(), ['<District: Ladida>'])
        city.districts.remove(d)
        self.assertQuerysetEqual(city.districts.all(), [])
    def test_add_after_prefetch(self):
        c = City.objects.create(name='Musical City')
        District.objects.create(name='Ladida', city=c)
        d2 = District.objects.create(name='Ladidu')
        city = City.objects.prefetch_related('districts').get(id=c.id)
        self.assertEqual(city.districts.count(), 1)
        city.districts.add(d2)
        self.assertEqual(city.districts.count(), 2)
    def test_set_after_prefetch(self):
        c = City.objects.create(name='Musical City')
        District.objects.create(name='Ladida', city=c)
        d2 = District.objects.create(name='Ladidu')
        city = City.objects.prefetch_related('districts').get(id=c.id)
        self.assertEqual(city.districts.count(), 1)
        city.districts.set([d2])
        self.assertQuerysetEqual(city.districts.all(), ['<District: Ladidu>'])
    def test_add_then_remove_after_prefetch(self):
        c = City.objects.create(name='Musical City')
        District.objects.create(name='Ladida', city=c)
        d2 = District.objects.create(name='Ladidu')
        city = City.objects.prefetch_related('districts').get(id=c.id)
        self.assertEqual(city.districts.count(), 1)
        city.districts.add(d2)
        self.assertEqual(city.districts.count(), 2)
        city.districts.remove(d2)
        self.assertEqual(city.districts.count(), 1)
| |
# -----------------------------------------------------------------------------------------------------
# CONDOR
# Simulator for diffractive single-particle imaging experiments with X-ray lasers
# http://xfel.icm.uu.se/condor/
# -----------------------------------------------------------------------------------------------------
# Copyright 2016 Max Hantke, Filipe R.N.C. Maia, Tomas Ekeberg
# Condor is distributed under the terms of the BSD 2-Clause License
# -----------------------------------------------------------------------------------------------------
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# -----------------------------------------------------------------------------------------------------
# General note:
# All variables are in SI units by default. Exceptions explicit by variable name.
# -----------------------------------------------------------------------------------------------------
# TO DO:
# Take into account illumination profile
from __future__ import print_function, absolute_import # Compatibility with python 2 and 3
import numpy, os, sys, copy
import logging
logger = logging.getLogger(__name__)
from condor.utils.log import log,log_execution_time
from condor.utils.log import log_and_raise_error,log_warning,log_info,log_debug
import condor.utils.config
from condor.utils.pixelmask import PixelMask
import condor.utils.sphere_diffraction
import condor.utils.spheroid_diffraction
import condor.utils.scattering_vector
import condor.utils.resample
from condor.utils.rotation import Rotation
import condor.particle
import condor.utils.nfft
def experiment_from_configfile(configfile):
    """
    Initialise Experiment instance from configuration file

    *See also:*

      - :class:`condor.experiment.Experiment`

      - `Composing a configuration file <configfile.html>`_
    """
    # Parse the file into a plain dictionary and delegate to the dict-based factory.
    conf_dict = condor.utils.config.read_configfile(configfile)
    return experiment_from_configdict(conf_dict)
def experiment_from_configdict(configdict):
    """
    Initialise Experiment instance from a dictionary

    *See also:*

      - :class:`condor.experiment.Experiment`

      - `Composing a configuration file <configdict.html>`_
    """
    # Source
    source = condor.Source(**configdict["source"])
    # Particles: every key that begins with "particle" describes one particle model.
    particle_names = [key for key in configdict.keys() if key.startswith("particle")]
    if len(particle_names) == 0:
        log_and_raise_error(logger, "No particles defined.")
    particles = {}
    for name in particle_names:
        for prefix, model_cls in (
                ("particle_sphere", condor.ParticleSphere),
                ("particle_spheroid", condor.ParticleSpheroid),
                ("particle_map", condor.ParticleMap),
                ("particle_atoms", condor.ParticleAtoms)):
            if name.startswith(prefix):
                particles[name] = model_cls(**configdict[name])
                break
        else:
            log_and_raise_error(logger, "Particle model for %s is not implemented." % name)
    # Detector
    detector = condor.Detector(**configdict["detector"])
    return Experiment(source, particles, detector)
class Experiment:
"""
Class for X-ray diffraction experiment
Args:
:source: Source instance
:particles: Dictionary of particle instances
:detector: Detector instance
"""
def __init__(self, source, particles, detector):
self.source = source
for n,p in particles.items():
if n.startswith("particle_sphere"):
if not isinstance(p, condor.particle.ParticleSphere):
log_and_raise_error(logger, "Particle %s is not a condor.particle.ParticleSphere instance." % n)
elif n.startswith("particle_spheroid"):
if not isinstance(p, condor.particle.ParticleSpheroid):
log_and_raise_error(logger, "Particle %s is not a condor.particle.ParticleSpheroid instance." % n)
elif n.startswith("particle_map"):
if not isinstance(p, condor.particle.ParticleMap):
log_and_raise_error(logger, "Particle %s is not a condor.particle.ParticleMap instance." % n)
elif n.startswith("particle_atoms"):
if not isinstance(p, condor.particle.ParticleAtoms):
log_and_raise_error(logger, "Particle %s is not a condor.particle.ParticleAtoms instance." % n)
else:
log_and_raise_error(logger, "The particle model name %s is invalid. The name has to start with either particle_sphere, particle_spheroid, particle_map or particle_atoms.")
self.particles = particles
self.detector = detector
self._qmap_cache = {}
def get_conf(self):
"""
Get configuration in form of a dictionary. Another identically configured Experiment instance can be initialised by:
.. code-block:: python
conf = E0.get_conf() # E0: already existing Experiment instance
E1 = condor.experiment_from_configdict(conf) # E1: new Experiment instance with the same configuration as E0
"""
conf = {}
conf.update(self.source.get_conf())
for n,p in self.particles.items():
conf[n] = p.get_conf()
conf.update(self.detector.get_conf())
return conf
def _get_next_particles(self):
D_particles = {}
while len(D_particles) == 0:
i = 0
for p in self.particles.values():
n = p.get_next_number_of_particles()
for i_n in range(n):
D_particles["particle_%02i" % i] = p.get_next()
i += 1
N = len(D_particles)
if N == 0:
log_info(logger, "Miss - no particles in the interaction volume. Shooting again...")
else:
log_debug(logger, "%i particles" % N)
return D_particles
    @log_execution_time(logger)
    def propagate(self, save_map3d=False, save_qmap=False):
        """Simulate one 2D diffraction pattern (delegates to _propagate with ndim=2)."""
        return self._propagate(save_map3d=save_map3d, save_qmap=save_qmap, ndim=2)
    def propagate3d(self, qn=None, qmax=None):
        """Compute a 3D Fourier volume (delegates to _propagate with ndim=3).

        NOTE(review): _propagate overwrites qn and qmax for ndim=3 (see its
        else-branch), so these arguments appear to have no effect — confirm.
        """
        return self._propagate(ndim=3, qn=qn, qmax=qmax)
    def _propagate(self, save_map3d=False, save_qmap=False, ndim=2, qn=None, qmax=None):
        """Core simulation: superimpose the scattering amplitudes of all particles
        and detect photons.

        :param save_map3d: if True, store the refractive-index map (and its
            sampling dx) in the per-particle output dict (map particles only)
        :param save_qmap: if True, store the per-particle qmap in the output
        :param ndim: 2 for a detector-plane pattern, 3 for a Fourier volume
        :param qn: 3D grid size (NOTE(review): unconditionally overwritten below
            for ndim=3 — confirm the parameter is intentionally ignored)
        :param qmax: 3D grid extent (same note as qn)
        :returns: nested output dict with keys "source", "particles",
            "detector" and "entry_1" (keys starting with "_" stripped)
        """
        if ndim not in [2,3]:
            log_and_raise_error(logger, "ndim = %i is an invalid input. Has to be either 2 or 3." % ndim)
        log_debug(logger, "Start propagation")
        # Iterate objects: draw the next realisation of source, particles, detector
        D_source = self.source.get_next()
        D_particles = self._get_next_particles()
        D_detector = self.detector.get_next()
        # Pull out variables
        nx = D_detector["nx"]
        ny = D_detector["ny"]
        cx = D_detector["cx"]
        cy = D_detector["cy"]
        pixel_size = D_detector["pixel_size"]
        detector_distance = D_detector["distance"]
        wavelength = D_source["wavelength"]
        # Qmap without rotation
        if ndim == 2:
            qmap0 = self.detector.generate_qmap(wavelength, cx=cx, cy=cy, extrinsic_rotation=None)
        else:
            # For a 3D volume the grid extent/size are derived from the detector
            # geometry, overriding any qn/qmax arguments.
            qmax = numpy.sqrt((self.detector.get_q_max(wavelength, pos="edge")**2).sum())
            qn = max([nx, ny])
            qmap0 = self.detector.generate_qmap_3d(wavelength, qn=qn, qmax=qmax, extrinsic_rotation=None, order='xyz')
            if self.detector.solid_angle_correction:
                log_and_raise_error(logger, "Carrying out solid angle correction for a simulation of a 3D Fourier volume does not make sense. Please set solid_angle_correction=False for your Detector and try again.")
                return
        qmap_singles = {}
        F_tot = 0.
        # Calculate patterns of all single particles individually
        for particle_key, D_particle in D_particles.items():
            p = D_particle["_class_instance"]
            # Intensity at interaction point
            pos = D_particle["position"]
            D_particle["intensity"] = self.source.get_intensity(pos, "ph/m2", pulse_energy=D_source["pulse_energy"])
            I_0 = D_particle["intensity"]
            # Calculate primary wave amplitude
            # F0 = sqrt(I_0) 2pi/wavelength^2
            F0 = numpy.sqrt(I_0)*2*numpy.pi/wavelength**2
            D_particle["F0"] = F0
            # 3D Orientation
            extrinsic_rotation = Rotation(values=D_particle["extrinsic_quaternion"], formalism="quaternion")
            if isinstance(p, condor.particle.ParticleSphere) or isinstance(p, condor.particle.ParticleSpheroid) or isinstance(p, condor.particle.ParticleMap):
                # Solid angles
                if self.detector.solid_angle_correction:
                    Omega_p = self.detector.get_all_pixel_solid_angles(cx, cy)
                else:
                    Omega_p = pixel_size**2 / detector_distance**2
                # UNIFORM SPHERE
                if isinstance(p, condor.particle.ParticleSphere):
                    # Refractive index
                    dn = p.get_dn(wavelength)
                    # Scattering vectors
                    if ndim == 2:
                        qmap = self.get_qmap(nx=nx, ny=ny, cx=cx, cy=cy, pixel_size=pixel_size, detector_distance=detector_distance, wavelength=wavelength, extrinsic_rotation=None)
                    else:
                        qmap = qmap0
                    q = numpy.sqrt((qmap**2).sum(axis=ndim))
                    # Intensity scaling factor
                    R = D_particle["diameter"]/2.
                    V = 4/3.*numpy.pi*R**3
                    K = (F0*V*dn)**2
                    # Pattern
                    F = condor.utils.sphere_diffraction.F_sphere_diffraction(K, q, R) * numpy.sqrt(Omega_p)
                # UNIFORM SPHEROID
                elif isinstance(p, condor.particle.ParticleSpheroid):
                    if ndim == 3:
                        log_and_raise_error(logger, "Spheroid simulation with ndim = 3 is not supported.")
                        return
                    # Refractive index
                    dn = p.get_dn(wavelength)
                    # Scattering vectors
                    qmap = self.get_qmap(nx=nx, ny=ny, cx=cx, cy=cy, pixel_size=pixel_size, detector_distance=detector_distance, wavelength=wavelength, extrinsic_rotation=None, order="xyz")
                    qx = qmap[:,:,0]
                    qy = qmap[:,:,1]
                    # Intensity scaling factor
                    R = D_particle["diameter"]/2.
                    V = 4/3.*numpy.pi*R**3
                    K = (F0*V*abs(dn))**2
                    # Geometrical factors
                    a = condor.utils.spheroid_diffraction.to_spheroid_semi_diameter_a(D_particle["diameter"], D_particle["flattening"])
                    c = condor.utils.spheroid_diffraction.to_spheroid_semi_diameter_c(D_particle["diameter"], D_particle["flattening"])
                    # Pattern
                    # Spheroid axis before rotation
                    v0 = numpy.array([0.,1.,0.])
                    v1 = extrinsic_rotation.rotate_vector(v0)
                    theta = numpy.arcsin(v1[2])
                    phi = numpy.arctan2(-v1[0],v1[1])
                    F = condor.utils.spheroid_diffraction.F_spheroid_diffraction(K, qx, qy, a, c, theta, phi) * numpy.sqrt(Omega_p)
                # MAP
                elif isinstance(p, condor.particle.ParticleMap):
                    # Resolution
                    dx_required  = self.detector.get_resolution_element_r(wavelength, cx=cx, cy=cy, center_variation=False)
                    dx_suggested = self.detector.get_resolution_element_r(wavelength, center_variation=True)
                    # Scattering vectors (the nfft requires order z,y,x)
                    if ndim == 2:
                        qmap = self.get_qmap(nx=nx, ny=ny, cx=cx, cy=cy, pixel_size=pixel_size, detector_distance=detector_distance, wavelength=wavelength, extrinsic_rotation=extrinsic_rotation, order="zyx")
                    else:
                        qmap = self.detector.generate_qmap_3d(wavelength=wavelength, qn=qn, qmax=qmax, extrinsic_rotation=extrinsic_rotation, order="zyx")
                    # Generate map
                    map3d_dn, dx = p.get_new_dn_map(D_particle, dx_required, dx_suggested, wavelength)
                    log_debug(logger, "Sampling of map: dx_required = %e m, dx_suggested = %e m, dx = %e m" % (dx_required, dx_suggested, dx))
                    if save_map3d:
                        D_particle["map3d_dn"] = map3d_dn
                        D_particle["dx"] = dx
                    # Rescale and shape qmap for nfft
                    qmap_scaled = dx * qmap / (2. * numpy.pi)
                    qmap_shaped = qmap_scaled.reshape(int(qmap_scaled.size/3), 3)
                    # Check inputs: the nfft expects coordinates in [-0.5, 0.5)
                    invalid_mask = ~((qmap_shaped>=-0.5) * (qmap_shaped<0.5))
                    if numpy.any(invalid_mask):
                        qmap_shaped[invalid_mask] = 0.
                        log_warning(logger, "%i invalid pixel positions." % invalid_mask.sum())
                    log_debug(logger, "Map3d input shape: (%i,%i,%i), number of dimensions: %i, sum %f" % (map3d_dn.shape[0], map3d_dn.shape[1], map3d_dn.shape[2], len(list(map3d_dn.shape)), abs(map3d_dn).sum()))
                    if (numpy.isfinite(abs(map3d_dn))==False).sum() > 0:
                        log_warning(logger, "There are infinite values in the dn map of the object.")
                    log_debug(logger, "Scattering vectors shape: (%i,%i); Number of dimensions: %i" % (qmap_shaped.shape[0], qmap_shaped.shape[1], len(list(qmap_shaped.shape))))
                    if (numpy.isfinite(qmap_shaped)==False).sum() > 0:
                        log_warning(logger, "There are infinite values in the scattering vectors.")
                    # NFFT
                    fourier_pattern = log_execution_time(logger)(condor.utils.nfft.nfft)(map3d_dn, qmap_shaped)
                    # Check output - masking in case of invalid values
                    if numpy.any(invalid_mask):
                        fourier_pattern[invalid_mask.any(axis=1)] = numpy.nan
                    # reshaping
                    fourier_pattern = numpy.reshape(fourier_pattern, tuple(list(qmap_scaled.shape)[:-1]))
                    log_debug(logger, "Generated pattern of shape %s." % str(fourier_pattern.shape))
                    F = F0 * fourier_pattern * dx**3 * numpy.sqrt(Omega_p)
            # ATOMS
            elif isinstance(p, condor.particle.ParticleAtoms):
                # Import here to make other functionalities of Condor independent of spsim
                import spsim
                # Check version
                from distutils.version import StrictVersion
                spsim_version_min = "0.1.0"
                if not hasattr(spsim, "__version__") or StrictVersion(spsim.__version__) < StrictVersion(spsim_version_min):
                    log_and_raise_error(logger, "Your spsim version is too old. Please install the newest spsim version and try again.")
                    sys.exit(0)
                # Create options struct
                opts = condor.utils.config._conf_to_spsim_opts(D_source, D_particle, D_detector, ndim=ndim, qn=qn, qmax=qmax)
                spsim.write_options_file("./spsim.confout",opts)
                # Create molecule struct
                mol = spsim.get_molecule_from_atoms(D_particle["atomic_numbers"], D_particle["atomic_positions"])
                # Always recenter molecule
                spsim.origin_to_center_of_mass(mol)
                spsim.write_pdb_from_mol("./mol.pdbout", mol)
                # Calculate diffraction pattern
                pat = spsim.simulate_shot(mol, opts)
                # Extract complex Fourier values from spsim output
                F_img = spsim.make_cimage(pat.F, pat.rot, opts)
                phot_img = spsim.make_image(opts.detector.photons_per_pixel, pat.rot, opts)
                F = numpy.sqrt(abs(phot_img.image[:])) * numpy.exp(1.j * numpy.angle(F_img.image[:]))
                spsim.sp_image_free(F_img)
                spsim.sp_image_free(phot_img)
                # Extract qmap from spsim output
                if ndim == 2:
                    qmap_img = spsim.sp_image_alloc(3, nx, ny)
                else:
                    qmap_img = spsim.sp_image_alloc(3*qn, qn, qn)
                spsim.array_to_image(pat.HKL_list, qmap_img)
                if ndim == 2:
                    qmap = 2*numpy.pi * qmap_img.image.real
                else:
                    qmap = 2*numpy.pi * numpy.reshape(qmap_img.image.real, (qn, qn, qn, 3))
                # Store spsim's qmap in the cache so later get_qmap calls with
                # the same parameters reuse it
                self._qmap_cache = {
                    "qmap"              : qmap,
                    "nx"                : nx,
                    "ny"                : ny,
                    "cx"                : cx,
                    "cy"                : cy,
                    "pixel_size"        : pixel_size,
                    "detector_distance" : detector_distance,
                    "wavelength"        : wavelength,
                    "extrinsic_rotation": copy.deepcopy(extrinsic_rotation),
                    "order"             : 'zyx',
                }
                spsim.sp_image_free(qmap_img)
                spsim.free_diffraction_pattern(pat)
                spsim.free_output_in_options(opts)
            else:
                log_and_raise_error(logger, "No valid particles initialized.")
                sys.exit(0)
            if save_qmap:
                qmap_singles[particle_key] = qmap
            v = D_particle["position"]
            # Calculate phase factors if needed (particle displaced from origin)
            if not numpy.allclose(v, numpy.zeros_like(v), atol=1E-12):
                if ndim == 2:
                    F = F * numpy.exp(-1.j*(v[0]*qmap0[:,:,0]+v[1]*qmap0[:,:,1]+v[2]*qmap0[:,:,2]))
                else:
                    F = F * numpy.exp(-1.j*(v[0]*qmap0[:,:,:,0]+v[1]*qmap0[:,:,:,1]+v[2]*qmap0[:,:,:,2]))
            # Superimpose patterns (coherent sum of amplitudes)
            F_tot = F_tot + F
        # Polarization correction
        if ndim == 2:
            P = self.detector.calculate_polarization_factors(cx=cx, cy=cy, polarization=self.source.polarization)
        else:
            if self.source.polarization != "ignore":
                log_and_raise_error(logger, "polarization=\"%s\" for a 3D propagation does not make sense. Set polarization=\"ignore\" in your Source configuration and try again." % self.source.polarization)
                return
            P = 1.
        F_tot = numpy.sqrt(P) * F_tot
        # Photon detection
        I_tot, M_tot = self.detector.detect_photons(abs(F_tot)**2)
        if ndim == 2:
            # NOTE(review): M_tot_binary / MXxX_tot_binary are computed but not
            # added to the output dict below; MXxX_tot from bin_photons is
            # immediately overwritten by downsample's mask — confirm intended.
            M_tot_binary = M_tot == 0
            if self.detector.binning is not None:
                IXxX_tot, MXxX_tot = self.detector.bin_photons(I_tot, M_tot)
                FXxX_tot, MXxX_tot = condor.utils.resample.downsample(F_tot, self.detector.binning, mode="integrate",
                                                                      mask2d0=M_tot, bad_bits=PixelMask.PIXEL_IS_IN_MASK, min_N_pixels=1)
                MXxX_tot_binary = None if MXxX_tot is None else (MXxX_tot == 0)
        else:
            M_tot_binary = None
        # Assemble the output dictionary
        O = {}
        O["source"]    = D_source
        O["particles"] = D_particles
        O["detector"]  = D_detector
        O["entry_1"] = {}
        data_1 = {}
        data_1["data_fourier"] = F_tot
        data_1["data"] = I_tot
        data_1["mask"] = M_tot
        data_1["full_period_resolution"] = 2 * self.detector.get_max_resolution(wavelength)
        O["entry_1"]["data_1"] = data_1
        if self.detector.binning is not None:
            data_2 = {}
            data_2["data_fourier"] = FXxX_tot
            data_2["data"] = IXxX_tot
            data_2["mask"] = MXxX_tot
            O["entry_1"]["data_2"] = data_2
        # Strip private entries (keys starting with "_") before returning
        O = remove_from_dict(O, "_")
        return O
    @log_execution_time(logger)
    def get_qmap(self, nx, ny, cx, cy, pixel_size, detector_distance, wavelength, extrinsic_rotation=None, order="xyz"):
        """Return the scattering-vector map for the given detector geometry,
        recomputing it only when any parameter differs from the cached one.

        NOTE(review): when extrinsic_rotation is None the cached rotation is
        NOT compared, so a previously cached rotated qmap may be returned for
        an unrotated request — confirm this is intended.
        """
        calculate = False
        if self._qmap_cache == {}:
            calculate = True
        else:
            # Recalculate if any geometry parameter changed
            calculate = calculate or nx != self._qmap_cache["nx"]
            calculate = calculate or ny != self._qmap_cache["ny"]
            calculate = calculate or cx != self._qmap_cache["cx"]
            calculate = calculate or cy != self._qmap_cache["cy"]
            calculate = calculate or pixel_size != self._qmap_cache["pixel_size"]
            calculate = calculate or detector_distance != self._qmap_cache["detector_distance"]
            calculate = calculate or wavelength != self._qmap_cache["wavelength"]
            calculate = calculate or order != self._qmap_cache["order"]
            if extrinsic_rotation is not None:
                calculate = calculate or not extrinsic_rotation.is_similar(self._qmap_cache["extrinsic_rotation"])
        if calculate:
            log_debug(logger, "Calculating qmap")
            self._qmap_cache = {
                "qmap"              : self.detector.generate_qmap(wavelength, cx=cx, cy=cy, extrinsic_rotation=extrinsic_rotation, order=order),
                "nx"                : nx,
                "ny"                : ny,
                "cx"                : cx,
                "cy"                : cy,
                "pixel_size"        : pixel_size,
                "detector_distance" : detector_distance,
                "wavelength"        : wavelength,
                # deep copy so later mutation of the caller's rotation cannot
                # silently invalidate the cache comparison
                "extrinsic_rotation": copy.deepcopy(extrinsic_rotation),
                "order"             : order,
            }
        return self._qmap_cache["qmap"]
def get_qmap_from_cache(self):
if self._qmap_cache == {} or not "qmap" in self._qmap_cache:
log_and_raise_error(logger, "Cache empty!")
return None
else:
return self._qmap_cache["qmap"]
def get_resolution(self, wavelength = None, cx = None, cy = None, pos="corner", convention="full_period"):
if wavelength is None:
wavelength = self.source.photon.get_wavelength()
dx = self.detector.get_resolution_element_x(wavelength, cx=cx, cy=cy)
dy = self.detector.get_resolution_element_y(wavelength, cx=cx, cy=cy)
dxdy = numpy.array([dx, dy])
if convention == "full_period":
return dxdy*2
elif convention == "half_period":
return dxdy
else:
log_and_raise_error(logger, "Invalid input: convention=%s. Must be either \"full_period\" or \"half_period\"." % convention)
return
def get_linear_sampling_ratio(self, wavelength = None, particle_diameter = None, particle_key = None):
"""
Returns the linear sampling ratio :math:`o` of the diffraction pattern:
| :math:`o=\\frac{D\\lambda}{dp}`
| :math:`D`: Detector distance
| :math:`p`: Detector pixel size (edge length)
| :math:`\\lambda`: Photon wavelength
| :math:`d`: Particle diameter
"""
if wavelength is None:
wavelength = self.source.photon.get_wavelength()
detector_distance = self.detector.distance
if particle_diameter is None:
if len(self.particles) == 1:
p = self.particles.values()[0]
elif particle_key is None:
log_and_raise_error(logger, "You need to specify a particle_key because there are more than one particle models.")
else:
p = self.particles[particle_key]
particle_diameter = p.diameter_mean
pN = utils.diffraction.nyquist_pixel_size(wavelength, detector_distance, particle_diameter)
pD = self.detector.pixel_size
ratio = pN/pD
return ratio
    def get_fresnel_number(self, wavelength):
        """Return the Fresnel number for the given wavelength.

        NOTE(review): not implemented yet — this is a stub that returns None.
        """
        pass
# ------------------------------------------------------------------------------------------------
# Caching of map3d might be interesting to implement again in the future
#def get_map3d(self, map3d, dx, dx_req):
# map3d = None
# if dx > dx_req:
# logger.error("Finer real space sampling required for chosen geometry.")
# return
# # has map3d_fine the required real space grid?
# if map3d == None and abs(self.dX_fine/self.dX-1) < 0.001:
# # ok, we'll take the fine map
# map3d = self.map3d_fine
    #        logger.debug("Using the fine map for propagation.")
# self._map3d = self.map3d_fine
# # do we have an interpolated map?
# if map3d == None and self._dX != None:
# # does it have the right spacing?
# if abs(self._dX/self.dX-1) < 0.001:
# # are the shapes of the original fine map and our current fine map the same?
# if numpy.all(numpy.array(self.map3d_fine.shape)==numpy.array(self._map3d_fine.shape)):
# # is the grid of the original fine map and the current fine map the same?
# if self.dX_fine == self._dX_fine:
# # are the values of the original fine map and the cached fine map the same?
# if numpy.all(self.map3d_fine==self._map3d_fine):
# # ok, we take the cached map!
# map3d = self._map3d
# logger.debug("Using the cached interpolated map for propagtion.")
# # do we have to do interpolation?
# if map3d == None and self.dX_fine < self.dX:
# from scipy import ndimage
# f = self.dX_fine/self.dX
# N_mapfine = self.map3d_fine.shape[0]
# L_mapfine = (N_mapfine-1)*self.dX_fine
# N_map = int(numpy.floor((N_mapfine-1)*f))+1
# L_map = (N_map-1)*self.dX
# gt = numpy.float64(numpy.indices((N_map,N_map,N_map)))/float(N_map-1)*(N_mapfine-1)*L_map/L_mapfine
# map3d = ndimage.map_coordinates(self.map3d_fine, gt, order=3)
# # Cache interpolated data
# self._map3d = map3d
# self._dX = N_mapfine/(1.*N_map)*self.dX_fine
    #        # Cache fine data for later decision whether or not the interpolated map can be used again
# self._map3d_fine = self.map3d_fine
# self._dX_fine = self.dX_fine
    #        logger.debug("Using a newly interpolated map for propagation.")
# return map3d
# ------------------------------------------------------------------------------------------------
def remove_from_dict(D, startswith="_"):
    """Recursively delete entries of *D* whose key starts with *startswith*.

    Mutates *D* in place and also returns it for convenience.
    """
    for k, v in list(D.items()):
        if k.startswith(startswith):
            # BUG FIX: the original deleted D[k] and then still recursed into
            # D[k] when the value was a dict, raising KeyError. A removed
            # entry needs no recursion; only surviving dict values do.
            del D[k]
        elif isinstance(v, dict):
            remove_from_dict(v, startswith)
    return D
| |
import os
import socket
import venusian
from botocore.exceptions import ClientError
from flowy.swf.client import SWFClient, IDENTITY_SIZE
from flowy.swf.decision import SWFActivityDecision
from flowy.swf.decision import SWFWorkflowDecision
from flowy.swf.history import SWFExecutionHistory
from flowy.utils import logger
from flowy.utils import setup_default_logger
from flowy.worker import Worker
__all__ = ['SWFWorkflowWorker', 'SWFActivityWorker']
class SWFWorker(Worker):
    """Worker specialised for Amazon SWF: tasks are keyed by (name, version)
    and configs can be registered/validated remotely."""

    def __init__(self):
        super(SWFWorker, self).__init__()
        # Callbacks that register or validate configs against Amazon SWF
        self.remote_reg_callbacks = []

    def __call__(self, name, version, input_data, decision, *extra_args):
        task_key = (str(name), str(version))
        return super(SWFWorker, self).__call__(task_key, input_data, decision, *extra_args)

    def register_remote(self, swf_client, domain):
        """Register or check compatibility of all configs in Amazon SWF."""
        for reg_callback in self.remote_reg_callbacks:
            # Raises if there are registration problems
            reg_callback(swf_client, domain)

    def register(self, config, func, version, name=None):
        super(SWFWorker, self).register(config, func, (name, version))

    def add_remote_reg_callback(self, callback):
        self.remote_reg_callbacks.append(callback)

    def make_scanner(self):
        return venusian.Scanner(
            add_remote_reg_callback=self.add_remote_reg_callback,
            register_task=self.register_task)
class SWFWorkflowWorker(SWFWorker):
    categories = ['swf_workflow']

    # Be explicit about what arguments are expected
    def __call__(self, name, version, input_data, decision, execution_history):
        # decision and execution_history are also forwarded as extra_args
        # for the proxies.
        super(SWFWorkflowWorker, self).__call__(
            name, version, input_data, decision,
            decision, execution_history)

    def break_loop(self):
        """Used to exit the loop in tests. Return True to break."""
        return False

    def run_forever(self, domain, task_list,
                    swf_client=None,
                    setup_log=True,
                    register_remote=True,
                    identity=None):
        """Starts an endless single threaded/single process worker loop.

        The worker polls endlessly for new decisions from the specified domain
        and task list and runs them.

        If register_remote is set, all registered workflows are registered
        remotely. An identity can be set to track this worker in the SWF
        console, otherwise a default identity is generated from this machine
        domain and process pid. If setup_log is set, a default configuration
        for the logger is loaded. A custom SWF client can be passed in
        swf_client, otherwise a default client is used.
        """
        if setup_log:
            setup_default_logger()
        if identity is None:
            identity = default_identity()
        if swf_client is None:
            swf_client = SWFClient()
        if register_remote:
            self.register_remote(swf_client, domain)
        try:
            while not self.break_loop():
                (name, version, input_data,
                 exec_history, decision) = poll_decision(
                    swf_client, domain, task_list, identity)
                self(name, version, input_data, decision, exec_history)
        except KeyboardInterrupt:
            pass
class SWFActivityWorker(SWFWorker):
    categories = ['swf_activity']

    # Be explicit about what arguments are expected
    def __call__(self, name, version, input_data, decision):
        # Only decision.heartbeat is forwarded as an extra argument.
        super(SWFActivityWorker, self).__call__(
            name, version, input_data, decision,
            decision.heartbeat)

    def break_loop(self):
        """Used to exit the loop in tests. Return True to break."""
        return False

    def run_forever(self, domain, task_list,
                    swf_client=None,
                    setup_log=True,
                    register_remote=True,
                    identity=None):
        """Same as SWFWorkflowWorker.run_forever but for activities."""
        if setup_log:
            setup_default_logger()
        if identity is None:
            identity = default_identity()
        if swf_client is None:
            swf_client = SWFClient()
        if register_remote:
            self.register_remote(swf_client, domain)
        try:
            while not self.break_loop():
                response = {}
                # Keep polling until a real task (with a token) arrives
                while not response.get('taskToken'):
                    try:
                        response = swf_client.poll_for_activity_task(
                            domain, task_list, identity=identity)
                    except ClientError:
                        # add a delay before retrying?
                        logger.exception('Error while polling for activities:')
                activity_type = response['activityType']
                decision = SWFActivityDecision(swf_client, response['taskToken'])
                self(activity_type['name'], activity_type['version'],
                     response['input'], decision)
        except KeyboardInterrupt:
            pass
def default_identity():
    """Generate a local identity string for this process."""
    full_identity = "{0}-{1}".format(socket.getfqdn(), os.getpid())
    # keep the most important part (the trailing pid end)
    return full_identity[-IDENTITY_SIZE:]
def poll_decision(swf_client, domain, task_list, identity=None):
    """Poll a decision and create a SWFWorkflowContext structure.

    :type swf_client: :class:`SWFClient`
    :param swf_client: an implementation or duck typing of :class:`SWFClient`
    :param domain: the domain containing the task list to poll
    :param task_list: the task list from which to poll decision
    :param identity: an identity str of the request maker

    :rtype: tuple
    :returns: a tuple consisting of (name, version, input_data,
        :class:`SWFExecutionHistory`, :class:`SWFWorkflowDecision`)
    """
    page = poll_first_page(swf_client, domain, task_list, identity)
    token = page['taskToken']
    event_iter = events(swf_client, domain, task_list, page, identity)
    # Sometimes the first event is on the second page,
    # and the first page is empty
    started = next(event_iter)
    assert started['eventType'] == 'WorkflowExecutionStarted'
    attrs = started['workflowExecutionStartedEventAttributes']
    assert attrs['taskList']['name'] == task_list
    task_duration = attrs['taskStartToCloseTimeout']
    workflow_duration = attrs['executionStartToCloseTimeout']
    tags = attrs.get('tagList', None)
    child_policy = attrs['childPolicy']
    wf_type = attrs['workflowType']
    name = wf_type['name']
    version = wf_type['version']
    input_data = attrs['input']
    try:
        running, timedout, results, errors, order = load_events(event_iter)
    except _PaginationError:
        # There's nothing better to do than to retry
        return poll_decision(swf_client, domain, task_list, identity)
    execution_history = SWFExecutionHistory(running, timedout, results, errors, order)
    decision = SWFWorkflowDecision(swf_client, token, name, version, task_list,
                                   task_duration, workflow_duration, tags,
                                   child_policy)
    return name, version, input_data, execution_history, decision
def poll_first_page(swf_client, domain, task_list, identity=None):
    """Return the response from loading the first page. In case of errors,
    empty responses or whatnot retry until a valid response.

    :type swf_client: :class:`SWFClient`
    :param swf_client: an implementation or duck typing of :class:`SWFClient`
    :param domain: the domain containing the task list to poll
    :param task_list: the task list from which to poll for events
    :param identity: an identity str of the request maker

    :rtype: dict[str, str|int|list|dict]
    :returns: a dict containing workflow information and list of events
    """
    response = {}
    # An empty poll (no 'taskToken') or a client error both trigger a retry.
    while not response.get('taskToken'):
        try:
            response = swf_client.poll_for_decision_task(domain, task_list,
                                                         identity=identity)
        except ClientError:
            logger.exception('Error while polling for decisions:')
    return response
def poll_page(swf_client, domain, task_list, token, identity=None):
    """Return a specific page. In case of errors retry a number of times.

    :type swf_client: :class:`SWFClient`
    :param swf_client: an implementation or duck typing of :class:`SWFClient`
    :param domain: the domain containing the task list to poll
    :param task_list: the task list from which to poll for events
    :param token: the token string for the requested page
    :param identity: an identity str of the request maker

    :rtype: dict[str, str|int|list|dict]
    :returns: a dict containing workflow information and list of events
    :raises _PaginationError: when every retry fails
    """
    attempts_left = 7  # give up after a limited number of retries
    while attempts_left:
        try:
            return swf_client.poll_for_decision_task(
                domain, task_list, identity=identity, next_page_token=token)
        except ClientError:
            logger.exception('Error while polling for decision page:')
            attempts_left -= 1
    raise _PaginationError()
def events(swf_client, domain, task_list, first_page, identity=None):
    """Load pages one by one and generate all events found.

    :type swf_client: :class:`SWFClient`
    :param swf_client: an implementation or duck typing of :class:`SWFClient`
    :param domain: the domain containing the task list to poll
    :param task_list: the task list from which to poll for events
    :param first_page: the page dict structure from which to start generating
        the events, usually the response from :func:`poll_first_page`
    :param identity: an identity str of the request maker

    :rtype: collections.Iterator[dict[str, int|str|dict[str, int|str|dict]]
    :returns: iterator over all of the events
    """
    current_page = first_page
    while True:
        for event in current_page['events']:
            yield event
        next_token = current_page.get('nextPageToken')
        if not next_token:
            break
        current_page = poll_page(swf_client, domain, task_list, next_token,
                                 identity=identity)
def load_events(event_iter):
    """Combine all events in their order.

    Returns a tuple of:
        running  - a set of the ids of running tasks
        timedout - a set of the ids of tasks that have timedout
        results  - a dictionary of id -> result for each finished task
        errors   - a dictionary of id -> error message for each failed task
        order    - a list of task ids in the order they finished
    """
    running, timedout = set(), set()
    results, errors = {}, {}
    order = []
    # Maps a scheduled event id to the activity id it scheduled
    scheduled_to_call = {}
    for event in event_iter:
        etype = event.get('eventType')
        if etype == 'ActivityTaskScheduled':
            attrs = event['activityTaskScheduledEventAttributes']
            call_id = attrs['activityId']
            scheduled_to_call[event['eventId']] = call_id
            running.add(call_id)
        elif etype == 'ActivityTaskCompleted':
            attrs = event['activityTaskCompletedEventAttributes']
            call_id = scheduled_to_call[attrs['scheduledEventId']]
            value = attrs['result']
            running.remove(call_id)
            results[call_id] = value
            order.append(call_id)
        elif etype == 'ActivityTaskFailed':
            attrs = event['activityTaskFailedEventAttributes']
            call_id = scheduled_to_call[attrs['scheduledEventId']]
            message = attrs['reason']
            running.remove(call_id)
            errors[call_id] = message
            order.append(call_id)
        elif etype == 'ActivityTaskTimedOut':
            attrs = event['activityTaskTimedOutEventAttributes']
            call_id = scheduled_to_call[attrs['scheduledEventId']]
            running.remove(call_id)
            timedout.add(call_id)
            order.append(call_id)
        elif etype == 'ScheduleActivityTaskFailed':
            attrs = event['scheduleActivityTaskFailedEventAttributes']
            call_id = attrs['activityId']
            message = attrs['cause']
            # when a job is not found it's not even started
            errors[call_id] = message
            order.append(call_id)
        elif etype == 'StartChildWorkflowExecutionInitiated':
            attrs = event['startChildWorkflowExecutionInitiatedEventAttributes']
            running.add(_subworkflow_call_key(attrs['workflowId']))
        elif etype == 'ChildWorkflowExecutionCompleted':
            attrs = event['childWorkflowExecutionCompletedEventAttributes']
            call_id = _subworkflow_call_key(
                attrs['workflowExecution']['workflowId'])
            value = attrs['result']
            running.remove(call_id)
            results[call_id] = value
            order.append(call_id)
        elif etype == 'ChildWorkflowExecutionFailed':
            attrs = event['childWorkflowExecutionFailedEventAttributes']
            call_id = _subworkflow_call_key(
                attrs['workflowExecution']['workflowId'])
            message = attrs['reason']
            running.remove(call_id)
            errors[call_id] = message
            order.append(call_id)
        elif etype == 'ChildWorkflowExecutionTimedOut':
            attrs = event['childWorkflowExecutionTimedOutEventAttributes']
            call_id = _subworkflow_call_key(
                attrs['workflowExecution']['workflowId'])
            running.remove(call_id)
            timedout.add(call_id)
            order.append(call_id)
        elif etype == 'StartChildWorkflowExecutionFailed':
            attrs = event['startChildWorkflowExecutionFailedEventAttributes']
            call_id = _subworkflow_call_key(attrs['workflowId'])
            message = attrs['cause']
            errors[call_id] = message
            order.append(call_id)
        elif etype == 'TimerStarted':
            running.add(event['timerStartedEventAttributes']['timerId'])
        elif etype == 'TimerFired':
            timer_id = event['timerFiredEventAttributes']['timerId']
            running.remove(timer_id)
            results[timer_id] = None
    return running, timedout, results, errors, order
class _PaginationError(Exception):
    """Can't retrieve the next history page after a fixed number of retries (see poll_page)."""
def _subworkflow_call_key(w_id):
return w_id.split(':')[-1]
| |
# -*- coding: utf-8 -*-
from functools import update_wrapper
import os
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils.translation import ugettext_lazy as _
from django.utils.six.moves.urllib.parse import urljoin
from cms import constants
__all__ = ['get_cms_setting']
class VERIFIED: pass  # sentinel object: need a unique identifier for CMS_LANGUAGES
def default(name):
    """Decorator factory: prefer the Django setting *name* over the wrapped getter.

    The wrapped zero-argument function is only called when ``settings`` does
    not define an attribute called *name*.
    """
    def decorator(wrapped):
        def wrapper():
            if hasattr(settings, name):
                return getattr(settings, name)
            return wrapped()
        update_wrapper(wrapper, wrapped)
        # BUG FIX: previously returned ``wrapped``, which made the decorator a
        # no-op — the Django setting was never consulted.
        return wrapper
    return decorator
# Fallback values for CMS_* settings: ``get_cms_setting('X')`` returns
# ``DEFAULTS['X']`` when the corresponding attribute is absent from the
# Django settings.
DEFAULTS = {
    'TEMPLATE_INHERITANCE': True,
    'PLACEHOLDER_CONF': {},
    'PERMISSION': False,
    # Whether to use raw ID lookups for users when PERMISSION is True
    'RAW_ID_USERS': False,
    'PUBLIC_FOR': 'all',
    'CONTENT_CACHE_DURATION': 60,
    'APPHOOKS': [],
    'TOOLBARS': [],
    'SITE_CHOICES_CACHE_KEY': 'CMS:site_choices',
    'PAGE_CHOICES_CACHE_KEY': 'CMS:page_choices',
    'MEDIA_PATH': 'cms/',
    'PAGE_MEDIA_PATH': 'cms_page_media/',
    'TITLE_CHARACTER': '+',
    'PAGE_CACHE': True,
    'PLACEHOLDER_CACHE': True,
    'PLUGIN_CACHE': True,
    'CACHE_PREFIX': 'cms-',
    'PLUGIN_PROCESSORS': [],
    'PLUGIN_CONTEXT_PROCESSORS': [],
    'UNIHANDECODE_VERSION': None,
    'UNIHANDECODE_DECODERS': ['ja', 'zh', 'kr', 'vn', 'diacritic'],
    'UNIHANDECODE_DEFAULT_DECODER': 'diacritic',
    'MAX_PAGE_PUBLISH_REVERSIONS': 10,
    'MAX_PAGE_HISTORY_REVERSIONS': 15,
    'TOOLBAR_ANONYMOUS_ON': True,
    'TOOLBAR_URL__EDIT_ON': 'edit',
    'TOOLBAR_URL__EDIT_OFF': 'edit_off',
    'TOOLBAR_URL__BUILD': 'build',
    'TOOLBAR_URL__DISABLE': 'toolbar_off',
    'ADMIN_NAMESPACE': 'admin',
    'APP_NAME': None,
    'TOOLBAR_HIDE': False
}
def get_cache_durations():
    """Return cache durations (seconds) for menus, content and permissions."""
    menus_duration = getattr(settings, 'MENU_CACHE_DURATION', 60 * 60)
    return {
        'menus': menus_duration,
        'content': get_cms_setting('CONTENT_CACHE_DURATION'),
        'permissions': 60 * 60,
    }
@default('CMS_MEDIA_ROOT')
def get_media_root():
    """Filesystem path of the CMS media directory; the CMS_MEDIA_ROOT setting is meant to take precedence (see ``default``)."""
    return os.path.join(settings.MEDIA_ROOT, get_cms_setting('MEDIA_PATH'))
@default('CMS_MEDIA_URL')
def get_media_url():
    """URL of the CMS media directory; the CMS_MEDIA_URL setting is meant to take precedence (see ``default``)."""
    return urljoin(settings.MEDIA_URL, get_cms_setting('MEDIA_PATH'))
@default('CMS_TOOLBAR_URL__EDIT_ON')
def get_toolbar_url__edit_on():
    """Query-string value that switches the toolbar edit mode on."""
    return get_cms_setting('TOOLBAR_URL__EDIT_ON')
@default('CMS_TOOLBAR_URL__EDIT_OFF')
def get_toolbar_url__edit_off():
    """Query-string value that switches the toolbar edit mode off."""
    return get_cms_setting('TOOLBAR_URL__EDIT_OFF')
@default('CMS_TOOLBAR_URL__BUILD')
def get_toolbar_url__build():
    """Query-string value that switches the toolbar into build (structure) mode."""
    return get_cms_setting('TOOLBAR_URL__BUILD')
@default('CMS_TOOLBAR_URL__DISABLE')
def get_toolbar_url__disable():
    """Query-string value that disables the toolbar."""
    return get_cms_setting('TOOLBAR_URL__DISABLE')
def get_templates():
    """Return the list of (template path, verbose name) pairs available to the CMS.

    When ``CMS_TEMPLATES_DIR`` is set, templates are discovered on disk
    (optionally described by an ``__init__.py`` module next to them);
    otherwise the ``CMS_TEMPLATES`` setting is used. The inheritance magic
    entry is appended when ``TEMPLATE_INHERITANCE`` is enabled.
    """
    from cms.utils.django_load import load_from_file
    if getattr(settings, 'CMS_TEMPLATES_DIR', False):
        tpldir = getattr(settings, 'CMS_TEMPLATES_DIR', False)
        # CMS_TEMPLATES_DIR can either be a string pointing to the templates
        # directory or a dictionary holding 'site: template dir' entries
        if isinstance(tpldir, dict):
            tpldir = tpldir[settings.SITE_ID]
        # We must extract the relative path of CMS_TEMPLATES_DIR to the
        # nearest valid templates directory. Here we mimic what the
        # filesystem and app_directories template loaders do
        prefix = ''
        # Relative to TEMPLATE_DIRS for filesystem loader
        # NOTE(review): newer Django raises AttributeError (not IndexError)
        # for a missing TEMPLATE_DIRS — confirm which exception applies here.
        try:
            path = settings.TEMPLATE_DIRS
        except IndexError:
            path = [template['DIRS'][0] for template in settings.TEMPLATES]
        for basedir in path:
            if tpldir.find(basedir) == 0:
                prefix = tpldir.replace(basedir + os.sep, '')
                break
        # Relative to 'templates' directory that app_directory scans
        if not prefix:
            components = tpldir.split(os.sep)
            try:
                prefix = os.path.join(*components[components.index('templates') + 1:])
            except ValueError:
                # If 'templates' is not found we use the directory name as
                # prefix and hope for the best
                prefix = os.path.basename(tpldir)
        config_path = os.path.join(tpldir, '__init__.py')
        # Try to load templates list and names from the template module.
        # If the module file is not present, skip configuration and just
        # use the file names as templates.
        # BUG FIX: ``config_path`` is a non-empty string and therefore always
        # truthy; the intended check (per the comment above) is whether the
        # file exists on disk.
        if os.path.exists(config_path):
            template_module = load_from_file(config_path)
            templates = [(os.path.join(prefix, data[0].strip()), data[1]) for data in template_module.TEMPLATES.items()]
        else:
            templates = list((os.path.join(prefix, tpl), tpl) for tpl in os.listdir(tpldir))
    else:
        templates = list(getattr(settings, 'CMS_TEMPLATES', []))
    if get_cms_setting('TEMPLATE_INHERITANCE'):
        templates.append((constants.TEMPLATE_INHERITANCE_MAGIC, _(constants.TEMPLATE_INHERITANCE_LABEL)))
    return templates
def _ensure_languages_settings(languages):
    """Validate and normalize a ``CMS_LANGUAGES`` dict in place.

    Checks key validity, applies the ``default`` entry to every per-site
    language, computes fallbacks where missing, and marks the dict with
    ``VERIFIED`` so it is not re-checked.  Raises ImproperlyConfigured on
    any malformed entry.
    """
    valid_language_keys = ['code', 'name', 'fallbacks', 'hide_untranslated', 'redirect_on_fallback', 'public']
    required_language_keys = ['code', 'name']
    simple_defaults = ['public', 'redirect_on_fallback', 'hide_untranslated']
    if not isinstance(languages, dict):
        raise ImproperlyConfigured(
            "CMS_LANGUAGES must be a dictionary with site IDs and 'default'"
            " as keys. Please check the format.")
    defaults = languages.pop('default', {})
    default_fallbacks = defaults.get('fallbacks')
    needs_fallbacks = []
    for key in defaults:
        if key not in valid_language_keys:
            raise ImproperlyConfigured("CMS_LANGUAGES has an invalid property in the default properties: %s" % key)
    for key in simple_defaults:
        if key not in defaults:
            defaults[key] = True
    for site, language_list in languages.items():
        # hash(int) == int, so this accepts only real integer site IDs.
        if site != hash(site):
            raise ImproperlyConfigured(
                "CMS_LANGUAGES can only be filled with integers (site IDs) and 'default'"
                " for default values. %s is not a valid key." % site)
        for language_object in language_list:
            for required_key in required_language_keys:
                if required_key not in language_object:
                    # Bugfix: report the actually-missing key; the original
                    # interpolated the stale loop variable `key` here.
                    raise ImproperlyConfigured("CMS_LANGUAGES has a language which is missing the required key %r "
                                               "in site %r" % (required_key, site))
            language_code = language_object['code']
            for key in language_object:
                if key not in valid_language_keys:
                    raise ImproperlyConfigured(
                        "CMS_LANGUAGES has invalid key %r in language %r in site %r" % (key, language_code, site)
                    )
            if 'fallbacks' not in language_object:
                if default_fallbacks:
                    language_object['fallbacks'] = default_fallbacks
                else:
                    # Defer: fallbacks are derived from the site's other
                    # public languages once all entries are known.
                    needs_fallbacks.append((site, language_object))
            for key in simple_defaults:
                if key not in language_object:
                    language_object[key] = defaults[key]
    site_fallbacks = {}
    for site, language_object in needs_fallbacks:
        if site not in site_fallbacks:
            site_fallbacks[site] = [lang['code'] for lang in languages[site] if lang['public']]
        language_object['fallbacks'] = [lang_code for lang_code in site_fallbacks[site] if
                                        lang_code != language_object['code']]
    languages['default'] = defaults
    languages[VERIFIED] = True  # this will be busted by @override_settings and cause a re-check
    return languages
def get_languages():
    """Return the validated CMS language configuration for this project."""
    site_id = settings.SITE_ID
    if site_id != hash(site_id):
        raise ImproperlyConfigured(
            "SITE_ID must be an integer"
        )
    if not settings.USE_I18N:
        # i18n disabled: expose the single LANGUAGE_CODE for this site.
        single = {'code': settings.LANGUAGE_CODE, 'name': settings.LANGUAGE_CODE}
        return _ensure_languages_settings({site_id: [single]})
    if settings.LANGUAGE_CODE not in dict(settings.LANGUAGES):
        raise ImproperlyConfigured(
            'LANGUAGE_CODE "%s" must have a matching entry in LANGUAGES' % settings.LANGUAGE_CODE
        )
    # Default to mirroring Django's LANGUAGES when CMS_LANGUAGES is unset.
    fallback = {site_id: [{'code': code, 'name': _(name)} for code, name in settings.LANGUAGES]}
    languages = getattr(settings, 'CMS_LANGUAGES', fallback)
    if VERIFIED in languages:
        return languages
    return _ensure_languages_settings(languages)
def get_unihandecode_host():
    """Return ``CMS_UNIHANDECODE_HOST`` normalized to end with a slash.

    A falsy value (unset/empty) is returned unchanged, meaning the feature
    is disabled.
    """
    host = getattr(settings, 'CMS_UNIHANDECODE_HOST', None)
    if not host:
        return host
    return host if host.endswith('/') else host + '/'
# Settings whose values are computed by a getter function rather than read
# straight from Django settings; get_cms_setting() consults this map first.
COMPLEX = {
    'CACHE_DURATIONS': get_cache_durations,
    'MEDIA_ROOT': get_media_root,
    'MEDIA_URL': get_media_url,
    # complex because not prefixed by CMS_
    'TEMPLATES': get_templates,
    'LANGUAGES': get_languages,
    'UNIHANDECODE_HOST': get_unihandecode_host,
    'CMS_TOOLBAR_URL__EDIT_ON': get_toolbar_url__edit_on,
    'CMS_TOOLBAR_URL__EDIT_OFF': get_toolbar_url__edit_off,
    'CMS_TOOLBAR_URL__BUILD': get_toolbar_url__build,
    'CMS_TOOLBAR_URL__DISABLE': get_toolbar_url__disable,
}
def get_cms_setting(name):
    """Resolve CMS setting *name*.

    Computed (COMPLEX) settings take precedence; otherwise read the
    ``CMS_``-prefixed Django setting, falling back to DEFAULTS.
    """
    getter = COMPLEX.get(name)
    if getter is not None:
        return getter()
    return getattr(settings, 'CMS_%s' % name, DEFAULTS[name])
def get_site_id(site):
    """Normalize *site* (Site instance, int, or int-like string) to an ID.

    Falls back to ``settings.SITE_ID`` when the value cannot be interpreted
    as an integer.
    """
    from django.contrib.sites.models import Site
    if isinstance(site, Site):
        return site.id
    try:
        return int(site)
    except (TypeError, ValueError):
        return settings.SITE_ID
| |
# -*- coding: utf-8 -*-
"""
lantz.drivers.legacy.tektronix.tds1012
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Implements the drivers to control an oscilloscope.
:copyright: 2015 by Lantz Authors, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
Source: Tektronix Manual
"""
import numpy as np
from lantz.feat import Feat
from lantz.action import Action
from lantz.drivers.legacy.serial import SerialDriver
from lantz.errors import InvalidCommand
class TDS1012(SerialDriver):
    """Tektronix TDS1012 100MHz 2 Channel Digital Storage Oscilloscope

    Talks newline-terminated ASCII commands over a serial port; each
    Feat/Action maps to one command from the Tektronix programming manual.
    """
    ENCODING = 'ascii'
    RECV_TERMINATION = '\n'
    SEND_TERMINATION = '\n'
    TIMEOUT = -1 # Avoids timeout while acquiring a curve. May not be the
                 # best option.
    def __init__(self, port):
        """Open the serial connection on *port* and initialize the link."""
        super().__init__(port)
        super().initialize() # Automatically open the port
    @Action()
    def initiate(self):
        """ Initiates the acquisition in the oscilloscope (ACQ:STATE ON).
        """
        self.send(':ACQ:STATE ON')
    @Action()
    def idn(self):
        """ Identify the oscilloscope (*IDN? query).
        """
        return self.query('*IDN?')
    @Action()
    def autoset(self):
        """ Adjust the vertical, horizontal and trigger controls to display a
            stable waveform.
        """
        self.send('AUTOS EXEC')
    @Action()
    def autocal(self):
        """ Autocalibration of the oscilloscope. It may take several minutes
            to complete.
        """
        return self.send('*CAL')
    @Feat(limits=(1,2))
    def datasource(self):
        """ Retrieves the data source from which data is going to be taken.
            TDS1012 has 2 channels.
        """
        return self.query('DAT:SOU?')
    @datasource.setter
    def datasource(self,value):
        """ Sets the data source (channel 1 or 2) for data acquisition.
        """
        self.send('DAT:SOU CH{}'.format(value))
    @Action()
    def acquire_parameters(self):
        """ Acquire the waveform scaling parameters (WFMP? query).
            Intended for adjusting the raw values obtained in acquire_curve.
            Returns a dict mapping parameter name -> float value.
        """
        values = 'XZE?;XIN?;PT_OF?;YZE?;YMU?;YOF?;'
        answer = self.query('WFMP:{}'.format(values))
        parameters = {}
        for v, j in zip(values.split('?;'),answer.split(';')):
            parameters[v] = float(j)
        return parameters
    @Action()
    def data_setup(self):
        """ Sets the way data is going to be encoded for sending.
        """
        self.send('DAT:ENC ASCI;WID 2') #ASCII is the least efficient way, but
                                        # couldn't make the binary mode to work
    @Action()
    def acquire_curve(self,start=1,stop=2500):
        """ Gets a trace from the oscilloscope, rescaled to physical units
            with the WFMP parameters. Accepts setting the start and stop
            sample points of the acquisition (by default the entire range).
            Returns (xdata, ydata) as two lists of floats.
        """
        parameters = self.acquire_parameters()
        self.data_setup()
        self.send('DAT:STAR {}'.format(start))
        self.send('DAT:STOP {}'.format(stop))
        data = self.query('CURV?')
        data = data.split(',')
        data = np.array(list(map(float,data)))
        # Rescale raw counts to volts / seconds using the scope's own
        # offset, multiplier and zero parameters.
        ydata = (data - parameters['YOF']) * parameters['YMU']\
            + parameters['YZE']
        xdata = np.arange(len(data))*parameters['XIN'] + parameters['XZE']
        return list(xdata), list(ydata)
    @Action()
    def forcetrigger(self):
        """ Creates a trigger event.
        """
        self.send('TRIG:FORC')
        return
    @Action()
    def triggerlevel(self):
        """ Sets the trigger level to 50% of the minimum and maximum values of
            the signal.
        """
        self.send('TRIG:MAI SETL')
    @Feat(values={'AUTO', 'NORMAL'})
    def trigger(self):
        """ Retrieves the trigger mode ('AUTO' or 'NORMAL').
        """
        return self.query('TRIG:MAIN:MODE?')
    @trigger.setter
    def trigger(self,state):
        """ Sets the trigger mode.
        """
        self.send('TRIG:MAI:MOD {}'.format(state))
        return
    @Feat()
    def horizontal_division(self):
        """ Horizontal time base division.
        """
        return float(self.query('HOR:MAI:SCA?'))
    @horizontal_division.setter
    def horizontal_division(self,value):
        """ Sets the horizontal time base division.
        """
        self.send('HOR:MAI:SCA {}'.format(value))
        return
    @Feat(values={0, 4, 16, 64, 128})
    def number_averages(self):
        """ Number of traces averaged per acquisition; 0 means continuous
            sampling (SAMPLE mode).
        """
        answer = self.query('ACQ?')
        answer = answer.split(';')
        if answer[0] == 'SAMPLE':
            return 0
        elif answer[0] == 'AVERAGE':
            return int(self.query('ACQ:NUMAV?'))
        else:
            # Scope reported an acquisition mode this driver doesn't handle.
            raise InvalidCommand
    @number_averages.setter
    def number_averages(self,value):
        """ Sets the number of averages. If 0, it is a continuous sample.
        """
        if value == 0:
            self.send('ACQ:MOD SAMPLE')
        else:
            self.send('ACQ:MOD AVE;NUMAV {}'.format(value))
    @Action(values={'FREQ', 'MINI', 'MAXI', 'MEAN'})
    def _measure(self, mode):
        """ Measures the Frequency, Minimum, Maximum or Mean of the signal
            via an immediate measurement, returned as a float.
        """
        self.send('MEASU:IMM:TYP {}'.format(mode))
        return float(self.query('MEASU:IMM:VAL?'))
    def measure_mean(self):
        """ Gets the mean of the signal.
        """
        answer = self._measure('MEAN')
        return answer
    def measure_frequency(self):
        """ Gets the frequency of the signal.
        """
        answer = self._measure('FREQ')
        return answer
    def measure_minimum(self):
        """ Gets the minimum of the signal.
        """
        answer = self._measure('MINI')
        return answer
    def measure_maximum(self):
        """ Gets the maximum of the signal.
        """
        answer = self._measure('MAXI')
        return answer
if __name__ == '__main__':
    # Demo script: identify the scope, set up a trigger, and optionally
    # acquire and plot one trace from the selected channel.
    import argparse
    parser = argparse.ArgumentParser(description='Measure using TDS1012 and dump to screen')
    parser.add_argument('-p', '--port', default='/dev/ttyS0',
                        help='Serial port')
    parser.add_argument('-v', '--view', action='store_true', default=True,
                        help='View ')
    parser.add_argument('-c', '--channel', default=1, type=int,
                        help='Channel to use')
    args = parser.parse_args()
    osc = TDS1012(args.port)
    osc.initiate()
    print('Osciloscope Identification: {}'.format(osc.idn))
    print(osc.trigger)
    osc.forcetrigger()
    osc.triggerlevel()
    osc.trigger = "AUTO"
    print(osc.trigger)
    params = osc.acquire_parameters()
    # Bugfix/cleanup: the original had two consecutive `if args.view:`
    # blocks; merge them so matplotlib is imported lazily in one branch.
    if args.view:
        import matplotlib.pyplot as plt
        osc.datasource = args.channel
        x, y = osc.acquire_curve()
        x = np.array(x)
        x = x - x.min()  # start the time axis at zero
        y = np.array(y)
        plt.plot(x, y)
        plt.show()
| |
from __future__ import absolute_import
import re
import bisect
import sys
from graphite.tags.base import BaseTagDB, TaggedSeries
class RedisTagDB(BaseTagDB):
    """
    Stores tag information in a Redis database.
    Keys used are:
    .. code-block:: none
        series                     # Set of all paths
        series:<path>:tags         # Hash of all tag:value pairs for path
        tags                       # Set of all tags
        tags:<tag>:series          # Set of paths with entry for tag
        tags:<tag>:values          # Set of values for tag
        tags:<tag>:values:<value>  # Set of paths matching tag/value
    """
    def __init__(self, settings, *args, **kwargs):
        """Connect to Redis using the TAGDB_REDIS_* settings."""
        super(RedisTagDB, self).__init__(settings, *args, **kwargs)
        from redis import Redis
        self.r = Redis(
            host=settings.TAGDB_REDIS_HOST,
            port=settings.TAGDB_REDIS_PORT,
            db=settings.TAGDB_REDIS_DB,
            password=settings.TAGDB_REDIS_PASSWORD,
            # On Python 3, have redis return str instead of bytes.
            decode_responses=(sys.version_info[0] >= 3),
        )
    def _find_series(self, tags, requestContext=None):
        """Return the sorted list of series matching every tagspec in *tags*.

        One tagspec is promoted to "selector" (preferring '=' specs with the
        smallest candidate set); the remaining specs become post-filters
        applied to each candidate series.
        """
        selector = None
        selector_cnt = None  # candidate-set size of the current selector
        filters = []
        # loop through tagspecs, look for best spec to use as selector
        for tagspec in tags:
            (tag, operator, spec) = self.parse_tagspec(tagspec)
            if operator == '=':
                matches_empty = spec == ''
                if not matches_empty:
                    cnt = self.r.scard('tags:' + tag + ':values:' + spec)
                    # '=' beats any non-'=' selector; among '=' specs keep
                    # the one with the fewest matching paths.
                    if not selector or selector[1] != '=' or selector_cnt > cnt:
                        if selector:
                            filters.append(selector)
                        selector = (tag, operator, spec)
                        selector_cnt = cnt
                        continue
                filters.append((tag, operator, spec))
            elif operator == '=~':
                pattern = re.compile(spec)
                matches_empty = bool(pattern.match(''))
                if not matches_empty and (not selector or selector[1] != '='):
                    cnt = self.r.scard('tags:' + tag + ':values')
                    if not selector or selector_cnt > cnt:
                        if selector:
                            filters.append(selector)
                        selector = (tag, operator, pattern)
                        selector_cnt = cnt
                        continue
                filters.append((tag, operator, pattern))
            elif operator == '!=':
                # '!=spec' matches the empty tag value whenever spec is
                # non-empty, so such specs cannot serve as the selector.
                matches_empty = spec != ''
                if not matches_empty and (not selector or selector[1] != '='):
                    cnt = self.r.scard('tags:' + tag + ':values')
                    if not selector or selector_cnt > cnt:
                        if selector:
                            filters.append(selector)
                        selector = (tag, operator, spec)
                        selector_cnt = cnt
                        continue
                filters.append((tag, operator, spec))
            elif operator == '!=~':
                pattern = re.compile(spec)
                matches_empty = not pattern.match('')
                if not matches_empty and (not selector or selector[1] != '='):
                    cnt = self.r.scard('tags:' + tag + ':values')
                    if not selector or selector_cnt > cnt:
                        if selector:
                            filters.append(selector)
                        selector = (tag, operator, pattern)
                        selector_cnt = cnt
                        continue
                filters.append((tag, operator, pattern))
            else:
                raise ValueError("Invalid operator %s" % operator)
        if not selector:
            raise ValueError("At least one tagspec must not match the empty string")
        # get initial list of series
        (tag, operator, spec) = selector
        # find list of values that match the tagspec
        values = None
        if operator == '=':
            values = [spec]
        elif operator == '=~':
            # see if we can identify a literal prefix to filter by in redis
            match = None
            m = re.match('([a-z0-9]+)([^*?|][^|]*)?$', spec.pattern)
            if m:
                match = m.group(1) + '*'
            values = [value for value in self.r.sscan_iter('tags:' + tag + ':values', match=match) if spec.match(value) is not None]
        elif operator == '!=':
            values = [value for value in self.r.sscan_iter('tags:' + tag + ':values') if value != spec]
        elif operator == '!=~':
            values = [value for value in self.r.sscan_iter('tags:' + tag + ':values') if spec.match(value) is None]
        # if this query matched no values, just short-circuit since the result of the final intersect will be empty
        if not values:
            return []
        results = []
        # apply filters
        # Cheap exact comparisons ('=', '!=') run before regex filters.
        operators = ['=','!=','=~','!=~']
        filters.sort(key=lambda a: operators.index(a[1]))
        for series in self.r.sunion(*['tags:' + tag + ':values:' + value for value in values]):
            parsed = self.parse(series)
            matched = True
            for (tag, operator, spec) in filters:
                # Missing tags compare as the empty string.
                value = parsed.tags.get(tag, '')
                if (
                    (operator == '=' and value != spec) or
                    (operator == '=~' and spec.match(value) is None) or
                    (operator == '!=' and value == spec) or
                    (operator == '!=~' and spec.match(value) is not None)
                ):
                    matched = False
                    break
            if matched:
                # insort keeps the result list sorted as we go.
                bisect.insort_left(results, series)
        return results
    def get_series(self, path, requestContext=None):
        """Return a TaggedSeries for *path*, or None when no tags are stored."""
        tags = {}
        tags = self.r.hgetall('series:' + path + ':tags')
        if not tags:
            return None
        return TaggedSeries(tags['name'], tags)
    def list_tags(self, tagFilter=None, limit=None, requestContext=None):
        """List known tags as ``[{'tag': name}, ...]``, sorted, optionally
        filtered by the *tagFilter* regex and truncated to *limit* entries."""
        result = []
        if tagFilter:
            tagFilter = re.compile(tagFilter)
        for tag in self.r.sscan_iter('tags'):
            if tagFilter and tagFilter.match(tag) is None:
                continue
            if len(result) == 0 or tag >= result[-1]:
                # tag sorts at/after the current tail: append unless the
                # limit is already reached.
                if limit and len(result) >= limit:
                    continue
                result.append(tag)
            else:
                # tag sorts earlier: insert in order, then trim the tail.
                bisect.insort_left(result, tag)
                if limit and len(result) > limit:
                    del result[-1]
        return [
            {'tag': tag}
            for tag in result
        ]
    def get_tag(self, tag, valueFilter=None, limit=None, requestContext=None):
        """Return ``{'tag': ..., 'values': [...]}`` or None for unknown tags."""
        if not self.r.sismember('tags', tag):
            return None
        return {
            'tag': tag,
            'values': self.list_values(
                tag,
                valueFilter=valueFilter,
                limit=limit,
                requestContext=requestContext
            ),
        }
    def list_values(self, tag, valueFilter=None, limit=None, requestContext=None):
        """List a tag's values with per-value series counts, sorted and
        optionally filtered/limited like :meth:`list_tags`."""
        result = []
        if valueFilter:
            valueFilter = re.compile(valueFilter)
        for value in self.r.sscan_iter('tags:' + tag + ':values'):
            if valueFilter and valueFilter.match(value) is None:
                continue
            if len(result) == 0 or value >= result[-1]:
                if limit and len(result) >= limit:
                    continue
                result.append(value)
            else:
                bisect.insort_left(result, value)
                if limit and len(result) > limit:
                    del result[-1]
        return [
            {'value': value, 'count': self.r.scard('tags:' + tag + ':values:' + value)}
            for value in result
        ]
    def tag_series(self, series, requestContext=None):
        """Index *series* under all of its tags; returns the normalized path."""
        # extract tags and normalize path
        parsed = self.parse(series)
        path = parsed.path
        with self.r.pipeline() as pipe:
            pipe.sadd('series', path)
            for tag, value in parsed.tags.items():
                pipe.hset('series:' + path + ':tags', tag, value)
                pipe.sadd('tags', tag)
                pipe.sadd('tags:' + tag + ':series', path)
                pipe.sadd('tags:' + tag + ':values', value)
                pipe.sadd('tags:' + tag + ':values:' + value, path)
            pipe.execute()
        return path
    def del_series(self, series, requestContext=None):
        """Remove *series* from the index; always returns True.

        NOTE(review): entries in 'tags' and 'tags:<tag>:values' are not
        removed here — presumably because they may be shared with other
        series; confirm whether stale values are cleaned up elsewhere.
        """
        # extract tags and normalize path
        parsed = self.parse(series)
        path = parsed.path
        with self.r.pipeline() as pipe:
            pipe.srem('series', path)
            pipe.delete('series:' + path + ':tags')
            for tag, value in parsed.tags.items():
                pipe.srem('tags:' + tag + ':series', path)
                pipe.srem('tags:' + tag + ':values:' + value, path)
            pipe.execute()
        return True
| |
from flask import g, jsonify
from flask_restful import Resource, marshal
from flask_restful import abort, reqparse
from app import db
from models import BucketList, BucketListItem
from serializers import items_serializer
from utils import multiauth
class BucketlistItem(Resource):
    """ Defines endpoints for bucketlist items manipulation
    methods: GET, POST, PUT, DELETE
    url: /api/v1/bucketlists/<bucketlist_id>/items/
    """

    @multiauth.login_required
    def post(self, id, item_id=None):
        """Create a new item in bucketlist ``id``.

        Returns 201 with the serialized item on success; 400 for a bad URL,
        missing name or duplicate name; 401 when the bucketlist belongs to
        another user; 204 when the bucketlist does not exist.
        """
        if item_id:
            # POSTing to an item URL is not supported.
            response = jsonify({'message': 'Method not allowed(POST)'})
            response.status_code = 400
            return response
        bucketlist = BucketList.query.get(id)
        if not bucketlist:
            response = jsonify(
                {'message': 'A bucketlist with the ID provided does not exist!'})
            response.status_code = 204
            return response
        if bucketlist.created_by != g.user.id:
            response = jsonify(
                {'message': 'You are not authorized to use the bucketlist'})
            response.status_code = 401
            return response
        parser = reqparse.RequestParser()
        parser.add_argument('name', type=str, help='A name is required')
        parser.add_argument('description', type=str, default='')
        parser.add_argument('is_done', type=bool, default=False)
        args = parser.parse_args()
        name = args["name"]
        description = args["description"]
        if not name:
            # Also removes the duplicate name check the original repeated
            # after constructing the item.
            response = jsonify(
                {'message': 'Please provide a name for the bucketlist item'})
            response.status_code = 400
            return response
        is_done = args["is_done"]
        # Bugfix: the original wrote `is_done == False`, a no-op comparison;
        # actually coerce anything that is not a proper bool to False.
        if not isinstance(is_done, bool):
            is_done = False
        item = BucketListItem(
            name=name, description=description,
            bucketlist_id=id, is_done=is_done, created_by=g.user.id)
        try:
            # .one() raising means no item with this name exists yet.
            BucketListItem.query.filter_by(name=name).one()
            response = jsonify(
                {'message': 'That name is already taken, try again'})
            response.status_code = 400
            return response
        except Exception:
            try:
                db.session.add(item)
                db.session.commit()
                message = {
                    'message': 'Bucket List item added Successfully!'}
                response = marshal(item, items_serializer)
                response.update(message)
                return response, 201
            except Exception:
                response = jsonify(
                    {'message': 'There was an error saving the item'})
                response.status_code = 400
                return response

    @multiauth.login_required
    def put(self, id, item_id=None):
        """Update item ``item_id`` of bucketlist ``id``.

        Returns 200 on success, 400 for a bad URL, 401 for a foreign item,
        204 when the bucketlist/item does not exist, 500 on a commit error.
        """
        if item_id is None:
            response = jsonify({'message': 'Method not allowed, check url'})
            response.status_code = 400
            return response
        try:
            bucketlist = BucketList.query.get(id)
            item = BucketListItem.query.filter_by(id=item_id).one()
        except Exception:
            response = jsonify(
                {'message': 'The bucketlist or item does not exist'})
            response.status_code = 204
            return response
        if item.created_by != g.user.id:
            response = jsonify(
                {'message': 'You are not authorized to edit this'})
            response.status_code = 401
            return response
        if not (bucketlist and item):
            response = jsonify(
                {'message': 'The bucketlist or item does not exist'})
            response.status_code = 204
            return response
        parser = reqparse.RequestParser()
        parser.add_argument(
            'name', type=str, help='A name is required')
        parser.add_argument('description', type=str, default='')
        parser.add_argument('is_done', type=bool, default=False)
        args = parser.parse_args()
        name = args["name"]
        description = args["description"]
        is_done = args["is_done"]
        data = {'name': name, 'description': description, 'is_done': is_done}
        if not name:
            # Don't blank out the stored name when none was supplied.
            data = {'description': description, 'is_done': is_done}
        BucketListItem.query.filter_by(id=item_id).update(data)
        try:
            db.session.commit()
            response = jsonify({'message': 'Bucket List item updated'})
            response.status_code = 200
            return response
        except Exception:
            response = jsonify(
                {'message': 'There was an error updating the item'})
            response.status_code = 500
            return response

    @multiauth.login_required
    def get(self, id, item_id=None):
        """Return a single serialized item, or an error payload.

        Returns 400 for a bad URL, 401 for a foreign item, 204 when the
        bucketlist or item does not exist.
        """
        if item_id is None:
            response = jsonify({'message': 'Method not allowed, check url'})
            response.status_code = 400
            return response
        bucketlist = BucketList.query.filter_by(id=id).first()
        item = BucketListItem.query.filter_by(
            id=item_id, bucketlist_id=id).first()
        if not bucketlist:
            response = jsonify({'message': 'the bucketlist does not exist'})
            response.status_code = 204
            return response
        if not item:
            response = jsonify({'message': 'The item does not exist'})
            response.status_code = 204
            return response
        if item.created_by != g.user.id:
            response = jsonify(
                {'message': 'You are not authorized to view this'})
            response.status_code = 401
            return response
        return marshal(item, items_serializer)

    @multiauth.login_required
    def delete(self, id, item_id=None):
        """Delete item ``item_id``.

        Returns 200 on success, 400 for a bad URL, 401 for a foreign item,
        204 when the item does not exist.
        """
        if item_id is None:
            response = jsonify({'message': 'Method not allowed (DELETE)'})
            response.status_code = 400
            return response
        item = BucketListItem.query.get(item_id)
        if not item:
            response = jsonify({'message': 'The item does not exist'})
            response.status_code = 204
            return response
        if item.created_by != g.user.id:
            response = jsonify(
                {'message': 'You are not authorized to del this'})
            response.status_code = 401
            return response
        BucketListItem.query.filter_by(id=item_id).delete()
        db.session.commit()
        response = jsonify(
            {'message': 'The item has been successfully deleted'})
        response.status_code = 200
        return response
| |
# Authors: Olivier Grisel <olivier.grisel@ensta.org>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
import numpy as np
import pytest
from scipy import interpolate, sparse
from copy import deepcopy
import joblib
from sklearn.base import is_classifier
from sklearn.datasets import load_diabetes
from sklearn.datasets import make_regression
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.exceptions import ConvergenceWarning
from sklearn.utils._testing import assert_allclose
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.utils._testing import assert_almost_equal
from sklearn.utils._testing import assert_raises
from sklearn.utils._testing import assert_raises_regex
from sklearn.utils._testing import assert_raise_message
from sklearn.utils._testing import assert_warns
from sklearn.utils._testing import assert_warns_message
from sklearn.utils._testing import ignore_warnings
from sklearn.utils._testing import assert_array_equal
from sklearn.utils._testing import TempMemmap
from sklearn.utils.fixes import parse_version
from sklearn.linear_model import (
ARDRegression,
BayesianRidge,
ElasticNet,
ElasticNetCV,
enet_path,
Lars,
lars_path,
Lasso,
LassoCV,
LassoLars,
LassoLarsCV,
LassoLarsIC,
lasso_path,
LinearRegression,
MultiTaskElasticNet,
MultiTaskElasticNetCV,
MultiTaskLasso,
MultiTaskLassoCV,
OrthogonalMatchingPursuit,
Ridge,
RidgeClassifier,
RidgeCV,
)
from sklearn.linear_model._coordinate_descent import _set_order
from sklearn.utils import check_array
@pytest.mark.parametrize('l1_ratio', (-1, 2, None, 10, 'something_wrong'))
def test_l1_ratio_param_invalid(l1_ratio):
    """ElasticNet must reject l1_ratio values outside [0, 1] at fit time."""
    X = np.array([[-1.], [0.], [1.]])
    y = [-1, 0, 1]  # just a straight line
    model = ElasticNet(alpha=0.1, l1_ratio=l1_ratio)
    err_msg = "l1_ratio must be between 0 and 1; got l1_ratio="
    with pytest.raises(ValueError, match=err_msg):
        model.fit(X, y)
@pytest.mark.parametrize('order', ['C', 'F'])
@pytest.mark.parametrize('input_order', ['C', 'F'])
def test_set_order_dense(order, input_order):
    """Check that _set_order returns arrays with promised order."""
    X = np.array([[0], [0], [0]], order=input_order)
    y = np.array([0, 0, 0], order=input_order)
    X2, y2 = _set_order(X, y, order=order)
    flag = 'C_CONTIGUOUS' if order == 'C' else 'F_CONTIGUOUS'
    for arr in (X2, y2):
        assert arr.flags[flag]
    if order == input_order:
        # The inputs must be returned untouched when the layout matches.
        assert X is X2
        assert y is y2
@pytest.mark.parametrize('order', ['C', 'F'])
@pytest.mark.parametrize('input_order', ['C', 'F'])
def test_set_order_sparse(order, input_order):
    """Check that _set_order returns sparse matrices in promised format."""
    X = sparse.coo_matrix(np.array([[0], [0], [0]]))
    y = sparse.coo_matrix(np.array([0, 0, 0]))
    sparse_format = "csc" if input_order == "F" else "csr"
    X = X.asformat(sparse_format)
    # Bugfix: convert y itself.  The original `y = X.asformat(...)` silently
    # replaced y with a copy of X, so y's conversion was never exercised.
    y = y.asformat(sparse_format)
    X2, y2 = _set_order(X, y, order=order)
    if order == 'C':
        assert sparse.isspmatrix_csr(X2)
        assert sparse.isspmatrix_csr(y2)
    elif order == 'F':
        assert sparse.isspmatrix_csc(X2)
        assert sparse.isspmatrix_csc(y2)
def test_lasso_zero():
    """Lasso on all-zero data must fit without crashing and predict zeros."""
    X = [[0], [0], [0]]
    y = [0, 0, 0]
    model = Lasso(alpha=0.1).fit(X, y)
    predictions = model.predict([[1], [2], [3]])
    assert_array_almost_equal(model.coef_, [0])
    assert_array_almost_equal(predictions, [0, 0, 0])
    assert_almost_equal(model.dual_gap_, 0)
def test_lasso_toy():
    """Test Lasso on a toy example for various values of alpha.

    When validating this against glmnet notice that glmnet divides it
    against nobs.
    """
    X = [[-1], [0], [1]]
    Y = [-1, 0, 1]  # just a straight line
    T = [[2], [3], [4]]  # test sample
    # (alpha, expected coef_, expected predictions on T)
    cases = [
        (1e-8, [1], [2, 3, 4]),
        (0.1, [.85], [1.7, 2.55, 3.4]),
        (0.5, [.25], [0.5, 0.75, 1.]),
        (1, [.0], [0, 0, 0]),
    ]
    for alpha, expected_coef, expected_pred in cases:
        clf = Lasso(alpha=alpha)
        clf.fit(X, Y)
        pred = clf.predict(T)
        assert_array_almost_equal(clf.coef_, expected_coef)
        assert_array_almost_equal(pred, expected_pred)
        assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy():
    """Exercise ElasticNet on a 3-point toy problem for several
    (alpha, l1_ratio) settings, with and without a precomputed Gram matrix."""
    # Test ElasticNet for various parameters of alpha and l1_ratio.
    # Actually, the parameters alpha = 0 should not be allowed. However,
    # we test it as a border case.
    # ElasticNet is tested with and without precomputed Gram matrix
    X = np.array([[-1.], [0.], [1.]])
    Y = [-1, 0, 1]  # just a straight line
    T = [[2.], [3.], [4.]]  # test sample
    # this should be the same as lasso
    clf = ElasticNet(alpha=1e-8, l1_ratio=1.0)
    clf.fit(X, Y)
    pred = clf.predict(T)
    assert_array_almost_equal(clf.coef_, [1])
    assert_array_almost_equal(pred, [2, 3, 4])
    assert_almost_equal(clf.dual_gap_, 0)
    clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=100,
                     precompute=False)
    clf.fit(X, Y)
    pred = clf.predict(T)
    assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
    assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
    assert_almost_equal(clf.dual_gap_, 0)
    # same fit, Gram matrix precomputed internally by the estimator
    clf.set_params(max_iter=100, precompute=True)
    clf.fit(X, Y)  # with Gram
    pred = clf.predict(T)
    assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
    assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
    assert_almost_equal(clf.dual_gap_, 0)
    # same fit, Gram matrix supplied explicitly
    clf.set_params(max_iter=100, precompute=np.dot(X.T, X))
    clf.fit(X, Y)  # with Gram
    pred = clf.predict(T)
    assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
    assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
    assert_almost_equal(clf.dual_gap_, 0)
    clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
    clf.fit(X, Y)
    pred = clf.predict(T)
    assert_array_almost_equal(clf.coef_, [0.45454], 3)
    assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
    assert_almost_equal(clf.dual_gap_, 0)
def build_dataset(n_samples=50, n_features=200, n_informative_features=10,
                  n_targets=1):
    """
    build an ill-posed linear regression problem with many noisy features and
    comparatively few samples
    """
    rng = np.random.RandomState(0)
    # Same draw order as before: weights first, then the design matrices.
    weight_shape = (n_features, n_targets) if n_targets > 1 else (n_features,)
    w = rng.randn(*weight_shape)
    w[n_informative_features:] = 0.0  # only the first few features matter
    X = rng.randn(n_samples, n_features)
    y = np.dot(X, w)
    X_test = rng.randn(n_samples, n_features)
    y_test = np.dot(X_test, w)
    return X, y, X_test, y_test
def test_lasso_cv():
    """LassoCV selects the expected alpha (with and without precompute) and
    agrees with LassoLarsCV on alpha, MSE and held-out score."""
    X, y, X_test, y_test = build_dataset()
    max_iter = 150
    clf = LassoCV(n_alphas=10, eps=1e-3, max_iter=max_iter, cv=3).fit(X, y)
    assert_almost_equal(clf.alpha_, 0.056, 2)
    clf = LassoCV(n_alphas=10, eps=1e-3, max_iter=max_iter, precompute=True,
                  cv=3)
    clf.fit(X, y)
    assert_almost_equal(clf.alpha_, 0.056, 2)
    # Check that the lars and the coordinate descent implementation
    # select a similar alpha
    lars = LassoLarsCV(normalize=False, max_iter=30, cv=3).fit(X, y)
    # for this we check that they don't fall in the grid of
    # clf.alphas further than 1
    assert np.abs(np.searchsorted(clf.alphas_[::-1], lars.alpha_) -
                  np.searchsorted(clf.alphas_[::-1], clf.alpha_)) <= 1
    # check that they also give a similar MSE
    mse_lars = interpolate.interp1d(lars.cv_alphas_, lars.mse_path_.T)
    np.testing.assert_approx_equal(mse_lars(clf.alphas_[5]).mean(),
                                   clf.mse_path_[5].mean(), significant=2)
    # test set
    assert clf.score(X_test, y_test) > 0.99
def test_lasso_cv_with_some_model_selection():
    """Smoke test: LassoCV in a pipeline with a ShuffleSplit CV strategy."""
    from sklearn.model_selection import ShuffleSplit
    from sklearn import datasets
    diabetes = datasets.load_diabetes()
    pipe = make_pipeline(
        StandardScaler(),
        LassoCV(cv=ShuffleSplit(random_state=0)),
    )
    pipe.fit(diabetes.data, diabetes.target)
def test_lasso_cv_positive_constraint():
    """`positive=True` must force all LassoCV coefficients non-negative."""
    X, y, X_test, y_test = build_dataset()
    max_iter = 500
    # Ensure the unconstrained fit has a negative coefficient
    unconstrained = LassoCV(n_alphas=3, eps=1e-1, max_iter=max_iter, cv=2,
                            n_jobs=1)
    unconstrained.fit(X, y)
    assert min(unconstrained.coef_) < 0
    # On same data, constrained fit has non-negative coefficients
    constrained = LassoCV(n_alphas=3, eps=1e-1, max_iter=max_iter,
                          positive=True, cv=2, n_jobs=1)
    constrained.fit(X, y)
    assert min(constrained.coef_) >= 0
@pytest.mark.parametrize(
    "LinearModel, params",
    [(Lasso, {"tol": 1e-16, "alpha": 0.1}),
     (LassoLars, {"alpha": 0.1}),
     (RidgeClassifier, {"solver": 'sparse_cg', "alpha": 0.1}),
     (ElasticNet, {"tol": 1e-16, 'l1_ratio': 1, "alpha": 0.1}),
     (ElasticNet, {"tol": 1e-16, 'l1_ratio': 0, "alpha": 0.1}),
     (Ridge, {"solver": 'sparse_cg', 'tol': 1e-12, "alpha": 0.1}),
     (BayesianRidge, {}),
     (ARDRegression, {}),
     (OrthogonalMatchingPursuit, {}),
     (MultiTaskElasticNet, {"tol": 1e-16, 'l1_ratio': 1, "alpha": 0.1}),
     (MultiTaskElasticNet, {"tol": 1e-16, 'l1_ratio': 0, "alpha": 0.1}),
     (MultiTaskLasso, {"tol": 1e-16, "alpha": 0.1}),
     (Lars, {}),
     (LinearRegression, {}),
     (LassoLarsIC, {})]
)
def test_model_pipeline_same_as_normalize_true(LinearModel, params):
    """A model with normalize=True must match StandardScaler + the same
    model with normalize=False, up to the documented alpha rescaling."""
    # normalize is True
    model_name = LinearModel.__name__
    model_normalize = LinearModel(normalize=True, fit_intercept=True, **params)
    pipeline = make_pipeline(
        StandardScaler(),
        LinearModel(normalize=False, fit_intercept=True, **params)
    )
    is_multitask = model_normalize._get_tags()["multioutput_only"]
    # prepare the data
    n_samples, n_features = 100, 2
    rng = np.random.RandomState(0)
    w = rng.randn(n_features)
    X = rng.randn(n_samples, n_features)
    X += 20  # make features non-zero mean
    y = X.dot(w)
    # make classes out of regression
    if is_classifier(model_normalize):
        y[y > np.mean(y)] = -1
        y[y > 0] = 1
    if is_multitask:
        y = np.stack((y, y), axis=1)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
    # Cleanup: track the rescaled alpha with an explicit sentinel instead of
    # the fragile `'new_params' in locals()` probe used before.
    new_params = None
    if 'alpha' in params:
        model_normalize.set_params(alpha=params['alpha'])
        if model_name in ['Lasso', 'LassoLars', 'MultiTaskLasso']:
            new_params = dict(
                alpha=params['alpha'] * np.sqrt(X_train.shape[0]))
        if model_name in ['Ridge', 'RidgeClassifier']:
            new_params = dict(alpha=params['alpha'] * X_train.shape[0])
        if model_name in ['ElasticNet', 'MultiTaskElasticNet']:
            if params['l1_ratio'] == 1:
                new_params = dict(
                    alpha=params['alpha'] * np.sqrt(X_train.shape[0]))
            if params['l1_ratio'] == 0:
                new_params = dict(alpha=params['alpha'] * X_train.shape[0])
    if new_params is not None:
        pipeline[1].set_params(**new_params)
    model_normalize.fit(X_train, y_train)
    y_pred_normalize = model_normalize.predict(X_test)
    pipeline.fit(X_train, y_train)
    y_pred_standardize = pipeline.predict(X_test)
    assert_allclose(
        model_normalize.coef_ * pipeline[0].scale_, pipeline[1].coef_)
    assert pipeline[1].intercept_ == pytest.approx(y_train.mean())
    assert (model_normalize.intercept_ ==
            pytest.approx(y_train.mean() -
                          model_normalize.coef_.dot(X_train.mean(0))))
    assert_allclose(y_pred_normalize, y_pred_standardize)
@pytest.mark.parametrize(
    "LinearModel, params",
    [(Lasso, {"tol": 1e-16, "alpha": 0.1}),
     (LassoCV, {"tol": 1e-16}),
     (ElasticNetCV, {}),
     (RidgeClassifier, {"solver": 'sparse_cg', "alpha": 0.1}),
     (ElasticNet, {"tol": 1e-16, 'l1_ratio': 1, "alpha": 0.01}),
     (ElasticNet, {"tol": 1e-16, 'l1_ratio': 0, "alpha": 0.01}),
     (Ridge, {"solver": 'sparse_cg', 'tol': 1e-12, "alpha": 0.1}),
     (LinearRegression, {}),
     (RidgeCV, {})]
)
def test_model_pipeline_same_dense_and_sparse(LinearModel, params):
    """A StandardScaler(with_mean=False) + model pipeline must produce the
    same coef_, intercept_ and predictions whether X is dense or sparse."""
    # Test that linear model preceded by StandardScaler in the pipeline and
    # with normalize set to False gives the same y_pred and the same .coef_
    # given X sparse or dense
    model_dense = make_pipeline(
        StandardScaler(with_mean=False),
        LinearModel(normalize=False, **params)
    )
    model_sparse = make_pipeline(
        StandardScaler(with_mean=False),
        LinearModel(normalize=False, **params)
    )
    # prepare the data: sparsify by zeroing most entries
    rng = np.random.RandomState(0)
    n_samples = 200
    n_features = 2
    X = rng.randn(n_samples, n_features)
    X[X < 0.1] = 0.
    X_sparse = sparse.csr_matrix(X)
    y = rng.rand(n_samples)
    if is_classifier(model_dense):
        y = np.sign(y)
    model_dense.fit(X, y)
    model_sparse.fit(X_sparse, y)
    assert_allclose(model_sparse[1].coef_, model_dense[1].coef_)
    y_pred_dense = model_dense.predict(X)
    y_pred_sparse = model_sparse.predict(X_sparse)
    assert_allclose(y_pred_dense, y_pred_sparse)
    assert_allclose(model_dense[1].intercept_, model_sparse[1].intercept_)
def test_lasso_path_return_models_vs_new_return_gives_same_coefficients():
    """lasso_path must agree with lars_path on a tiny exactly-solvable
    problem, after interpolating both paths on a common alpha grid."""
    # Some toy data with a known regularization path.
    X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
    y = np.array([1, 2, 3.1])
    alphas = [5., 1., .5]
    # Interpolate the LARS path over alpha (paths are returned with
    # decreasing alpha, hence the [::-1] reversals).
    lars_alphas, _, lars_coefs = lars_path(X, y, method='lasso')
    lars_interp = interpolate.interp1d(lars_alphas[::-1],
                                       lars_coefs[:, ::-1])
    # Same for the coordinate-descent path evaluated on our grid.
    cd_alphas, cd_coefs, _ = lasso_path(X, y, alphas=alphas,
                                        return_models=False)
    cd_interp = interpolate.interp1d(cd_alphas[::-1],
                                     cd_coefs[:, ::-1])
    assert_array_almost_equal(cd_interp(alphas), lars_interp(alphas),
                              decimal=1)
def test_enet_path():
    """End-to-end model selection with (MultiTask)ElasticNetCV: smallest
    penalty and ridge-leaning l1_ratio are selected on a well-conditioned
    dense problem, and the mono/multi-task variants agree."""
    # We use a large number of samples and of informative features so that
    # the l1_ratio selected is more toward ridge than lasso
    X, y, X_test, y_test = build_dataset(n_samples=200, n_features=100,
                                         n_informative_features=100)
    max_iter = 150
    # Here we have a small number of iterations, and thus the
    # ElasticNet might not converge. This is to speed up tests
    clf = ElasticNetCV(alphas=[0.01, 0.05, 0.1], eps=2e-3,
                       l1_ratio=[0.5, 0.7], cv=3,
                       max_iter=max_iter)
    ignore_warnings(clf.fit)(X, y)
    # Well-conditioned settings, we should have selected our
    # smallest penalty
    assert_almost_equal(clf.alpha_, min(clf.alphas_))
    # Non-sparse ground truth: we should have selected an elastic-net
    # that is closer to ridge than to lasso
    assert clf.l1_ratio_ == min(clf.l1_ratio)
    # Same checks again with precomputed Gram matrix.
    clf = ElasticNetCV(alphas=[0.01, 0.05, 0.1], eps=2e-3,
                       l1_ratio=[0.5, 0.7], cv=3,
                       max_iter=max_iter, precompute=True)
    ignore_warnings(clf.fit)(X, y)
    # Well-conditioned settings, we should have selected our
    # smallest penalty
    assert_almost_equal(clf.alpha_, min(clf.alphas_))
    # Non-sparse ground truth: we should have selected an elastic-net
    # that is closer to ridge than to lasso
    assert clf.l1_ratio_ == min(clf.l1_ratio)
    # We are in well-conditioned settings with low noise: we should
    # have a good test-set performance
    assert clf.score(X_test, y_test) > 0.99
    # Multi-output/target case
    X, y, X_test, y_test = build_dataset(n_features=10, n_targets=3)
    clf = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7],
                                cv=3, max_iter=max_iter)
    ignore_warnings(clf.fit)(X, y)
    # We are in well-conditioned settings with low noise: we should
    # have a good test-set performance
    assert clf.score(X_test, y_test) > 0.99
    assert clf.coef_.shape == (3, 10)
    # Mono-output should have same cross-validated alpha_ and l1_ratio_
    # in both cases.
    X, y, _, _ = build_dataset(n_features=10)
    clf1 = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
    clf1.fit(X, y)
    clf2 = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
    clf2.fit(X, y[:, np.newaxis])
    assert_almost_equal(clf1.l1_ratio_, clf2.l1_ratio_)
    assert_almost_equal(clf1.alpha_, clf2.alpha_)
def test_path_parameters():
    """Constructor hyper-parameters of the CV path must be stored as given."""
    X, y, _, _ = build_dataset()
    cv_model = ElasticNetCV(n_alphas=50, eps=1e-3, max_iter=100,
                            l1_ratio=0.5, tol=1e-3)
    cv_model.fit(X, y)  # new params
    assert_almost_equal(0.5, cv_model.l1_ratio)
    assert 50 == cv_model.n_alphas
    assert 50 == len(cv_model.alphas_)
def test_warm_start():
    """Two warm-started 5-iteration fits must match one 10-iteration fit."""
    X, y, _, _ = build_dataset()
    warm = ElasticNet(alpha=0.1, max_iter=5, warm_start=True)
    ignore_warnings(warm.fit)(X, y)
    # Second round: 5 more iterations starting from the previous solution.
    ignore_warnings(warm.fit)(X, y)
    cold = ElasticNet(alpha=0.1, max_iter=10)
    ignore_warnings(cold.fit)(X, y)
    assert_array_almost_equal(cold.coef_, warm.coef_)
def test_lasso_alpha_warning():
    """Fitting a Lasso with alpha=0 must emit a UserWarning."""
    X = [[-1], [0], [1]]
    Y = [-1, 0, 1]  # just a straight line
    assert_warns(UserWarning, Lasso(alpha=0).fit, X, Y)
def test_lasso_positive_constraint():
    """positive=True must force non-negative coefficients, with and
    without a precomputed Gram matrix."""
    X = [[-1], [0], [1]]
    y = [1, 0, -1]  # a straight line with negative slope
    for precompute in (False, True):
        model = Lasso(alpha=0.1, max_iter=1000, positive=True,
                      precompute=precompute)
        model.fit(X, y)
        assert min(model.coef_) >= 0
def test_enet_positive_constraint():
    """positive=True must force non-negative ElasticNet coefficients."""
    X = [[-1], [0], [1]]
    y = [1, 0, -1]  # a straight line with negative slope
    model = ElasticNet(alpha=0.1, max_iter=1000, positive=True).fit(X, y)
    assert min(model.coef_) >= 0
def test_enet_cv_positive_constraint():
    """positive=True must also be honoured along the full CV path."""
    X, y, X_test, y_test = build_dataset()
    max_iter = 500
    # Sanity check: the unconstrained fit selects a negative coefficient...
    unconstrained = ElasticNetCV(n_alphas=3, eps=1e-1, max_iter=max_iter,
                                 cv=2, n_jobs=1)
    unconstrained.fit(X, y)
    assert min(unconstrained.coef_) < 0
    # ...and the constrained fit on the same data does not.
    constrained = ElasticNetCV(n_alphas=3, eps=1e-1, max_iter=max_iter,
                               cv=2, positive=True, n_jobs=1)
    constrained.fit(X, y)
    assert min(constrained.coef_) >= 0
def test_uniform_targets():
    """With a constant target, CV estimators must predict that constant
    exactly and collapse the alpha grid to machine resolution."""
    enet = ElasticNetCV(n_alphas=3)
    m_enet = MultiTaskElasticNetCV(n_alphas=3)
    lasso = LassoCV(n_alphas=3)
    m_lasso = MultiTaskLassoCV(n_alphas=3)
    models_single_task = (enet, lasso)
    models_multi_task = (m_enet, m_lasso)
    rng = np.random.RandomState(0)
    X_train = rng.random_sample(size=(10, 3))
    X_test = rng.random_sample(size=(10, 3))
    # y1/y2 are filled in place for each constant value below.
    y1 = np.empty(10)
    y2 = np.empty((10, 2))
    for model in models_single_task:
        for y_values in (0, 5):
            y1.fill(y_values)
            assert_array_equal(model.fit(X_train, y1).predict(X_test), y1)
            assert_array_equal(model.alphas_, [np.finfo(float).resolution]*3)
    for model in models_multi_task:
        for y_values in (0, 5):
            # Two tasks with different (but still constant) targets.
            y2[:, 0].fill(y_values)
            y2[:, 1].fill(2 * y_values)
            assert_array_equal(model.fit(X_train, y2).predict(X_test), y2)
            assert_array_equal(model.alphas_, [np.finfo(float).resolution]*3)
def test_multi_task_lasso_and_enet():
    """Two duplicated targets must yield identical per-task coefficient
    rows, and one iteration must trigger a convergence warning."""
    X, y, X_test, y_test = build_dataset()
    Y = np.c_[y, y]
    # Y_test = np.c_[y_test, y_test]
    for estimator in (MultiTaskLasso(alpha=1, tol=1e-8),
                      MultiTaskElasticNet(alpha=1, tol=1e-8)):
        estimator.fit(X, Y)
        assert 0 < estimator.dual_gap_ < 1e-5
        assert_array_almost_equal(estimator.coef_[0], estimator.coef_[1])
    clf = MultiTaskElasticNet(alpha=1.0, tol=1e-8, max_iter=1)
    assert_warns_message(ConvergenceWarning, 'did not converge', clf.fit, X, Y)
def test_lasso_readonly_data():
    """Lasso must fit read-only (memmapped) input arrays."""
    X = np.array([[-1], [0], [1]])
    Y = np.array([-1, 0, 1])  # just a straight line
    T = np.array([[2], [3], [4]])  # test sample
    with TempMemmap((X, Y)) as (X, Y):
        model = Lasso(alpha=0.5).fit(X, Y)
        assert_array_almost_equal(model.coef_, [.25])
        assert_array_almost_equal(model.predict(T), [0.5, 0.75, 1.])
        assert_almost_equal(model.dual_gap_, 0)
def test_multi_task_lasso_readonly_data():
    """MultiTaskLasso must cope with read-only (memmapped) inputs."""
    X, y, X_test, y_test = build_dataset()
    Y = np.c_[y, y]
    with TempMemmap((X, Y)) as (X, Y):
        # BUG FIX: the original rebuilt `Y = np.c_[y, y]` here, replacing
        # the read-only memmap with a fresh writable array and defeating
        # the purpose of the test. Fit directly on the memmapped Y.
        clf = MultiTaskLasso(alpha=1, tol=1e-8).fit(X, Y)
        assert 0 < clf.dual_gap_ < 1e-5
        assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
def test_enet_multitarget():
    """A multi-target ElasticNet fit must match independent per-target
    fits row by row (coef_, intercept_ and dual_gap_)."""
    n_targets = 3
    X, y, _, _ = build_dataset(n_samples=10, n_features=8,
                               n_informative_features=10, n_targets=n_targets)
    estimator = ElasticNet(alpha=0.01)
    estimator.fit(X, y)
    coef = estimator.coef_
    intercept = estimator.intercept_
    dual_gap = estimator.dual_gap_
    for k in range(n_targets):
        # Refit on a single target and compare with row k of the joint fit.
        estimator.fit(X, y[:, k])
        assert_array_almost_equal(coef[k, :], estimator.coef_)
        assert_array_almost_equal(intercept[k], estimator.intercept_)
        assert_array_almost_equal(dual_gap[k], estimator.dual_gap_)
def test_multioutput_enetcv_error():
    """ElasticNetCV must reject 2-D targets (MultiTaskElasticNetCV is the
    multi-output variant)."""
    rng = np.random.RandomState(0)
    X = rng.randn(10, 2)
    y = rng.randn(10, 2)
    assert_raises(ValueError, ElasticNetCV().fit, X, y)
def test_multitask_enet_and_lasso_cv():
    """Smoke-test MultiTaskElasticNetCV / MultiTaskLassoCV: pinned alpha_
    values on a reference dataset and expected shapes of the CV outputs."""
    X, y, _, _ = build_dataset(n_features=50, n_targets=3)
    clf = MultiTaskElasticNetCV(cv=3).fit(X, y)
    assert_almost_equal(clf.alpha_, 0.00556, 3)
    clf = MultiTaskLassoCV(cv=3).fit(X, y)
    assert_almost_equal(clf.alpha_, 0.00278, 3)
    X, y, _, _ = build_dataset(n_targets=3)
    clf = MultiTaskElasticNetCV(n_alphas=10, eps=1e-3, max_iter=100,
                                l1_ratio=[0.3, 0.5], tol=1e-3, cv=3)
    clf.fit(X, y)
    assert 0.5 == clf.l1_ratio_
    assert (3, X.shape[1]) == clf.coef_.shape
    assert (3, ) == clf.intercept_.shape
    # mse_path_ has one slice per l1_ratio candidate.
    assert (2, 10, 3) == clf.mse_path_.shape
    assert (2, 10) == clf.alphas_.shape
    X, y, _, _ = build_dataset(n_targets=3)
    clf = MultiTaskLassoCV(n_alphas=10, eps=1e-3, max_iter=100, tol=1e-3, cv=3)
    clf.fit(X, y)
    assert (3, X.shape[1]) == clf.coef_.shape
    assert (3, ) == clf.intercept_.shape
    assert (10, 3) == clf.mse_path_.shape
    assert 10 == len(clf.alphas_)
def test_1d_multioutput_enet_and_multitask_enet_cv():
    """A one-column multi-task problem must match the single-output fit."""
    X, y, _, _ = build_dataset(n_features=10)
    y = y[:, np.newaxis]
    single = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
    single.fit(X, y[:, 0])
    multi = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
    multi.fit(X, y)
    assert_almost_equal(single.l1_ratio_, multi.l1_ratio_)
    assert_almost_equal(single.alpha_, multi.alpha_)
    assert_almost_equal(single.coef_, multi.coef_[0])
    assert_almost_equal(single.intercept_, multi.intercept_[0])
def test_1d_multioutput_lasso_and_multitask_lasso_cv():
    """A one-column MultiTaskLassoCV must match plain LassoCV."""
    X, y, _, _ = build_dataset(n_features=10)
    y = y[:, np.newaxis]
    single = LassoCV(n_alphas=5, eps=2e-3)
    single.fit(X, y[:, 0])
    multi = MultiTaskLassoCV(n_alphas=5, eps=2e-3)
    multi.fit(X, y)
    assert_almost_equal(single.alpha_, multi.alpha_)
    assert_almost_equal(single.coef_, multi.coef_[0])
    assert_almost_equal(single.intercept_, multi.intercept_[0])
def test_sparse_input_dtype_enet_and_lassocv():
    """float32 and float64 sparse inputs must give ~identical CV results."""
    X, y, _, _ = build_dataset(n_features=10)
    for Estimator in (ElasticNetCV, LassoCV):
        clf64 = Estimator(n_alphas=5)
        clf64.fit(sparse.csr_matrix(X), y)
        clf32 = Estimator(n_alphas=5)
        clf32.fit(sparse.csr_matrix(X, dtype=np.float32), y)
        assert_almost_equal(clf64.alpha_, clf32.alpha_, decimal=6)
        assert_almost_equal(clf64.coef_, clf32.coef_, decimal=6)
def test_precompute_invalid_argument():
    """Invalid `precompute` values must raise informative ValueErrors."""
    X, y, _, _ = build_dataset()
    for Estimator in (ElasticNetCV, LassoCV):
        bad = Estimator(precompute="invalid")
        assert_raises_regex(ValueError, ".*should be.*True.*False.*auto.*"
                            "array-like.*Got 'invalid'", bad.fit, X, y)
    # 'auto' is accepted by the CV classes but not by plain ElasticNet.
    assert_raises_regex(ValueError, ".*should be.*True.*False.*array-like.*"
                        "Got 'auto'", ElasticNet(precompute='auto').fit, X, y)
def test_warm_start_convergence():
    """Refitting with warm_start=True on identical data must converge in a
    single pass, while warm_start=False repeats the full optimization."""
    X, y, _, _ = build_dataset()
    model = ElasticNet(alpha=1e-3, tol=1e-3).fit(X, y)
    n_iter_reference = model.n_iter_
    # This dataset is not trivial enough for the model to converge in one pass.
    assert n_iter_reference > 2
    # Check that n_iter_ is invariant to multiple calls to fit
    # when warm_start=False, all else being equal.
    model.fit(X, y)
    n_iter_cold_start = model.n_iter_
    assert n_iter_cold_start == n_iter_reference
    # Fit the same model again, using a warm start: the optimizer just performs
    # a single pass before checking that it has already converged
    model.set_params(warm_start=True)
    model.fit(X, y)
    n_iter_warm_start = model.n_iter_
    assert n_iter_warm_start == 1
def test_warm_start_convergence_with_regularizer_decrement():
    """Warm-starting from a more regularized solution must speed up
    convergence on the less regularized problem."""
    X, y = load_diabetes(return_X_y=True)
    # Train a model to converge on a lightly regularized problem
    final_alpha = 1e-5
    low_reg_model = ElasticNet(alpha=final_alpha).fit(X, y)
    # Fitting a new model on a more regularized version of the same problem.
    # Fitting with high regularization is easier it should converge faster
    # in general.
    high_reg_model = ElasticNet(alpha=final_alpha * 10).fit(X, y)
    assert low_reg_model.n_iter_ > high_reg_model.n_iter_
    # Fit the solution to the original, less regularized version of the
    # problem but from the solution of the highly regularized variant of
    # the problem as a better starting point. This should also converge
    # faster than the original model that starts from zero.
    warm_low_reg_model = deepcopy(high_reg_model)
    warm_low_reg_model.set_params(warm_start=True, alpha=final_alpha)
    warm_low_reg_model.fit(X, y)
    assert low_reg_model.n_iter_ > warm_low_reg_model.n_iter_
def test_random_descent():
    """Cyclic and random coordinate selection must converge to the same
    solution across the gram/no-gram, sparse and multi-output code paths."""
    # Test that both random and cyclic selection give the same results.
    # Ensure that the test models fully converge and check a wide
    # range of conditions.
    # This uses the coordinate descent algo using the gram trick.
    X, y, _, _ = build_dataset(n_samples=50, n_features=20)
    clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
    clf_cyclic.fit(X, y)
    clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
    clf_random.fit(X, y)
    assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
    assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
    # This uses the descent algo without the gram trick
    # (n_samples < n_features after transposing).
    clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
    clf_cyclic.fit(X.T, y[:20])
    clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
    clf_random.fit(X.T, y[:20])
    assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
    assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
    # Sparse Case
    clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
    clf_cyclic.fit(sparse.csr_matrix(X), y)
    clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
    clf_random.fit(sparse.csr_matrix(X), y)
    assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
    assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
    # Multioutput case.
    new_y = np.hstack((y[:, np.newaxis], y[:, np.newaxis]))
    clf_cyclic = MultiTaskElasticNet(selection='cyclic', tol=1e-8)
    clf_cyclic.fit(X, new_y)
    clf_random = MultiTaskElasticNet(selection='random', tol=1e-8,
                                     random_state=42)
    clf_random.fit(X, new_y)
    assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
    assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
    # Raise error when selection is not in cyclic or random.
    clf_random = ElasticNet(selection='invalid')
    assert_raises(ValueError, clf_random.fit, X, y)
def test_enet_path_positive():
    """positive=True in the path functions: non-negative coefficients for
    mono-output, ValueError for multi-output."""
    X, Y, _, _ = build_dataset(n_samples=50, n_features=50, n_targets=2)
    for path in (enet_path, lasso_path):
        # Mono-output: every coefficient along the path is non-negative.
        _, path_coefs, _ = path(X, Y[:, 0], positive=True)
        assert np.all(path_coefs >= 0)
        # Multi-output: the positive option is rejected.
        assert_raises(ValueError, path, X, Y, positive=True)
def test_sparse_dense_descent_paths():
    """Dense and sparse inputs must produce identical descent paths."""
    X, y, _, _ = build_dataset(n_samples=50, n_features=20)
    X_sparse = sparse.csr_matrix(X)
    for path in (enet_path, lasso_path):
        _, dense_coefs, _ = path(X, y, fit_intercept=False)
        _, sparse_coefs, _ = path(X_sparse, y, fit_intercept=False)
        assert_array_almost_equal(dense_coefs, sparse_coefs)
def test_check_input_false():
    """ElasticNet.fit(check_input=False) must accept pre-validated inputs.

    The caller is then responsible for supplying Fortran-ordered data of a
    suitable dtype; unvalidated C-ordered X must make the fit fail.
    """
    X, y, _, _ = build_dataset(n_samples=20, n_features=10)
    X = check_array(X, order='F', dtype='float64')
    # BUG FIX: the original validated ``X`` here a second time and bound the
    # result to ``y``, silently turning the test into a 10-target fit.
    # Validate the actual target (1-D, hence ensure_2d=False).
    y = check_array(y, order='F', dtype='float64', ensure_2d=False)
    clf = ElasticNet(selection='cyclic', tol=1e-8)
    # Check that no error is raised if data is provided in the right format
    clf.fit(X, y, check_input=False)
    # With check_input=False, an exhaustive check is not made on y but its
    # dtype is still cast in _preprocess_data to X's dtype. So the test should
    # pass anyway
    X = check_array(X, order='F', dtype='float32')
    clf.fit(X, y, check_input=False)
    # With no input checking, providing X in C order should result in false
    # computation
    X = check_array(X, order='C', dtype='float64')
    assert_raises(ValueError, clf.fit, X, y, check_input=False)
@pytest.mark.parametrize("check_input", [True, False])
def test_enet_copy_X_True(check_input):
    """copy_X=True must leave the caller's array untouched."""
    X, y, _, _ = build_dataset()
    X = X.copy(order='F')
    X_before = X.copy()
    ElasticNet(copy_X=True).fit(X, y, check_input=check_input)
    assert_array_equal(X_before, X)
def test_enet_copy_X_False_check_input_False():
    """copy_X=False with check_input=False is allowed to overwrite X."""
    X, y, _, _ = build_dataset()
    X = X.copy(order='F')
    X_before = X.copy()
    ElasticNet(copy_X=False).fit(X, y, check_input=False)
    # No copy was made, so fitting modified X in place.
    assert np.any(np.not_equal(X_before, X))
def test_overrided_gram_matrix():
    """A user-supplied Gram matrix must trigger a warning when the data is
    centered for the intercept (the precomputed Gram becomes stale)."""
    X, y, _, _ = build_dataset(n_samples=20, n_features=10)
    Gram = X.T.dot(X)
    clf = ElasticNet(selection='cyclic', tol=1e-8, precompute=Gram)
    assert_warns_message(UserWarning,
                         "Gram matrix was provided but X was centered"
                         " to fit intercept, "
                         "or X was normalized : recomputing Gram matrix.",
                         clf.fit, X, y)
@pytest.mark.parametrize('model', [ElasticNet, Lasso])
def test_lasso_non_float_y(model):
    """Integer targets must yield the same fit as their float equivalents."""
    X = [[0, 0], [1, 1], [-1, -1]]
    y_int = [0, 1, 2]
    y_float = [0.0, 1.0, 2.0]
    clf_int = model(fit_intercept=False)
    clf_int.fit(X, y_int)
    clf_float = model(fit_intercept=False)
    clf_float.fit(X, y_float)
    assert_array_equal(clf_int.coef_, clf_float.coef_)
def test_enet_float_precision():
    """float32 and float64 fits (plain, precomputed-Gram, multi-task) must
    agree to ~4 decimals, and coef_ must keep the input dtype."""
    # Generate dataset
    X, y, X_test, y_test = build_dataset(n_samples=20, n_features=10)
    # Here we have a small number of iterations, and thus the
    # ElasticNet might not converge. This is to speed up tests
    for normalize in [True, False]:
        for fit_intercept in [True, False]:
            coef = {}
            intercept = {}
            for dtype in [np.float64, np.float32]:
                clf = ElasticNet(alpha=0.5, max_iter=100, precompute=False,
                                 fit_intercept=fit_intercept,
                                 normalize=normalize)
                # Cast the data in place for this dtype round.
                X = dtype(X)
                y = dtype(y)
                ignore_warnings(clf.fit)(X, y)
                coef[('simple', dtype)] = clf.coef_
                intercept[('simple', dtype)] = clf.intercept_
                assert clf.coef_.dtype == dtype
                # test precompute Gram array
                Gram = X.T.dot(X)
                clf_precompute = ElasticNet(alpha=0.5, max_iter=100,
                                            precompute=Gram,
                                            fit_intercept=fit_intercept,
                                            normalize=normalize)
                ignore_warnings(clf_precompute.fit)(X, y)
                assert_array_almost_equal(clf.coef_, clf_precompute.coef_)
                assert_array_almost_equal(clf.intercept_,
                                          clf_precompute.intercept_)
                # test multi task enet
                multi_y = np.hstack((y[:, np.newaxis], y[:, np.newaxis]))
                clf_multioutput = MultiTaskElasticNet(
                    alpha=0.5, max_iter=100, fit_intercept=fit_intercept,
                    normalize=normalize)
                clf_multioutput.fit(X, multi_y)
                coef[('multi', dtype)] = clf_multioutput.coef_
                intercept[('multi', dtype)] = clf_multioutput.intercept_
                assert clf.coef_.dtype == dtype
            # Compare the two dtype rounds for both variants.
            for v in ['simple', 'multi']:
                assert_array_almost_equal(coef[(v, np.float32)],
                                          coef[(v, np.float64)],
                                          decimal=4)
                assert_array_almost_equal(intercept[(v, np.float32)],
                                          intercept[(v, np.float64)],
                                          decimal=4)
def test_enet_l1_ratio():
    """l1_ratio=0 must be rejected with automatic alpha grids but accepted
    (matching a tiny positive l1_ratio) when alphas are supplied."""
    # Test that an error message is raised if an estimator that
    # uses _alpha_grid is called with l1_ratio=0
    msg = ("Automatic alpha grid generation is not supported for l1_ratio=0. "
           "Please supply a grid by providing your estimator with the "
           "appropriate `alphas=` argument.")
    X = np.array([[1, 2, 4, 5, 8], [3, 5, 7, 7, 8]]).T
    y = np.array([12, 10, 11, 21, 5])
    assert_raise_message(ValueError, msg, ElasticNetCV(
        l1_ratio=0, random_state=42).fit, X, y)
    assert_raise_message(ValueError, msg, MultiTaskElasticNetCV(
        l1_ratio=0, random_state=42).fit, X, y[:, None])
    # Test that l1_ratio=0 is allowed if we supply a grid manually
    alphas = [0.1, 10]
    estkwds = {'alphas': alphas, 'random_state': 42}
    est_desired = ElasticNetCV(l1_ratio=0.00001, **estkwds)
    est = ElasticNetCV(l1_ratio=0, **estkwds)
    with ignore_warnings():
        est_desired.fit(X, y)
        est.fit(X, y)
    assert_array_almost_equal(est.coef_, est_desired.coef_, decimal=5)
    # Same check for the multi-task variant.
    est_desired = MultiTaskElasticNetCV(l1_ratio=0.00001, **estkwds)
    est = MultiTaskElasticNetCV(l1_ratio=0, **estkwds)
    with ignore_warnings():
        est.fit(X, y[:, None])
        est_desired.fit(X, y[:, None])
    assert_array_almost_equal(est.coef_, est_desired.coef_, decimal=5)
def test_coef_shape_not_zero():
    """Even with a single constant feature, coef_ must keep shape (1,)."""
    model = Lasso(fit_intercept=False)
    model.fit(np.c_[np.ones(3)], np.ones(3))
    assert model.coef_.shape == (1,)
def test_warm_start_multitask_lasso():
    """Two warm-started 5-iteration multitask fits must match one
    10-iteration cold fit."""
    X, y, X_test, y_test = build_dataset()
    Y = np.c_[y, y]
    warm = MultiTaskLasso(alpha=0.1, max_iter=5, warm_start=True)
    ignore_warnings(warm.fit)(X, Y)
    # Second round: 5 more iterations from the previous solution.
    ignore_warnings(warm.fit)(X, Y)
    cold = MultiTaskLasso(alpha=0.1, max_iter=10)
    ignore_warnings(cold.fit)(X, Y)
    assert_array_almost_equal(cold.coef_, warm.coef_)
@pytest.mark.parametrize('klass, n_classes, kwargs',
                         [(Lasso, 1, dict(precompute=True)),
                          (Lasso, 1, dict(precompute=False)),
                          (MultiTaskLasso, 2, dict())])
def test_enet_coordinate_descent(klass, n_classes, kwargs):
    """Test that a warning is issued if model does not converge"""
    # FIX: the (MultiTaskLasso, 2, dict()) parameter set was listed twice,
    # running the exact same case twice; the duplicate was removed.
    clf = klass(max_iter=2, **kwargs)
    n_samples = 5
    n_features = 2
    # Huge feature values force the solver to stop on max_iter, not tol.
    X = np.ones((n_samples, n_features)) * 1e50
    y = np.ones((n_samples, n_classes))
    if klass == Lasso:
        y = y.ravel()
    assert_warns(ConvergenceWarning, clf.fit, X, y)
def test_convergence_warnings():
    """A forced-failure fit must warn; a converging fit must be silent."""
    random_state = np.random.RandomState(0)
    X = random_state.standard_normal((1000, 500))
    y = random_state.standard_normal((1000, 3))
    # check that the model fails to converge (a negative dual gap cannot occur)
    with pytest.warns(ConvergenceWarning):
        MultiTaskElasticNet(max_iter=1, tol=-1).fit(X, y)
    # check that the model converges w/o warnings
    # (pytest.warns(None) records all warnings without asserting any).
    with pytest.warns(None) as record:
        MultiTaskElasticNet(max_iter=1000).fit(X, y)
    assert not record.list
def test_sparse_input_convergence_warning():
    """Convergence warnings must also fire on the sparse input code path."""
    X, y, _, _ = build_dataset(n_samples=1000, n_features=500)
    # One iteration with tol=0 cannot converge.
    with pytest.warns(ConvergenceWarning):
        ElasticNet(max_iter=1, tol=0).fit(
            sparse.csr_matrix(X, dtype=np.float32), y)
    # check that the model converges w/o warnings
    with pytest.warns(None) as record:
        Lasso(max_iter=1000).fit(sparse.csr_matrix(X, dtype=np.float32), y)
    assert not record.list
@pytest.mark.parametrize("precompute, inner_precompute", [
    (True, True),
    ('auto', False),
    (False, False),
])
def test_lassoCV_does_not_set_precompute(monkeypatch, precompute,
                                         inner_precompute):
    """LassoCV must not propagate precompute='auto' to the inner Lasso used
    for the final refit (it is resolved to a boolean)."""
    X, y, _, _ = build_dataset()
    calls = 0

    # Spy on the inner Lasso to capture the precompute value actually used.
    class LassoMock(Lasso):
        def fit(self, X, y):
            super().fit(X, y)
            nonlocal calls
            calls += 1
            assert self.precompute == inner_precompute
    monkeypatch.setattr("sklearn.linear_model._coordinate_descent.Lasso",
                        LassoMock)
    clf = LassoCV(precompute=precompute)
    clf.fit(X, y)
    # Guard against the mock never being exercised.
    assert calls > 0
def test_multi_task_lasso_cv_dtype():
    """MultiTaskLassoCV must handle an integer-dtype design matrix."""
    n_samples, n_features = 10, 3
    rng = np.random.RandomState(42)
    # Binary design matrix, explicitly int-typed.
    X = rng.binomial(1, .5, size=(n_samples, n_features)).astype(int)
    y = X[:, [0, 0]].copy()  # both targets are copies of feature 0
    est = MultiTaskLassoCV(n_alphas=5, fit_intercept=True).fit(X, y)
    assert_array_almost_equal(est.coef_, [[1, 0, 0]] * 2, decimal=3)
@pytest.mark.parametrize('fit_intercept', [True, False])
@pytest.mark.parametrize('alpha', [0.01])
@pytest.mark.parametrize('normalize', [False, True])
@pytest.mark.parametrize('precompute', [False, True])
def test_enet_sample_weight_consistency(fit_intercept, alpha, normalize,
                                        precompute):
    """Test that the impact of sample_weight is consistent."""
    rng = np.random.RandomState(0)
    n_samples, n_features = 10, 5
    X = rng.rand(n_samples, n_features)
    y = rng.rand(n_samples)
    params = dict(alpha=alpha, fit_intercept=fit_intercept,
                  precompute=precompute, tol=1e-6, l1_ratio=0.5)
    # Reference fit without sample weights.
    reg = ElasticNet(**params).fit(X, y)
    coef = reg.coef_.copy()
    if fit_intercept:
        intercept = reg.intercept_
    # sample_weight=np.ones(..) should be equivalent to sample_weight=None
    sample_weight = np.ones_like(y)
    reg.fit(X, y, sample_weight=sample_weight)
    assert_allclose(reg.coef_, coef, rtol=1e-6)
    if fit_intercept:
        assert_allclose(reg.intercept_, intercept)
    # sample_weight=None should be equivalent to sample_weight = number
    sample_weight = 123.
    reg.fit(X, y, sample_weight=sample_weight)
    assert_allclose(reg.coef_, coef, rtol=1e-6)
    if fit_intercept:
        assert_allclose(reg.intercept_, intercept)
    # scaling of sample_weight should have no effect, cf. np.average()
    sample_weight = 2 * np.ones_like(y)
    reg.fit(X, y, sample_weight=sample_weight)
    assert_allclose(reg.coef_, coef, rtol=1e-6)
    if fit_intercept:
        assert_allclose(reg.intercept_, intercept)
    # setting one element of sample_weight to 0 is equivalent to removing
    # the corresponding sample
    sample_weight = np.ones_like(y)
    sample_weight[-1] = 0
    reg.fit(X, y, sample_weight=sample_weight)
    coef1 = reg.coef_.copy()
    if fit_intercept:
        intercept1 = reg.intercept_
    reg.fit(X[:-1], y[:-1])
    assert_allclose(reg.coef_, coef1, rtol=1e-6)
    if fit_intercept:
        assert_allclose(reg.intercept_, intercept1)
    # check that multiplying sample_weight by 2 is equivalent
    # to repeating corresponding samples twice
    if sparse.issparse(X):
        X = X.toarray()
    # X2/y2 duplicate the first half of the samples.
    X2 = np.concatenate([X, X[:n_samples//2]], axis=0)
    y2 = np.concatenate([y, y[:n_samples//2]])
    sample_weight_1 = np.ones(len(y))
    sample_weight_1[:n_samples//2] = 2
    reg1 = ElasticNet(**params).fit(
        X, y, sample_weight=sample_weight_1
    )
    reg2 = ElasticNet(**params).fit(
        X2, y2, sample_weight=None
    )
    assert_allclose(reg1.coef_, reg2.coef_)
def test_enet_sample_weight_sparse():
    """Combining sample_weight with a sparse design must raise ValueError."""
    model = ElasticNet()
    X = sparse.csc_matrix(np.zeros((3, 2)))
    y = np.array([-1, 0, 1])
    weights = np.array([1, 2, 3])
    with pytest.raises(ValueError, match="Sample weights do not.*support "
                                         "sparse matrices"):
        model.fit(X, y, sample_weight=weights, check_input=True)
@pytest.mark.parametrize("backend", ["loky", "threading"])
@pytest.mark.parametrize("estimator",
                         [ElasticNetCV, MultiTaskElasticNetCV,
                          LassoCV, MultiTaskLassoCV])
def test_linear_models_cv_fit_for_all_backends(backend, estimator):
    """The CV fit must succeed under every joblib backend, including the
    memmapping loky backend."""
    # LinearModelsCV.fit performs inplace operations on input data which is
    # memmapped when using loky backend, causing an error due to unexpected
    # behavior of fancy indexing of read-only memmaps (cf. numpy#14132).
    if (parse_version(joblib.__version__) < parse_version('0.12')
            and backend == 'loky'):
        pytest.skip('loky backend does not exist in joblib <0.12')
    # Create a problem sufficiently large to cause memmapping (1MB).
    n_targets = 1 + (estimator in (MultiTaskElasticNetCV, MultiTaskLassoCV))
    X, y = make_regression(20000, 10, n_targets=n_targets)
    with joblib.parallel_backend(backend=backend):
        estimator(n_jobs=2, cv=3).fit(X, y)
| |
"""Object-oriented interface to an experiment's data."""
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
import os
import os.path as op
import re
from itertools import chain
from collections import OrderedDict
import numpy as np
import pandas as pd
import tables as tb
from klusta.traces.waveform import WaveformLoader, SpikeLoader
from klusta.kwik.model import (_concatenate_virtual_arrays,
_dat_to_traces,
)
from klusta.traces.filter import apply_filter, bandpass_filter
from selection import select, slice_to_indices
from kwiklib.dataio.kwik import (get_filenames, open_files, close_files,
add_spikes, add_cluster, add_cluster_group, remove_cluster,
remove_cluster_group)
from kwiklib.dataio.utils import convert_dtype
from kwiklib.dataio.spikecache import SpikeCache
from kwiklib.utils.six import (iteritems, string_types, iterkeys,
itervalues, next)
from kwiklib.utils.wrap import wrap
from kwiklib.utils.logger import warn, debug, info
# -----------------------------------------------------------------------------
# Utility functions
# -----------------------------------------------------------------------------
def _resolve_hdf5_path(files, path):
"""Resolve a HDF5 external link. Return the referred node (group or
dataset), or None if it does not exist.
Arguments:
* files: a dict {type: file_handle}.
* path: a string like "{type}/path/to/node" where `type` is one of
`kwx`, `raw.kwd`, etc.
"""
nodes = path.split('/')
path_ext = '/' + '/'.join(nodes[1:])
type = nodes[0]
pattern = r'\{([a-zA-Z\._]+)\}'
assert re.match(pattern, type)
r = re.search(pattern, type)
assert r
type = r.group(1)
# Resolve the link.
file = files.get(type, None)
if file:
return file.getNode(path_ext)
else:
return None
def _get_child_id(child):
id = child._v_name
if id.isdigit():
return int(id)
else:
return id
def _print_instance(obj, depth=0, name=''):
    """Recursively build a list of (depth, label) pairs describing `obj`.

    Used to pretty-print the experiment tree: lists/dicts are summarized by
    their first element, arrays by dtype/shape, and class instances by their
    public fields.
    """
    # PY3 FIX: `long` only exists on Python 2; the original referenced it
    # unconditionally, raising NameError on Python 3. `str is bytes` is
    # True exactly on Python 2, where the extra `long` type is evaluated.
    integer_types = (int, long) if str is bytes else (int,)
    r = []
    # Handle the first element of the list/dict.
    if isinstance(obj, (list, dict)):
        if not obj:
            return []
        if isinstance(obj, list):
            key, sobj = '0', obj[0]
        else:
            key, sobj = next(iteritems(obj))
        # Scalar-ish children have no sub-structure to display.
        if isinstance(sobj, (list, dict, string_types, np.ndarray,
                             float) + integer_types):
            r = []
        else:
            r = [(depth + 1, str(key))] + _print_instance(sobj, depth + 1)
    # Arrays do not have children.
    elif isinstance(obj, (np.ndarray, tb.EArray)):
        r = []
    # Handle class instances.
    elif hasattr(obj, '__dict__'):
        vs = vars(obj)
        if hasattr(obj, '__dir__'):
            vs.update({name: getattr(obj, name)
                       for name in dir(obj)
                       if name not in ('CLASS', 'TITLE', 'VERSION')})
        fields = {k: v
                  for k, v in iteritems(vs)
                  if not k.startswith('_')}
        r = list(chain(*[_print_instance(fields[n], depth=depth + 1,
                                         name=str(n))
                         for n in sorted(iterkeys(fields))]))
    # Add the current object's display string.
    if name:
        if isinstance(obj, tb.EArray):
            s = name + ' [{dtype} {shape}]'.format(dtype=obj.dtype,
                                                   shape=obj.shape)
        elif (isinstance(obj, (float, tuple, string_types) + integer_types)
                or obj is None):
            s = name + ' = ' + str(obj)
        else:
            s = name
        r = [(depth, s)] + r
    return r
class ArrayProxy(object):
    """Expose a view over an array, optionally pinned to one column of the
    last axis.

    When `col` is given, indexing the proxy indexes the wrapped array with
    that column appended on the last axis; otherwise indexing is forwarded
    unchanged. `shape` always drops the last axis.
    """
    def __init__(self, arr, col=None):
        self._arr = arr
        self._col = col
        self.dtype = arr.dtype

    @property
    def shape(self):
        # The proxy addresses everything before the last axis.
        return self._arr.shape[:-1]

    def __getitem__(self, item):
        if self._col is None:
            return self._arr[item]
        if isinstance(item, tuple):
            # Append the pinned column to the explicit index tuple.
            return self._arr[item + (self._col,)]
        return self._arr[item, ..., self._col]
# -----------------------------------------------------------------------------
# Node wrappers
# -----------------------------------------------------------------------------
class Node(object):
    """Base wrapper around a node of the `.kwik` HDF5 file.

    Attribute reads/writes fall back to the underlying HDF5 node's
    attributes when no matching Python attribute exists.
    """
    _files = None
    _kwik = None
    _node = None
    _root = None

    def __init__(self, files, node=None, root=None):
        self._files = files
        self._kwik = self._files.get('kwik', None)
        assert self._kwik is not None
        if node is None:
            node = self._kwik.root
        self._node = node
        self._root = root

    def _gen_children(self, container_name=None, child_class=None):
        """Return a dictionary {child_id: child_instance}."""
        # The container with the children is either the current node, or
        # a child of this node.
        if container_name is None:
            container = self._node
        else:
            container = self._node._f_getChild(container_name)
        pairs = [
            (_get_child_id(child),
             child_class(self._files, child, root=self._root))
            for child in container
        ]
        # PY3 FIX: the original sorted with `lambda (x, y): x`; tuple
        # parameter unpacking was removed in Python 3 (PEP 3113).
        pairs.sort(key=lambda pair: pair[0])
        return OrderedDict(pairs)

    def _get_child(self, child_name):
        """Return the child specified by its name.

        If this child has a `hdf5_path` special, then the path is resolved,
        and the referred child in another file is returned.

        """
        child = self._node._f_getChild(child_name)
        try:
            # There's a link that needs to be resolved: return it.
            path = child._f_getAttr('hdf5_path')
            return _resolve_hdf5_path(self._files, path)
        except AttributeError:
            # No HDF5 external link: just return the normal child.
            return child

    def __getattr__(self, key):
        try:
            return self.__dict__[key]
        except KeyError:
            # FIX: narrowed from a bare `except`: only a missing instance
            # attribute should fall through to the HDF5 attributes.
            try:
                return self._node._f_getAttr(key)
            except AttributeError:
                warn(("{key} needs to be an attribute of "
                      "{node}").format(key=key, node=self._node._v_name))
                return None

    def __setattr__(self, key, value):
        # Assignments to existing HDF5 attributes are routed to the file;
        # everything else becomes a regular Python attribute.
        try:
            self._node._f_getAttr(key)
            self._node._f_setAttr(key, value)
        except AttributeError:
            super(Node, self).__setattr__(key, value)
class NodeWrapper(object):
    """Like a PyTables node, but supports in addition: `node.attr`."""
    def __init__(self, node):
        self._node = node

    def __getitem__(self, key):
        return self._node[key]

    def __getattr__(self, key):
        # Do not override if key is an attribute of this class.
        if key.startswith('_'):
            try:
                return self.__dict__[key]
            # Accept nodewrapper._method if _method is a method of the
            # PyTables Node object.
            except KeyError:
                return getattr(self._node, key)
        try:
            # Return the wrapped node if the child is a group.
            attr = getattr(self._node, key)
            if isinstance(attr, tb.Group):
                return NodeWrapper(attr)
            else:
                return attr
        # BUGFIX: narrowed a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed; any lookup failure falls back to the
        # HDF5 attribute.
        except Exception:
            try:
                return self._node._f_getAttr(key)
            except AttributeError:
                # NOTE: old format
                if key == 'n_features_per_channel':
                    return self._node._f_getAttr('nfeatures_per_channel')
                warn(("{key} needs to be an attribute of "
                      "{node}").format(key=key, node=self._node._v_name))
                return None

    def __setattr__(self, key, value):
        # Private attributes are stored on the wrapper itself.
        if key.startswith('_'):
            self.__dict__[key] = value
            return
        # Ensure the key is an existing attribute of the current node.
        try:
            self._node._f_getAttr(key)
        except AttributeError:
            # BUGFIX: raising a string literal is invalid (it raises a
            # TypeError instead of the intended message); raise a proper
            # exception type.
            raise AttributeError(
                "{key} needs to be an attribute of {node}".format(
                    key=key, node=self._node._v_name))
        # Set the attribute.
        self._node._f_setAttr(key, value)

    def __dir__(self):
        return sorted(dir(self._node) + self._node._v_attrs._v_attrnames)

    def __repr__(self):
        return self._node.__repr__()
class DictVectorizer(object):
    """This object serves as a vectorized proxy for a dictionary of objects
    that have individual fields of interest. For example: d={k: obj.attr1}.
    The object dv = DictVectorizer(d, 'attr1.subattr') can be used as:
        dv[3]
        dv[[1,2,5]]
        dv[2:4]
    """
    def __init__(self, dict, path):
        # NOTE: `dict` shadows the builtin but is kept for interface
        # compatibility with existing callers.
        self._dict = dict
        self._path = path.split('.')

    def keys(self):
        return self._dict.keys()

    def values(self):
        return self._dict.values()

    def _get_path(self, key):
        """Resolve the path recursively for a given key of the dictionary."""
        val = self._dict[key]
        for p in self._path:
            val = getattr(val, p)
        return val

    def _set_path(self, key, value):
        """Resolve the path recursively for a given key of the dictionary,
        and set a value on the final attribute."""
        val = self._dict[key]
        # Walk down to the object that owns the final attribute...
        for p in self._path[:-1]:
            val = getattr(val, p)
        # ...and set that attribute.  BUGFIX: previously this used `key`
        # (the dictionary key) as the attribute name instead of the last
        # path component.
        setattr(val, self._path[-1], value)

    def __getitem__(self, item):
        if isinstance(item, slice):
            item = slice_to_indices(item, lenindices=len(self._dict),
                                    keys=sorted(self._dict.keys()))
        if hasattr(item, '__len__'):
            return np.array([self._get_path(k) for k in item])
        else:
            return self._get_path(item)

    def __setitem__(self, item, value):
        # BUGFIX: removed a stray guard copied from a __setattr__ that
        # referenced an undefined `key` variable (NameError on every call).
        if isinstance(item, slice):
            item = slice_to_indices(item, lenindices=len(self._dict))
        if hasattr(item, '__len__'):
            # Broadcast a scalar value over all requested keys.
            if not hasattr(value, '__len__'):
                value = [value] * len(item)
            # BUGFIX: assign the element-wise value `val` (previously the
            # whole sequence `value` was assigned to every key).
            for k, val in zip(item, value):
                self._set_path(k, val)
        else:
            return self._set_path(item, value)
def _read_traces(files, dtype=None, n_channels=None):
    """Return a virtual concatenation of the raw traces of all recordings.

    files: dict of opened files; must contain 'kwik', may contain 'raw.kwd'.
    dtype, n_channels: fallback values for opening a .dat file when there
        are no recordings in the kwik file.
    """
    kwd_path = None
    dat_path = None
    kwik = files['kwik']
    recordings = kwik.root.recordings
    traces = []
    # opened_files = []
    # HACK when there is no recordings: find a .dat file with the same
    # base name in the current directory.
    if not recordings:
        name = op.splitext(op.basename(kwik.filename))[0]
        p = op.join(op.dirname(op.realpath(kwik.filename)), name + '.dat')
        if op.exists(p):
            dat = _dat_to_traces(p, dtype=dtype or 'int16',
                                 n_channels=n_channels)
            traces.append(dat)
    for recording in recordings:
        # Is there a path specified to a .raw.kwd file which exists in
        # [KWIK]/recordings/[X]/raw? If so, open it.
        raw = recording.raw
        if 'hdf5_path' in raw._v_attrs:
            # Drop the last 8 characters of the stored HDF5 path
            # (presumably the trailing '/data' part -- TODO confirm).
            kwd_path = raw._v_attrs.hdf5_path[:-8]
            kwd = files['raw.kwd']
            if kwd is None:
                # The .raw.kwd file was not opened; fall through to the
                # dat_path handling below.
                debug("%s not found, trying same basename in KWIK dir" %
                      kwd_path)
            else:
                debug("Loading traces: %s" % kwd_path)
                traces.append(kwd.root.recordings._f_getChild(str(recording._v_name)).data)
                # opened_files.append(kwd)
                continue
        # Is there a path specified to a .dat file which exists?
        if 'dat_path' in raw._v_attrs:
            # Read dtype/n_channels from the spikedetekt metadata; both
            # must be present for a .dat file to be usable.
            dtype = kwik.root.application_data.spikedetekt._v_attrs.dtype[0]
            if dtype:
                dtype = np.dtype(dtype)
            n_channels = kwik.root.application_data.spikedetekt._v_attrs. \
                n_channels
            if n_channels:
                n_channels = int(n_channels)
            assert dtype is not None
            assert n_channels
            dat_path = raw._v_attrs.dat_path
            # Fall back to a .dat file with the kwik's basename when the
            # stored path does not exist.
            if not op.exists(dat_path):
                debug("%s not found, trying same basename in KWIK dir" %
                      dat_path)
                name = op.splitext(op.basename(kwik.filename))[0]
                dat_path = op.join(op.dirname(op.realpath(kwik.filename)), name + '.dat')
            if op.exists(dat_path):
                debug("Loading traces: %s" % dat_path)
                dat = _dat_to_traces(dat_path, dtype=dtype,
                                     n_channels=n_channels)
                traces.append(dat)
                # opened_files.append(dat)
                continue
    if not traces:
        warn("No traces found: the waveforms won't be available.")
    return _concatenate_virtual_arrays(traces)
# -----------------------------------------------------------------------------
# Experiment class and sub-classes.
# -----------------------------------------------------------------------------
class Experiment(Node):
    """An Experiment instance holds all information related to an
    experiment. One can access any information using a logical structure
    that is somewhat independent from the physical representation on disk.
    """
    def __init__(self, name=None, dir=None, files=None, mode='r', prm=None):
        """`name` must correspond to the basename of the files.

        dir: directory containing the experiment files.
        files: already-opened file dict; opened from name/dir when None.
        mode: file open mode (default 'r').
        prm: optional parameters dictionary.
        """
        self.name = name
        self._dir = dir
        self.dir = dir
        self._mode = mode
        self._files = files
        # BUGFIX: replaced the mutable default argument `prm={}` with None
        # to avoid a dict instance shared across calls.
        self._prm = {} if prm is None else prm
        if self._files is None:
            self._files = open_files(self.name, dir=self._dir, mode=self._mode)

        def _get_filename(f):
            # Resolved path of an opened file, or None when absent.
            if f is None:
                return None
            else:
                return os.path.realpath(f.filename)

        self._filenames = {ftype: _get_filename(f)
                           for ftype, f in iteritems(self._files)}
        super(Experiment, self).__init__(self._files)
        self._root = self._node
        # Ensure the version of the kwik format is exactly 2.
        assert self._root._f_getAttr('kwik_version') == 2
        self.application_data = NodeWrapper(self._root.application_data)
        self.channel_groups = self._gen_children('channel_groups', ChannelGroup)
        self.recordings = self._gen_children('recordings', Recording)
        # Initialize the spike cache of all channel groups.
        for grp in self.channel_groups.itervalues():
            grp.spikes.init_cache()

    def gen_filename(self, extension):
        """Return a path next to the .kwik file with the given extension."""
        if extension.startswith('.'):
            extension = extension[1:]
        return os.path.splitext(self._filenames['kwik'])[0] + '.' + extension

    def __enter__(self):
        return self

    def close(self):
        """Close all files opened for this experiment."""
        if self._files is not None:
            close_files(self._files)

    def __repr__(self):
        n = "<Experiment '{name}'>".format(name=self.name)
        l = _print_instance(self, name=n)
        return '\n'.join(' ' * d + s for d, s in l)

    def __exit__(self, type, value, tb):
        self.close()
class ChannelGroup(Node):
    """A channel group: its channels, clusterings, cluster groups, and
    spikes."""
    def __init__(self, files, node=None, root=None):
        super(ChannelGroup, self).__init__(files, node, root=root)
        # self.application_data = NodeWrapper(self._node.application_data)
        # self.user_data = NodeWrapper(self._node.user_data)
        # Children keyed by id.
        self.channels = self._gen_children('channels', Channel)
        # Clustering containers ('main', 'original', ...).
        self.clusters = ClustersNode(self._files, self._node.clusters, root=self._root)
        self.cluster_groups = ClusterGroupsNode(self._files, self._node.cluster_groups, root=self._root)
        self.spikes = Spikes(self._files, self._node.spikes, root=self._root)
class Spikes(Node):
    """All spikes of one channel group: times, clusters, features/masks,
    and lazily-loaded waveforms."""
    def __init__(self, files, node=None, root=None):
        super(Spikes, self).__init__(files, node, root=root)
        self.time_samples = self._node.time_samples
        self.time_fractional = self._node.time_fractional
        self.recording = self._node.recording
        self.clusters = Clusters(self._files, self._node.clusters, root=self._root)
        # Add concatenated time samples
        self.concatenated_time_samples = self._compute_concatenated_time_samples()
        self.channel_group_id = self._node._v_parent._v_name
        # Get large datasets, that may be in external files.
        # self.features_masks = self._get_child('features_masks')
        # self.waveforms_raw = self._get_child('waveforms_raw')
        # self.waveforms_filtered = self._get_child('waveforms_filtered')
        # Load features masks directly from KWX.
        g = self.channel_group_id
        path = '/channel_groups/{}/features_masks'.format(g)
        if files['kwx']:
            self.features_masks = files['kwx'].getNode(path)
        else:
            self.features_masks = None
        # Load raw data directly from raw data.
        traces = _read_traces(files)
        # Waveform extraction window and filter settings come from the
        # spikedetekt application data.
        b = self._root.application_data.spikedetekt._f_getAttr('extract_s_before')
        a = self._root.application_data.spikedetekt._f_getAttr('extract_s_after')
        order = self._root.application_data.spikedetekt._f_getAttr('filter_butter_order')
        rate = self._root.application_data.spikedetekt._f_getAttr('sample_rate')
        low = self._root.application_data.spikedetekt._f_getAttr('filter_low')
        if 'filter_high_factor' in self._root.application_data.spikedetekt._v_attrs:
            high = self._root.application_data.spikedetekt._f_getAttr('filter_high_factor') * rate
        else:
            # NOTE: old format
            high = self._root.application_data.spikedetekt._f_getAttr('filter_high')
        b_filter = bandpass_filter(rate=rate,
                                   low=low,
                                   high=high,
                                   order=order)
        debug("Enable waveform filter.")

        def the_filter(x, axis=0):
            # Bandpass-filter extracted waveforms on the fly.
            return apply_filter(x, b_filter, axis=axis)

        filter_margin = order * 3
        channels = self._root.channel_groups._f_getChild(self.channel_group_id)._f_getAttr('channel_order')
        _waveform_loader = WaveformLoader(n_samples=(b, a),
                                          traces=traces,
                                          filter=the_filter,
                                          filter_margin=filter_margin,
                                          scale_factor=.01,
                                          channels=channels,
                                          )
        self.waveforms_raw = SpikeLoader(_waveform_loader,
                                         self.concatenated_time_samples)
        # Raw and filtered waveforms share the same loader here.
        self.waveforms_filtered = self.waveforms_raw
        nspikes = len(self.time_samples)
        if self.waveforms_raw is not None:
            self.nsamples, self.nchannels = self.waveforms_raw.shape[1:]
        # Fall back to an empty features/masks array when no KWX file.
        if self.features_masks is None:
            self.features_masks = np.zeros((nspikes, 1, 1), dtype=np.float32)
        # 3D array: last axis holds (feature, mask) columns.
        if len(self.features_masks.shape) == 3:
            self.features = ArrayProxy(self.features_masks, col=0)
            self.masks = ArrayProxy(self.features_masks, col=1)
        elif len(self.features_masks.shape) == 2:
            self.features = self.features_masks
            self.masks = None #np.ones_like(self.features)
        self.nfeatures = self.features.shape[1]

    def _compute_concatenated_time_samples(self):
        """Offset per-recording relative spike times by each recording's
        start sample to obtain absolute times."""
        t_rel = self.time_samples[:]
        recordings = self.recording[:]
        # No per-spike recording indices saved: assume recording 0.
        if len(recordings) == 0 and len(t_rel) > 0:
            recordings = np.zeros_like(t_rel)
        # Get list of recordings.
        recs = self._root.recordings
        recs = sorted([int(_._v_name) for _ in recs._f_listNodes()])
        # Get their start times.
        if not recs:
            return t_rel
        start_times = np.zeros(max(recs)+1, dtype=np.uint64)
        for r in recs:
            recgrp = getattr(self._root.recordings, str(r))
            sample_rate = recgrp._f_getAttr('sample_rate')
            start_time = recgrp._f_getAttr('start_time') or 0.
            start_times[r] = int(start_time * sample_rate)
        return t_rel + start_times[recordings]

    def add(self, **kwargs):
        """Add a spike. Only `time_samples` is mandatory."""
        add_spikes(self._files, channel_group_id=self.channel_group_id, **kwargs)

    def init_cache(self):
        """Initialize the cache for the features & masks."""
        self._spikecache = SpikeCache(
            # TODO: handle multiple clusterings in the spike cache here
            spike_clusters=self.clusters.main,
            features_masks=self.features_masks,
            waveforms_raw=self.waveforms_raw,
            waveforms_filtered=self.waveforms_filtered,
            # TODO: put this value in the parameters
            cache_fraction=1.,)

    def load_features_masks_bg(self, *args, **kwargs):
        # Delegate to the spike cache (background load).
        return self._spikecache.load_features_masks_bg(*args, **kwargs)

    def load_features_masks(self, *args, **kwargs):
        # Delegate to the spike cache.
        return self._spikecache.load_features_masks(*args, **kwargs)

    def load_waveforms(self, *args, **kwargs):
        # Delegate to the spike cache.
        return self._spikecache.load_waveforms(*args, **kwargs)

    def __getitem__(self, item):
        raise NotImplementedError("""It is not possible to select entire spikes
        yet.""")

    def __len__(self):
        # Number of spikes.
        return self.time_samples.shape[0]
class Clusters(Node):
    """The parent of main, original, etc. Contains multiple clusterings."""
    def __init__(self, files, node=None, root=None):
        super(Clusters, self).__init__(files, node, root=root)
        # Each child of the Clusters group is assigned here.
        # FIX: loop variable renamed so it no longer shadows the `node`
        # parameter.
        for child in self._node._f_iterNodes():
            setattr(self, child._v_name, child)

    def copy(self, clustering_from, clustering_to):
        """Copy the spike-cluster array and the cluster metadata group of
        `clustering_from` over `clustering_to` (overwriting it)."""
        spike_clusters_from = self._node._f_getChild(clustering_from)[:]
        clusters_to = self._node._f_getChild(clustering_to)
        clusters_to[:] = spike_clusters_from
        group_from = self._node._v_parent._v_parent.clusters._f_getChild(clustering_from)
        # NOTE(review): `group_to` is unused, but the lookup validates that
        # the destination group exists before overwriting it -- kept.
        group_to = self._node._v_parent._v_parent.clusters._f_getChild(clustering_to)
        group_from._f_copy(newname=clustering_to, overwrite=True, recursive=True)
class Clustering(Node):
    """An actual clustering, with the cluster numbers for all spikes."""
    def __init__(self, files, node=None, root=None, child_class=None):
        super(Clustering, self).__init__(files, node, root=root)
        self._child_class = child_class
        self._update()

    def _update(self):
        # Rebuild the children mapping and the vectorized color proxy.
        self._dict = self._gen_children(child_class=self._child_class)
        self.color = DictVectorizer(self._dict,
                                    'application_data.klustaviewa.color')

    # Dictionary-like interface, delegated to the children mapping.
    def __getitem__(self, item):
        return self._dict[item]

    def __iter__(self):
        return iter(self._dict)

    def __len__(self):
        return len(self._dict)

    def __contains__(self, v):
        return self._dict.__contains__(v)

    def keys(self):
        return self._dict.keys()

    def values(self):
        return self._dict.values()

    def iteritems(self):
        return self._dict.iteritems()
class ClustersClustering(Clustering):
    """An actual clustering, with color and group."""
    def _update(self):
        # Refresh children and the vectorized proxies over them.
        self._dict = self._gen_children(child_class=self._child_class)
        self.color = DictVectorizer(self._dict,
                                    'application_data.klustaviewa.color')
        self.group = DictVectorizer(self._dict, 'cluster_group')

    def add_cluster(self, id=None, color=None, **kwargs):
        """Create a new cluster in this clustering and refresh children."""
        group_id = self._node._v_parent._v_parent._v_name
        clustering_name = self._node._v_name
        add_cluster(self._files, channel_group_id=group_id,
                    color=color,
                    id=str(id), clustering=clustering_name, **kwargs)
        self._update()

    def remove_cluster(self, id=None,):
        """Delete a cluster from this clustering and refresh children."""
        group_id = self._node._v_parent._v_parent._v_name
        clustering_name = self._node._v_name
        remove_cluster(self._files, channel_group_id=group_id,
                       id=str(id), clustering=clustering_name)
        self._update()
class ClusterGroupsClustering(Clustering):
    """A clustering of cluster groups, with color and name proxies."""
    def _update(self):
        # Refresh children and the vectorized proxies over them.
        self._dict = self._gen_children(child_class=self._child_class)
        self.color = DictVectorizer(self._dict,
                                    'application_data.klustaviewa.color')
        self.name = DictVectorizer(self._dict, 'name')

    def add_group(self, id=None, color=None, name=None):
        """Create a new cluster group and refresh children."""
        group_id = self._node._v_parent._v_parent._v_name
        clustering_name = self._node._v_name
        add_cluster_group(self._files, channel_group_id=group_id,
                          color=color, name=name,
                          id=str(id), clustering=clustering_name, )
        self._update()

    def remove_group(self, id=None,):
        """Delete a cluster group and refresh children."""
        group_id = self._node._v_parent._v_parent._v_name
        clustering_name = self._node._v_name
        remove_cluster_group(self._files, channel_group_id=group_id,
                             id=str(id), clustering=clustering_name)
        self._update()
class ClustersNode(Node):
    """The parent of clustering types: main, original..."""
    def __init__(self, files, node=None, root=None):
        super(ClustersNode, self).__init__(files, node, root=root)
        # Each child of the group is assigned here.
        # FIX: loop variable renamed so it no longer shadows the `node`
        # parameter.
        for child in self._node._f_iterNodes():
            setattr(self, child._v_name,
                    ClustersClustering(self._files, child,
                                       child_class=Cluster, root=self._root))
class ClusterGroupsNode(Node):
    """The parent of cluster-group clusterings, one per clustering type."""
    def __init__(self, files, node=None, root=None):
        super(ClusterGroupsNode, self).__init__(files, node, root=root)
        # Each child of the group is assigned here.
        # FIX: loop variable renamed so it no longer shadows the `node`
        # parameter.
        for child in self._node._f_iterNodes():
            setattr(self, child._v_name,
                    ClusterGroupsClustering(self._files, child,
                                            child_class=ClusterGroup))
class Channel(Node):
    """A single recording channel; attributes come from the HDF5 node."""
    def __init__(self, files, node=None, root=None):
        super(Channel, self).__init__(files, node, root=root)
        # self.application_data = NodeWrapper(self._node.application_data)
        # self.user_data = NodeWrapper(self._node.user_data)
class Cluster(Node):
    """A single cluster, exposing its application data and group."""
    def __init__(self, files, node=None, root=None):
        super(Cluster, self).__init__(files, node, root=root)
        self.application_data = NodeWrapper(self._node.application_data)

    def __getattr__(self, name):
        # Any attribute other than `cluster_group` is handled by Node.
        if name != 'cluster_group':
            return super(Cluster, self).__getattr__(name)
        # `cluster_group` may be stored as a scalar or as a (possibly
        # empty) sequence; normalize it to a scalar.
        cg = self._node._v_attrs.cluster_group
        if not hasattr(cg, '__len__'):
            return cg
        return cg[0] if len(cg) > 0 else 0
class ClusterGroup(Node):
    """A group of clusters; attributes come from the HDF5 node."""
    def __init__(self, files, node=None, root=None):
        super(ClusterGroup, self).__init__(files, node, root=root)
        # self.application_data = NodeWrapper(self._node.application_data)
        # self.user_data = NodeWrapper(self._node.user_data)
class Recording(Node):
    """A single recording; metadata is read from the HDF5 node attributes
    via the Node.__getattr__ fallback."""
    def __init__(self, files, node=None, root=None):
        super(Recording, self).__init__(files, node, root=root)
        # self.name = self._node._v_attrs.name
        # self.start_time = self._node._v_attrs.start_time
        # self.start_sample = self._node._v_attrs.start_sample
        # self.sample_rate = self._node._v_attrs.sample_rate
        # self.bit_depth = self._node._v_attrs.bit_depth
        # self.band_high = self._node._v_attrs.band_high
        # self.band_low = self._node._v_attrs.band_low
        # self.raw = self._get_child('raw')
        # self.high = self._get_child('high')
        # self.low = self._get_child('low')
        # self.user_data = NodeWrapper(self._node.user_data)
class EventType(Node):
    """An event type, holding the corresponding events."""
    def __init__(self, files, node=None, root=None):
        super(EventType, self).__init__(files, node, root=root)
        self.events = Events(self._files, self._node.events)
        # self.application_data = NodeWrapper(self._node.application_data)
        # self.user_data = NodeWrapper(self._node.user_data)
class Events(Node):
    """Event arrays for one event type: sample times and recording ids."""
    def __init__(self, files, node=None, root=None):
        super(Events, self).__init__(files, node, root=root)
        self.time_samples = self._node.time_samples
        self.recording = self._node.recording
        # self.user_data = NodeWrapper(self._node.user_data)
| |
"""
@package mi.instrument.harvard.massp.ooicore.test.test_driver
@file marine-integrations/mi/instrument/harvard/massp/ooicore/test/test_driver.py
@author Peter Cable
@brief Test cases for ooicore driver
USAGE:
Make tests verbose and provide stdout
* From the IDK
$ bin/test_driver
$ bin/test_driver -u [-t testname]
$ bin/test_driver -i [-t testname]
$ bin/test_driver -q [-t testname]
"""
import re
import time
from pprint import pformat
from collections import Counter
import unittest
import ntplib
import gevent
from nose.plugins.attrib import attr
from mock import Mock
from mi.core.exceptions import ResourceError, BadRequest
from mi.core.exceptions import InstrumentCommandException
from mi.core.instrument.port_agent_client import PortAgentClient, PortAgentPacket
from mi.idk.comm_config import ConfigTypes
from mi.idk.unit_test import InstrumentDriverTestCase, LOCALHOST, ParameterTestConfigKey, AgentCapabilityType
from mi.idk.unit_test import InstrumentDriverUnitTestCase
from mi.idk.unit_test import InstrumentDriverIntegrationTestCase
from mi.idk.unit_test import InstrumentDriverQualificationTestCase
from mi.idk.unit_test import DriverTestMixin
from mi.core.instrument.instrument_driver import DriverConnectionState, DriverConfigKey, ResourceAgentState
from mi.core.instrument.instrument_driver import DriverProtocolState
from mi.core.port_agent_process import PortAgentProcess
from mi.instrument.harvard.massp.ooicore.driver import InstrumentDriver, SlaveProtocol, NEWLINE
from mi.instrument.harvard.massp.ooicore.driver import DataParticleType
from mi.instrument.harvard.massp.ooicore.driver import ProtocolState
from mi.instrument.harvard.massp.ooicore.driver import ProtocolEvent
from mi.instrument.harvard.massp.ooicore.driver import Capability
from mi.instrument.harvard.massp.ooicore.driver import Parameter
from mi.instrument.harvard.massp.ooicore.driver import Protocol
from mi.instrument.harvard.massp.mcu.driver import Prompt as McuPrompt
import mi.instrument.harvard.massp.mcu.test.test_driver as mcu
import mi.instrument.harvard.massp.rga.test.test_driver as rga
import mi.instrument.harvard.massp.turbo.test.test_driver as turbo
from mi.core.log import get_logger
__author__ = 'Peter Cable'
__license__ = 'Apache 2.0'
log = get_logger()
# Startup config for the master driver: the sample interval, plus the
# merged startup parameters of the three slave drivers.
massp_startup_config = {DriverConfigKey.PARAMETERS: {Parameter.SAMPLE_INTERVAL: 3600}}
massp_startup_config[DriverConfigKey.PARAMETERS].update(mcu.mcu_startup_config[DriverConfigKey.PARAMETERS])
massp_startup_config[DriverConfigKey.PARAMETERS].update(turbo.turbo_startup_config[DriverConfigKey.PARAMETERS])
massp_startup_config[DriverConfigKey.PARAMETERS].update(rga.rga_startup_config[DriverConfigKey.PARAMETERS])
###
# Driver parameters for the tests
###
# Register the driver module/class and startup config with the IDK test
# framework.
InstrumentDriverTestCase.initialize(
    driver_module='mi.instrument.harvard.massp.ooicore.driver',
    driver_class="InstrumentDriver",
    instrument_agent_resource_id='4OW0M1',
    instrument_agent_name='harvard_massp_ooicore',
    instrument_agent_packet_config=DataParticleType(),
    driver_startup_config=massp_startup_config
)
#################################### RULES ####################################
# #
# Common capabilities in the base class #
# #
# Instrument specific stuff in the derived class #
# #
# Generator spits out either stubs or comments describing test this here, #
# test that there. #
# #
# Qualification tests are driven through the instrument_agent #
# #
###############################################################################
###############################################################################
# DRIVER TEST MIXIN #
# Defines a set of constants and assert methods used for data particle #
# verification #
# #
# In python mixin classes are classes designed such that they wouldn't be #
# able to stand on their own, but are inherited by other classes generally #
# using multiple inheritance. #
# #
# This class defines a configuration structure for testing and common assert #
# methods for validating data particles. #
###############################################################################
# noinspection PyProtectedMember
class DriverTestMixinSub(DriverTestMixin):
    """Shared constants and helpers for the MASSP driver test classes.

    Aggregates the parameter/capability test configuration of the three
    slave drivers (mcu, turbo, rga) and provides helpers to feed canned
    responses to the protocols through a mocked port agent.
    """
    # Create some short names for the parameter test config
    TYPE = ParameterTestConfigKey.TYPE
    READONLY = ParameterTestConfigKey.READONLY
    STARTUP = ParameterTestConfigKey.STARTUP
    DA = ParameterTestConfigKey.DIRECT_ACCESS
    VALUE = ParameterTestConfigKey.VALUE
    REQUIRED = ParameterTestConfigKey.REQUIRED
    DEFAULT = ParameterTestConfigKey.DEFAULT
    STATES = ParameterTestConfigKey.STATES

    def get_sample_interval(self):
        """Return the expected duration (seconds) of one sample sequence."""
        one_minute = massp_startup_config[DriverConfigKey.PARAMETERS][mcu.Parameter.ONE_MINUTE]
        # turbo spin up time is fixed
        fixed_time = 60 * 15
        # sample time is variable, based on the value of one_minute
        sample_time = (one_minute / 1000.0) * 70
        return sample_time + fixed_time

    @staticmethod
    def send_port_agent_packet(protocol, data):
        """
        Send a port agent packet via got_data
        @param protocol Instrument Protocol instance
        @param data data to send
        """
        ts = ntplib.system_to_ntp_time(time.time())
        port_agent_packet = PortAgentPacket()
        port_agent_packet.attach_data(data)
        port_agent_packet.attach_timestamp(ts)
        port_agent_packet.pack_header()
        # Push the response into the driver
        protocol.got_data(port_agent_packet)
        protocol.got_raw(port_agent_packet)
        log.debug('Sent port agent packet containing: %r', data)

    def send_side_effect(self, protocol, name):
        """
        Side effect function generator - will send responses based on input
        @param protocol Instrument protocol instance
        @param name slave protocol name ('mcu', 'turbo' or 'rga')
        @returns side effect function
        """
        def inner(data):
            """
            Inner function for side effect generator
            @param data Data to send
            @returns length of response
            """
            # BUGFIX: look up the response BEFORE converting it to str.
            # Previously str(...) turned a missing response (None) into the
            # string 'None', so the `is not None` guard below never fired
            # and the literal text 'None' was sent to the driver.
            my_response = self.responses[name].get(data.strip())
            log.trace('my_send data: %r responses: %r', data, self.responses[name])
            if my_response is not None:
                my_response = str(my_response)
                log.trace("my_send: data: %r, my_response: %r", data, my_response)
                time.sleep(.1)
                # The RGA expects an extra newline before the terminator.
                if name == 'rga':
                    self.send_port_agent_packet(protocol, my_response + '\n' + NEWLINE)
                else:
                    self.send_port_agent_packet(protocol, my_response + NEWLINE)
                return len(my_response)
        return inner

    # Canned responses per slave protocol; populated by the test cases.
    responses = {
        'mcu': {},
        'turbo': {},
        'rga': {},
    }

    # Expected capabilities (events) per protocol state.
    _capabilities = {
        ProtocolState.UNKNOWN: ['DRIVER_EVENT_DISCOVER'],
        ProtocolState.COMMAND: ['DRIVER_EVENT_GET',
                                'DRIVER_EVENT_SET',
                                'DRIVER_EVENT_START_DIRECT',
                                'DRIVER_EVENT_START_AUTOSAMPLE',
                                'DRIVER_EVENT_ACQUIRE_SAMPLE',
                                'DRIVER_EVENT_CALIBRATE',
                                'PROTOCOL_EVENT_ERROR',
                                'PROTOCOL_EVENT_POWEROFF',
                                'PROTOCOL_EVENT_START_NAFION_REGEN',
                                'PROTOCOL_EVENT_START_ION_REGEN',
                                'PROTOCOL_EVENT_START_MANUAL_OVERRIDE'],
        ProtocolState.AUTOSAMPLE: ['DRIVER_EVENT_STOP_AUTOSAMPLE',
                                   'PROTOCOL_EVENT_STOP',
                                   'PROTOCOL_EVENT_ERROR',
                                   'DRIVER_EVENT_ACQUIRE_SAMPLE'],
        ProtocolState.ERROR: ['PROTOCOL_EVENT_CLEAR'],
        ProtocolState.POLL: ['PROTOCOL_EVENT_STOP', 'PROTOCOL_EVENT_ERROR'],
        ProtocolState.CALIBRATE: ['PROTOCOL_EVENT_STOP', 'PROTOCOL_EVENT_ERROR'],
        ProtocolState.DIRECT_ACCESS: ['DRIVER_EVENT_STOP_DIRECT', 'EXECUTE_DIRECT'],
        ProtocolState.REGEN: ['PROTOCOL_EVENT_STOP_REGEN',
                              'PROTOCOL_EVENT_ERROR',
                              'PROTOCOL_EVENT_REGEN_COMPLETE'],
        ProtocolState.MANUAL_OVERRIDE: ['PROTOCOL_EVENT_STOP_MANUAL_OVERRIDE',
                                        'PROTOCOL_EVENT_GET_SLAVE_STATES',
                                        'DRIVER_EVENT_CALIBRATE',
                                        'PROTOCOL_EVENT_START1',
                                        'PROTOCOL_EVENT_START2',
                                        'PROTOCOL_EVENT_SAMPLE',
                                        'PROTOCOL_EVENT_NAFREG',
                                        'PROTOCOL_EVENT_IONREG',
                                        'PROTOCOL_EVENT_STANDBY',
                                        'PROTOCOL_EVENT_POWEROFF',
                                        'PROTOCOL_EVENT_CLEAR',
                                        'DRIVER_EVENT_ACQUIRE_STATUS',
                                        'PROTOCOL_EVENT_START_TURBO',
                                        'PROTOCOL_EVENT_STOP_TURBO',
                                        'PROTOCOL_EVENT_START_SCAN',
                                        'PROTOCOL_EVENT_STOP_SCAN'],
    }

    # Master driver parameters, merged with the slave drivers' parameters.
    _driver_parameters = {
        Parameter.SAMPLE_INTERVAL: {TYPE: int, READONLY: False, DA: False, STARTUP: True},
    }
    _driver_parameters.update(mcu.DriverTestMixinSub._driver_parameters)
    _driver_parameters.update(rga.DriverTestMixinSub._driver_parameters)
    _driver_parameters.update(turbo.DriverTestMixinSub._driver_parameters)

    _driver_capabilities = {
        # capabilities defined in the IOS
        Capability.CALIBRATE: {STATES: [ProtocolState.COMMAND, ProtocolState.MANUAL_OVERRIDE]},
        Capability.CLEAR: {STATES: [ProtocolState.ERROR, ProtocolState.MANUAL_OVERRIDE]},
        Capability.POWEROFF: {STATES: [ProtocolState.COMMAND, ProtocolState.MANUAL_OVERRIDE]},
        Capability.START_AUTOSAMPLE: {STATES: [ProtocolState.COMMAND]},
        Capability.ACQUIRE_SAMPLE: {STATES: [ProtocolState.COMMAND]},
        Capability.START_ION: {STATES: [ProtocolState.COMMAND]},
        Capability.START_NAFION: {STATES: [ProtocolState.COMMAND]},
        Capability.STOP_AUTOSAMPLE: {STATES: [ProtocolState.AUTOSAMPLE]},
        Capability.ACQUIRE_STATUS: {STATES: [ProtocolState.MANUAL_OVERRIDE]},
        Capability.START1: {STATES: [ProtocolState.MANUAL_OVERRIDE]},
        Capability.START2: {STATES: [ProtocolState.MANUAL_OVERRIDE]},
        Capability.SAMPLE: {STATES: [ProtocolState.MANUAL_OVERRIDE]},
        Capability.START_TURBO: {STATES: [ProtocolState.MANUAL_OVERRIDE]},
        Capability.STOP_TURBO: {STATES: [ProtocolState.MANUAL_OVERRIDE]},
        Capability.START_SCAN: {STATES: [ProtocolState.MANUAL_OVERRIDE]},
        Capability.STOP_SCAN: {STATES: [ProtocolState.MANUAL_OVERRIDE]},
        Capability.IONREG: {STATES: [ProtocolState.MANUAL_OVERRIDE]},
        Capability.NAFREG: {STATES: [ProtocolState.MANUAL_OVERRIDE]},
        Capability.STANDBY: {STATES: [ProtocolState.MANUAL_OVERRIDE]},
        Capability.STOP_REGEN: {STATES: [ProtocolState.MANUAL_OVERRIDE]},
        Capability.GET_SLAVE_STATES: {STATES: [ProtocolState.MANUAL_OVERRIDE]},
    }
###############################################################################
# UNIT TESTS #
# Unit tests test the method calls and parameters using Mock. #
# #
# These tests are especially useful for testing parsers and other data #
# handling. The tests generally focus on small segments of code, like a #
# single function call, but more complex code using Mock objects. However #
# if you find yourself mocking too much maybe it is better as an #
# integration test. #
# #
# Unit tests do not start up external processes like the port agent or #
# driver process. #
###############################################################################
# noinspection PyProtectedMember
@attr('UNIT', group='mi')
class DriverUnitTest(InstrumentDriverUnitTestCase, DriverTestMixinSub):
    def setUp(self):
        # Delegate to the base unit-test setup; no extra fixtures needed.
        InstrumentDriverUnitTestCase.setUp(self)
    def assert_initialize_driver(self, driver, initial_protocol_state=DriverProtocolState.COMMAND):
        """
        Initialize an instrument driver with a mock port agent. This will allow us to test the
        got data method. Will the instrument, using test mode, through it's connection state
        machine. End result, the driver will be in test mode and the connection state will be
        connected.
        @param driver: Instrument driver instance.
        @param initial_protocol_state: the state to force the driver too
        """
        # Put the driver into test mode
        driver.set_test_mode(True)
        current_state = driver.get_resource_state()
        self.assertEqual(current_state, DriverConnectionState.UNCONFIGURED)
        # Now configure the driver with the mock_port_agent, verifying
        # that the driver transitions to that state
        config = {'mcu': {'mock_port_agent': Mock(spec=PortAgentClient)},
                  'rga': {'mock_port_agent': Mock(spec=PortAgentClient)},
                  'turbo': {'mock_port_agent': Mock(spec=PortAgentClient)}}
        driver.configure(config=config)
        current_state = driver.get_resource_state()
        self.assertEqual(current_state, DriverConnectionState.DISCONNECTED)
        # Invoke the connect method of the driver: should connect to mock
        # port agent. Verify that the connection FSM transitions to CONNECTED,
        # (which means that the FSM should now be reporting the ProtocolState).
        driver.connect()
        current_state = driver.get_resource_state()
        self.assertEqual(current_state, DriverProtocolState.UNKNOWN)
        # add a send side effect for each port agent
        for slave in SlaveProtocol.list():
            protocol = driver._slave_protocols[slave]
            # Give each slave protocol its startup config and its canned
            # responses from the corresponding slave test module.
            if slave == 'mcu':
                protocol.set_init_params(mcu.mcu_startup_config)
                self.responses[slave] = mcu.DriverTestMixinSub.responses
            elif slave == 'rga':
                protocol.set_init_params(rga.rga_startup_config)
                self.responses[slave] = rga.DriverTestMixinSub.responses
            elif slave == 'turbo':
                protocol.set_init_params(turbo.turbo_startup_config)
                self.responses[slave] = turbo.DriverTestMixinSub.responses
            protocol._connection.send.side_effect = self.send_side_effect(protocol, slave)
            protocol._init_params()
        # NOTE(review): uses the literal 'parameters' key here -- presumably
        # equal to DriverConfigKey.PARAMETERS; confirm and prefer the constant.
        driver._protocol._param_dict.set_value(Parameter.SAMPLE_INTERVAL,
                                               massp_startup_config['parameters'][Parameter.SAMPLE_INTERVAL])
        # Force the instrument into a known state
        self.assert_force_state(driver, initial_protocol_state)
        self.assert_force_all_slave_states(driver, ProtocolState.COMMAND)
    def assert_force_all_slave_states(self, driver, protocol_state):
        # Force every slave protocol FSM into the given state.
        for slave in SlaveProtocol.list():
            self.assert_force_slave_state(driver, slave, protocol_state)
    def assert_force_slave_state(self, driver, name, protocol_state):
        # Force one slave protocol FSM into the given state directly.
        driver._slave_protocols[name]._protocol_fsm.current_state = protocol_state
def assert_protocol_state_change(self, protocol, target_state, timeout=1):
end_time = time.time() + timeout
sleep_time = timeout / 20.0
while True:
if protocol.get_current_state() == target_state:
return
log.debug('assert_protocol_state_change -- state: %s target_state: %s',
protocol.get_current_state(), target_state)
time.sleep(sleep_time)
self.assertGreaterEqual(end_time, time.time(), msg='Failed to transition states within timeout')
def assert_sequence_handling(self, event):
    """
    Test the state transitions for these events.

    Drives the master protocol through a full sample/calibrate cycle by
    feeding canned slave responses, then verifies the final master state
    and that at least one particle of every type was generated.
    """
    # this test is only valid for these events...
    self.assertIn(event, [Capability.ACQUIRE_SAMPLE, Capability.START_AUTOSAMPLE, Capability.CALIBRATE])
    driver = self.test_connect()
    slaves = driver._slave_protocols
    self.clear_data_particle_queue()
    # start the sequence
    driver._protocol._protocol_fsm.on_event(event)
    # sleep to let protocol move the FSM to the correct state
    # loop, because the monkey patched time doesn't reliably sleep long enough...
    now = time.time()
    while time.time() < (now + 3):
        time.sleep(1)
    # master protocol sends START1 to mcu at start of sample, send response.
    self.send_port_agent_packet(driver._slave_protocols['mcu'], McuPrompt.START1 + NEWLINE)
    # modify turbo responses to indicate turbo is up to speed
    self.responses['turbo'] = turbo.DriverTestMixinSub.responses_at_speed
    # turbo should move to AT_SPEED
    self.assert_protocol_state_change(slaves['turbo'], turbo.ProtocolState.AT_SPEED, 15)
    # master protocol sends START2, mcu moves to START2, send response.
    self.assert_protocol_state_change(slaves['mcu'], mcu.ProtocolState.START2, 2)
    self.send_port_agent_packet(driver._slave_protocols['mcu'], McuPrompt.START2 + NEWLINE)
    # rga should move to SCAN
    self.assert_protocol_state_change(slaves['rga'], rga.ProtocolState.SCAN, 2)
    if event == Capability.CALIBRATE:
        # master protocol sends CAL, mcu moves to CAL
        self.assert_protocol_state_change(slaves['mcu'], mcu.ProtocolState.CALIBRATE, 10)
        # simulate calibrate complete, send calibrate finished
        self.send_port_agent_packet(driver._slave_protocols['mcu'], McuPrompt.CAL_FINISHED + NEWLINE)
    else:
        # master protocol sends SAMPLE, mcu moves to SAMPLE
        self.assert_protocol_state_change(slaves['mcu'], mcu.ProtocolState.SAMPLE, 10)
        # simulate sample complete, send sample finished
        self.send_port_agent_packet(driver._slave_protocols['mcu'], McuPrompt.SAMPLE_FINISHED + NEWLINE)
    # turbo moves to SPINNING_DOWN
    self.assert_protocol_state_change(slaves['turbo'], turbo.ProtocolState.SPINNING_DOWN, 15)
    # swap turbo responses to stopped
    self.responses['turbo'] = turbo.DriverTestMixinSub.responses_stopped
    # all three slave protocols should move to COMMAND
    # BUGFIX: use each slave's own ProtocolState enum; previously turbo's was
    # used for mcu and rga, which only worked because the values coincide.
    self.assert_protocol_state_change(slaves['turbo'], turbo.ProtocolState.COMMAND, 15)
    self.assert_protocol_state_change(slaves['mcu'], mcu.ProtocolState.COMMAND, 15)
    self.assert_protocol_state_change(slaves['rga'], rga.ProtocolState.COMMAND, 15)
    # send a couple of data telegrams to the mcu, to verify particles are generated
    self.send_port_agent_packet(slaves['mcu'], mcu.TELEGRAM_1 + NEWLINE)
    self.send_port_agent_packet(slaves['mcu'], mcu.TELEGRAM_2 + NEWLINE)
    if event == Capability.START_AUTOSAMPLE:
        self.assertEqual(driver._protocol.get_current_state(), ProtocolState.AUTOSAMPLE)
    else:
        self.assertEqual(driver._protocol.get_current_state(), ProtocolState.COMMAND)
    particles = Counter()
    for particle_dict in self._data_particle_received:
        stream_type = particle_dict.get('stream_name')
        self.assertIsNotNone(stream_type)
        particles[stream_type] += 1
    log.debug('Particles generated: %r', particles)
    # verify we have received at least one of each particle type
    for particle_type in DataParticleType.list():
        self.assertGreaterEqual(particles[particle_type], 1)
def test_connect(self, *args, **kwargs):
    """Create an InstrumentDriver, initialize it against the mock port agents and return it."""
    driver = InstrumentDriver(self._got_data_event_callback)
    self.assert_initialize_driver(driver, *args, **kwargs)
    return driver
def test_driver_enums(self):
    """
    Verify that all driver enumeration has no duplicate values that might cause confusion. Also
    do a little extra validation for the Capabilities
    """
    self.assert_enum_has_no_duplicates(DataParticleType())
    self.assert_enum_has_no_duplicates(ProtocolState())
    self.assert_enum_has_no_duplicates(ProtocolEvent())
    self.assert_enum_has_no_duplicates(Parameter())
    # self.assert_enum_has_no_duplicates(InstrumentCommand())
    # Test capabilities for duplicates, them verify that capabilities is a subset of protocol events
    self.assert_enum_has_no_duplicates(Capability())
    self.assert_enum_complete(Capability(), ProtocolEvent())
def test_capabilities(self):
    """
    Verify the FSM reports capabilities as expected. All states defined in this dict must
    also be defined in the protocol FSM.
    """
    # _capabilities is supplied by the DriverTestMixinSub
    driver = InstrumentDriver(self._got_data_event_callback)
    self.assert_capabilities(driver, self._capabilities)
def test_poll(self):
    # single-sample acquisition: runs the full sample sequence once
    self.assert_sequence_handling(Capability.ACQUIRE_SAMPLE)

def test_autosample(self):
    # autosample: same sequence, but the master ends in AUTOSAMPLE
    self.assert_sequence_handling(Capability.START_AUTOSAMPLE)

def test_calibrate(self):
    # calibrate: same sequence via the CAL branch
    self.assert_sequence_handling(Capability.CALIBRATE)
def test_protocol_filter_capabilities(self):
    """
    This tests driver filter_capabilities.
    Iterate through available capabilities, and verify that they can pass successfully through the filter.
    Test silly made up capabilities to verify they are blocked by filter.
    """
    mock_callback = Mock()
    protocol = Protocol(mock_callback)
    driver_capabilities = Capability().list()
    test_capabilities = Capability().list()
    # Add a bogus capability that will be filtered out.
    test_capabilities.append("BOGUS_CAPABILITY")
    # Verify "BOGUS_CAPABILITY" was filtered out
    # (assertEquals is a deprecated alias; use assertEqual)
    self.assertEqual(sorted(driver_capabilities),
                     sorted(protocol._filter_capabilities(test_capabilities)))
def test_driver_schema(self):
    """
    get the driver schema and verify it is configured properly
    """
    # show full diffs on failure -- the schema comparison is large
    self.maxDiff = None
    driver = InstrumentDriver(self._got_data_event_callback)
    self.assert_driver_schema(driver, self._driver_parameters, self._driver_capabilities)
def test_manual_override(self):
    """
    test the manual override state

    Walks the full manual sample sequence: START1, turbo up, scan,
    stop scan, turbo down, then exit override back to COMMAND.
    The event order here mirrors the real instrument sequence; do not reorder.
    """
    driver = self.test_connect()
    driver._protocol._protocol_fsm.on_event(Capability.START_MANUAL)
    self.assertEqual(driver._protocol.get_current_state(), ProtocolState.MANUAL_OVERRIDE)
    driver._protocol._protocol_fsm.on_event(Capability.START1)
    # mcu acknowledges START1
    self.send_port_agent_packet(driver._slave_protocols['mcu'], McuPrompt.START1 + NEWLINE)
    driver._protocol._protocol_fsm.on_event(Capability.START_TURBO)
    # canned turbo responses now report "at speed"
    self.responses['turbo'] = turbo.DriverTestMixinSub.responses_at_speed
    driver._protocol._protocol_fsm.on_event(Capability.START_SCAN)
    driver._protocol._protocol_fsm.on_event(Capability.STOP_SCAN)
    driver._protocol._protocol_fsm.on_event(Capability.STOP_TURBO)
    # canned turbo responses now report "stopped"
    self.responses['turbo'] = turbo.DriverTestMixinSub.responses_stopped
    driver._protocol._protocol_fsm.on_event(Capability.STOP_MANUAL)
    self.assertEqual(driver._protocol.get_current_state(), ProtocolState.COMMAND)
###############################################################################
# INTEGRATION TESTS #
# Integration tests exercise the direct driver / instrument interaction     #
# by making direct calls via zeromq.                                        #
# - Common Integration tests test the driver through the instrument agent #
# and common for all drivers (minimum requirement for ION ingestion) #
# #
# NOTE: execute U SETMINUTE01000 on the MCU prior to running these tests #
# #
###############################################################################
# noinspection PyMethodMayBeStatic,PyAttributeOutsideInit
@attr('INT', group='mi')
class DriverIntegrationTest(InstrumentDriverIntegrationTestCase, DriverTestMixinSub):
def setUp(self):
    # port_agents must exist before the base setUp launches the driver,
    # since init_port_agent/stop_port_agent read it
    self.port_agents = {}
    InstrumentDriverIntegrationTestCase.setUp(self)
def init_port_agent(self):
    """
    Launch one port agent per instrument in the configuration. This is used
    in the integration and qualification tests. The port agent abstracts the
    physical interface with the instrument.

    Populates self.port_agents (name -> PortAgentProcess) and registers
    stop_port_agent as a cleanup. Returns nothing.
    """
    if self.port_agents:
        log.error("Port agent already initialized")
        return
    log.debug("Startup Port Agent")
    config = self.port_agent_config()
    log.debug("port agent config: %s", config)
    port_agents = {}
    if config['instrument_type'] != ConfigTypes.MULTI:
        # normalize a single-agent config to the multi-agent shape
        config = {'only one port agent here!': config}
    for name, each in config.items():
        # skip scalar entries (e.g. instrument_type) in a multi-agent config
        # (isinstance is the idiomatic type test, vs. type(...) != dict)
        if not isinstance(each, dict):
            continue
        port_agent_host = each.get('device_addr')
        if port_agent_host is not None:
            port_agent = PortAgentProcess.launch_process(each, timeout=60, test_mode=True)
            port = port_agent.get_data_port()
            pid = port_agent.get_pid()
            if port_agent_host == LOCALHOST:
                log.info('Started port agent pid %s listening at port %s' % (pid, port))
            else:
                log.info("Connecting to port agent on host: %s, port: %s", port_agent_host, port)
            port_agents[name] = port_agent
    self.addCleanup(self.stop_port_agent)
    self.port_agents = port_agents
def stop_port_agent(self):
    """
    Stop the port agent.

    Stops every launched port agent process and clears self.port_agents;
    safe to call when no agents are running.
    """
    log.info("Stop port agent")
    if self.port_agents:
        log.debug("found port agents, now stop them")
        for agent in self.port_agents.values():
            agent.stop()
    self.port_agents = {}
def port_agent_comm_config(self):
    """
    Generate the port agent comm config from the port agents
    @return config dict: name -> {addr, port, cmd_port}
    """
    config = {}
    for agent_name, agent in self.port_agents.items():
        data_port = agent.get_data_port()
        command_port = agent.get_command_port()
        config[agent_name] = {
            'addr': agent._config['port_agent_addr'],
            'port': data_port,
            'cmd_port': command_port,
        }
    return config
def assert_slave_state(self, name, state, timeout=30, sleep_time=.5, command_ok=False):
    """
    Poll the driver until the named slave protocol reaches the given state.

    @param name: slave protocol name ('mcu', 'rga', 'turbo')
    @param state: target slave protocol state
    @param timeout: seconds before failing the test
    @param sleep_time: seconds between polls
    @param command_ok: if True, also accept the master protocol being in COMMAND
    """
    end_time = time.time() + timeout
    while True:
        if command_ok and self.driver_client.cmd_dvr('get_resource_state') == ProtocolState.COMMAND:
            return
        _, states = self.driver_client.cmd_dvr('execute_resource', Capability.GET_SLAVE_STATES)
        if states.get(name) == state:
            return
        self.assertGreater(end_time, time.time(),
                           msg='Slave protocol [%s] failed to transition to %s before timeout' % (name, state))
        log.debug('Failed to achieve target slave [%s] state: %s. Sleeping [%5.2fs left]',
                  name, state, end_time - time.time())
        time.sleep(sleep_time)
def test_driver_process(self):
    """
    Test for correct launch of driver process and communications, including asynchronous driver events.
    Overridden to support multiple port agents.
    """
    log.info("Ensuring driver process was started properly ...")
    # Verify processes exist.
    self.assertIsNotNone(self.driver_process)
    drv_pid = self.driver_process.getpid()
    self.assertIsInstance(drv_pid, int)
    self.assertIsNotNone(self.port_agents)
    for port_agent in self.port_agents.values():
        pagent_pid = port_agent.get_pid()
        self.assertIsInstance(pagent_pid, int)
    # Send a test message to the process interface, confirm result.
    # (self.assert_ is a deprecated unittest alias; use assertTrue)
    log.debug("before 'process_echo'")
    reply = self.driver_client.cmd_dvr('process_echo')
    log.debug("after 'process_echo'")
    self.assertTrue(reply.startswith('ping from resource ppid:'))
    reply = self.driver_client.cmd_dvr('driver_ping', 'foo')
    self.assertTrue(reply.startswith('driver_ping: foo'))
    # Test the event thread publishes and client side picks up events.
    events = [
        'I am important event #1!',
        'And I am important event #2!'
    ]
    self.driver_client.cmd_dvr('test_events', events=events)
    gevent.sleep(1)
    # Confirm the events received are as expected.
    self.assertEqual(self.events, events)
    # Test the exception mechanism.
    with self.assertRaises(ResourceError):
        exception_str = 'Oh no, something bad happened!'
        self.driver_client.cmd_dvr('test_exceptions', exception_str)
def test_get_parameters(self):
    """
    Test get action for all parameters

    Each parameter in the startup config must be gettable and match its
    configured value. (iteritems: this file is Python 2.)
    """
    self.assert_initialize_driver()
    for key, value in massp_startup_config[DriverConfigKey.PARAMETERS].iteritems():
        self.assert_get(key, value)
def test_set_parameters(self):
    """
    Test set action for all parameters

    Read/write parameters are set to a new value (within constraints where
    constraints exist); read-only parameters must raise on set.
    """
    self.assert_initialize_driver()
    # merge the master and all slave parameter dictionaries
    parameters = Parameter.dict()
    parameters.update(turbo.Parameter.dict())
    parameters.update(rga.Parameter.dict())
    parameters.update(mcu.Parameter.dict())
    constraints = turbo.ParameterConstraints.dict()
    constraints.update(rga.ParameterConstraints.dict())
    for name, parameter in parameters.iteritems():
        value = massp_startup_config[DriverConfigKey.PARAMETERS].get(parameter)
        # do we have a value to set?
        if value is not None:
            # is the parameter RW?
            if not self._driver_parameters[parameter][self.READONLY]:
                # is there a constraint for this parameter?
                if name in constraints:
                    _, minimum, maximum = constraints[name]
                    # set within constraints
                    self.assert_set(parameter, minimum + 1)
                else:
                    # set to startup value + 1
                    self.assert_set(parameter, value + 1)
            else:
                # readonly, assert exception on set
                self.assert_set_exception(parameter)
def test_set_bogus_parameter(self):
    """
    Verify setting a bad parameter raises an exception
    """
    self.assert_initialize_driver()
    self.assert_set_exception('BOGUS', 'CHEESE')
def test_out_of_range(self):
    """
    Verify setting parameters out of range raises exceptions

    For every constrained RW parameter, sets below the minimum, above the
    maximum, and to a non-numeric value; each must raise.
    """
    self.assert_initialize_driver()
    # merge master and slave parameter dictionaries
    parameters = Parameter.dict()
    parameters.update(turbo.Parameter.dict())
    parameters.update(rga.Parameter.dict())
    parameters.update(mcu.Parameter.dict())
    constraints = turbo.ParameterConstraints.dict()
    constraints.update(rga.ParameterConstraints.dict())
    log.debug('Testing out of range values.')
    log.debug('Parameters: %s', pformat(parameters))
    log.debug('Constraints: %s', pformat(constraints))
    for parameter in constraints:
        param = parameters[parameter]
        if not self._driver_parameters[param][self.READONLY]:
            _, minimum, maximum = constraints[parameter]
            self.assert_set_exception(param, minimum - 1)
            self.assert_set_exception(param, maximum + 1)
            self.assert_set_exception(param, "strings aren't valid here!")
def test_bad_command(self):
    """
    Verify sending a bad command raises an exception
    """
    self.assert_initialize_driver()
    self.assert_driver_command_exception('BAD_COMMAND', exception_class=InstrumentCommandException)
def test_incomplete_config(self):
    """
    Break our startup config, then verify the driver raises an exception

    Removes a required RGA parameter from the startup config; driver
    initialization must raise ResourceError. The config is restored afterward.
    """
    # grab the old config
    startup_params = self.test_config.driver_startup_config[DriverConfigKey.PARAMETERS]
    old_value = startup_params[rga.Parameter.EE]
    initialized = False
    try:
        # delete a required parameter
        startup_params.pop(rga.Parameter.EE)
        # re-init to take our broken config
        self.init_driver_process_client()
        self.assert_initialize_driver()
        initialized = True
    except ResourceError as e:
        log.info('Exception thrown, test should pass: %r', e)
    finally:
        # always restore the config for subsequent tests
        startup_params[rga.Parameter.EE] = old_value
    if initialized:
        self.fail('Failed to throw exception on missing parameter')
def test_acquire_sample(self):
    """
    Verify the acquire sample command
    Particles are tested elsewhere, so skipped here.
    """
    self.assert_initialize_driver()
    self.assert_driver_command(Capability.ACQUIRE_SAMPLE, state=ProtocolState.POLL)
    # driver must return to COMMAND within one sample interval
    self.assert_state_change(ProtocolState.COMMAND, self.get_sample_interval())
def test_autosample(self):
    """
    Start autosample, verify we generate three RGA status particles, indicating two
    complete sampling cycles and the start of a third...
    """
    num_samples = 2
    self.assert_initialize_driver()
    # shrink the scan (NF) and shorten the sample interval so cycles finish quickly
    self.assert_set(rga.Parameter.NF, 6)
    self.assert_set(Parameter.SAMPLE_INTERVAL, self.get_sample_interval())
    self.assert_driver_command(Capability.START_AUTOSAMPLE)
    self.assert_async_particle_generation(rga.DataParticleType.RGA_STATUS, Mock(),
                                          particle_count=num_samples,
                                          timeout=self.get_sample_interval() * num_samples)
    self.assert_driver_command(Capability.STOP_AUTOSAMPLE)
    self.assert_state_change(ProtocolState.COMMAND, timeout=self.get_sample_interval())
def test_nafreg(self):
    """
    Verify Nafion Regeneration sequence
    This runs about 2 hours with "normal" timing, should be a few minutes as configured.
    May throw an exception due to short run time, as the target temperature may not be achieved.
    """
    self.assert_initialize_driver()
    self.assert_driver_command(Capability.START_NAFION)
    self.assert_state_change(ProtocolState.REGEN, 10)
    self.assert_state_change(ProtocolState.COMMAND, 600)

def test_ionreg(self):
    """
    Verify Ion Chamber Regeneration sequence
    This runs about 2 hours with "normal" timing, should be a few minutes as configured.
    May throw an exception due to short run time, as the target temperature may not be achieved.
    """
    self.assert_initialize_driver()
    self.assert_driver_command(Capability.START_ION)
    self.assert_state_change(ProtocolState.REGEN, 10)
    self.assert_state_change(ProtocolState.COMMAND, 600)
def test_manual_override(self):
    """
    Test the manual override mode. Verify we can go through an entire sample sequence manually.

    The command/state order mirrors the instrument's real sequence; do not reorder.
    """
    self.assert_initialize_driver()
    self.assert_driver_command(Capability.START_MANUAL, state=ProtocolState.MANUAL_OVERRIDE)
    self.assert_driver_command(Capability.START1)
    self.assert_slave_state('mcu', mcu.ProtocolState.WAITING_TURBO, timeout=90, sleep_time=2)
    self.assert_driver_command(Capability.START_TURBO)
    self.assert_slave_state('turbo', turbo.ProtocolState.AT_SPEED, timeout=600, sleep_time=10)
    self.assert_driver_command(Capability.START2)
    self.assert_slave_state('mcu', mcu.ProtocolState.WAITING_RGA, timeout=600, sleep_time=10)
    self.assert_driver_command(Capability.START_SCAN)
    self.assert_slave_state('rga', rga.ProtocolState.SCAN, timeout=60, sleep_time=1)
    self.assert_driver_command(Capability.SAMPLE)
    self.assert_slave_state('mcu', mcu.ProtocolState.SAMPLE, timeout=60, sleep_time=1)
    self.assert_slave_state('mcu', mcu.ProtocolState.STOPPING, timeout=600, sleep_time=10)
    self.assert_driver_command(Capability.STOP_SCAN)
    self.assert_slave_state('rga', rga.ProtocolState.COMMAND, timeout=60, sleep_time=1)
    self.assert_driver_command(Capability.STOP_TURBO)
    self.assert_slave_state('turbo', turbo.ProtocolState.SPINNING_DOWN, timeout=60, sleep_time=1)
    self.assert_slave_state('turbo', turbo.ProtocolState.COMMAND, timeout=600, sleep_time=10)
    self.assert_driver_command(Capability.STANDBY)
    self.assert_slave_state('mcu', mcu.ProtocolState.COMMAND, timeout=600, sleep_time=10)
    self.assert_driver_command(Capability.STOP_MANUAL, state=ProtocolState.COMMAND)
def test_exit_manual_override(self):
    """
    Test that we when we exit manual override in sequence, all slave protocols return to COMMAND

    STOP_MANUAL is issued mid-sample; the master must drive all slaves back
    to COMMAND on its own.
    """
    self.assert_initialize_driver()
    self.assert_driver_command(Capability.START_MANUAL, state=ProtocolState.MANUAL_OVERRIDE)
    self.assert_driver_command(Capability.START1)
    self.assert_slave_state('mcu', mcu.ProtocolState.WAITING_TURBO, timeout=90, sleep_time=2)
    self.assert_driver_command(Capability.START_TURBO)
    self.assert_slave_state('turbo', turbo.ProtocolState.AT_SPEED, timeout=600, sleep_time=10)
    self.assert_driver_command(Capability.START2)
    self.assert_slave_state('mcu', mcu.ProtocolState.WAITING_RGA, timeout=600, sleep_time=10)
    self.assert_driver_command(Capability.START_SCAN)
    self.assert_slave_state('rga', rga.ProtocolState.SCAN, timeout=60, sleep_time=1)
    self.assert_driver_command(Capability.SAMPLE)
    self.assert_slave_state('mcu', mcu.ProtocolState.SAMPLE, timeout=60, sleep_time=1)
    # exit manual override mid-sample
    self.assert_driver_command(Capability.STOP_MANUAL)
    self.assert_state_change(ProtocolState.COMMAND, timeout=120)
    self.assert_slave_state('rga', rga.ProtocolState.COMMAND, command_ok=True)
    self.assert_slave_state('turbo', turbo.ProtocolState.COMMAND, command_ok=True)
    self.assert_slave_state('mcu', mcu.ProtocolState.COMMAND, command_ok=True)
@unittest.skip('Test runs approximately 1 hour')
def test_full_sample(self):
    """
    Run a sample with the "normal" timing

    Temporarily restores the real one-minute interval (60000 ms) in the
    startup config, reruns test_acquire_sample, then restores the test value.
    """
    # grab the old config
    startup_params = self.test_config.driver_startup_config[DriverConfigKey.PARAMETERS]
    old_value = startup_params[mcu.Parameter.ONE_MINUTE]
    failed = False
    try:
        startup_params[mcu.Parameter.ONE_MINUTE] = 60000
        # re-init to take our new config
        self.init_driver_process_client()
        self.test_acquire_sample()
    except Exception as e:
        failed = True
        log.info('Exception thrown, test should fail: %r', e)
    finally:
        # always restore the short test timing
        startup_params[mcu.Parameter.ONE_MINUTE] = old_value
    if failed:
        self.fail('Failed to acquire sample with normal timing')
###############################################################################
# QUALIFICATION TESTS #
# Device specific qualification tests are for doing final testing of ion #
# integration. They generally aren't used for instrument debugging and should#
# be tackled after all unit and integration tests are complete #
###############################################################################
# noinspection PyMethodMayBeStatic,PyAttributeOutsideInit,PyProtectedMember
@attr('QUAL', group='mi')
class DriverQualificationTest(InstrumentDriverQualificationTestCase, DriverTestMixinSub):
def setUp(self):
    # NOTE(review): unlike DriverIntegrationTest.setUp, self.port_agents is not
    # pre-initialized here; init_port_agent() creates it later. Confirm the base
    # class provides the self.port_agent attribute that init_port_agent() checks.
    InstrumentDriverQualificationTestCase.setUp(self)
def init_port_agent(self):
    """
    Launch one port agent per instrument in the configuration. This is used
    in the integration and qualification tests. The port agent abstracts the
    physical interface with the instrument.

    Populates self.port_agents (name -> PortAgentProcess) and registers
    stop_port_agent as a cleanup. Returns nothing.
    """
    # NOTE(review): this checks self.port_agent (singular) while the rest of
    # this class uses self.port_agents — presumably the base class sets
    # port_agent; confirm before changing.
    if self.port_agent:
        log.error("Port agent already initialized")
        return
    log.debug("Startup Port Agent")
    config = self.port_agent_config()
    log.debug("port agent config: %s", config)
    port_agents = {}
    if config['instrument_type'] != ConfigTypes.MULTI:
        # normalize a single-agent config to the multi-agent shape
        config = {'only one port agent here!': config}
    for name, each in config.items():
        # skip scalar entries (e.g. instrument_type) in a multi-agent config
        # (isinstance is the idiomatic type test, vs. type(...) != dict)
        if not isinstance(each, dict):
            continue
        port_agent_host = each.get('device_addr')
        if port_agent_host is not None:
            port_agent = PortAgentProcess.launch_process(each, timeout=60, test_mode=True)
            port = port_agent.get_data_port()
            pid = port_agent.get_pid()
            if port_agent_host == LOCALHOST:
                log.info('Started port agent pid %s listening at port %s' % (pid, port))
            else:
                log.info("Connecting to port agent on host: %s, port: %s", port_agent_host, port)
            port_agents[name] = port_agent
    self.addCleanup(self.stop_port_agent)
    self.port_agents = port_agents
def stop_port_agent(self):
    """
    Stop the port agent.

    Stops every launched port agent process and clears self.port_agents;
    safe to call when no agents are running.
    """
    log.info("Stop port agent")
    if self.port_agents:
        log.debug("found port agents, now stop them")
        for agent in self.port_agents.values():
            agent.stop()
    self.port_agents = {}
def port_agent_comm_config(self):
    """
    Generate the port agent comm config from the port agents
    @return config dict: name -> {addr, port, cmd_port}
    """
    config = {}
    for agent_name, agent in self.port_agents.items():
        data_port = agent.get_data_port()
        command_port = agent.get_command_port()
        config[agent_name] = {
            'addr': agent._config['port_agent_addr'],
            'port': data_port,
            'cmd_port': command_port,
        }
    return config
def init_instrument_agent_client(self):
    """
    Overridden to handle multiple port agent config

    Builds the driver and agent configs, starts the instrument agent client
    via the agent manager, and stores it on self.instrument_agent_client.
    """
    log.info("Start Instrument Agent Client")
    # Driver config
    driver_config = {
        'dvr_mod': self.test_config.driver_module,
        'dvr_cls': self.test_config.driver_class,
        'workdir': self.test_config.working_dir,
        'process_type': (self.test_config.driver_process_type,),
        # multi-agent comm config is the reason for this override
        'comms_config': self.port_agent_comm_config(),
        'startup_config': self.test_config.driver_startup_config
    }
    # Create agent config.
    agent_config = {
        'driver_config': driver_config,
        'stream_config': self.data_subscribers.stream_config,
        'agent': {'resource_id': self.test_config.agent_resource_id},
        'test_mode': True  # Enable a poison pill. If the spawning process dies
        ## shutdown the daemon process.
    }
    log.debug("Agent Config: %s", agent_config)
    # Start instrument agent client.
    self.instrument_agent_manager.start_client(
        name=self.test_config.agent_name,
        module=self.test_config.agent_module,
        cls=self.test_config.agent_class,
        config=agent_config,
        resource_id=self.test_config.agent_resource_id,
        deploy_file=self.test_config.container_deploy_file
    )
    self.instrument_agent_client = self.instrument_agent_manager.instrument_agent_client
def assert_da_command(self, command, response=None, max_retries=None):
    """
    Assert direct access command returns the expected response
    @param command: command to send
    @param response: expected response (regex); if falsy, no response is awaited
    @param max_retries: maximum number of retries
    @return: result of command (regex match object)
    """
    self.tcp_client.send_data(command + NEWLINE)
    if response:
        if max_retries:
            result = self.tcp_client.expect_regex(response, max_retries=max_retries)
        else:
            result = self.tcp_client.expect_regex(response)
        self.assertTrue(result)
        # NOTE(review): indentation in the original paste is ambiguous here;
        # if this return sits outside the `if response:` block, a falsy
        # `response` would raise UnboundLocalError — all current callers pass
        # a response, so this path is never exercised. Confirm intent.
        return result
def test_discover(self):
    """
    Overridden because we do not discover to autosample.
    """
    # Verify the agent is in command mode
    self.assert_enter_command_mode()
    # Now reset and try to discover. This will stop the driver which holds the current
    # instrument state.
    self.assert_reset()
    self.assert_discover(ResourceAgentState.COMMAND)
def test_direct_access_telnet_mode(self):
    """
    This test manually tests that the Instrument Driver properly supports
    direct access to the physical instrument. (telnet mode)
    We want to verify direct access to all three parts of the instrument, so we'll need
    to go through most of the sample sequence
    """
    # direct-access commands are routed to a slave by prefix
    _turbo = 'turbo:'
    _mcu = 'mcu:'
    _rga = 'rga:'
    q_current = _turbo + turbo.DriverTestMixinSub.query_current
    q_voltage = _turbo + turbo.DriverTestMixinSub.query_voltage
    q_bearing = _turbo + turbo.DriverTestMixinSub.query_temp_bearing
    q_motor = _turbo + turbo.DriverTestMixinSub.query_temp_motor
    q_speed = _turbo + turbo.DriverTestMixinSub.query_speed_actual
    # raw string for the regex (avoids invalid-escape ambiguity); group(1) is
    # the 6-digit data field of the turbo telegram
    turbo_response = re.compile(r'\d{10}(\d{6})\d{3}\r')
    self.assert_direct_access_start_telnet(session_timeout=6000, inactivity_timeout=120)
    self.assertTrue(self.tcp_client)
    # start1
    self.assert_da_command(_mcu + mcu.InstrumentCommand.BEAT, '(%s)' % mcu.Prompt.BEAT)
    self.assert_da_command(_mcu + mcu.InstrumentCommand.START1, '(%s)' % mcu.Prompt.START1, max_retries=60)
    # sleep a bit to give the turbo time to power on
    time.sleep(20)
    # spin up the turbo
    self.assert_da_command(_turbo + turbo.DriverTestMixinSub.set_station_on,
                           turbo.DriverTestMixinSub.set_station_on)
    self.assert_da_command(_turbo + turbo.DriverTestMixinSub.set_pump_on,
                           turbo.DriverTestMixinSub.set_pump_on)
    # poll turbo status until up to speed (loop index unused)
    for _ in range(20):
        current = int(self.assert_da_command(q_current, turbo_response).group(1))
        voltage = int(self.assert_da_command(q_voltage, turbo_response).group(1))
        bearing = int(self.assert_da_command(q_bearing, turbo_response).group(1))
        motor = int(self.assert_da_command(q_motor, turbo_response).group(1))
        speed = int(self.assert_da_command(q_speed, turbo_response).group(1))
        log.debug('current: %d voltage: %d bearing: %d motor: %d speed: %d',
                  current, voltage, bearing, motor, speed)
        if speed > 90000:
            break
        time.sleep(5)
    # turbo is up to speed, send START2
    self.assert_da_command(_mcu + mcu.InstrumentCommand.BEAT, '(%s)' % mcu.Prompt.BEAT)
    self.assert_da_command(_mcu + mcu.InstrumentCommand.START2, '(%s)' % mcu.Prompt.START2, max_retries=120)
    # sleep for a bit for the RGA to turn on
    time.sleep(5)
    result = self.assert_da_command(_rga + rga.InstrumentCommand.ID + '?', r'(\w*RGA\w*)').group()
    log.debug('RGA response: %r', result)
    # success! Stop the turbo
    self.assert_da_command(_turbo + turbo.DriverTestMixinSub.set_station_off,
                           turbo.DriverTestMixinSub.set_station_off)
    self.assert_da_command(_turbo + turbo.DriverTestMixinSub.set_pump_off,
                           turbo.DriverTestMixinSub.set_pump_off)
    # wait just a moment to allow the turbo to start spinning down...
    time.sleep(1)
    # put the MCU in standby
    self.assert_da_command(_mcu + mcu.InstrumentCommand.STANDBY, '(%s)' % mcu.Prompt.STANDBY, max_retries=60)
    self.assert_direct_access_stop_telnet()
def test_poll(self):
    """
    Poll for a single sample
    """
    self.assert_enter_command_mode()
    # small scan settings so the sample cycle completes quickly
    self.assert_set_parameter(rga.Parameter.NF, 5)
    self.assert_set_parameter(rga.Parameter.MF, 50)
    self.assert_execute_resource(Capability.ACQUIRE_SAMPLE, timeout=100)
    # particles are verified in slave protocol qual tests... Here we just verify they are published
    self.assert_particle_async(mcu.DataParticleType.MCU_STATUS, Mock(), timeout=90)
    self.assert_particle_async(turbo.DataParticleType.TURBO_STATUS, Mock(), particle_count=20, timeout=500)
    self.assert_particle_async(rga.DataParticleType.RGA_STATUS, Mock(), timeout=600)
    self.assert_particle_async(rga.DataParticleType.RGA_SAMPLE, Mock(), particle_count=5, timeout=600)
    self.assert_state_change(ResourceAgentState.COMMAND, ProtocolState.COMMAND, timeout=100)
def test_autosample(self):
    """
    start and stop autosample and verify data particle
    """
    self.assert_enter_command_mode()
    # small scan settings and a short sample interval so cycles complete quickly
    self.assert_set_parameter(rga.Parameter.NF, 5)
    self.assert_set_parameter(rga.Parameter.MF, 50)
    self.assert_set_parameter(Parameter.SAMPLE_INTERVAL, 800)
    self.assert_execute_resource(Capability.START_AUTOSAMPLE, timeout=100)
    # particles are verified in slave protocol qual tests... Here we just verify they are published
    self.assert_particle_async(mcu.DataParticleType.MCU_STATUS, Mock(), timeout=90)
    self.assert_particle_async(turbo.DataParticleType.TURBO_STATUS, Mock(), particle_count=20, timeout=500)
    self.assert_particle_async(rga.DataParticleType.RGA_STATUS, Mock(), timeout=600)
    self.assert_particle_async(rga.DataParticleType.RGA_SAMPLE, Mock(), particle_count=5, timeout=600)
    # to verify we are actually autosampling, wait for another RGA_STATUS, which occurs once per sample cycle
    self.assert_particle_async(rga.DataParticleType.RGA_STATUS, Mock(), timeout=900)
    self.assert_execute_resource(Capability.STOP_AUTOSAMPLE)
    self.assert_state_change(ResourceAgentState.COMMAND, ProtocolState.COMMAND, timeout=900)
def test_nafion(self):
    """
    Test the nafion regeneration command. Nafion regen takes approx. 2 hours,
    should run for 2 minutes if U SETMINUTE01000 has been executed.
    """
    self.assert_enter_command_mode()
    self.assert_execute_resource(Capability.START_NAFION)
    self.assert_state_change(ResourceAgentState.COMMAND, ProtocolState.COMMAND, timeout=600)

def test_ion(self):
    """
    Test the ion chamber regeneration command. Ion chamber regen takes approx. 2 hours,
    should run for 2 minutes if U SETMINUTE01000 has been executed.
    """
    self.assert_enter_command_mode()
    self.assert_execute_resource(Capability.START_ION)
    self.assert_state_change(ResourceAgentState.COMMAND, ProtocolState.COMMAND, timeout=600)
def test_get_set_parameters(self):
    """
    verify that all parameters can be get set properly, this includes
    ensuring that read only parameters fail on set.
    """
    self.assert_enter_command_mode()
    # merge master and slave parameter dictionaries
    parameters = Parameter.dict()
    parameters.update(turbo.Parameter.dict())
    parameters.update(rga.Parameter.dict())
    parameters.update(mcu.Parameter.dict())
    constraints = turbo.ParameterConstraints.dict()
    constraints.update(rga.ParameterConstraints.dict())
    for name, parameter in parameters.iteritems():
        # ALL is a meta-parameter, not individually settable
        if parameter == Parameter.ALL:
            continue
        if self._driver_parameters[parameter][self.READONLY]:
            with self.assertRaises(BadRequest):
                self.assert_set_parameter(parameter, 'READONLY')
        else:
            value = massp_startup_config[DriverConfigKey.PARAMETERS].get(parameter)
            if value is not None:
                if name in constraints:
                    _, minimum, maximum = constraints[name]
                    # in-range set succeeds; above-max set must raise
                    self.assert_set_parameter(parameter, minimum + 1)
                    with self.assertRaises(BadRequest):
                        self.assert_set_parameter(parameter, maximum + 1)
                else:
                    self.assert_set_parameter(parameter, value + 1)
def test_get_capabilities(self):
    """
    Walk through all driver protocol states and verify capabilities
    returned by get_current_capabilities

    Visits COMMAND, POLL, AUTOSAMPLE, CALIBRATE, DIRECT_ACCESS and
    UNINITIALIZED in order, checking the advertised capability set in each.
    """
    ##################
    # Command Mode
    ##################
    capabilities = {
        AgentCapabilityType.AGENT_COMMAND: self._common_agent_commands(ResourceAgentState.COMMAND),
        AgentCapabilityType.AGENT_PARAMETER: self._common_agent_parameters(),
        AgentCapabilityType.RESOURCE_COMMAND: [ProtocolEvent.START_AUTOSAMPLE,
                                               ProtocolEvent.ACQUIRE_SAMPLE,
                                               ProtocolEvent.START_ION,
                                               ProtocolEvent.START_NAFION,
                                               ProtocolEvent.CALIBRATE,
                                               ProtocolEvent.POWEROFF,
                                               ProtocolEvent.START_MANUAL],
        AgentCapabilityType.RESOURCE_INTERFACE: None,
        AgentCapabilityType.RESOURCE_PARAMETER: self._driver_parameters.keys()
    }
    self.assert_enter_command_mode()
    self.assert_capabilities(capabilities)
    ##################
    # Poll Mode
    ##################
    # no commands are accepted while a poll is in progress
    capabilities = {
        AgentCapabilityType.AGENT_COMMAND: [],
        AgentCapabilityType.AGENT_PARAMETER: self._common_agent_parameters(),
        AgentCapabilityType.RESOURCE_COMMAND: [],
        AgentCapabilityType.RESOURCE_INTERFACE: None,
        AgentCapabilityType.RESOURCE_PARAMETER: self._driver_parameters.keys()
    }
    self.assert_execute_resource(Capability.ACQUIRE_SAMPLE)
    self.assert_state_change(ResourceAgentState.BUSY, ProtocolState.POLL, 20)
    self.assert_capabilities(capabilities)
    self.assert_state_change(ResourceAgentState.COMMAND, ProtocolState.COMMAND, 900)
    ##################
    # Autosample Mode
    ##################
    capabilities = {
        AgentCapabilityType.AGENT_COMMAND: self._common_agent_commands(ResourceAgentState.STREAMING),
        AgentCapabilityType.AGENT_PARAMETER: self._common_agent_parameters(),
        AgentCapabilityType.RESOURCE_COMMAND: [ProtocolEvent.STOP_AUTOSAMPLE,
                                               ProtocolEvent.ACQUIRE_SAMPLE],
        AgentCapabilityType.RESOURCE_INTERFACE: None,
        AgentCapabilityType.RESOURCE_PARAMETER: self._driver_parameters.keys()
    }
    self.assert_execute_resource(Capability.START_AUTOSAMPLE)
    self.assert_state_change(ResourceAgentState.STREAMING, ProtocolState.AUTOSAMPLE, 20)
    self.assert_capabilities(capabilities)
    self.assert_execute_resource(Capability.STOP_AUTOSAMPLE)
    self.assert_state_change(ResourceAgentState.COMMAND, ProtocolState.COMMAND, 900)
    ##################
    # Calibrate Mode
    ##################
    # no commands are accepted while a calibration is in progress
    capabilities = {
        AgentCapabilityType.AGENT_COMMAND: [],
        AgentCapabilityType.AGENT_PARAMETER: self._common_agent_parameters(),
        AgentCapabilityType.RESOURCE_COMMAND: [],
        AgentCapabilityType.RESOURCE_INTERFACE: None,
        AgentCapabilityType.RESOURCE_PARAMETER: self._driver_parameters.keys()
    }
    self.assert_execute_resource(Capability.CALIBRATE)
    self.assert_state_change(ResourceAgentState.BUSY, ProtocolState.CALIBRATE, 20)
    self.assert_capabilities(capabilities)
    self.assert_state_change(ResourceAgentState.COMMAND, ProtocolState.COMMAND, 900)
    ##################
    # DA Mode
    ##################
    capabilities[AgentCapabilityType.AGENT_COMMAND] = self._common_agent_commands(ResourceAgentState.DIRECT_ACCESS)
    capabilities[AgentCapabilityType.RESOURCE_COMMAND] = self._common_da_resource_commands()
    self.assert_direct_access_start_telnet()
    self.assert_capabilities(capabilities)
    self.assert_direct_access_stop_telnet()
    #######################
    # Uninitialized Mode
    #######################
    capabilities[AgentCapabilityType.AGENT_COMMAND] = self._common_agent_commands(ResourceAgentState.UNINITIALIZED)
    capabilities[AgentCapabilityType.RESOURCE_COMMAND] = []
    capabilities[AgentCapabilityType.RESOURCE_INTERFACE] = []
    capabilities[AgentCapabilityType.RESOURCE_PARAMETER] = []
    self.assert_reset()
    self.assert_capabilities(capabilities)
    def test_direct_access_telnet_closed(self):
        """
        Test that we can properly handle the situation when a direct access
        session is launched, the telnet is closed, then direct access is stopped.
        Overridden to increase timeout due to long MCU reset time.
        """
        self.assert_enter_command_mode()
        # Long timeout: starting direct access waits on a slow MCU reset.
        self.assert_direct_access_start_telnet(timeout=600)
        self.assertTrue(self.tcp_client)
        # Drop the telnet connection without cleanly stopping direct access.
        self.tcp_client.disconnect()
        # The agent is expected to recover to COMMAND on its own; allow up
        # to 120 seconds for the transition.
        self.assert_state_change(ResourceAgentState.COMMAND, DriverProtocolState.COMMAND, 120)
| |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import sys
import mock
from oslo_serialization import jsonutils
import six
from testtools import matchers
from keystoneclient import fixture
from keystoneclient.tests.unit.v2_0 import utils
# Default credential and endpoint values used to stub authentication for
# every shell test below (injected into os.environ in ShellTests.setUp).
DEFAULT_USERNAME = 'username'
DEFAULT_PASSWORD = 'password'
DEFAULT_TENANT_ID = 'tenant_id'
DEFAULT_TENANT_NAME = 'tenant_name'
DEFAULT_AUTH_URL = 'http://127.0.0.1:5000/v2.0/'
DEFAULT_ADMIN_URL = 'http://127.0.0.1:35357/v2.0/'
class ShellTests(utils.TestCase):
    """End-to-end tests for the keystone v2.0 CLI shell.

    Each test stubs the HTTP endpoint a command is expected to hit, runs
    the command through the real shell entry point, then asserts on the
    last request issued (method, URL and, where relevant, the JSON body).
    """
    # All stubbed admin API calls default to the admin endpoint.
    TEST_URL = DEFAULT_ADMIN_URL
    def setUp(self):
        """Patch os.environ to avoid required auth info."""
        super(ShellTests, self).setUp()
        self.addCleanup(setattr, os, 'environ', os.environ.copy())
        os.environ = {
            'OS_USERNAME': DEFAULT_USERNAME,
            'OS_PASSWORD': DEFAULT_PASSWORD,
            'OS_TENANT_ID': DEFAULT_TENANT_ID,
            'OS_TENANT_NAME': DEFAULT_TENANT_NAME,
            'OS_AUTH_URL': DEFAULT_AUTH_URL,
        }
        import keystoneclient.shell
        self.shell = keystoneclient.shell.OpenStackIdentityShell()
        # Build a scoped v2 token carrying an identity-service endpoint and
        # stub the auth call so every command can authenticate.
        self.token = fixture.V2Token()
        self.token.set_scope()
        svc = self.token.add_service('identity')
        svc.add_endpoint(public=DEFAULT_AUTH_URL,
                         admin=DEFAULT_ADMIN_URL)
        self.stub_auth(json=self.token, base_url=DEFAULT_AUTH_URL)
    def run_command(self, cmd):
        """Run a shell command and return everything it wrote to stdout.

        ``cmd`` may be a pre-split argv list or a single string (split on
        whitespace).  A ``SystemExit`` with code 0 (e.g. from ``help``)
        counts as success; any non-zero exit code fails the test.
        """
        orig = sys.stdout
        try:
            sys.stdout = six.StringIO()
            if isinstance(cmd, list):
                self.shell.main(cmd)
            else:
                self.shell.main(cmd.split())
        except SystemExit:
            exc_type, exc_value, exc_traceback = sys.exc_info()
            self.assertEqual(exc_value.code, 0)
        finally:
            out = sys.stdout.getvalue()
            sys.stdout.close()
            sys.stdout = orig
        return out
    def assert_called(self, method, path, base_url=TEST_URL):
        """Assert that the most recent HTTP request matches method/URL.

        NOTE: the ``base_url`` default is bound to ``TEST_URL`` at class
        definition time.
        """
        self.assertEqual(method, self.requests_mock.last_request.method)
        self.assertEqual(base_url + path.lstrip('/'),
                         self.requests_mock.last_request.url)
    def test_user_list(self):
        self.stub_url('GET', ['users'], json={'users': []})
        self.run_command('user-list')
        self.assert_called('GET', '/users')
    def test_user_create(self):
        self.stub_url('POST', ['users'], json={'user': {}})
        self.run_command('user-create --name new-user')
        self.assert_called('POST', '/users')
        self.assertRequestBodyIs(json={'user': {'email': None,
                                                'password': None,
                                                'enabled': True,
                                                'name': 'new-user',
                                                'tenantId': None}})
    @mock.patch('sys.stdin', autospec=True)
    def test_user_create_password_prompt(self, mock_stdin):
        # With no OS_PASSWORD and an interactive stdin, --pass with no
        # value should trigger a getpass prompt.
        self.stub_url('POST', ['users'], json={'user': {}})
        with mock.patch('getpass.getpass') as mock_getpass:
            del(os.environ['OS_PASSWORD'])
            mock_stdin.isatty = lambda: True
            mock_getpass.return_value = 'newpass'
            self.run_command('user-create --name new-user --pass')
            self.assert_called('POST', '/users')
            self.assertRequestBodyIs(json={'user': {'email': None,
                                                    'password': 'newpass',
                                                    'enabled': True,
                                                    'name': 'new-user',
                                                    'tenantId': None}})
    def test_user_get(self):
        self.stub_url('GET', ['users', '1'],
                      json={'user': {'id': '1'}})
        self.run_command('user-get 1')
        self.assert_called('GET', '/users/1')
    def test_user_delete(self):
        self.stub_url('GET', ['users', '1'],
                      json={'user': {'id': '1'}})
        self.stub_url('DELETE', ['users', '1'])
        self.run_command('user-delete 1')
        self.assert_called('DELETE', '/users/1')
    def test_user_password_update(self):
        self.stub_url('GET', ['users', '1'],
                      json={'user': {'id': '1'}})
        self.stub_url('PUT', ['users', '1', 'OS-KSADM', 'password'])
        self.run_command('user-password-update --pass newpass 1')
        self.assert_called('PUT', '/users/1/OS-KSADM/password')
    def test_user_update(self):
        # Three scenarios: a normal update, an update with no arguments
        # (warns and does not call the API), and clearing the email.
        self.stub_url('PUT', ['users', '1'])
        self.stub_url('GET', ['users', '1'],
                      json={"user": {"tenantId": "1",
                                     "enabled": "true",
                                     "id": "1",
                                     "name": "username"}})
        self.run_command('user-update --name new-user1'
                         ' --email user@email.com --enabled true 1')
        self.assert_called('PUT', '/users/1')
        body = {'user': {'id': '1', 'email': 'user@email.com',
                         'enabled': True, 'name': 'new-user1'}}
        self.assertRequestBodyIs(json=body)
        required = 'User not updated, no arguments present.'
        out = self.run_command('user-update 1')
        self.assertThat(out, matchers.MatchesRegex(required))
        self.run_command(['user-update', '--email', '', '1'])
        self.assert_called('PUT', '/users/1')
        self.assertRequestBodyIs(json={'user': {'id': '1', 'email': ''}})
    def test_role_create(self):
        self.stub_url('POST', ['OS-KSADM', 'roles'], json={'role': {}})
        self.run_command('role-create --name new-role')
        self.assert_called('POST', '/OS-KSADM/roles')
        self.assertRequestBodyIs(json={"role": {"name": "new-role"}})
    def test_role_get(self):
        self.stub_url('GET', ['OS-KSADM', 'roles', '1'],
                      json={'role': {'id': '1'}})
        self.run_command('role-get 1')
        self.assert_called('GET', '/OS-KSADM/roles/1')
    def test_role_list(self):
        self.stub_url('GET', ['OS-KSADM', 'roles'], json={'roles': []})
        self.run_command('role-list')
        self.assert_called('GET', '/OS-KSADM/roles')
    def test_role_delete(self):
        self.stub_url('GET', ['OS-KSADM', 'roles', '1'],
                      json={'role': {'id': '1'}})
        self.stub_url('DELETE', ['OS-KSADM', 'roles', '1'])
        self.run_command('role-delete 1')
        self.assert_called('DELETE', '/OS-KSADM/roles/1')
    def test_user_role_add(self):
        self.stub_url('GET', ['users', '1'],
                      json={'user': {'id': '1'}})
        self.stub_url('GET', ['OS-KSADM', 'roles', '1'],
                      json={'role': {'id': '1'}})
        self.stub_url('PUT', ['users', '1', 'roles', 'OS-KSADM', '1'])
        self.run_command('user-role-add --user_id 1 --role_id 1')
        self.assert_called('PUT', '/users/1/roles/OS-KSADM/1')
    def test_user_role_list(self):
        # tenant/user ids may be given explicitly or fall back to the
        # values carried by the auth token.
        self.stub_url('GET', ['tenants', self.token.tenant_id],
                      json={'tenant': {'id': self.token.tenant_id}})
        self.stub_url('GET', ['tenants', self.token.tenant_id,
                              'users', self.token.user_id, 'roles'],
                      json={'roles': []})
        url = '/tenants/%s/users/%s/roles' % (self.token.tenant_id,
                                              self.token.user_id)
        self.run_command('user-role-list --user_id %s --tenant-id %s' %
                         (self.token.user_id, self.token.tenant_id))
        self.assert_called('GET', url)
        self.run_command('user-role-list --user_id %s' % self.token.user_id)
        self.assert_called('GET', url)
        self.run_command('user-role-list')
        self.assert_called('GET', url)
    def test_user_role_remove(self):
        self.stub_url('GET', ['users', '1'],
                      json={'user': {'id': 1}})
        self.stub_url('GET', ['OS-KSADM', 'roles', '1'],
                      json={'role': {'id': 1}})
        self.stub_url('DELETE',
                      ['users', '1', 'roles', 'OS-KSADM', '1'])
        self.run_command('user-role-remove --user_id 1 --role_id 1')
        self.assert_called('DELETE', '/users/1/roles/OS-KSADM/1')
    def test_tenant_create(self):
        self.stub_url('POST', ['tenants'], json={'tenant': {}})
        self.run_command('tenant-create --name new-tenant')
        self.assertRequestBodyIs(json={"tenant": {"enabled": True,
                                                  "name": "new-tenant",
                                                  "description": None}})
    def test_tenant_get(self):
        self.stub_url('GET', ['tenants', '2'], json={'tenant': {}})
        self.run_command('tenant-get 2')
        self.assert_called('GET', '/tenants/2')
    def test_tenant_list(self):
        self.stub_url('GET', ['tenants'], json={'tenants': []})
        self.run_command('tenant-list')
        self.assert_called('GET', '/tenants')
    def test_tenant_update(self):
        self.stub_url('GET', ['tenants', '1'],
                      json={'tenant': {'id': '1'}})
        self.stub_url('GET', ['tenants', '2'],
                      json={'tenant': {'id': '2'}})
        self.stub_url('POST', ['tenants', '2'],
                      json={'tenant': {'id': '2'}})
        self.run_command('tenant-update'
                         ' --name new-tenant1 --enabled false'
                         ' --description desc 2')
        self.assert_called('POST', '/tenants/2')
        self.assertRequestBodyIs(json={"tenant": {"enabled": False,
                                                  "id": "2",
                                                  "description": "desc",
                                                  "name": "new-tenant1"}})
        required = 'Tenant not updated, no arguments present.'
        out = self.run_command('tenant-update 1')
        self.assertThat(out, matchers.MatchesRegex(required))
    def test_tenant_delete(self):
        self.stub_url('GET', ['tenants', '2'],
                      json={'tenant': {'id': '2'}})
        self.stub_url('DELETE', ['tenants', '2'])
        self.run_command('tenant-delete 2')
        self.assert_called('DELETE', '/tenants/2')
    def test_service_create_with_required_arguments_only(self):
        self.stub_url('POST', ['OS-KSADM', 'services'],
                      json={'OS-KSADM:service': {}})
        self.run_command('service-create --type compute')
        self.assert_called('POST', '/OS-KSADM/services')
        json = {"OS-KSADM:service": {"type": "compute",
                                     "name": None,
                                     "description": None}}
        self.assertRequestBodyIs(json=json)
    def test_service_create_with_all_arguments(self):
        self.stub_url('POST', ['OS-KSADM', 'services'],
                      json={'OS-KSADM:service': {}})
        self.run_command('service-create --type compute '
                         '--name service1 --description desc1')
        self.assert_called('POST', '/OS-KSADM/services')
        json = {"OS-KSADM:service": {"type": "compute",
                                     "name": "service1",
                                     "description": "desc1"}}
        self.assertRequestBodyIs(json=json)
    def test_service_get(self):
        self.stub_url('GET', ['OS-KSADM', 'services', '1'],
                      json={'OS-KSADM:service': {'id': '1'}})
        self.run_command('service-get 1')
        self.assert_called('GET', '/OS-KSADM/services/1')
    def test_service_list(self):
        self.stub_url('GET', ['OS-KSADM', 'services'],
                      json={'OS-KSADM:services': []})
        self.run_command('service-list')
        self.assert_called('GET', '/OS-KSADM/services')
    def test_service_delete(self):
        self.stub_url('GET', ['OS-KSADM', 'services', '1'],
                      json={'OS-KSADM:service': {'id': 1}})
        self.stub_url('DELETE', ['OS-KSADM', 'services', '1'])
        self.run_command('service-delete 1')
        self.assert_called('DELETE', '/OS-KSADM/services/1')
    def test_catalog(self):
        # The catalog comes from the stubbed auth token; just verify the
        # commands run without error.
        self.run_command('catalog')
        self.run_command('catalog --service compute')
    def test_ec2_credentials_create(self):
        self.stub_url('POST',
                      ['users', self.token.user_id, 'credentials', 'OS-EC2'],
                      json={'credential': {}})
        url = '/users/%s/credentials/OS-EC2' % self.token.user_id
        self.run_command('ec2-credentials-create --tenant-id 1 '
                         '--user-id %s' % self.token.user_id)
        self.assert_called('POST', url)
        self.assertRequestBodyIs(json={'tenant_id': '1'})
        self.run_command('ec2-credentials-create --tenant-id 1')
        self.assert_called('POST', url)
        self.assertRequestBodyIs(json={'tenant_id': '1'})
        # With no ids at all, both fall back to the token's values.
        self.run_command('ec2-credentials-create')
        self.assert_called('POST', url)
        self.assertRequestBodyIs(json={'tenant_id': self.token.tenant_id})
    def test_ec2_credentials_delete(self):
        self.stub_url('DELETE',
                      ['users', self.token.user_id,
                       'credentials', 'OS-EC2', '2'])
        self.run_command('ec2-credentials-delete --access 2 --user-id %s' %
                         self.token.user_id)
        url = '/users/%s/credentials/OS-EC2/2' % self.token.user_id
        self.assert_called('DELETE', url)
        self.run_command('ec2-credentials-delete --access 2')
        self.assert_called('DELETE', url)
    def test_ec2_credentials_list(self):
        self.stub_url('GET',
                      ['users', self.token.user_id, 'credentials', 'OS-EC2'],
                      json={'credentials': []})
        self.run_command('ec2-credentials-list --user-id %s'
                         % self.token.user_id)
        url = '/users/%s/credentials/OS-EC2' % self.token.user_id
        self.assert_called('GET', url)
        self.run_command('ec2-credentials-list')
        self.assert_called('GET', url)
    def test_ec2_credentials_get(self):
        self.stub_url('GET',
                      ['users', '1', 'credentials', 'OS-EC2', '2'],
                      json={'credential': {}})
        self.run_command('ec2-credentials-get --access 2 --user-id 1')
        self.assert_called('GET', '/users/1/credentials/OS-EC2/2')
    def test_bootstrap(self):
        user = {'user': {'id': '1'}}
        role = {'role': {'id': '1'}}
        tenant = {'tenant': {'id': '1'}}
        token = fixture.V2Token(user_id=1, tenant_id=1)
        token.add_role(id=1)
        svc = token.add_service('identity')
        svc.add_endpoint(public=DEFAULT_AUTH_URL,
                         admin=DEFAULT_ADMIN_URL)
        self.stub_auth(json=token)
        self.stub_url('POST', ['OS-KSADM', 'roles'], json=role)
        self.stub_url('GET', ['OS-KSADM', 'roles', '1'], json=role)
        self.stub_url('POST', ['tenants'], json=tenant)
        self.stub_url('GET', ['tenants', '1'], json=tenant)
        self.stub_url('POST', ['users'], json=user)
        self.stub_url('GET', ['users', '1'], json=user)
        self.stub_url('PUT',
                      ['tenants', '1', 'users', '1', 'roles', 'OS-KSADM', '1'],
                      json=role)
        self.run_command('bootstrap --user-name new-user'
                         ' --pass 1 --role-name admin'
                         ' --tenant-name new-tenant')
        def called_anytime(method, path, json=None):
            # Unlike assert_called, scan the whole request history for a
            # matching request: bootstrap issues several calls and we do
            # not care about their order.
            test_url = self.TEST_URL.strip('/')
            for r in self.requests_mock.request_history:
                if not r.method == method:
                    continue
                if not r.url == test_url + path:
                    continue
                if json:
                    json_body = jsonutils.loads(r.body)
                    if not json_body == json:
                        continue
                return True
            raise AssertionError('URL never called')
        called_anytime('POST', '/users', {'user': {'email': None,
                                                   'password': '1',
                                                   'enabled': True,
                                                   'name': 'new-user',
                                                   'tenantId': None}})
        called_anytime('POST', '/tenants', {"tenant": {"enabled": True,
                                                       "name": "new-tenant",
                                                       "description": None}})
        called_anytime('POST', '/OS-KSADM/roles',
                       {"role": {"name": "admin"}})
        called_anytime('PUT', '/tenants/1/users/1/roles/OS-KSADM/1')
    def test_bash_completion(self):
        self.run_command('bash-completion')
    def test_help(self):
        out = self.run_command('help')
        required = 'usage: keystone'
        self.assertThat(out, matchers.MatchesRegex(required))
    def test_password_update(self):
        # Password self-service goes through the *public* endpoint, hence
        # the explicit base_url overrides.
        self.stub_url('PATCH',
                      ['OS-KSCRUD', 'users', self.token.user_id],
                      base_url=DEFAULT_AUTH_URL)
        self.run_command('password-update --current-password oldpass'
                         ' --new-password newpass')
        self.assert_called('PATCH',
                           '/OS-KSCRUD/users/%s' % self.token.user_id,
                           base_url=DEFAULT_AUTH_URL)
        self.assertRequestBodyIs(json={'user': {'original_password': 'oldpass',
                                                'password': 'newpass'}})
    def test_endpoint_create(self):
        self.stub_url('GET', ['OS-KSADM', 'services', '1'],
                      json={'OS-KSADM:service': {'id': '1'}})
        self.stub_url('POST', ['endpoints'], json={'endpoint': {}})
        self.run_command('endpoint-create --service-id 1 '
                         '--publicurl=http://example.com:1234/go')
        self.assert_called('POST', '/endpoints')
        json = {'endpoint': {'adminurl': None,
                             'service_id': '1',
                             'region': 'regionOne',
                             'internalurl': None,
                             'publicurl': "http://example.com:1234/go"}}
        self.assertRequestBodyIs(json=json)
    def test_endpoint_list(self):
        self.stub_url('GET', ['endpoints'], json={'endpoints': []})
        self.run_command('endpoint-list')
        self.assert_called('GET', '/endpoints')
| |
from __future__ import print_function
from kivy.uix.screenmanager import Screen
import httplib2
import os
from apiclient import discovery
from oauth2client import client
from oauth2client import tools
from oauth2client.file import Storage
from apiclient import errors
from kivy.properties import ListProperty
from apiclient.http import MediaFileUpload
try:
    import argparse
    # Parse oauth2client's standard command-line flags (e.g.
    # --noauth_local_webserver) for use by tools.run_flow().
    flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()
except ImportError:
    # argparse unavailable (Python < 2.7); get_credentials() falls back to
    # the legacy tools.run() flow when flags is None.
    flags = None
__all__ = []
__version__ = '0.0'
class GoogleLinkScreen(Screen):
    # Placeholder Kivy screen; presumably its layout/behaviour lives in a
    # corresponding .kv rule -- TODO confirm.
    pass
class GoogleLink(object):
    """Thin wrapper around the Google Drive v3 API for app-data storage.

    Based on https://developers.google.com/drive/v3/web/quickstart/python

    Call ``open_service(settings)`` before any other method; it validates
    the configuration, runs the OAuth2 flow if needed, and builds
    ``self.service``.
    """

    # Class-level defaults; overwritten per instance by _settings().
    _API_KEY = ''
    _CLIENT_SECRET_FILE = './rsc/client_secrets.json'
    APPLICATION_NAME = ''
    LOCAL_STORAGE_PATH = ''
    _CREDENTIAL_PATH = '.credentials/'
    SCOPE = ''
    settings = {}
    service = None

    def get_credentials(self):
        """Gets valid user credentials from storage.

        If nothing has been stored, or if the stored credentials are invalid,
        the OAuth2 flow is completed to obtain the new credentials.

        Returns:
            Credentials, the obtained credential.
        """
        credential_dir = self.LOCAL_STORAGE_PATH + self._CREDENTIAL_PATH
        if not os.path.exists(credential_dir):
            os.makedirs(credential_dir)
        credential_path = os.path.join(credential_dir,
                                       self.APPLICATION_NAME + '.json')
        store = Storage(credential_path)
        credentials = False
        try:
            credentials = store.get()
        except UserWarning:
            # Storage warns (rather than fails) when the file is missing
            # or unreadable; treat that as "no credentials yet".
            pass
        if not credentials or credentials.invalid:
            flow = client.flow_from_clientsecrets(self._CLIENT_SECRET_FILE, self.SCOPE)
            flow.user_agent = self.APPLICATION_NAME
            if flags:
                credentials = tools.run_flow(flow, store, flags)
            else:  # Needed only for compatibility with Python 2.6
                credentials = tools.run(flow, store)
            print('Storing credentials to ' + credential_path)
        return credentials

    def _settings(self, settings):
        """Copy configuration values (missing keys default to '')."""
        self.LOCAL_STORAGE_PATH = settings.get('LOCAL_STORAGE_PATH', '')
        self.SCOPE = settings.get('SCOPE', '')
        self.APPLICATION_NAME = settings.get('APPLICATION_NAME', '')
        self._API_KEY = settings.get('API_KEY', '')

    def _open_service(self):
        """Authorize via OAuth2 and build the Drive v3 service object."""
        # OAuth 2.0 for Mobile & Desktop Apps:
        # https://developers.google.com/identity/protocols/OAuth2InstalledApp
        credentials = self.get_credentials()
        http = credentials.authorize(httplib2.Http())
        self.service = discovery.build('drive', 'v3', http=http)
        return self.service

    def open_service(self, settings):
        """Apply *settings*, validate them, and open the Drive service.

        Raises:
            ValueError: if any required setting is missing or empty.
        """
        self._settings(settings)
        # BUG FIX: the original checked `any(s == '' for s in settings)`,
        # which iterates the dict's *keys* (never ''), so the validation
        # could never fire.  Validate the applied values instead.
        required = (self.LOCAL_STORAGE_PATH, self.SCOPE,
                    self.APPLICATION_NAME, self._API_KEY)
        if any(value == '' for value in required):
            raise ValueError('Incorrect Google Link Settings')
        return self._open_service()

    def add_file(self, filename, application='json'):
        """Upload a local file into the app-data folder; returns its id."""
        file_metadata = {'name': filename,
                         'parents': ['appDataFolder']}
        file = os.path.join(self.LOCAL_STORAGE_PATH, filename)
        media = MediaFileUpload(file,
                                mimetype='application/' + application,
                                resumable=True)
        return self.service.files().create(body=file_metadata,
                                           media_body=media,
                                           fields='id').execute()

    def delete_file(self, file_id):
        """Permanently delete a file, skipping the trash.

        Args:
            file_id: ID of the file to delete.
        """
        try:
            self.service.files().delete(fileId=file_id).execute()
        except errors.HttpError as error:
            print('An error occurred: %s' % error)

    def server_folder_list(self):
        """Print name and id of every file in the app-data folder."""
        for file in self.get_folder_list():
            print('Found file: %s (%s)' % (file.get('name'), file.get('id')))

    def get_folder_list(self):
        """Return up to 10 files from the app-data folder as dicts."""
        response = self.service.files().list(spaces='appDataFolder',
                                             fields='nextPageToken, files(id, name)',
                                             pageSize=10).execute()
        return response.get('files', [])

    def update_file_content(self, file_id, new_filename):
        """Replace the content of *file_id* with a local file.

        BUG FIX: this used to be a second ``update_file`` definition that
        was silently shadowed by the metadata variant below; renamed so it
        is actually reachable.  The garbled ``update_panel_display`` call
        is corrected to the Drive v3 ``files().update`` method.

        Returns:
            Updated file metadata if successful, None otherwise.
        """
        new_filename = self.LOCAL_STORAGE_PATH + new_filename
        try:
            # First retrieve the file from the API.
            file = self.service.files().get(fileId=file_id).execute()
            # File's new content.
            media_body = MediaFileUpload(
                new_filename, resumable=True)
            # Send the request to the API.
            updated_file = self.service.files().update(
                fileId=file_id,
                body=file,
                media_body=media_body).execute()
            return updated_file
        except errors.HttpError as error:
            print('An error occurred: %s' % error)
            return None

    def update_file(self, file_id, new_title, new_description, new_mime_type,
                    new_filename, new_revision):
        """Update an existing file's metadata and content.

        Args:
            file_id: ID of the file to update.
            new_title: New title for the file.
            new_description: New description for the file.
            new_mime_type: New MIME type for the file.
            new_filename: Filename of the new content to upload.
            new_revision: Whether or not to create a new revision for this file.

        Returns:
            Updated file metadata if successful, None otherwise.

        NOTE(review): 'title' and 'newRevision' are Drive *v2* concepts;
        this body was likely copied from v2 sample code and may need to be
        ported to v3 ('name', no newRevision) -- confirm against the API.
        """
        try:
            # First retrieve the file from the API.
            file = self.service.files().get(fileId=file_id).execute()
            # File's new metadata.
            file['title'] = new_title
            file['description'] = new_description
            file['mimeType'] = new_mime_type
            # File's new content.
            media_body = MediaFileUpload(
                new_filename, mimetype=new_mime_type, resumable=True)
            # Send the request to the API (was the garbled
            # ``update_panel_display``; Drive exposes ``update``).
            updated_file = self.service.files().update(
                fileId=file_id,
                body=file,
                newRevision=new_revision,
                media_body=media_body).execute()
            return updated_file
        except errors.HttpError as error:
            print('An error occurred: %s' % error)
            return None
if __name__ == '__main__':
    # Manual smoke-test: open the Drive service and list the app folder.
    gc = GoogleLink()
    # Renamed from `set`, which shadowed the builtin.
    # SECURITY NOTE: a real-looking API key is committed here; it should be
    # revoked and loaded from the environment or a config file instead.
    settings = {'LOCAL_STORAGE_PATH': '.local_storage/',
                'SCOPE': "https://www.googleapis.com/auth/drive.appdata",
                'APPLICATION_NAME': 'Task Manager',
                'API_KEY': 'AIzaSyDjQ_pg_ICdC_RenDu2DGmT54XtoYGXQSo'}
    gc.open_service(settings)
    gc.server_folder_list()
| |
# coding=utf-8
# Copyright 2022 The ML Fairness Gym Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Main file to run attention allocation experiments.
This file replicates experiments done for the ACM FAT* paper
"Fairness is Not Static".
Note this file can take a significant amount of time to run all experiments
since experiments are being repeated multiple times and the results averaged.
To run experiments fewer times, change the experiments.num_runs parameter to
10.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
from absl import app
from absl import flags
from agents import allocation_agents
from agents import random_agents
from environments import attention_allocation
from experiments import attention_allocation_experiment
from experiments import attention_allocation_experiment_plotting
import numpy as np
FLAGS = flags.FLAGS
# Directory that receives all JSON reports and plot files produced below.
flags.DEFINE_string('output_dir', '/tmp/',
                    'Output directory to write results to.')
def _get_base_env_params():
  """Return the attention-allocation environment parameters shared by all runs."""
  n_locations = 5
  base_config = dict(
      n_locations=n_locations,
      prior_incident_counts=(500,) * n_locations,
      incident_rates=[8, 6, 4, 3, 1.5],
      n_attention_units=6,
      miss_incident_prob=(0.,) * n_locations,
      extra_incident_prob=(0.,) * n_locations,
      dynamic_rate=0.0)
  return attention_allocation.Params(**base_config)
def _setup_experiment():
  """Build the Experiment definition every agent variant below starts from."""
  experiment = attention_allocation_experiment.Experiment(
      num_runs=50,
      num_steps=1000,
      num_workers=25,
      seed=0,
      env_class=attention_allocation.LocationAllocationEnv,
      env_params=_get_base_env_params())
  return experiment
def _print_discovered_missed_incidents_report(value, report):
discovered_incidents = np.array(report['metrics']['discovered_incidents'])
discovered_total = np.sum(discovered_incidents)
missed_incidents = np.array(
report['metrics']['occurred_incidents']) - np.array(
report['metrics']['discovered_incidents'])
missed_total = np.sum(missed_incidents)
print(
'REPORT dynamic_value: {}\ndiscovered_total: {}\nmissed_total: {}\ndiscovered_locations: {}\nmissed_locations: {}\n'
.format(value, discovered_total, missed_total, discovered_incidents,
missed_incidents))
def mle_greedy_alpha5_agent_resource_all_dynamics():
  """Run an MLEGreedyAgent with fairness constraint alpha=0.75 across dynamic rates.

  NOTE(review): the function name says alpha5 but the code sets alpha=0.75;
  name kept unchanged for compatibility with existing callers.

  Returns:
    Dict mapping dynamic rate -> raw JSON report string.
  """
  dynamic_values_to_test = [0.0, 0.01, 0.05, 0.1, 0.15]
  experiment = _setup_experiment()
  experiment.agent_class = allocation_agents.MLEGreedyAgent
  experiment.agent_params = allocation_agents.MLEGreedyAgentParams(
      burn_steps=25, window=100, alpha=0.75)
  reports_dict = {}
  for value in dynamic_values_to_test:
    print('Running an experiment...')
    experiment.env_params.dynamic_rate = value
    json_report = attention_allocation_experiment.run(experiment)
    report = json.loads(json_report)
    print('\n\nMLE Greedy Fair Agent, 6 attention units, alpha=0.75')
    _print_discovered_missed_incidents_report(value, report)
    output_filename = 'mle_greedy_fair_alpha75_6units_%f.json' % value
    with open(os.path.join(FLAGS.output_dir, output_filename), 'w') as f:
      json.dump(report, f)
    # Keep the raw JSON string; main() reloads it for plotting.
    reports_dict[value] = json_report
  return reports_dict
def mle_greedy_agent_resource_all_dynamics():
  """Run a purely greedy MLEGreedyAgent (no alpha constraint) across dynamic rates.

  Returns:
    Dict mapping dynamic rate -> raw JSON report string.
  """
  dynamic_values_to_test = [0.0, 0.01, 0.05, 0.1, 0.15]
  experiment = _setup_experiment()
  experiment.agent_class = allocation_agents.MLEGreedyAgent
  experiment.agent_params = allocation_agents.MLEGreedyAgentParams(
      burn_steps=25, window=100)
  reports_dict = {}
  for value in dynamic_values_to_test:
    print('Running an experiment...')
    experiment.env_params.dynamic_rate = value
    json_report = attention_allocation_experiment.run(experiment)
    report = json.loads(json_report)
    print('\n\nMLE Greedy Agent, 6 attention units')
    _print_discovered_missed_incidents_report(value, report)
    output_filename = 'mle_greedy_6units_%f.json' % value
    with open(os.path.join(FLAGS.output_dir, output_filename), 'w') as f:
      json.dump(report, f)
    reports_dict[value] = json_report
  return reports_dict
def uniform_agent_resource_all_dynamics():
  """Run experiments on a uniform agent across dynamic rates.

  Returns:
    Dict mapping dynamic rate -> raw JSON report string.
  """
  dynamic_values_to_test = [0.0, 0.01, 0.05, 0.1, 0.15]
  experiment = _setup_experiment()
  experiment.agent_class = random_agents.RandomAgent
  reports_dict = {}
  for value in dynamic_values_to_test:
    print('Running an experiment...')
    experiment.env_params.dynamic_rate = value
    json_report = attention_allocation_experiment.run(experiment)
    report = json.loads(json_report)
    print('\n\nUniform Random Agent, 6 attention units')
    _print_discovered_missed_incidents_report(value, report)
    output_filename = 'uniform_6units_%f.json' % value
    with open(os.path.join(FLAGS.output_dir, output_filename), 'w') as f:
      json.dump(report, f)
    reports_dict[value] = json_report
  return reports_dict
def mle_agent_epsilon_1_resource_all_dynamics():
  """Run a probability-matching MLE agent across dynamic rates.

  NOTE(review): epsilon is never set explicitly here; the filenames and
  log line assume the params default is 0.1 -- confirm in
  allocation_agents.MLEProbabilityMatchingAgentParams.

  Returns:
    Dict mapping dynamic rate -> raw JSON report string.
  """
  dynamic_values_to_test = [0.0, 0.01, 0.05, 0.1, 0.15]
  experiment = _setup_experiment()
  experiment.agent_class = allocation_agents.MLEProbabilityMatchingAgent
  experiment.agent_params = allocation_agents.MLEProbabilityMatchingAgentParams(
  )
  experiment.agent_params.burn_steps = 25
  experiment.agent_params.window = 100
  reports_dict = {}
  for value in dynamic_values_to_test:
    print('Running an experiment...')
    experiment.env_params.dynamic_rate = value
    json_report = attention_allocation_experiment.run(experiment)
    report = json.loads(json_report)
    print('\n\nMLE Agent, 6 attention units, epsilon=0.1')
    _print_discovered_missed_incidents_report(value, report)
    output_filename = 'mle_epsilon.1_6units_%f.json' % value
    with open(os.path.join(FLAGS.output_dir, output_filename), 'w') as f:
      json.dump(report, f)
    reports_dict[value] = json_report
  return reports_dict
def mle_agent_epsilon_5_resource_all_dynamics():
  """Run a probability-matching MLE agent with epsilon=0.5 across dynamic rates.

  (The original docstring said epsilon=0.6; the code sets 0.5.)

  Returns:
    Dict mapping dynamic rate -> raw JSON report string.
  """
  dynamic_values_to_test = [0.0, 0.01, 0.05, 0.1, 0.15]
  experiment = _setup_experiment()
  experiment.agent_class = allocation_agents.MLEProbabilityMatchingAgent
  experiment.agent_params = allocation_agents.MLEProbabilityMatchingAgentParams(
  )
  experiment.agent_params.burn_steps = 25
  experiment.agent_params.epsilon = 0.5
  experiment.agent_params.window = 100
  reports_dict = {}
  # NOTE(review): unlike the sibling functions, this loop does not print
  # 'Running an experiment...' per iteration.
  for value in dynamic_values_to_test:
    experiment.env_params.dynamic_rate = value
    json_report = attention_allocation_experiment.run(experiment)
    report = json.loads(json_report)
    print('\n\nMLE Agent, 6 attention units, epsilon=0.5')
    _print_discovered_missed_incidents_report(value, report)
    output_filename = 'mle_epsilon.5_6units_%f.json' % value
    with open(os.path.join(FLAGS.output_dir, output_filename), 'w') as f:
      json.dump(report, f)
    reports_dict[value] = json_report
  return reports_dict
def main(argv):
  """Run every agent variant across all dynamic rates and emit plots.

  Args:
    argv: absl positional command-line arguments (none expected).

  Raises:
    app.UsageError: if extra positional arguments are supplied.
  """
  if len(argv) > 1:
    raise app.UsageError('Too many command-line arguments.')
  greedy_fair_reports = mle_greedy_alpha5_agent_resource_all_dynamics()
  greedy_reports = mle_greedy_agent_resource_all_dynamics()
  uniform_reports = uniform_agent_resource_all_dynamics()
  mle1_reports = mle_agent_epsilon_1_resource_all_dynamics()
  mle5_reports = mle_agent_epsilon_5_resource_all_dynamics()
  # Order here must stay aligned with the report lists passed below.
  agent_names = [
      'purely greedy', 'greedy alpha=0.75', 'uniform',
      'proportional epsilon=0.1', 'proportional epsilon=0.5'
  ]
  dataframe = attention_allocation_experiment_plotting.create_dataframe_from_results(
      agent_names, [
          greedy_reports, greedy_fair_reports, uniform_reports, mle1_reports,
          mle5_reports
      ])
  # Second dataframe keeps per-location breakdowns for the location plots.
  loc_dataframe = attention_allocation_experiment_plotting.create_dataframe_from_results(
      agent_names, [
          greedy_reports, greedy_fair_reports, uniform_reports, mle1_reports,
          mle5_reports
      ],
      separate_locations=True)
  attention_allocation_experiment_plotting.plot_discovered_missed_clusters(
      loc_dataframe,
      os.path.join(FLAGS.output_dir, 'dynamic_rate_across_agents_locations'))
  attention_allocation_experiment_plotting.plot_total_miss_discovered(
      dataframe, os.path.join(FLAGS.output_dir, 'dynamic_rate_across_agents'))
  attention_allocation_experiment_plotting.plot_discovered_occurred_ratio_locations(
      loc_dataframe,
      os.path.join(FLAGS.output_dir, 'discovered_to_occurred_locations'))
  attention_allocation_experiment_plotting.plot_discovered_occurred_ratio_range(
      dataframe, os.path.join(FLAGS.output_dir, 'discovered_to_occurred_range'))
  # Time-series plots use the single run at dynamic rate 0.1 (float key).
  attention_allocation_experiment_plotting.plot_occurence_action_single_dynamic(
      json.loads(greedy_reports[0.1]),
      os.path.join(FLAGS.output_dir, 'greedy_incidents_actions_over_time'))
  attention_allocation_experiment_plotting.plot_occurence_action_single_dynamic(
      json.loads(greedy_fair_reports[0.1]),
      os.path.join(FLAGS.output_dir, 'greedy_fair_incidents_actions_over_time'))
  attention_allocation_experiment_plotting.plot_occurence_action_single_dynamic(
      json.loads(uniform_reports[0.1]),
      os.path.join(FLAGS.output_dir, 'uniform_incidents_actions_over_time'))
  attention_allocation_experiment_plotting.plot_occurence_action_single_dynamic(
      json.loads(mle1_reports[0.1]),
      os.path.join(FLAGS.output_dir,
                   'proportional_incidents_actions_over_time'))
if __name__ == '__main__':
  app.run(main)
| |
# coding=utf-8
from collections import defaultdict
import logging
import gzip
import json
from sklearn.feature_extraction.stop_words import ENGLISH_STOP_WORDS
import networkx as nx
from discoutils.tokens import Token
try:
import xml.etree.cElementTree as ET
except ImportError:
logging.warning('cElementTree not available')
import xml.etree.ElementTree as ET
# copied from feature extraction toolkit
# Maps fine-grained (Penn Treebank-style) PoS tags to coarse categories.
# Unknown tags fall back to "UNK" via the defaultdict factory.
pos_coarsification_map = defaultdict(lambda: "UNK")
pos_coarsification_map.update({"JJ": "J",
                               "JJN": "J",
                               "JJS": "J",
                               "JJR": "J",
                               "VB": "V",
                               "VBD": "V",
                               "VBG": "V",
                               "VBN": "V",
                               "VBP": "V",
                               "VBZ": "V",
                               "NN": "N",
                               "NNS": "N",
                               "NNP": "N",
                               "NPS": "N",
                               "NP": "N",
                               "RB": "RB",
                               "RBR": "RB",
                               "RBS": "RB",
                               "DT": "DET",
                               "WDT": "DET",
                               "IN": "CONJ",
                               "CC": "CONJ",
                               "PRP": "PRON",
                               "PRP$": "PRON",
                               "WP": "PRON",
                               "WP$": "PRON",
                               # Punctuation.  The original literal had
                               # duplicate ":" and "'" keys (silently
                               # collapsed by the dict literal); the
                               # duplicates are removed here with no
                               # change in behaviour.
                               # NOTE(review): the "" entry and the bare
                               # quote entries look like garbled PTB quote
                               # tags (`` and '') -- confirm against the
                               # tags the toolkit actually emits.
                               ".": "PUNCT",
                               ":": "PUNCT",
                               "": "PUNCT",
                               "'": "PUNCT",
                               "\"": "PUNCT",
                               "-LRB-": "PUNCT",
                               "-RRB-": "PUNCT",
                               # the four NE types that FET 0.3.6 may return as PoS tags
                               # not really needed in the classification pipeline yet
                               "PERSON": "PERSON",
                               "LOC": "LOC",
                               "ORG": "ORG",
                               "NUMBER": "NUMBER"})
def is_number(s):
    """
    Checks if the given string is an int or a float. Numbers with thousands
    separators (e.g. "1,000.12") are also recognised: they fail float(), but
    contain no alphabetic characters. Returns true if the string contains
    only digits and punctuation, e.g. 12/23.

    BUG FIX: the empty string previously returned True (the character loop
    never ran, leaving the punctuation flag set); it now returns False.
    """
    if not s:
        return False
    try:
        float(s)
        return True
    except ValueError:
        pass
    # No alphabetic characters at all -> treat as a number-like token
    # (covers "1,000.12", "12/23", and similar).
    return not any(ch.isalpha() for ch in s)
class BaseTokeniser(object):
    """
    Holds the configuration shared by all tokenisers and declares the
    ``tokenize_corpus`` interface that concrete subclasses implement.
    """

    def __init__(self, normalise_entities=False, use_pos=True,
                 coarse_pos=True, lemmatize=True, lowercase=True,
                 remove_stopwords=False, remove_short_words=False,
                 remove_long_words=False,
                 dependency_format='collapsed-ccprocessed'):
        # text normalisation options
        self.lemmatize = lemmatize
        self.lowercase = lowercase
        self.normalise_entities = normalise_entities
        # PoS handling options
        self.use_pos = use_pos
        self.coarse_pos = coarse_pos
        # token filtering options
        self.remove_stopwords = remove_stopwords
        self.remove_short_words = remove_short_words
        self.remove_long_words = remove_long_words
        # which dependency annotation layer to read from the parsed input
        self.dependency_format = dependency_format

    def tokenize_corpus(self, *args, **kwargs):
        """Subclasses must override this; the base class has no behaviour."""
        raise NotImplementedError
class XmlTokenizer(BaseTokeniser):
    """
    Tokeniser for Stanford CoreNLP XML output. Each document is parsed into
    a list of per-sentence dependency graphs (networkx.DiGraph) whose nodes
    are Token objects.
    """

    def tokenize_corpus(self, file_names, corpus_name):
        """
        Tokenize each file in ``file_names``; returns a list with one entry
        per document, each produced by :meth:`tokenize_doc`.
        """
        logging.info('%s running for %s', self.__class__.__name__, corpus_name)
        # i is kept to identify the offending doc in case something goes wrong
        trees = []
        for (i, x) in enumerate(file_names):
            with open(x) as infile:
                trees.append(self.tokenize_doc(infile.read()))
        return trees

    def tokenize_doc(self, doc, **kwargs):
        """
        Tokenizes a Stanford Core NLP processed document by parsing the XML
        and extracting tokens and their lemmas, with optional lowercasing.
        If requested, the named entities will be replaced with the respective
        type, e.g. PERSON or ORG, otherwise numbers and punctuation will be
        canonicalised.

        :returns: a list with one dependency graph per sentence; empty if
                  the document is not valid XML
        """
        # Initialise before the try block: previously a ParseError caused an
        # UnboundLocalError on `return sentences` instead of returning [].
        # (On OSX the .DS_Store file may be passed in; just ignore it.)
        sentences = []
        try:
            tree = ET.fromstring(doc)
            for sent_element in tree.findall('.//sentence'):
                sentences.append(self._process_sentence(sent_element))
        except ET.ParseError as e:
            logging.error('Parse error %s', e)
        return sentences

    def _process_sentence(self, tree):
        """
        Build a dependency tree (networkx.DiGraph) for a sentence from its
        corresponding XML tree.

        :param tree: the ``<sentence>`` XML element
        :return: networkx.DiGraph with Token nodes; edges carry the
                 dependency relation in their ``type`` attribute
        """
        tokens = []
        for element in tree.findall('.//token'):
            if self.lemmatize:
                txt = element.find('lemma').text
            else:
                txt = element.find('word').text
            # check if the token is a number/stopword before things have been done to it
            am_i_a_number = is_number(txt)
            if self.remove_stopwords and txt.lower() in ENGLISH_STOP_WORDS:
                continue
            if self.remove_short_words and len(txt) <= 3:
                continue
            if self.remove_long_words and len(txt) >= 25:
                continue
            pos = element.find('POS').text.upper() if self.use_pos else ''
            if self.coarse_pos:
                pos = pos_coarsification_map[pos.upper()]
            if pos == 'PUNCT' or am_i_a_number:
                continue
            try:
                iob_tag = element.find('NER').text.upper()
            except AttributeError:
                # input data are not annotated for named entities
                iob_tag = 'MISSING'
            if '/' in txt or '_' in txt:
                # these chars are used as separators later, drop the token
                # now to avoid problems down the line
                logging.debug('Funny token found: %s, pos is %s', txt, pos)
                continue
            if self.lowercase:
                txt = txt.lower()
            if self.normalise_entities:
                if iob_tag != 'O':
                    txt = '__NER-%s__' % iob_tag
                    pos = ''  # normalised named entities don't need a PoS tag
            tokens.append(Token(txt, pos, int(element.get('id')), ner=iob_tag))
        token_index = {t.index: t for t in tokens}
        # build a graph from the dependency information available in the input
        tokens_ids = set(token_index)
        dep_tree = nx.DiGraph()
        dep_tree.add_nodes_from(tokens)
        # some files are formatted <basic-dependencies>...</basic-dependencies>,
        # others <dependencies type="basic-dependencies">...</dependencies>;
        # if the first lookup fails (or finds an empty element) try the other.
        # If that fails too something is wrong - perhaps the corpus has not
        # been parsed? The explicit `is None or len(...) == 0` check replaces
        # the deprecated implicit truth test on Element objects while
        # preserving its semantics.
        dependencies = tree.find('.//{}-dependencies'.format(self.dependency_format))
        if dependencies is None or len(dependencies) == 0:
            dependencies = tree.find(".//dependencies[@type='{}-dependencies']".format(self.dependency_format))
        if dependencies is not None and len(dependencies) > 0:
            for dep in dependencies.findall('.//dep'):
                dep_type = dep.get('type')  # renamed from `type` (shadowed the builtin)
                head_idx = int(dep.find('governor').get('idx'))
                dependent_idx = int(dep.find('dependent').get('idx'))
                # skip edges touching tokens that were filtered out above
                if dependent_idx in tokens_ids and head_idx in tokens_ids:
                    dep_tree.add_edge(token_index[head_idx],
                                      token_index[dependent_idx], type=dep_type)
        return dep_tree

    def __str__(self):
        # NOTE(review): `important_params` is not defined in this file nor in
        # BaseTokeniser as shown here — confirm it is provided elsewhere.
        return 'XmlTokenizer:{}'.format(self.important_params)
class ConllTokenizer(XmlTokenizer):
    """
    Tokeniser for CoNLL-style tab-separated dependency-parsed text: one
    token per line, sentences separated by blank lines.
    """

    def tokenize_doc(self, doc_content, **kwargs):
        """
        Split ``doc_content`` into blank-line-separated sentences and parse
        each one into a dependency graph.

        Fixes two defects of the previous version: a document that does not
        end with a blank line no longer loses its final sentence, and
        consecutive blank lines no longer produce spurious empty graphs.
        """
        sentences, this_sent = [], []
        for line in doc_content.split('\n'):
            stripped = line.strip()
            if stripped:
                this_sent.append(stripped)
            elif this_sent:
                sentences.append(self._process_sentence(this_sent))
                this_sent = []
        if this_sent:
            # flush the last sentence when there is no trailing blank line
            sentences.append(self._process_sentence(this_sent))
        return sentences

    def _process_sentence(self, lines):
        """
        Build a dependency tree (networkx.DiGraph) for a sentence from its
        CoNLL lines (tab-separated fields:
        idx, word, lemma, pos, ner, head_idx, dep_type).
        """
        tokens = []
        dependencies = {}  # (dependent_idx, head_idx) --> dependency type
        for line in lines:
            idx, txt, lemma, pos, ner, head_idx, dep_type = line.split('\t')
            if self.lemmatize:
                txt = lemma
            # check if the token is a number/stopword before things have been done to it
            am_i_a_number = is_number(txt)
            if self.remove_stopwords and txt.lower() in ENGLISH_STOP_WORDS:
                continue
            if self.remove_short_words and len(txt) <= 3:
                continue
            if self.remove_long_words and len(txt) >= 25:
                continue
            pos = pos.upper() if self.use_pos else ''
            if self.coarse_pos:
                pos = pos_coarsification_map[pos.upper()]
            if pos == 'PUNCT' or am_i_a_number:
                continue
            iob_tag = ner.upper()
            if '/' in txt or '_' in txt:
                # these chars are used as separators later, drop the token
                # now to avoid problems down the line
                logging.debug('Funny token found: %s, pos is %s', txt, pos)
                continue
            if self.lowercase:
                txt = txt.lower()
            if self.normalise_entities:
                if iob_tag != 'O':
                    txt = '__NER-%s__' % iob_tag
                    pos = ''  # normalised named entities don't need a PoS tag
            dependencies[(int(idx), int(head_idx))] = dep_type
            tokens.append(Token(txt, pos, int(idx), ner=iob_tag))
        token_index = {t.index: t for t in tokens}
        # build a graph from the dependency information available in the input
        tokens_ids = set(token_index)
        dep_tree = nx.DiGraph()
        dep_tree.add_nodes_from(tokens)
        for (dep_idx, head_idx), rel in dependencies.items():
            # skip edges touching tokens that were filtered out above
            if head_idx in tokens_ids and dep_idx in tokens_ids:
                dep_tree.add_edge(token_index[head_idx], token_index[dep_idx],
                                  type=rel)
        return dep_tree
# TODO: the JSON tokenizers probably ignore the short/stopword parameters
class GzippedJsonTokenizer(BaseTokeniser):
    """Reads a gzipped file of JSON lines, each of the form [label, document]."""

    def tokenize_corpus(self, tar_file, *args, **kwargs):
        """Return ``(docs, labels)`` parsed from ``tar_file``."""
        logging.info('Compressed JSON tokenizer running for %s', tar_file)
        labels, docs = [], []
        with gzip.open(tar_file, 'rb') as infile:
            for raw_line in infile:
                record = json.loads(raw_line.decode('UTF8'))
                labels.append(record[0])
                docs.append(record[1])
        return docs, labels
| |
import math
from datetime import datetime
from rx.observable import Observable
from rx.testing import TestScheduler, ReactiveTest
from rx.disposables import SerialDisposable
# Short module-level aliases for the ReactiveTest record factories and
# timing constants used throughout these tests.
on_next = ReactiveTest.on_next
on_completed = ReactiveTest.on_completed
on_error = ReactiveTest.on_error
subscribe = ReactiveTest.subscribe
subscribed = ReactiveTest.subscribed
disposed = ReactiveTest.disposed
created = ReactiveTest.created
class RxException(Exception):
    """Marker exception raised on purpose by these tests so that expected
    failures can be told apart from genuine ones."""
def _raise(ex):
    """Raise ``ex`` wrapped in :class:`RxException`.

    Helper for raising from within lambda expressions, where a ``raise``
    statement is not allowed.
    """
    raise RxException(ex)
def test_select_throws():
    # Each case pipes a different source observable through select() and
    # checks that an exception raised inside a subscriber callback
    # propagates out of subscribe() as RxException.
    try:
        Observable.return_value(1) \
            .select(lambda x, y: x) \
            .subscribe(lambda x: _raise("ex"))
    except RxException:
        pass
    try:
        Observable.throw_exception('ex') \
            .select(lambda x, y: x) \
            .subscribe(on_error=lambda ex: _raise(ex))
    except RxException:
        pass
    try:
        Observable.empty() \
            .select(lambda x, y: x) \
            .subscribe(lambda x: x, lambda ex: ex, lambda: _raise('ex'))
    except RxException:
        pass
    # NOTE: this local function shadows the module-level `subscribe` alias
    # within this test only; it raises as soon as a subscription happens.
    def subscribe(observer):
        _raise('ex')
    try:
        Observable.create(subscribe) \
            .select(lambda x: x).dump() \
            .subscribe()
    except RxException:
        pass
def test_select_disposeinsideselector():
    # Disposing the subscription from inside the selector must stop further
    # propagation: only the messages before the dispose (clock <= 400) are
    # delivered, but the selector itself is still invoked a third time.
    scheduler = TestScheduler()
    xs = scheduler.create_hot_observable(on_next(100, 1), on_next(200, 2), on_next(500, 3), on_next(600, 4))
    results = scheduler.create_observer()
    d = SerialDisposable()
    invoked = 0
    def projection(x, *args, **kw):
        nonlocal invoked
        invoked += 1
        if scheduler.clock > 400:
            #print("*** Dispose ****")
            d.dispose()
        return x
    d.disposable = xs.select(projection).subscribe(results)
    def action(scheduler, state):
        return d.dispose()
    # schedule a final dispose at the standard disposal time as a safety net
    scheduler.schedule_absolute(ReactiveTest.disposed, action)
    scheduler.start()
    results.messages.assert_equal(on_next(100, 1), on_next(200, 2))
    xs.subscriptions.assert_equal(ReactiveTest.subscribe(0, 500))
    assert invoked == 3
def test_select_completed():
    """select() maps every element and forwards on_completed; messages that
    arrive after completion are ignored."""
    scheduler = TestScheduler()
    source = scheduler.create_hot_observable(
        on_next(180, 1), on_next(210, 2), on_next(240, 3), on_next(290, 4),
        on_next(350, 5), on_completed(400), on_next(410, -1),
        on_completed(420), on_error(430, 'ex'))
    calls = 0

    def add_one(x):
        nonlocal calls
        calls += 1
        return x + 1

    results = scheduler.start(lambda: source.select(add_one))
    results.messages.assert_equal(
        on_next(210, 3), on_next(240, 4), on_next(290, 5), on_next(350, 6),
        on_completed(400))
    source.subscriptions.assert_equal(ReactiveTest.subscribe(200, 400))
    assert calls == 4
def test_select_completed_two():
    """Repeat the completed-sequence scenario 100 times to shake out any
    nondeterminism in subscription and dispatch ordering."""
    for _ in range(100):
        scheduler = TestScheduler()
        calls = 0
        source = scheduler.create_hot_observable(
            on_next(180, 1), on_next(210, 2), on_next(240, 3),
            on_next(290, 4), on_next(350, 5), on_completed(400),
            on_next(410, -1), on_completed(420), on_error(430, 'ex'))

        def add_one(x):
            nonlocal calls
            calls += 1
            return x + 1

        results = scheduler.start(lambda: source.select(add_one))
        results.messages.assert_equal(
            on_next(210, 3), on_next(240, 4), on_next(290, 5),
            on_next(350, 6), on_completed(400))
        source.subscriptions.assert_equal(subscribe(200, 400))
        assert calls == 4
def test_select_not_completed():
    """Without a terminal message the subscription stays open until the
    scheduler's default disposal time (1000)."""
    scheduler = TestScheduler()
    calls = 0
    source = scheduler.create_hot_observable(
        on_next(180, 1), on_next(210, 2), on_next(240, 3), on_next(290, 4),
        on_next(350, 5))

    def add_one(x):
        nonlocal calls
        calls += 1
        return x + 1

    results = scheduler.start(lambda: source.select(add_one))
    results.messages.assert_equal(
        on_next(210, 3), on_next(240, 4), on_next(290, 5), on_next(350, 6))
    source.subscriptions.assert_equal(subscribe(200, 1000))
    assert calls == 4
def test_select_error():
    """An on_error from the source passes through select() untouched and
    terminates the subscription; later messages are ignored."""
    scheduler = TestScheduler()
    ex = 'ex'
    calls = 0
    source = scheduler.create_hot_observable(
        on_next(180, 1), on_next(210, 2), on_next(240, 3), on_next(290, 4),
        on_next(350, 5), on_error(400, ex), on_next(410, -1),
        on_completed(420), on_error(430, 'ex'))

    def add_one(x):
        nonlocal calls
        calls += 1
        return x + 1

    results = scheduler.start(lambda: source.select(add_one))
    results.messages.assert_equal(
        on_next(210, 3), on_next(240, 4), on_next(290, 5), on_next(350, 6),
        on_error(400, ex))
    source.subscriptions.assert_equal(subscribe(200, 400))
    assert calls == 4
def test_select_selector_throws():
    """An exception thrown by the selector is surfaced as on_error and the
    source subscription is disposed at that moment."""
    scheduler = TestScheduler()
    calls = 0
    ex = 'ex'
    source = scheduler.create_hot_observable(
        on_next(180, 1), on_next(210, 2), on_next(240, 3), on_next(290, 4),
        on_next(350, 5), on_completed(400), on_next(410, -1),
        on_completed(420), on_error(430, 'ex'))

    def add_one_fail_on_third(x):
        nonlocal calls
        calls += 1
        if calls == 3:
            raise Exception(ex)
        return x + 1

    results = scheduler.start(lambda: source.select(add_one_fail_on_third))
    results.messages.assert_equal(
        on_next(210, 3), on_next(240, 4), on_error(290, ex))
    source.subscriptions.assert_equal(subscribe(200, 290))
    assert calls == 3
def test_select_with_index_throws():
    # Mirrors test_select_throws for the indexed select() overload: an
    # exception raised inside a subscriber callback must propagate out of
    # subscribe() as RxException.
    try:
        return Observable.return_value(1) \
            .select(lambda x, index: x) \
            .subscribe(lambda x: _raise('ex'))
    except RxException:
        pass
    try:
        return Observable.throw_exception('ex') \
            .select(lambda x, index: x) \
            .subscribe(lambda x: x, lambda ex: _raise(ex))
    except RxException:
        pass
    try:
        return Observable.empty() \
            .select(lambda x, index: x) \
            .subscribe(lambda x: x, lambda ex: ex, lambda: _raise('ex'))
    except RxException:
        pass
    # BUG FIX: the on_error handler above was `lambda ex: _`, which raised
    # NameError on the undefined name `_`; it now echoes the error like the
    # parallel case in test_select_throws.
    try:
        return Observable.create(lambda o: _raise('ex')) \
            .select(lambda x, index: x) \
            .subscribe()
    except RxException:
        pass
def test_select_with_index_dispose_inside_selector():
    # Same as test_select_disposeinsideselector but for the indexed
    # overload: disposing inside the selector stops propagation after
    # clock 400, yet the selector is still invoked a third time.
    scheduler = TestScheduler()
    xs = scheduler.create_hot_observable(on_next(100, 4), on_next(200, 3), on_next(500, 2), on_next(600, 1))
    invoked = 0
    results = scheduler.create_observer()
    d = SerialDisposable()
    def projection(x, index):
        nonlocal invoked
        invoked += 1
        if scheduler.clock > 400:
            d.dispose()
        return x + index * 10
    d.disposable = xs.select(projection).subscribe(results)
    def action(scheduler, state):
        return d.dispose()
    # safety-net dispose at the standard disposal time
    scheduler.schedule_absolute(disposed, action)
    scheduler.start()
    results.messages.assert_equal(on_next(100, 4), on_next(200, 13))
    xs.subscriptions.assert_equal(subscribe(0, 500))
    assert invoked == 3
def test_select_with_index_completed():
    """The indexed selector receives (value, index) pairs; mapped values
    are forwarded until on_completed and later messages are ignored."""
    scheduler = TestScheduler()
    calls = 0
    source = scheduler.create_hot_observable(
        on_next(180, 5), on_next(210, 4), on_next(240, 3), on_next(290, 2),
        on_next(350, 1), on_completed(400), on_next(410, -1),
        on_completed(420), on_error(430, 'ex'))

    def combine(x, index):
        nonlocal calls
        calls += 1
        return (x + 1) + (index * 10)

    results = scheduler.start(lambda: source.select(combine))
    results.messages.assert_equal(
        on_next(210, 5), on_next(240, 14), on_next(290, 23),
        on_next(350, 32), on_completed(400))
    source.subscriptions.assert_equal(subscribe(200, 400))
    assert calls == 4
def test_select_with_index_not_completed():
    """Without a terminal message the indexed select() keeps the
    subscription open until the default disposal time (1000)."""
    scheduler = TestScheduler()
    calls = 0
    source = scheduler.create_hot_observable(
        on_next(180, 5), on_next(210, 4), on_next(240, 3), on_next(290, 2),
        on_next(350, 1))

    def combine(x, index):
        nonlocal calls
        calls += 1
        return (x + 1) + (index * 10)

    results = scheduler.start(lambda: source.select(combine))
    results.messages.assert_equal(
        on_next(210, 5), on_next(240, 14), on_next(290, 23),
        on_next(350, 32))
    source.subscriptions.assert_equal(subscribe(200, 1000))
    assert calls == 4
def test_select_with_index_error():
    """An on_error from the source passes through the indexed select()
    untouched and terminates the subscription."""
    scheduler = TestScheduler()
    ex = 'ex'
    calls = 0
    source = scheduler.create_hot_observable(
        on_next(180, 5), on_next(210, 4), on_next(240, 3), on_next(290, 2),
        on_next(350, 1), on_error(400, ex), on_next(410, -1),
        on_completed(420), on_error(430, 'ex'))

    def combine(x, index):
        nonlocal calls
        calls += 1
        return (x + 1) + (index * 10)

    results = scheduler.start(lambda: source.select(combine))
    results.messages.assert_equal(
        on_next(210, 5), on_next(240, 14), on_next(290, 23),
        on_next(350, 32), on_error(400, ex))
    source.subscriptions.assert_equal(subscribe(200, 400))
    assert calls == 4
def test_select_with_index_selector_throws():
    """An exception thrown by the indexed selector is surfaced as on_error
    and the source subscription is disposed at that moment."""
    scheduler = TestScheduler()
    calls = 0
    ex = 'ex'
    source = scheduler.create_hot_observable(
        on_next(180, 5), on_next(210, 4), on_next(240, 3), on_next(290, 2),
        on_next(350, 1), on_completed(400), on_next(410, -1),
        on_completed(420), on_error(430, 'ex'))

    def combine_fail_on_third(x, index):
        nonlocal calls
        calls += 1
        if calls == 3:
            raise Exception(ex)
        return (x + 1) + (index * 10)

    results = scheduler.start(lambda: source.select(combine_fail_on_third))
    results.messages.assert_equal(
        on_next(210, 5), on_next(240, 14), on_error(290, ex))
    source.subscriptions.assert_equal(subscribe(200, 290))
    assert calls == 3
# Manual entry point: running this module as a script executes only the
# first test; the full suite is meant to be run via a test runner.
if __name__ == '__main__':
    test_select_throws()
| |
"""
The ``clearsky`` module contains several methods
to calculate clear sky GHI, DNI, and DHI.
"""
import os
from collections import OrderedDict
import calendar
import numpy as np
import pandas as pd
from scipy.optimize import minimize_scalar
from scipy.linalg import hankel
import h5py
from pvlib import atmosphere, tools
def ineichen(apparent_zenith, airmass_absolute, linke_turbidity,
             altitude=0, dni_extra=1364., perez_enhancement=False):
    '''
    Determine clear sky GHI, DNI, and DHI from Ineichen/Perez model.
    Implements the Ineichen and Perez clear sky model for global
    horizontal irradiance (GHI), direct normal irradiance (DNI), and
    calculates the clear-sky diffuse horizontal (DHI) component as the
    difference between GHI and DNI*cos(zenith) as presented in [1, 2]. A
    report on clear sky models found the Ineichen/Perez model to have
    excellent performance with a minimal input data set [3].
    Default values for monthly Linke turbidity provided by SoDa [4, 5].
    Parameters
    -----------
    apparent_zenith : numeric
        Refraction corrected solar zenith angle in degrees.
    airmass_absolute : numeric
        Pressure corrected airmass.
    linke_turbidity : numeric
        Linke Turbidity.
    altitude : numeric, default 0
        Altitude above sea level in meters.
    dni_extra : numeric, default 1364
        Extraterrestrial irradiance. The units of ``dni_extra``
        determine the units of the output.
    perez_enhancement : bool, default False
        Controls if the Perez enhancement factor should be applied.
        Setting to True may produce spurious results for times when
        the Sun is near the horizon and the airmass is high.
        See https://github.com/pvlib/pvlib-python/issues/435
    Returns
    -------
    clearsky : DataFrame (if Series input) or OrderedDict of arrays
        DataFrame/OrderedDict contains the columns/keys
        ``'dhi', 'dni', 'ghi'``.
    See also
    --------
    lookup_linke_turbidity
    pvlib.location.Location.get_clearsky
    References
    ----------
    .. [1] P. Ineichen and R. Perez, "A New airmass independent formulation for
       the Linke turbidity coefficient", Solar Energy, vol 73, pp. 151-157,
       2002.
    .. [2] R. Perez et. al., "A New Operational Model for Satellite-Derived
       Irradiances: Description and Validation", Solar Energy, vol 73, pp.
       307-317, 2002.
    .. [3] M. Reno, C. Hansen, and J. Stein, "Global Horizontal Irradiance
       Clear Sky Models: Implementation and Analysis", Sandia National
       Laboratories, SAND2012-2389, 2012.
    .. [4] http://www.soda-is.com/eng/services/climat_free_eng.php#c5 (obtained
       July 17, 2012).
    .. [5] J. Remund, et. al., "Worldwide Linke Turbidity Information", Proc.
       ISES Solar World Congress, June 2003. Goteborg, Sweden.
    '''
    # ghi is calculated using either the equations in [1] by setting
    # perez_enhancement=False (default behavior) or using the model
    # in [2] by setting perez_enhancement=True.
    # The NaN handling is a little subtle. The AM input is likely to
    # have NaNs that we'll want to map to 0s in the output. However, we
    # want NaNs in other inputs to propagate through to the output. This
    # is accomplished by judicious use and placement of np.maximum,
    # np.minimum, and np.fmax
    # use max so that nighttime values will result in 0s instead of
    # negatives. propagates nans.
    cos_zenith = np.maximum(tools.cosd(apparent_zenith), 0)
    tl = linke_turbidity
    # altitude-dependent correction factors and coefficients from [1]
    fh1 = np.exp(-altitude/8000.)
    fh2 = np.exp(-altitude/1250.)
    cg1 = 5.09e-05 * altitude + 0.868
    cg2 = 3.92e-05 * altitude + 0.0387
    ghi = np.exp(-cg2*airmass_absolute*(fh1 + fh2*(tl - 1)))
    # https://github.com/pvlib/pvlib-python/issues/435
    if perez_enhancement:
        ghi *= np.exp(0.01*airmass_absolute**1.8)
    # use fmax to map airmass nans to 0s. multiply and divide by tl to
    # reinsert tl nans
    ghi = cg1 * dni_extra * cos_zenith * tl / tl * np.fmax(ghi, 0)
    # From [1] (Following [2] leads to 0.664 + 0.16268 / fh1)
    # See https://github.com/pvlib/pvlib-python/pull/808
    b = 0.664 + 0.163/fh1
    # BncI = "normal beam clear sky radiation"
    bnci = b * np.exp(-0.09 * airmass_absolute * (tl - 1))
    bnci = dni_extra * np.fmax(bnci, 0)
    # "empirical correction" SE 73, 157 & SE 73, 312.
    bnci_2 = ((1 - (0.1 - 0.2*np.exp(-tl))/(0.1 + 0.882/fh1)) /
              cos_zenith)
    bnci_2 = ghi * np.fmin(np.fmax(bnci_2, 0), 1e20)
    # DNI is the smaller of the direct estimate and the corrected one
    dni = np.minimum(bnci, bnci_2)
    # DHI is the residual of global after removing the projected beam
    dhi = ghi - dni*cos_zenith
    irrads = OrderedDict()
    irrads['ghi'] = ghi
    irrads['dni'] = dni
    irrads['dhi'] = dhi
    if isinstance(dni, pd.Series):
        irrads = pd.DataFrame.from_dict(irrads)
    return irrads
def lookup_linke_turbidity(time, latitude, longitude, filepath=None,
                           interp_turbidity=True):
    """
    Look up the Linke Turbidity from the ``LinkeTurbidities.h5``
    data file supplied with pvlib.

    Parameters
    ----------
    time : pandas.DatetimeIndex
    latitude : float or int
    longitude : float or int
    filepath : None or string, default None
        The path to the ``.h5`` file.
    interp_turbidity : bool, default True
        If ``True``, interpolates the monthly Linke turbidity values
        found in ``LinkeTurbidities.h5`` to daily values.

    Returns
    -------
    turbidity : Series
    """
    # The .h5 file holds a single 2160 x 4320 x 12 uint8 matrix named
    # 'LinkeTurbidity': rows span latitudes 90..-90, columns longitudes
    # -180..180, depth the months Jan (1) to Dec (12). Grid nodes are
    # 5' (1/12 arcdeg) apart; stored values are 20 * Linke turbidity.
    # See Section 8 of "Aerosol optical depth and Linke turbidity
    # climatology" (meteonorm ieashc36 report) for the grid layout.
    if filepath is None:
        pvlib_path = os.path.dirname(os.path.abspath(__file__))
        filepath = os.path.join(pvlib_path, 'data', 'LinkeTurbidities.h5')
    lat_idx = _degrees_to_index(latitude, coordinate='latitude')
    lon_idx = _degrees_to_index(longitude, coordinate='longitude')
    with h5py.File(filepath, 'r') as lt_h5_file:
        lts = lt_h5_file['LinkeTurbidity'][lat_idx, lon_idx]
    if interp_turbidity:
        linke_turbidity = _interpolate_turbidity(lts, time)
    else:
        # index each timestamp's month directly (0-based)
        linke_turbidity = pd.Series(lts[time.month - 1], index=time)
    # undo the 20x scaling applied in the data file
    return linke_turbidity / 20.
def _is_leap_year(year):
"""Determine if a year is leap year.
Parameters
----------
year : numeric
Returns
-------
isleap : array of bools
"""
isleap = ((np.mod(year, 4) == 0) &
((np.mod(year, 100) != 0) | (np.mod(year, 400) == 0)))
return isleap
def _interpolate_turbidity(lts, time):
    """
    Interpolate monthly Linke turbidity values onto the days in ``time``.

    Parameters
    ----------
    lts : np.array
        Monthly Linke turbidity values.
    time : pd.DatetimeIndex
        Times to be interpolated onto.

    Returns
    -------
    linke_turbidity : pd.Series
        The interpolated turbidity.
    """
    # The 12 monthly values are taken to apply at the middle of each month,
    # so pad with last year's December and next year's January to cover
    # Jan 1-15 and Dec 16-31.
    padded = np.concatenate([[lts[-1]], lts, [lts[0]]])
    # pandas DatetimeIndex exposes is_leap_year; fall back to our own
    # helper for index types that lack it
    try:
        isleap = time.is_leap_year
    except AttributeError:
        isleap = _is_leap_year(time.year)
    doy = time.dayofyear
    # map month-middle positions to day-of-year for both calendar kinds,
    # then pick per-timestamp according to its year's leapness
    interp_leap = np.interp(doy, _calendar_month_middles(2016), padded)
    interp_noleap = np.interp(doy, _calendar_month_middles(2015), padded)
    daily = np.where(isleap, interp_leap, interp_noleap)
    return pd.Series(daily, index=time)
def _calendar_month_middles(year):
"""List of middle day of each month, used by Linke turbidity lookup"""
# remove mdays[0] since January starts at mdays[1]
# make local copy of mdays since we need to change
# February for leap years
mdays = np.array(calendar.mdays[1:])
ydays = 365
# handle leap years
if calendar.isleap(year):
mdays[1] = mdays[1] + 1
ydays = 366
middles = np.concatenate(
[[-calendar.mdays[-1] / 2.0], # Dec last year
np.cumsum(mdays) - np.array(mdays) / 2., # this year
[ydays + calendar.mdays[1] / 2.0]]) # Jan next year
return middles
def _degrees_to_index(degrees, coordinate):
"""Transform input degrees to an output index integer. The Linke
turbidity lookup tables have three dimensions, latitude, longitude, and
month. Specify a degree value and either 'latitude' or 'longitude' to get
the appropriate index number for the first two of these index numbers.
Parameters
----------
degrees : float or int
Degrees of either latitude or longitude.
coordinate : string
Specify whether degrees arg is latitude or longitude. Must be set to
either 'latitude' or 'longitude' or an error will be raised.
Returns
-------
index : np.int16
The latitude or longitude index number to use when looking up values
in the Linke turbidity lookup table.
"""
# Assign inputmin, inputmax, and outputmax based on degree type.
if coordinate == 'latitude':
inputmin = 90
inputmax = -90
outputmax = 2160
elif coordinate == 'longitude':
inputmin = -180
inputmax = 180
outputmax = 4320
else:
raise IndexError("coordinate must be 'latitude' or 'longitude'.")
inputrange = inputmax - inputmin
scale = outputmax/inputrange # number of indices per degree
center = inputmin + 1 / scale / 2 # shift to center of index
outputmax -= 1 # shift index to zero indexing
index = (degrees - center) * scale
err = IndexError('Input, %g, is out of range (%g, %g).' %
(degrees, inputmin, inputmax))
# If the index is still out of bounds after rounding, raise an error.
# 0.500001 is used in comparisons instead of 0.5 to allow for a small
# margin of error which can occur when dealing with floating point numbers.
if index > outputmax:
if index - outputmax <= 0.500001:
index = outputmax
else:
raise err
elif index < 0:
if -index <= 0.500001:
index = 0
else:
raise err
# If the index wasn't set to outputmax or 0, round it and cast it as an
# integer so it can be used in integer-based indexing.
else:
index = int(np.around(index))
return index
def haurwitz(apparent_zenith):
    '''
    Determine clear sky GHI using the Haurwitz model.

    The Haurwitz clear-sky model [1]_ [2]_ requires only the solar zenith
    angle and was found to have the best average monthly error among
    zenith-only models in [3]_.

    Parameters
    ----------
    apparent_zenith : Series
        The apparent (refraction corrected) sun zenith angle
        in degrees.

    Returns
    -------
    ghi : DataFrame
        The modeled global horizonal irradiance in W/m^2 provided
        by the Haurwitz clear-sky model.

    References
    ----------
    .. [1] B. Haurwitz, "Insolation in Relation to Cloudiness and Cloud
       Density," Journal of Meteorology, vol. 2, pp. 154-166, 1945.
    .. [2] B. Haurwitz, "Insolation in Relation to Cloud Type," Journal of
       Meteorology, vol. 3, pp. 123-124, 1946.
    .. [3] M. Reno, C. Hansen, and J. Stein, "Global Horizontal Irradiance
       Clear Sky Models: Implementation and Analysis", Sandia National
       Laboratories, SAND2012-2389, 2012.
    '''
    cos_zen = tools.cosd(apparent_zenith.values)
    ghi = np.zeros_like(apparent_zenith.values)
    # evaluate the model only where the sun is above the horizon; the
    # remaining (nighttime) entries stay 0 and no divide-by-zero occurs
    day = cos_zen > 0
    ghi[day] = 1098.0 * cos_zen[day] * np.exp(-0.059 / cos_zen[day])
    return pd.DataFrame(data=ghi, index=apparent_zenith.index,
                        columns=['ghi'])
def simplified_solis(apparent_elevation, aod700=0.1, precipitable_water=1.,
                     pressure=101325., dni_extra=1364.):
    """
    Calculate the clear sky GHI, DNI, and DHI according to the
    simplified Solis model.

    Reference [1]_ describes the accuracy of the model as being 15, 20,
    and 18 W/m^2 for the beam, global, and diffuse components. Reference
    [2]_ provides comparisons with other clear sky models.

    Parameters
    ----------
    apparent_elevation : numeric
        The apparent elevation of the sun above the horizon (deg).
    aod700 : numeric, default 0.1
        The aerosol optical depth at 700 nm (unitless).
        Algorithm derived for values between 0 and 0.45.
    precipitable_water : numeric, default 1.0
        The precipitable water of the atmosphere (cm).
        Algorithm derived for values between 0.2 and 10 cm.
        Values less than 0.2 will be assumed to be equal to 0.2.
    pressure : numeric, default 101325.0
        The atmospheric pressure (Pascals).
        Algorithm derived for altitudes between sea level and 7000 m,
        or 101325 and 41000 Pascals.
    dni_extra : numeric, default 1364.0
        Extraterrestrial irradiance. The units of ``dni_extra``
        determine the units of the output.

    Returns
    -------
    clearsky : DataFrame (if Series input) or OrderedDict of arrays
        DataFrame/OrderedDict contains the columns/keys
        ``'dhi', 'dni', 'ghi'``.

    References
    ----------
    .. [1] P. Ineichen, "A broadband simplified version of the
       Solis clear sky model," Solar Energy, 82, 758-762 (2008).
    .. [2] P. Ineichen, "Validation of models that estimate the clear
       sky global and beam solar irradiance," Solar Energy, 132,
       332-344 (2016).
    """
    press = pressure
    # the algorithm fails for precipitable water below 0.2 cm
    pw = np.maximum(precipitable_water, 0.2)
    # Clipping sin(elevation) away from zero keeps nighttime values at 0
    # instead of producing nans; works for scalar and Series inputs alike.
    # (The coefficient helpers each recompute powers of aod700 and the
    # logs of p/p0 and w; precalculating those would be a further speedup.)
    sin_elev = np.maximum(1.e-30, np.sin(np.radians(apparent_elevation)))
    i0p = _calc_i0p(dni_extra, pw, aod700, press)
    dni = i0p * np.exp(-_calc_taub(pw, aod700, press) /
                       sin_elev**_calc_b(pw, aod700))
    ghi = i0p * np.exp(-_calc_taug(pw, aod700, press) /
                       sin_elev**_calc_g(pw, aod700)) * sin_elev
    dhi = i0p * np.exp(-_calc_taud(pw, aod700, press) /
                       sin_elev**_calc_d(aod700, press))
    irrads = OrderedDict([('ghi', ghi), ('dni', dni), ('dhi', dhi)])
    if isinstance(dni, pd.Series):
        irrads = pd.DataFrame.from_dict(irrads)
    return irrads
def _calc_i0p(i0, w, aod700, p):
"""Calculate the "enhanced extraterrestrial irradiance"."""
p0 = 101325.
io0 = 1.08 * w**0.0051
i01 = 0.97 * w**0.032
i02 = 0.12 * w**0.56
i0p = i0 * (i02*aod700**2 + i01*aod700 + io0 + 0.071*np.log(p/p0))
return i0p
def _calc_taub(w, aod700, p):
"""Calculate the taub coefficient"""
p0 = 101325.
tb1 = 1.82 + 0.056*np.log(w) + 0.0071*np.log(w)**2
tb0 = 0.33 + 0.045*np.log(w) + 0.0096*np.log(w)**2
tbp = 0.0089*w + 0.13
taub = tb1*aod700 + tb0 + tbp*np.log(p/p0)
return taub
def _calc_b(w, aod700):
"""Calculate the b coefficient."""
b1 = 0.00925*aod700**2 + 0.0148*aod700 - 0.0172
b0 = -0.7565*aod700**2 + 0.5057*aod700 + 0.4557
b = b1 * np.log(w) + b0
return b
def _calc_taug(w, aod700, p):
"""Calculate the taug coefficient"""
p0 = 101325.
tg1 = 1.24 + 0.047*np.log(w) + 0.0061*np.log(w)**2
tg0 = 0.27 + 0.043*np.log(w) + 0.0090*np.log(w)**2
tgp = 0.0079*w + 0.1
taug = tg1*aod700 + tg0 + tgp*np.log(p/p0)
return taug
def _calc_g(w, aod700):
"""Calculate the g coefficient."""
g = -0.0147*np.log(w) - 0.3079*aod700**2 + 0.2846*aod700 + 0.3798
return g
def _calc_taud(w, aod700, p):
    """Calculate the taud coefficient."""
    # isscalar tests needed to ensure that the arrays will have the
    # right shape in the tds calculation.
    # there's probably a better way to do this.
    if np.isscalar(w) and np.isscalar(aod700):
        w = np.array([w])
        aod700 = np.array([aod700])
    elif np.isscalar(w):
        w = np.full_like(aod700, w)
    elif np.isscalar(aod700):
        aod700 = np.full_like(w, aod700)
    # set up nan-tolerant masks
    # `where=~np.isnan(...)` leaves the preset False in place for nan
    # entries instead of evaluating the comparison on them
    aod700_lt_0p05 = np.full_like(aod700, False, dtype='bool')
    np.less(aod700, 0.05, where=~np.isnan(aod700), out=aod700_lt_0p05)
    # one-hot (0/1) selector rows: row 0 active where aod700 < 0.05,
    # row 1 where aod700 >= 0.05
    aod700_mask = np.array([aod700_lt_0p05, ~aod700_lt_0p05], dtype=int)
    # create tuples of coefficients for
    # aod700 < 0.05, aod700 >= 0.05
    td4 = 86*w - 13800, -0.21*w + 11.6
    td3 = -3.11*w + 79.4, 0.27*w - 20.7
    td2 = -0.23*w + 74.8, -0.134*w + 15.5
    td1 = 0.092*w - 8.86, 0.0554*w - 5.71
    td0 = 0.0042*w + 3.12, 0.0057*w + 2.94
    tdp = -0.83*(1+aod700)**(-17.2), -0.71*(1+aod700)**(-15.0)
    # multiplying by the 0/1 mask and summing over axis 1 picks, per
    # element, the coefficient from whichever regime applies
    tds = (np.array([td0, td1, td2, td3, td4, tdp]) * aod700_mask).sum(axis=1)
    p0 = 101325.
    # quartic polynomial in aod700 plus the pressure correction term
    taud = (tds[4]*aod700**4 + tds[3]*aod700**3 + tds[2]*aod700**2 +
            tds[1]*aod700 + tds[0] + tds[5]*np.log(p/p0))
    # be polite about matching the output type to the input type(s)
    if len(taud) == 1:
        taud = taud[0]
    return taud
def _calc_d(aod700, p):
"""Calculate the d coefficient."""
p0 = 101325.
dp = 1/(18 + 152*aod700)
d = -0.337*aod700**2 + 0.63*aod700 + 0.116 + dp*np.log(p/p0)
return d
def _calc_stats(data, samples_per_window, sample_interval, H):
    """ Calculates statistics for each window, used by Reno-style clear
    sky detection functions. Does not return the line length statistic
    which is provided by _calc_windowed_stat and _line_length.

    Calculations are done on a sliding window defined by the Hankel matrix H.
    Columns in H define the indices for each window. Each window contains
    samples_per_window index values. The first window starts with index 0;
    the last window ends at the last index position in data.

    In the calculation of data_slope_nstd, a choice is made here where [1]_ is
    ambiguous. data_slope_nstd is the standard deviation of slopes divided by
    the mean GHI for each interval; see [1]_ Eq. 11. For intervals containing
    e.g. 10 values, there are 9 slope values in the standard deviation, and the
    mean is calculated using all 10 values. Eq. 11 in [1]_ is ambiguous if
    the mean should be calculated using 9 points (left ends of each slope)
    or all 10 points.

    Parameters
    ----------
    data : Series
    samples_per_window : int
        Number of data points in each window
    sample_interval : float
        Time in minutes in each sample interval
    H : 2D ndarray
        Hankel matrix defining the indices for each window.

    Returns
    -------
    data_mean : Series
        mean of data in each window
    data_max : Series
        maximum of data in each window
    data_slope_nstd : Series
        standard deviation of difference between data points in each window
    data_slope : Series
        difference between successive data points

    References
    ----------
    .. [1] Reno, M.J. and C.W. Hansen, "Identification of periods of clear
       sky irradiance in time series of GHI measurements" Renewable Energy,
       v90, p. 520-531, 2016.
    """
    # data.values[H] gathers each window into a column; reduce over axis 0
    data_mean = data.values[H].mean(axis=0)
    data_mean = _to_centered_series(data_mean, data.index, samples_per_window)
    data_max = data.values[H].max(axis=0)
    data_max = _to_centered_series(data_max, data.index, samples_per_window)
    # shift to get forward difference, .diff() is backward difference instead
    data_diff = data.diff().shift(-1)
    data_slope = data_diff / sample_interval
    # fix: a redundant no-op self-assignment of data_slope_nstd that
    # followed this call was removed.
    data_slope_nstd = _slope_nstd_windowed(data_slope.values[:-1], data, H,
                                           samples_per_window, sample_interval)
    return data_mean, data_max, data_slope_nstd, data_slope
def _slope_nstd_windowed(slopes, data, H, samples_per_window, sample_interval):
    """Standard deviation of slopes in each window, normalized by the
    window's mean of ``data``. NaN/inf from zero or missing means are
    tolerated by suppressing the floating point warnings.

    Note: ``sample_interval`` is accepted for interface symmetry with the
    other windowed helpers but is not used here.
    """
    with np.errstate(divide='ignore', invalid='ignore'):
        window_means = data.values[H].mean(axis=0)
        nstd = slopes[H[:-1]].std(ddof=1, axis=0) / window_means
    return _to_centered_series(nstd, data.index, samples_per_window)
def _max_diff_windowed(data, H, samples_per_window):
    """Largest absolute successive difference of ``data`` in each window."""
    abs_diff = np.abs(np.diff(data))
    per_window = abs_diff[H[:-1]].max(axis=0)
    return _to_centered_series(per_window, data.index, samples_per_window)
def _line_length_windowed(data, H, samples_per_window,
                          sample_interval):
    """Line length of the ``data`` curve over each window: the sum of the
    Euclidean lengths of the segments between successive samples."""
    segment_lengths = np.sqrt(np.diff(data)**2. + sample_interval**2.)
    totals = segment_lengths[H[:-1]].sum(axis=0)
    return _to_centered_series(totals, data.index, samples_per_window)
def _to_centered_series(vals, idx, samples_per_window):
vals = np.pad(vals, ((0, len(idx) - len(vals)),), mode='constant',
constant_values=np.nan)
shift = samples_per_window // 2 # align = 'center' only
return pd.Series(index=idx, data=vals).shift(shift)
def _get_sample_intervals(times, win_length):
""" Calculates time interval and samples per window for Reno-style clear
sky detection functions
"""
deltas = np.diff(times.values) / np.timedelta64(1, '60s')
# determine if we can proceed
if times.inferred_freq and len(np.unique(deltas)) == 1:
sample_interval = times[1] - times[0]
sample_interval = sample_interval.seconds / 60 # in minutes
samples_per_window = int(win_length / sample_interval)
return sample_interval, samples_per_window
else:
raise NotImplementedError('algorithm does not yet support unequal '
'times. consider resampling your data.')
def _clear_sample_index(clear_windows, samples_per_window, align, H):
"""
Returns indices of clear samples in clear windows
"""
# H contains indices for each window, e.g. indices for the first window
# are in first column of H.
# clear_windows contains one boolean for each window and is aligned
# by 'align', default to center
# shift clear_windows.index to be aligned left (e.g. first value in the
# left-most position) to line up with the first column of H.
# commented if/else block for future align='left', 'right' capability
# if align == 'right':
# shift = 1 - samples_per_window
# elif align == 'center':
# shift = - (samples_per_window // 2)
# else:
# shift = 0
shift = -(samples_per_window // 2)
idx = clear_windows.shift(shift)
# drop rows at the end corresponding to windows past the end of data
idx = idx.drop(clear_windows.index[1 - samples_per_window:])
idx = idx.astype(bool) # shift changed type to object
clear_samples = np.unique(H[:, idx])
return clear_samples
def detect_clearsky(measured, clearsky, times=None, window_length=10,
                    mean_diff=75, max_diff=75,
                    lower_line_length=-5, upper_line_length=10,
                    var_diff=0.005, slope_dev=8, max_iterations=20,
                    return_components=False):
    """
    Detects clear sky times according to the algorithm developed by Reno
    and Hansen for GHI measurements. The algorithm [1]_ was designed and
    validated for analyzing GHI time series only. Users may attempt to
    apply it to other types of time series data using different filter
    settings, but should be skeptical of the results.
    The algorithm detects clear sky times by comparing statistics for a
    measured time series and an expected clearsky time series.
    Statistics are calculated using a sliding time window (e.g., 10
    minutes). An iterative algorithm identifies clear periods, uses the
    identified periods to estimate bias in the clearsky data, scales the
    clearsky data and repeats.
    Clear times are identified by meeting 5 criteria. Default values for
    these thresholds are appropriate for 10 minute windows of 1 minute
    GHI data.
    Parameters
    ----------
    measured : array or Series
        Time series of measured GHI. [W/m2]
    clearsky : array or Series
        Time series of the expected clearsky GHI. [W/m2]
    times : DatetimeIndex or None, default None.
        Times of measured and clearsky values. If None the index of measured
        will be used.
    window_length : int, default 10
        Length of sliding time window in minutes. Must be greater than 2
        periods.
    mean_diff : float, default 75
        Threshold value for agreement between mean values of measured
        and clearsky in each interval, see Eq. 6 in [1]. [W/m2]
    max_diff : float, default 75
        Threshold value for agreement between maxima of measured and
        clearsky values in each interval, see Eq. 7 in [1]. [W/m2]
    lower_line_length : float, default -5
        Lower limit of line length criterion from Eq. 8 in [1].
        Criterion satisfied when lower_line_length < line length difference
        < upper_line_length.
    upper_line_length : float, default 10
        Upper limit of line length criterion from Eq. 8 in [1].
    var_diff : float, default 0.005
        Threshold value in Hz for the agreement between normalized
        standard deviations of rate of change in irradiance, see Eqs. 9
        through 11 in [1].
    slope_dev : float, default 8
        Threshold value for agreement between the largest magnitude of
        change in successive values, see Eqs. 12 through 14 in [1].
    max_iterations : int, default 20
        Maximum number of times to apply a different scaling factor to
        the clearsky and redetermine clear_samples. Must be 1 or larger.
    return_components : bool, default False
        Controls if additional output should be returned. See below.
    Returns
    -------
    clear_samples : array or Series
        Boolean array or Series of whether or not the given time is
        clear. Return type is the same as the input type.
    components : OrderedDict, optional
        Dict of arrays of whether or not the given time window is clear
        for each condition. Only provided if return_components is True.
    alpha : scalar, optional
        Scaling factor applied to the clearsky_ghi to obtain the
        detected clear_samples. Only provided if return_components is
        True.
    Raises
    ------
    ValueError
        If measured is not a Series and times is not provided
    NotImplementedError
        If timestamps are not equally spaced
    References
    ----------
    .. [1] Reno, M.J. and C.W. Hansen, "Identification of periods of clear
       sky irradiance in time series of GHI measurements" Renewable Energy,
       v90, p. 520-531, 2016.
    Notes
    -----
    Initial implementation in MATLAB by Matthew Reno. Modifications for
    computational efficiency by Joshua Patrick and Curtis Martin. Ported
    to Python by Will Holmgren, Tony Lorenzo, and Cliff Hansen.
    Differences from MATLAB version:
    * no support for unequal times
    * automatically determines sample_interval
    * requires a reference clear sky series instead of calculating one
      from a user supplied location and UTCoffset
    * parameters are controllable via keyword arguments
    * option to return individual test components and clearsky scaling
      parameter
    * uses centered windows (Matlab function uses left-aligned windows)
    """
    if times is None:
        try:
            times = measured.index
        except AttributeError:
            raise ValueError("times is required when measured is not a Series")
    # be polite about returning the same type as was input
    ispandas = isinstance(measured, pd.Series)
    # for internal use, need a Series
    if not ispandas:
        meas = pd.Series(measured, index=times)
    else:
        meas = measured
    if not isinstance(clearsky, pd.Series):
        clear = pd.Series(clearsky, index=times)
    else:
        clear = clearsky
    sample_interval, samples_per_window = _get_sample_intervals(times,
                                                                window_length)
    # generate matrix of integers for creating windows with indexing:
    # column k of H holds the sample indices of window k
    H = hankel(np.arange(samples_per_window),
               np.arange(samples_per_window-1, len(times)))
    # calculate measurement statistics
    meas_mean, meas_max, meas_slope_nstd, meas_slope = _calc_stats(
        meas, samples_per_window, sample_interval, H)
    meas_line_length = _line_length_windowed(
        meas, H, samples_per_window, sample_interval)
    # calculate clear sky statistics
    clear_mean, clear_max, _, clear_slope = _calc_stats(
        clear, samples_per_window, sample_interval, H)
    # find a scaling factor for the clear sky time series that minimizes the
    # RMSE between the clear times identified in the measured data and the
    # scaled clear sky time series. Optimization to determine the scaling
    # factor considers all identified clear times, which is different from [1]
    # where the scaling factor was determined from clear times on days with
    # at least 50% of the day being identified as clear.
    alpha = 1
    for iteration in range(max_iterations):
        scaled_clear = alpha * clear
        clear_line_length = _line_length_windowed(
            scaled_clear, H, samples_per_window, sample_interval)
        line_diff = meas_line_length - clear_line_length
        slope_max_diff = _max_diff_windowed(
            meas - scaled_clear, H, samples_per_window)
        # evaluate comparison criteria
        c1 = np.abs(meas_mean - alpha*clear_mean) < mean_diff
        c2 = np.abs(meas_max - alpha*clear_max) < max_diff
        c3 = (line_diff > lower_line_length) & (line_diff < upper_line_length)
        c4 = meas_slope_nstd < var_diff
        c5 = slope_max_diff < slope_dev
        # c6 guards against division by a zero or NaN clear sky mean
        c6 = (clear_mean != 0) & ~np.isnan(clear_mean)
        clear_windows = c1 & c2 & c3 & c4 & c5 & c6
        # create array to return
        clear_samples = np.full_like(meas, False, dtype='bool')
        # find the samples contained in any window classified as clear
        idx = _clear_sample_index(clear_windows, samples_per_window, 'center',
                                  H)
        clear_samples[idx] = True
        # find a new alpha by minimizing the RMSE over the clear samples
        previous_alpha = alpha
        clear_meas = meas[clear_samples]
        clear_clear = clear[clear_samples]
        def rmse(alpha):
            return np.sqrt(np.mean((clear_meas - alpha*clear_clear)**2))
        alpha = minimize_scalar(rmse).x
        # converged when alpha is stable to 4 decimal places
        if round(alpha*10000) == round(previous_alpha*10000):
            break
    else:
        # for-else: only reached when the loop never hit `break`
        import warnings
        warnings.warn('rescaling failed to converge after %s iterations'
                      % max_iterations, RuntimeWarning)
    # be polite about returning the same type as was input
    if ispandas:
        clear_samples = pd.Series(clear_samples, index=times)
    if return_components:
        components = OrderedDict()
        components['mean_diff_flag'] = c1
        components['max_diff_flag'] = c2
        components['line_length_flag'] = c3
        components['slope_nstd_flag'] = c4
        components['slope_max_flag'] = c5
        components['mean_nan_flag'] = c6
        components['windows'] = clear_windows
        components['mean_diff'] = np.abs(meas_mean - alpha * clear_mean)
        components['max_diff'] = np.abs(meas_max - alpha * clear_max)
        components['line_length'] = meas_line_length - clear_line_length
        components['slope_nstd'] = meas_slope_nstd
        components['slope_max'] = slope_max_diff
        return clear_samples, components, alpha
    else:
        return clear_samples
def bird(zenith, airmass_relative, aod380, aod500, precipitable_water,
         ozone=0.3, pressure=101325., dni_extra=1364., asymmetry=0.85,
         albedo=0.2):
    """
    Bird Simple Clear Sky Broadband Solar Radiation Model
    Based on NREL Excel implementation by Daryl R. Myers [1, 2].
    Bird and Hulstrom define the zenith as the "angle between a line to
    the sun and the local zenith". There is no distinction in the paper
    between solar zenith and apparent (or refracted) zenith, but the
    relative airmass is defined using the Kasten 1966 expression, which
    requires apparent zenith. Although the formulation for calculated
    zenith is never explicitly defined in the report, since the purpose
    was to compare existing clear sky models with "rigorous radiative
    transfer models" (RTM) it is possible that apparent zenith was
    obtained as output from the RTM. However, the implementation presented
    in PVLIB is tested against the NREL Excel implementation by Daryl
    Myers which uses an analytical expression for solar zenith instead
    of apparent zenith.
    Parameters
    ----------
    zenith : numeric
        Solar or apparent zenith angle in degrees - see note above
    airmass_relative : numeric
        Relative airmass
    aod380 : numeric
        Aerosol optical depth [cm] measured at 380[nm]
    aod500 : numeric
        Aerosol optical depth [cm] measured at 500[nm]
    precipitable_water : numeric
        Precipitable water [cm]
    ozone : numeric
        Atmospheric ozone [cm], defaults to 0.3[cm]
    pressure : numeric
        Ambient pressure [Pa], defaults to 101325[Pa]
    dni_extra : numeric
        Extraterrestrial radiation [W/m^2], defaults to 1364[W/m^2]
    asymmetry : numeric
        Asymmetry factor, defaults to 0.85
    albedo : numeric
        Albedo, defaults to 0.2
    Returns
    -------
    clearsky : DataFrame (if Series input) or OrderedDict of arrays
        DataFrame/OrderedDict contains the columns/keys
        ``'dhi', 'dni', 'ghi', 'direct_horizontal'`` in [W/m^2].
    See also
    --------
    pvlib.atmosphere.bird_hulstrom80_aod_bb
    pvlib.atmosphere.get_relative_airmass
    References
    ----------
    .. [1] R. E. Bird and R. L Hulstrom, "A Simplified Clear Sky model for
       Direct and Diffuse Insolation on Horizontal Surfaces" SERI Technical
       Report SERI/TR-642-761, Feb 1981. Solar Energy Research Institute,
       Golden, CO.
    .. [2] Daryl R. Myers, "Solar Radiation: Practical Modeling for Renewable
       Energy Applications", pp. 46-51 CRC Press (2013)
    .. [3] `NREL Bird Clear Sky Model <http://rredc.nrel.gov/solar/models/
       clearsky/>`_
    .. [4] `SERI/TR-642-761 <http://rredc.nrel.gov/solar/pubs/pdfs/
       tr-642-761.pdf>`_
    .. [5] `Error Reports <http://rredc.nrel.gov/solar/models/clearsky/
       error_reports.html>`_
    """
    etr = dni_extra  # extraradiation
    ze_rad = np.deg2rad(zenith)  # zenith in radians
    airmass = airmass_relative
    # Bird clear sky model: chain of broadband transmittances
    am_press = atmosphere.get_absolute_airmass(airmass, pressure)
    # Rayleigh scattering transmittance
    t_rayleigh = (
        np.exp(-0.0903 * am_press ** 0.84 * (
            1.0 + am_press - am_press ** 1.01
        ))
    )
    am_o3 = ozone*airmass
    # ozone absorption transmittance
    t_ozone = (
        1.0 - 0.1611 * am_o3 * (1.0 + 139.48 * am_o3) ** -0.3034 -
        0.002715 * am_o3 / (1.0 + 0.044 * am_o3 + 0.0003 * am_o3 ** 2.0)
    )
    # mixed-gases absorption transmittance
    t_gases = np.exp(-0.0127 * am_press ** 0.26)
    am_h2o = airmass * precipitable_water
    # water vapor absorption transmittance
    t_water = (
        1.0 - 2.4959 * am_h2o / (
            (1.0 + 79.034 * am_h2o) ** 0.6828 + 6.385 * am_h2o
        )
    )
    # broadband aerosol optical depth from the two spectral AODs
    bird_huldstrom = atmosphere.bird_hulstrom80_aod_bb(aod380, aod500)
    t_aerosol = np.exp(
        -(bird_huldstrom ** 0.873) *
        (1.0 + bird_huldstrom - bird_huldstrom ** 0.7088) * airmass ** 0.9108
    )
    taa = 1.0 - 0.1 * (1.0 - airmass + airmass ** 1.06) * (1.0 - t_aerosol)
    rs = 0.0685 + (1.0 - asymmetry) * (1.0 - t_aerosol / taa)
    # direct normal irradiance
    id_ = 0.9662 * etr * t_aerosol * t_water * t_gases * t_ozone * t_rayleigh
    # cos(zenith), clamped to 0 for sun at/below the horizon
    ze_cos = np.where(zenith < 90, np.cos(ze_rad), 0.0)
    id_nh = id_ * ze_cos  # direct irradiance on a horizontal surface
    # diffuse (scattered) irradiance on a horizontal surface
    ias = (
        etr * ze_cos * 0.79 * t_ozone * t_gases * t_water * taa *
        (0.5 * (1.0 - t_rayleigh) + asymmetry * (1.0 - (t_aerosol / taa))) / (
            1.0 - airmass + airmass ** 1.02
        )
    )
    # global horizontal, including ground/sky multiple reflections
    gh = (id_nh + ias) / (1.0 - albedo * rs)
    diffuse_horiz = gh - id_nh
    # TODO: be DRY, use decorator to wrap methods that need to return either
    # OrderedDict or DataFrame instead of repeating this boilerplate code
    irrads = OrderedDict()
    irrads['direct_horizontal'] = id_nh
    irrads['ghi'] = gh
    irrads['dni'] = id_
    irrads['dhi'] = diffuse_horiz
    if isinstance(irrads['dni'], pd.Series):
        irrads = pd.DataFrame.from_dict(irrads)
    return irrads
| |
# Copyright ClusterHQ Inc. See LICENSE file for details.
"""
Tests for the leases API.
"""
from uuid import UUID, uuid4
from twisted.internet import reactor
from twisted.internet.task import deferLater
from twisted.trial.unittest import TestCase
from docker.utils import create_host_config
from ...testtools import random_name, find_free_port
from ..testtools import (
require_cluster, require_moving_backend, create_dataset,
REALISTIC_BLOCKDEVICE_SIZE, get_docker_client,
post_http_server, assert_http_server,
)
from ..scripts import SCRIPTS
class LeaseAPITests(TestCase):
    """
    Tests for the leases API.
    """
    # Generous timeout: these tests drive real containers and dataset
    # operations on a live cluster.
    timeout = 600
    def _assert_lease_behavior(self, cluster, operation,
                               additional_kwargs, state_method):
        """
        Assert that leases prevent datasets from being moved or deleted.
        * Create a dataset on node1.
        * Acquire a lease for dataset on node1.
        * Start a container (directly using docker-py) bind mounted to dataset
          mount point on node1 and verify that data can be written.
        * Stop the container.
        * Request a move or delete operation.
        * Wait for a short time; enough time for an unexpected unmount to take
          place.
        * Restart the container and write data to it, to demonstrate that the
          dataset is still mounted and writable.
        * Stop the container again.
        * Release the lease, allowing the previously requested operation to
          proceed.
        * Wait for the previously requested operation to complete.
        :param Cluster cluster: The cluster on which to operate.
        :param operation: The ``FlockerClient`` method to call before releasing
            the lease.
        :param dict additional_kwargs: Any additional arguments to pass to
            ``operation``.
        :param state_method: A callable which returns a ``Deferred`` that fires
            when the requested operation has been performed.
        :returns: A ``Deferred`` that fires when all the steps have
            completed.
        """
        container_http_port = 8080
        host_http_port = find_free_port()[1]
        dataset_id = uuid4()
        # Single-element lists used as mutable cells; the nested callbacks
        # below insert their results at index 0 for later steps to read.
        datasets = []
        leases = []
        containers = []
        client = get_docker_client(cluster, cluster.nodes[0].public_address)
        creating_dataset = create_dataset(
            self, cluster, maximum_size=REALISTIC_BLOCKDEVICE_SIZE,
            dataset_id=dataset_id
        )
        def get_dataset_state(configured_dataset):
            """
            XXX: This shouldn't really be needed because ``create_dataset``
            returns ``wait_for_dataset`` which returns the dataset state, but
            unfortunately ``wait_for_dataset`` wipes out the dataset path for
            comparison purposes.
            """
            d = cluster.client.list_datasets_state()
            d.addCallback(
                lambda dataset_states: [
                    dataset_state
                    for dataset_state in dataset_states
                    if dataset_state.dataset_id == dataset_id
                ][0]
            )
            d.addCallback(
                lambda dataset: datasets.insert(0, dataset)
            )
            return d
        getting_dataset_state = creating_dataset.addCallback(
            get_dataset_state
        )
        def acquire_lease(ignored):
            # Call the API to acquire a lease with the dataset ID.
            d = cluster.client.acquire_lease(
                dataset_id, UUID(cluster.nodes[0].uuid),
                # Lease will never expire
                expires=None
            )
            d.addCallback(lambda lease: leases.insert(0, lease))
            return d
        acquiring_lease = getting_dataset_state.addCallback(acquire_lease)
        def start_http_container(ignored):
            """
            Create and start a data HTTP container and clean it up when
            the test finishes.
            """
            [dataset] = datasets
            script = SCRIPTS.child("datahttp.py")
            script_arguments = [u"/data"]
            docker_arguments = {
                "host_config": create_host_config(
                    binds=["{}:/data".format(dataset.path.path)],
                    port_bindings={container_http_port: host_http_port}),
                "ports": [container_http_port],
                "volumes": [u"/data"]}
            container = client.create_container(
                "python:2.7-slim",
                ["python", "-c", script.getContent()] + list(script_arguments),
                **docker_arguments)
            container_id = container["Id"]
            containers.insert(0, container_id)
            client.start(container=container_id)
            self.addCleanup(client.remove_container, container_id, force=True)
        starting_http_container = acquiring_lease.addCallback(
            start_http_container
        )
        def write_data(ignored):
            """
            Make a POST request to the container, writing some data to the
            volume.
            """
            data = random_name(self).encode("utf-8")
            d = post_http_server(
                self, cluster.nodes[0].public_address, host_http_port,
                {"data": data}
            )
            d.addCallback(
                lambda _: assert_http_server(
                    self, cluster.nodes[0].public_address,
                    host_http_port, expected_response=data
                )
            )
            return d
        writing_data = starting_http_container.addCallback(write_data)
        def stop_container(ignored):
            """
            This ensures Docker hasn't got a lock on the volume that might
            prevent it being moved separate to the lock held by the lease.
            """
            [container_id] = containers
            client.stop(container_id)
        stopping_container = writing_data.addCallback(stop_container)
        def perform_operation(ignored):
            # Request the move/delete; the held lease should prevent it
            # from actually being enacted until released below.
            return operation(
                dataset_id=dataset_id, **additional_kwargs
            )
        performing_operation = stopping_container.addCallback(
            perform_operation
        )
        def wait_for_unexpected_umount(ignored):
            """
            If a bug or error in the dataset agent is causing it to not
            respect leases, then we expect that 10 seconds is long enough
            for it to begin performing the requested (move or delete a
            dataset) operation.
            The first step of such an operation will always be to unmount
            the filesystem, which should happen quickly since there are no
            open files and no Docker bind mounts to the filesystem.
            Therefore if after 10 seconds, we can restart the container and
            successfully write some data to it, we can conclude that the
            dataset agent has respected the lease and not attempted to
            unmount.
            """
            return deferLater(reactor, 10, lambda: None)
        waiting = performing_operation.addCallback(wait_for_unexpected_umount)
        def restart_container(ignored):
            [container_id] = containers
            client.start(container=container_id)
        restarting_container = waiting.addCallback(
            restart_container
        )
        # Re-run write/stop against the restarted container to prove the
        # dataset is still mounted and writable while the lease is held;
        # the names are deliberately rebound to extend the callback chain.
        writing_data = restarting_container.addCallback(write_data)
        stopping_container = writing_data.addCallback(stop_container)
        def release_lease(ignored):
            # Releasing the lease allows the pending operation to proceed.
            return cluster.client.release_lease(dataset_id)
        releasing_lease = stopping_container.addCallback(release_lease)
        def wait_for_operation(ignored):
            """
            Now we've released the lease and stopped the running container, our
            earlier move / delete request should be enacted.
            """
            [dataset] = datasets
            return state_method(dataset)
        waiting_for_operation = releasing_lease.addCallback(wait_for_operation)
        return waiting_for_operation
    @require_moving_backend
    @require_cluster(2)
    def test_lease_prevents_move(self, cluster):
        """
        A dataset cannot be moved if a lease is held on it by a particular
        node.
        """
        return self._assert_lease_behavior(
            cluster=cluster,
            operation=cluster.client.move_dataset,
            additional_kwargs={'primary': cluster.nodes[1].uuid},
            state_method=cluster.wait_for_dataset,
        )
    @require_moving_backend
    @require_cluster(2)
    def test_lease_prevents_delete(self, cluster):
        """
        A dataset cannot be deleted if a lease is held on it by a particular
        node.
        """
        return self._assert_lease_behavior(
            cluster=cluster,
            operation=cluster.client.delete_dataset,
            additional_kwargs={},
            state_method=cluster.wait_for_deleted_dataset,
        )
| |
# Copyright (c) 2011, SD Elements. See LICENSE.txt for details.
import datetime
import json
import time # We monkeypatch this.
from django.contrib.auth.models import User
from django.core.cache import cache
from django.core.exceptions import ImproperlyConfigured, MiddlewareNotUsed
from django.core.urlresolvers import reverse
from django.forms import ValidationError
from django.http import HttpResponseForbidden, HttpRequest, HttpResponse
from django.test import TestCase
from django.test.utils import override_settings
from django.utils import timezone
from security.auth import min_length
from security.auth_throttling import (
attempt_count, default_delay_function, delay_message, increment_counters,
reset_counters, Middleware as AuthThrottlingMiddleware
)
from security.middleware import (
BaseMiddleware, ContentSecurityPolicyMiddleware, DoNotTrackMiddleware,
SessionExpiryPolicyMiddleware, MandatoryPasswordChangeMiddleware,
XssProtectMiddleware, XFrameOptionsMiddleware,
)
from security.models import PasswordExpiry
from security.password_expiry import never_expire_password
from security.views import require_ajax, csp_report
from django.conf import settings
def login_user(func):
    """
    A decorator that will create a valid user in the database and
    then log that user in. We expect self to be a DjangoTestCase,
    or some object with a similar interface.

    The created user is a superuser with a never-expiring password; it
    is logged out and deleted again after the wrapped test runs, even
    when the wrapped test raises.
    """
    from functools import wraps

    @wraps(func)  # preserve the test method's name for test reporting
    def wrapper(self, *args, **kwargs):
        username_local = 'a2fcf54f63993b7'
        password_local = 'd8327deb882cf90'
        email_local = 'testuser@example.com'
        user = User.objects.create_user(
            username=username_local,
            email=email_local,
            password=password_local,
        )
        user.is_superuser = True
        user.save()
        # avoid mandatory-password-change redirects during the test
        PasswordExpiry.objects.create(user=user).never_expire()
        self.client.login(username=username_local, password=password_local)
        try:
            func(self, *args, **kwargs)
        finally:
            # fix: originally the logout/delete was skipped when the
            # wrapped test raised, leaking the user into later tests
            self.client.logout()
            user.delete()
    return wrapper
class CustomLoginURLMiddleware(object):
    """Used to test the custom url support in the login required
    middleware: tags every incoming request with a custom login URL.
    """
    def process_request(self, request):
        # the login-required middleware reads this attribute when present
        request.login_url = '/custom-login/'
class BaseMiddlewareTestMiddleware(BaseMiddleware):
    """Test double that records every setting BaseMiddleware hands it,
    then exposes the recorded settings on the response for assertions."""
    REQUIRED_SETTINGS = ('R1', 'R2')
    OPTIONAL_SETTINGS = ('O1', 'O2')

    def load_setting(self, setting, value):
        # lazily create the recording dict on first use
        store = getattr(self, 'loaded_settings', None)
        if store is None:
            store = self.loaded_settings = {}
        store[setting] = value

    def process_response(self, request, response):
        # surface the recorded settings so tests can inspect them
        response.loaded_settings = self.loaded_settings
        return response

    def process_exception(self, request, exception):
        # treat exceptions like a normal (empty) response
        return self.process_response(request, HttpResponse())
class BaseMiddlewareTests(TestCase):
    """Exercise BaseMiddleware's required/optional settings machinery via
    BaseMiddlewareTestMiddleware."""

    def __init__(self, *args, **kwargs):
        super(BaseMiddlewareTests, self).__init__(*args, **kwargs)
        self.MIDDLEWARE_NAME = (
            BaseMiddlewareTests.__module__ + '.BaseMiddlewareTestMiddleware'
        )

    def test_settings_initially_loaded(self):
        # required and optional settings are all picked up at startup
        expected_settings = {'R1': 1, 'R2': 2, 'O1': 3, 'O2': 4}
        with self.settings(
            MIDDLEWARE_CLASSES=(self.MIDDLEWARE_NAME,), **expected_settings
        ):
            self.assertEqual(
                expected_settings,
                self.client.get('/home/').loaded_settings,
            )

    def test_required_settings(self):
        # missing required settings must raise ImproperlyConfigured
        with self.settings(MIDDLEWARE_CLASSES=(self.MIDDLEWARE_NAME,)):
            self.assertRaises(ImproperlyConfigured, self.client.get, '/home/')

    def test_optional_settings(self):
        # optional settings default to None when not configured
        with self.settings(
            MIDDLEWARE_CLASSES=(self.MIDDLEWARE_NAME,), R1=True, R2=True
        ):
            loaded = self.client.get('/home/').loaded_settings
            self.assertEqual(None, loaded['O1'])
            self.assertEqual(None, loaded['O2'])

    def test_setting_change(self):
        # the middleware observes override_settings changes and reverts
        with self.settings(
            MIDDLEWARE_CLASSES=(self.MIDDLEWARE_NAME,), R1=123, R2=True
        ):
            self.assertEqual(
                123, self.client.get('/home/').loaded_settings['R1'])
            with override_settings(R1=456):
                self.assertEqual(
                    456, self.client.get('/home/').loaded_settings['R1'])
            self.assertEqual(
                123, self.client.get('/home/').loaded_settings['R1'])

    def test_load_setting_abstract_method(self):
        # the base class leaves load_setting to subclasses
        base = BaseMiddleware()
        self.assertRaises(NotImplementedError, base.load_setting, None, None)
class LoginRequiredMiddlewareTests(TestCase):
    """Tests for the login-required middleware's redirect behaviour."""

    def setUp(self):
        self.login_url = reverse("django.contrib.auth.views.login")

    def test_aborts_if_auth_middleware_missing(self):
        # strip the auth middleware and expect a configuration error
        auth_mw = 'django.contrib.auth.middleware.AuthenticationMiddleware'
        remaining = [
            m for m in settings.MIDDLEWARE_CLASSES if m != auth_mw
        ]
        with self.settings(MIDDLEWARE_CLASSES=remaining):
            self.assertRaises(ImproperlyConfigured, self.client.get, '/home/')

    def test_redirects_unauthenticated_request(self):
        self.assertRedirects(
            self.client.get('/home/'),
            self.login_url + "?next=/home/",
        )

    def test_redirects_unauthenticated_ajax_request(self):
        # AJAX callers get a 401 with the login URL in a JSON body
        response = self.client.get(
            '/home/',
            HTTP_X_REQUESTED_WITH='XMLHttpRequest',
        )
        self.assertEqual(response.status_code, 401)
        self.assertEqual(
            json.loads(response.content.decode('utf-8')),
            {"login_url": self.login_url},
        )

    def test_redirects_to_custom_login_url(self):
        # CustomLoginURLMiddleware tags each request with request.login_url
        custom_login_middleware = 'tests.tests.CustomLoginURLMiddleware'
        with self.settings(
            MIDDLEWARE_CLASSES=(
                [custom_login_middleware] + list(settings.MIDDLEWARE_CLASSES)
            ),
        ):
            self.assertRedirects(
                self.client.get('/home/'), '/custom-login/')
            response = self.client.get(
                '/home/',
                HTTP_X_REQUESTED_WITH='XMLHttpRequest',
            )
            self.assertEqual(response.status_code, 401)
            self.assertEqual(
                json.loads(response.content.decode('utf-8')),
                {"login_url": '/custom-login/'},
            )

    def test_logs_out_inactive_users(self):
        user = User.objects.create_user(
            username="foo",
            password="foo",
            email="a@foo.org",
        )
        never_expire_password(user)
        self.client.login(username="foo", password="foo")
        # sanity-check that the login worked
        self.assertEqual(self.client.get('/home/').status_code, 200)
        user.is_active = False
        user.save()
        # a deactivated user is bounced back to the login page
        self.assertRedirects(
            self.client.get('/home/'),
            self.login_url + "?next=/home/",
        )
class RequirePasswordChangeTests(TestCase):
    """Tests for MandatoryPasswordChangeMiddleware's redirect rules."""
    def test_require_password_change(self):
        """
        A brand-new user should have an already-expired password, and therefore
        be redirected to the password change form on any request.
        """
        user = User.objects.create_user(username="foo",
                                        password="foo",
                                        email="foo@foo.com")
        self.client.login(username="foo", password="foo")
        try:
            with self.settings(
                MANDATORY_PASSWORD_CHANGE={"URL_NAME": "change_password"}
            ):
                self.assertRedirects(
                    self.client.get("/home/"),
                    reverse("change_password"),
                )
                # once the password no longer expires, access is restored
                never_expire_password(user)
                self.assertEqual(self.client.get("/home/").status_code, 200)
        finally:
            self.client.logout()
            user.delete()
    def test_superuser_password_change(self):
        """
        A superuser can be forced to change their password via settings.
        """
        user = User.objects.create_superuser(username="foo",
                                             password="foo",
                                             email="foo@foo.com")
        self.client.login(username="foo", password="foo")
        # superusers are exempt by default
        with self.settings(MANDATORY_PASSWORD_CHANGE={
                "URL_NAME": "change_password"}):
            self.assertEqual(self.client.get("/home/").status_code, 200)
        try:
            # ... but INCLUDE_SUPERUSERS subjects them to the redirect too
            with self.settings(MANDATORY_PASSWORD_CHANGE={
                "URL_NAME": "change_password",
                "INCLUDE_SUPERUSERS": True
            }):
                self.assertRedirects(
                    self.client.get("/home/"),
                    reverse("change_password"),
                )
        finally:
            self.client.logout()
            user.delete()
    def test_dont_redirect_exempt_urls(self):
        """
        URLs matched by EXEMPT_URLS patterns or named in EXEMPT_URL_NAMES
        must not be redirected to the password change form.
        """
        user = User.objects.create_user(
            username="foo",
            password="foo",
            email="foo@foo.com"
        )
        self.client.login(username="foo", password="foo")
        try:
            with self.settings(MANDATORY_PASSWORD_CHANGE={
                "URL_NAME": "change_password",
                "EXEMPT_URLS": (r'^test1/$', r'^test2/$'),
                "EXEMPT_URL_NAMES": ("test3", "test4"),
            }):
                # Redirect pages in general
                self.assertRedirects(
                    self.client.get("/home/"),
                    reverse("change_password"),
                )
                # Don't redirect the password change page itself
                self.assertEqual(
                    self.client.get(reverse("change_password")).status_code,
                    200,
                )
                # Don't redirect exempt urls
                self.assertEqual(self.client.get("/test1/").status_code, 200)
                self.assertEqual(self.client.get("/test2/").status_code, 200)
                self.assertEqual(self.client.get("/test3/").status_code, 200)
                self.assertEqual(self.client.get("/test4/").status_code, 200)
        finally:
            self.client.logout()
            user.delete()
    def test_dont_choke_on_exempt_urls_that_dont_resolve(self):
        """
        Unresolvable names in EXEMPT_URL_NAMES are ignored rather than
        raising, and other requests are still redirected.
        """
        user = User.objects.create_user(username="foo",
                                        password="foo",
                                        email="foo@foo.com")
        self.client.login(username="foo", password="foo")
        try:
            with self.settings(MANDATORY_PASSWORD_CHANGE={
                "URL_NAME": "change_password",
                "EXEMPT_URL_NAMES": ("fake1", "fake2"),
            }):
                # Redirect pages in general
                self.assertRedirects(
                    self.client.get("/home/"),
                    reverse("change_password"),
                )
        finally:
            self.client.logout()
            user.delete()
    def test_raises_improperly_configured(self):
        """
        Configuring MANDATORY_PASSWORD_CHANGE without URL_NAME raises
        ImproperlyConfigured.
        """
        change = MandatoryPasswordChangeMiddleware()
        self.assertRaises(
            ImproperlyConfigured,
            change.load_setting,
            'MANDATORY_PASSWORD_CHANGE',
            {'EXEMPT_URLS': []},
        )
class DecoratorTest(TestCase):
    """
    Testing the AJAXView decorator.
    """

    def test_require_ajax(self):
        """
        Non-AJAX requests to a @require_ajax view are rejected with
        HttpResponseForbidden; AJAX requests go through to the view.

        NOTE(review): this method was previously named
        ``require_ajax_test`` and therefore was never collected or run
        by the test runner; the ``test_`` prefix restores it.
        """
        @require_ajax
        def ajax_only_view(request):
            self.assertTrue(request.is_ajax())

        # Plain request: the decorator must short-circuit with 403.
        request = HttpRequest()
        response = ajax_only_view(request)
        self.assertTrue(isinstance(response, HttpResponseForbidden))
        # AJAX request (X-Requested-With header): the view runs.
        request.META['HTTP_X_REQUESTED_WITH'] = 'XMLHttpRequest'
        response = ajax_only_view(request)
        self.assertFalse(isinstance(response, HttpResponseForbidden))
class SessionExpiryTests(TestCase):
    """Tests for SessionExpiryPolicyMiddleware session timestamps."""

    def test_session_variables_are_set(self):
        """
        Verify the session cookie stores the start time and last active time.
        """
        self.client.get('/home/')
        now = timezone.now()
        start_time = self.client.session[
            SessionExpiryPolicyMiddleware.START_TIME_KEY
        ]
        last_activity = self.client.session[
            SessionExpiryPolicyMiddleware.LAST_ACTIVITY_KEY
        ]
        self.assertTrue(now - start_time < datetime.timedelta(seconds=10))
        self.assertTrue(now - last_activity < datetime.timedelta(seconds=10))

    def session_expiry_test(self, key, expired):
        """
        Verify that expired sessions are cleared from the system. (And that we
        redirect to the login page.)
        """
        # Was assertTrue(status_code, 200), which always passed because
        # the second argument to assertTrue is only the failure message.
        self.assertEqual(self.client.get('/home/').status_code, 200)
        session = self.client.session
        session[key] = expired
        session.save()
        response = self.client.get('/home/')
        self.assertRedirects(response,
                             'http://testserver/accounts/login/?next=/home/')

    @login_user
    def test_session_too_old(self):
        """
        Pretend we are 1 second past the session age limit and make sure
        our session is cleared.
        """
        delta = SessionExpiryPolicyMiddleware().SESSION_COOKIE_AGE + 1
        expired = timezone.now() - datetime.timedelta(seconds=delta)
        self.session_expiry_test(SessionExpiryPolicyMiddleware.START_TIME_KEY,
                                 expired)

    @login_user
    def test_session_inactive_too_long(self):
        """
        Pretend we are 1 second past the session inactivity timeout and make
        sure the session is cleared.
        """
        delta = SessionExpiryPolicyMiddleware().SESSION_INACTIVITY_TIMEOUT + 1
        expired = timezone.now() - datetime.timedelta(seconds=delta)
        # Class-attribute access for consistency with the other tests.
        self.session_expiry_test(
            SessionExpiryPolicyMiddleware.LAST_ACTIVITY_KEY,
            expired,
        )
class ConfidentialCachingTests(TestCase):
    """Tests for no-confidential-caching white/blacklist behavior."""

    def setUp(self):
        # Remember the prior setting (if any) so tearDown can restore it.
        self.old_config = getattr(settings, "NO_CONFIDENTIAL_CACHING", None)
        settings.NO_CONFIDENTIAL_CACHING = {
            "WHITELIST_ON": False,
            "BLACKLIST_ON": False,
            "WHITELIST_REGEXES": ["accounts/login/$"],
            "BLACKLIST_REGEXES": ["accounts/logout/$"]
        }
        # Headers the middleware is expected to add to confidential pages.
        self.header_values = {
            "Cache-Control": 'no-cache, no-store, max-age=0, must-revalidate',
            "Pragma": "no-cache",
            "Expires": '-1'
        }

    def tearDown(self):
        # Explicit None check: the previous truthiness test would have
        # deleted (rather than restored) a legitimate empty-dict config.
        if self.old_config is not None:
            settings.NO_CONFIDENTIAL_CACHING = self.old_config
        else:
            del settings.NO_CONFIDENTIAL_CACHING

    def test_whitelisting(self):
        """Whitelist mode: only whitelisted pages escape the headers."""
        settings.NO_CONFIDENTIAL_CACHING["WHITELIST_ON"] = True
        # Get Non Confidential Page
        response = self.client.get('/accounts/login/')
        for header, value in self.header_values.items():
            self.assertNotEqual(response.get(header, None), value)
        # Get Confidential Page. Trailing slash added so the request
        # matches the URL pattern directly (consistent with
        # test_blacklisting) instead of relying on a redirect.
        response = self.client.get("/accounts/logout/")
        for header, value in self.header_values.items():
            self.assertEqual(response.get(header, None), value)

    def test_blacklisting(self):
        """Blacklist mode: blacklisted pages receive the headers."""
        settings.NO_CONFIDENTIAL_CACHING["BLACKLIST_ON"] = True
        # Get Non Confidential Page
        response = self.client.get('/accounts/login/')
        for header, value in self.header_values.items():
            self.assertNotEqual(response.get(header, None), value)
        # Get Confidential Page
        response = self.client.get("/accounts/logout/")
        for header, value in self.header_values.items():
            self.assertEqual(response.get(header, None), value)
class XFrameOptionsDenyTests(TestCase):
    """Tests for the X-Frame-Options middleware."""

    def test_option_set(self):
        """
        Verify the HTTP Response Header is set.
        """
        page = self.client.get('/accounts/login/')
        self.assertEqual(settings.X_FRAME_OPTIONS, page['X-Frame-Options'])

    def test_exclude_urls(self):
        """
        Verify that pages can be excluded from the X-Frame-Options header.
        """
        covered = self.client.get('/home/')
        self.assertEqual(covered['X-Frame-Options'],
                         settings.X_FRAME_OPTIONS)
        excluded = self.client.get('/test1/')
        self.assertNotIn('X-Frame-Options', excluded)

    def test_improperly_configured(self):
        """load_setting() rejects invalid values for both settings."""
        middleware = XFrameOptionsMiddleware()
        with self.assertRaises(ImproperlyConfigured):
            middleware.load_setting('X_FRAME_OPTIONS', 'invalid')
        with self.assertRaises(ImproperlyConfigured):
            middleware.load_setting('X_FRAME_OPTIONS_EXCLUDE_URLS', 1)

    def test_default_exclude_urls(self):
        """With no exclude list configured, every URL gets the header."""
        with self.settings(X_FRAME_OPTIONS_EXCLUDE_URLS=None):
            # This URL is excluded in other tests, see settings.py
            page = self.client.get('/test1/')
            self.assertEqual(page['X-Frame-Options'],
                             settings.X_FRAME_OPTIONS)

    def test_default_xframe_option(self):
        """With no X_FRAME_OPTIONS configured, the default is 'deny'."""
        with self.settings(X_FRAME_OPTIONS=None):
            page = self.client.get('/home/')
            self.assertEqual(page['X-Frame-Options'], 'deny')
class XXssProtectTests(TestCase):
    """Tests for the X-XSS-Protection middleware."""

    def test_option_set(self):
        """
        Verify the HTTP Response Header is set.
        """
        page = self.client.get('/accounts/login/')
        self.assertNotEqual(page['X-XSS-Protection'], None)

    def test_default_setting(self):
        """With XSS_PROTECT unset, the header defaults to '1' (sanitize)."""
        with self.settings(XSS_PROTECT=None):
            page = self.client.get('/accounts/login/')
            self.assertEqual('1', page['X-XSS-Protection'])

    def test_option_off(self):
        """XSS_PROTECT='off' emits '0', disabling the browser filter."""
        with self.settings(XSS_PROTECT='off'):
            page = self.client.get('/accounts/login/')
            self.assertEqual('0', page['X-XSS-Protection'])

    def test_improper_configuration_raises(self):
        """load_setting() rejects unknown XSS_PROTECT values."""
        middleware = XssProtectMiddleware()
        with self.assertRaises(ImproperlyConfigured):
            middleware.load_setting('XSS_PROTECT', 'invalid')
class ContentNoSniffTests(TestCase):
    """Tests for the content-no-sniff middleware."""

    def test_option_set(self):
        """
        Verify the HTTP Response Header is set.
        """
        # NOTE(review): the middleware under test emits the header name
        # 'X-Content-Options'; the widely used browser header is
        # 'X-Content-Type-Options' -- confirm against the middleware.
        page = self.client.get('/accounts/login/')
        self.assertEqual('nosniff', page['X-Content-Options'])
class StrictTransportSecurityTests(TestCase):
    """Tests for the Strict-Transport-Security (HSTS) middleware."""

    def test_option_set(self):
        """
        Verify the HTTP Response Header is set.
        """
        page = self.client.get('/accounts/login/')
        self.assertNotEqual(page['Strict-Transport-Security'], None)
@override_settings(AUTHENTICATION_THROTTLING={
    # Delay doubles with each failed attempt for a username (1s, 2s, 4s,
    # ...); the second argument (per-IP count) is ignored by this policy.
    "DELAY_FUNCTION": lambda x, _: (2 ** (x - 1) if x else 0, 0),
    "LOGIN_URLS_WITH_TEMPLATES": [
        ("accounts/login/", "registration/login.html")
    ]
})
class AuthenticationThrottlingTests(TestCase):
    """
    Exercises login throttling: counters, delay functions, per-account
    throttling with a controlled clock, and the admin reset view.
    """

    def setUp(self):
        # monkey patch time so tests can control the clock precisely;
        # the original function is restored in tearDown.
        self.old_time = time.time
        self.time = 0
        time.time = lambda: self.time
        self.user = User.objects.create_user(username="foo", password="foo",
                                             email="a@foo.org")

    def tearDown(self):
        time.time = self.old_time

    def attempt(self, password):
        """Submit one login attempt for user "foo", following redirects."""
        return self.client.post("/accounts/login/",
                                {"username": "foo",
                                 "password": password},
                                follow=True)

    def reset(self):
        # Log out and clear the cache, which holds the throttle counters.
        self.client.logout()
        cache.clear()

    def typo(self):
        """Fail one login with a wrong password (lands back on the form)."""
        self.assertTemplateUsed(self.attempt("bar"), "registration/login.html")

    def _succeed(self):
        """Assert a correct-password login goes through, then reset state."""
        self.assertTemplateNotUsed(self.attempt("foo"),
                                   "registration/login.html")
        self.reset()

    def _fail(self):
        """Assert a correct-password login is still throttled, then reset."""
        self.assertTemplateUsed(self.attempt("foo"), "registration/login.html")
        self.reset()

    def set_time(self, t):
        # Move the monkey-patched clock to absolute time t (seconds).
        self.time = t

    def test_delay_message(self):
        """delay_message() renders delays in human-readable units."""
        self.assertEqual("0 seconds", delay_message(0))
        self.assertEqual("1 second", delay_message(0.1))
        self.assertEqual("1 second", delay_message(1))
        self.assertEqual("1 minute", delay_message(31))
        self.assertEqual("1 minute", delay_message(60))
        self.assertEqual("1 minute", delay_message(61))
        self.assertEqual("2 minutes", delay_message(90))
        self.assertEqual("2 minutes", delay_message(120))

    def test_counters(self):
        """increment/reset_counters track attempts per username and ip."""
        cache.clear()
        increment_counters(username="foo", ip="127.0.0.1")
        increment_counters(username="foo")
        self.assertEqual(attempt_count("username", "foo"), 2)
        self.assertEqual(attempt_count("ip", "127.0.0.1"), 1)
        self.assertEqual(attempt_count("username", "baz"), 0)
        reset_counters(username="foo", ip="127.0.0.1")
        self.assertEqual(attempt_count("username", "foo"), 0)
        self.assertEqual(attempt_count("ip", "127.0.0.1"), 0)
        cache.clear()

    def test_default_delay_function(self):
        """
        The default function will only delay by looking at the username,
        and shouldn't care about ip.
        """
        delay = default_delay_function
        # 100 repeated IPs doesn't result in a delay.
        self.assertEqual(delay(0, 100), (0, 0))
        # first 3 incorrect attempts with a username will not be delayed.
        for i in range(3):
            self.assertEqual(delay(i, 0), (0, 0))
        # fourth, fifth, sixth attempts are throttled
        for i in range(4, 7):
            self.assertEqual(delay(i, 0), (5 * 2 ** (i - 3), 0))
        # we max out at 24 hours
        self.assertEqual(delay(100, 0), (24 * 60 * 60, 0))

    def test_per_account_throttling(self):
        """
        Tests that multiple attempts on the same account are throttled
        according to settings.AUTHENTICATION_THROTTLING.
        """
        # No failures yet: login succeeds immediately.
        self.set_time(0)
        self._succeed()
        # One failure, retry inside the 1s delay window: even the right
        # password is rejected.
        self.set_time(0)
        self.typo()
        self._fail()
        # One failure, retry at t=1 (delay elapsed): succeeds.
        self.set_time(0)
        self.typo()
        self.set_time(1)
        self._succeed()
        # Two failures (second at t=1): delay doubles to 2s, so t=2 is
        # still too early...
        self.set_time(0)
        self.typo()
        self.set_time(1)
        self.typo()
        self.set_time(2)
        self._fail()
        # ...but t=3 is past the 2s window after the second failure.
        self.set_time(0)
        self.typo()
        self.set_time(1)
        self.typo()
        self.set_time(3)
        self._succeed()

    @override_settings(AUTHENTICATION_THROTTLING={
        "DELAY_FUNCTION": lambda x, y: (x, y),
        "LOGIN_URLS_WITH_TEMPLATES": [
            ("accounts/login/", None)
        ]
    })
    def test_too_many_requests_error_when_no_template_provided(self):
        """
        Verify we simply return a 429 error when there is no login template
        provided for us to report an error within.
        """
        cache.clear()
        # first bad attempt
        self.typo()
        # second attempt is throttled as per our delay function
        response = self.attempt("bar")
        self.assertEqual(
            response.status_code,
            429,
            "Expected TooManyRequests Error.",
        )
        cache.clear()

    def test_reset_button(self):
        """
        Tests that the account lockout reset button in the admin interface
        actually works.
        """
        self.set_time(0)
        self.typo()
        admin = User.objects.create_user(username="bar", password="bar",
                                         email="a@bar.org")
        admin.is_superuser = True
        admin.save()
        self.client.login(username="bar", password="bar")
        self.client.post(
            reverse("reset_username_throttle", args=[self.user.id]),
        )
        self.client.logout()
        # The reset cleared the counter, so "foo" can log in immediately.
        self._succeed()

    @override_settings(AUTHENTICATION_THROTTLING={
        "DELAY_FUNCTION": lambda x, y: (x, y),
    })
    def test_improperly_configured_middleware(self):
        # With only DELAY_FUNCTION configured, constructing the middleware
        # raises ImproperlyConfigured.
        self.assertRaises(ImproperlyConfigured, AuthThrottlingMiddleware)

    def test_throttle_reset_404_on_unauthorized(self):
        # A non-superuser caller gets a 404 from the reset view.
        resp = self.client.post(
            reverse("reset_username_throttle", args=[self.user.id]),
        )
        self.assertEqual(resp.status_code, 404)

    def test_throttle_reset_404_on_not_found(self):
        # Even a superuser gets a 404 for a nonexistent user id.
        admin = User.objects.create_user(
            username="bar",
            password="bar",
            email="a@bar.org",
        )
        admin.is_superuser = True
        admin.save()
        self.client.login(username="bar", password="bar")
        resp = self.client.post(
            reverse("reset_username_throttle", args=[999]),
        )
        self.assertEqual(resp.status_code, 404)
class P3PPolicyTests(TestCase):
    """Tests for the P3P compact-policy header middleware."""

    def setUp(self):
        self.policy = "NN AD BLAH"
        # Remember whatever was configured so tearDown can restore it;
        # previously this override leaked into every later test case.
        self.old_policy = getattr(settings, "P3P_COMPACT_POLICY", None)
        settings.P3P_COMPACT_POLICY = self.policy

    def tearDown(self):
        if self.old_policy is not None:
            settings.P3P_COMPACT_POLICY = self.old_policy
        else:
            del settings.P3P_COMPACT_POLICY

    def test_p3p_header(self):
        """The P3P header combines the policyref and the compact policy."""
        expected_header = 'policyref="/w3c/p3p.xml" CP="%s"' % self.policy
        response = self.client.get('/accounts/login/')
        self.assertEqual(response["P3P"], expected_header)
class AuthTests(TestCase):
    """Tests for password validators."""

    def test_min_length(self):
        """min_length(n) rejects shorter passwords, accepts length n."""
        validate = min_length(6)
        self.assertRaises(ValidationError, validate, "abcde")
        validate("abcdef")  # exactly six characters: no exception
class ContentSecurityPolicyTests(TestCase):
    """
    Exercises the Content-Security-Policy middleware (header emission in
    enforce/report-only modes, policy generation from CSP_DICT) and the
    csp_report view.
    """

    class FakeHttpRequest(object):
        # Minimal request double carrying a sample CSP violation report.
        method = 'POST'
        body = """{
        "csp-report": {
            "document-uri": "http://example.org/page.html",
            "referrer": "http://evil.example.com/haxor.html",
            "blocked-uri": "http://evil.example.com/image.png",
            "violated-directive": "default-src 'self'",
            "original-policy": "%s"
            }
        }
        """ % settings.CSP_STRING
        META = {
            'CONTENT_TYPE': 'application/json',
            'REMOTE_ADDR': '127.0.0.1',
            'HTTP_USER_AGENT': 'FakeHTTPRequest'
        }

    def test_option_set(self):
        """
        Verify the HTTP Response Header is set.
        """
        response = self.client.get('/accounts/login/')
        self.assertEqual(
            response['Content-Security-Policy'],
            settings.CSP_STRING,
        )

    def test_json(self):
        # The fake report body must itself be valid, non-empty JSON.
        req = ContentSecurityPolicyTests.FakeHttpRequest()
        parsed = json.loads(req.body)
        self.assertNotEqual(len(parsed), 0)

    # http://www.w3.org/TR/CSP/#sample-violation-report
    def test_csp_view(self):
        # The report endpoint acknowledges the report with 204 No Content.
        req = ContentSecurityPolicyTests.FakeHttpRequest()
        # call the view
        resp = csp_report(req)
        self.assertEqual(resp.status_code, 204)

    def test_csp_gen_1(self):
        """_csp_builder() renders a full directive dict into one policy."""
        csp_dict = {
            'default-src': ['self', 'cdn.example.com'],
            'script-src': ['self', 'js.example.com'],
            'style-src': ['self', 'css.example.com'],
            'img-src': ['self', 'img.example.com'],
            'connect-src': ['self', ],
            'font-src': ['fonts.example.com', ],
            'object-src': ['self'],
            'media-src': ['media.example.com', ],
            'frame-src': ['*', ],
            'sandbox': ['', ],
            'reflected-xss': 'filter',
            'referrer': 'origin',
            'report-uri': 'http://example.com/csp-report',
        }
        expected = (
            "script-src 'self' js.example.com;"
            "default-src 'self' cdn.example.com;"
            "img-src 'self' img.example.com;"
            "connect-src 'self';"
            "reflected-xss filter;"
            "style-src 'self' css.example.com;"
            "report-uri http://example.com/csp-report;"
            "frame-src *;"
            "sandbox ;"
            "object-src 'self';"
            "media-src media.example.com;"
            "referrer origin;"
            "font-src fonts.example.com"
        )
        csp = ContentSecurityPolicyMiddleware()
        generated = csp._csp_builder(csp_dict)
        # We can't assume the iteration order on the csp_dict, so we split the
        # output, sort, and ensure we got all the results back, regardless of
        # the order.
        expected_list = sorted(x.strip() for x in expected.split(';'))
        generated_list = sorted(x.strip() for x in generated.split(';'))
        self.assertEqual(generated_list, expected_list)

    def test_csp_gen_2(self):
        """Both tuples and lists are accepted as directive values."""
        csp_dict = {'default-src': ('none',), 'script-src': ['none']}
        expected = "default-src 'none'; script-src 'none'"
        csp = ContentSecurityPolicyMiddleware()
        generated = csp._csp_builder(csp_dict)
        expected_list = sorted(x.strip() for x in expected.split(';'))
        generated_list = sorted(x.strip() for x in generated.split(';'))
        self.assertEqual(generated_list, expected_list)

    def test_csp_gen_3(self):
        """Host sources are emitted verbatim; keywords get quoted."""
        csp_dict = {
            'script-src': [
                'self',
                'www.google-analytics.com',
                'ajax.googleapis.com',
            ],
        }
        expected = (
            "script-src "
            "'self' www.google-analytics.com ajax.googleapis.com"
        )
        csp = ContentSecurityPolicyMiddleware()
        generated = csp._csp_builder(csp_dict)
        self.assertEqual(generated, expected)

    def test_csp_gen_err(self):
        # argument not passed as array, expect failure
        csp_dict = {'default-src': 'self'}
        csp = ContentSecurityPolicyMiddleware()
        self.assertRaises(MiddlewareNotUsed, csp._csp_builder, csp_dict)

    def test_csp_gen_err2(self):
        csp_dict = {'invalid': 'self'}  # invalid directive
        csp = ContentSecurityPolicyMiddleware()
        self.assertRaises(MiddlewareNotUsed, csp._csp_builder, csp_dict)

    def test_csp_gen_err3(self):
        csp_dict = {'sandbox': 'none'}  # not a list or tuple, expect failure
        csp = ContentSecurityPolicyMiddleware()
        self.assertRaises(MiddlewareNotUsed, csp._csp_builder, csp_dict)

    def test_csp_gen_err4(self):
        # Not an allowed directive, expect failure
        csp_dict = {'sandbox': ('invalid', )}
        csp = ContentSecurityPolicyMiddleware()
        self.assertRaises(MiddlewareNotUsed, csp._csp_builder, csp_dict)

    def test_csp_gen_err5(self):
        # Not an allowed directive, expect failure
        csp_dict = {'referrer': 'invalid'}
        csp = ContentSecurityPolicyMiddleware()
        self.assertRaises(MiddlewareNotUsed, csp._csp_builder, csp_dict)

    def test_csp_gen_err6(self):
        # Not an allowed directive, expect failure
        csp_dict = {'reflected-xss': 'invalid'}
        csp = ContentSecurityPolicyMiddleware()
        self.assertRaises(MiddlewareNotUsed, csp._csp_builder, csp_dict)

    def test_enforced_by_default(self):
        # CSP_MODE unset behaves like 'enforce'.
        with self.settings(CSP_MODE=None):
            response = self.client.get('/accounts/login/')
            self.assertIn('Content-Security-Policy', response)
            self.assertNotIn('Content-Security-Policy-Report-Only', response)

    def test_enforced_when_on(self):
        with self.settings(CSP_MODE='enforce'):
            response = self.client.get('/accounts/login/')
            self.assertIn('Content-Security-Policy', response)
            self.assertNotIn('Content-Security-Policy-Report-Only', response)

    def test_report_only_set(self):
        # Report-only mode swaps the header name.
        with self.settings(CSP_MODE='report-only'):
            response = self.client.get('/accounts/login/')
            self.assertNotIn('Content-Security-Policy', response)
            self.assertIn('Content-Security-Policy-Report-Only', response)

    def test_invalid_csp_mode(self):
        with self.settings(CSP_MODE='invalid'):
            self.assertRaises(
                MiddlewareNotUsed,
                ContentSecurityPolicyMiddleware,
            )

    def test_no_csp_options_set(self):
        # The middleware requires exactly one of CSP_DICT / CSP_STRING.
        with self.settings(CSP_DICT=None, CSP_STRING=None):
            self.assertRaises(
                MiddlewareNotUsed,
                ContentSecurityPolicyMiddleware,
            )

    def test_both_csp_options_set(self):
        with self.settings(CSP_DICT={'x': 'y'}, CSP_STRING='x y;'):
            self.assertRaises(
                MiddlewareNotUsed,
                ContentSecurityPolicyMiddleware,
            )

    def test_sets_from_csp_dict(self):
        # A CSP_DICT alone is rendered into the policy header.
        with self.settings(
            CSP_DICT={'default-src': ('self',)},
            CSP_STRING=None,
        ):
            response = self.client.get('/accounts/login/')
            self.assertEqual(
                response['Content-Security-Policy'],
                "default-src 'self'",
            )
class DoNotTrackTests(TestCase):
    """Tests for DoNotTrackMiddleware: request.dnt flag and DNT echo."""

    def setUp(self):
        self.middleware = DoNotTrackMiddleware()
        self.request = HttpRequest()
        self.response = HttpResponse()

    def test_set_DNT_on(self):
        """An incoming 'DNT: 1' header sets request.dnt truthy."""
        self.request.META['HTTP_DNT'] = '1'
        self.middleware.process_request(self.request)
        self.assertTrue(self.request.dnt)

    def test_set_DNT_off(self):
        """Any non-'1' DNT value leaves request.dnt falsy."""
        self.request.META['HTTP_DNT'] = 'off'
        self.middleware.process_request(self.request)
        self.assertFalse(self.request.dnt)

    def test_default_DNT(self):
        """Without a DNT header, request.dnt defaults to falsy."""
        self.middleware.process_request(self.request)
        self.assertFalse(self.request.dnt)

    def test_DNT_echo_on(self):
        """The incoming DNT value '1' is echoed on the response."""
        self.request.META['HTTP_DNT'] = '1'
        self.middleware.process_response(self.request, self.response)
        self.assertIn('DNT', self.response)
        self.assertEqual('1', self.response['DNT'])

    def test_DNT_echo_off(self):
        """Non-'1' DNT values are echoed back verbatim too."""
        self.request.META['HTTP_DNT'] = 'off'
        self.middleware.process_response(self.request, self.response)
        self.assertEqual('off', self.response['DNT'])

    def test_DNT_echo_default(self):
        """No incoming DNT header means no DNT header on the response."""
        self.middleware.process_response(self.request, self.response)
        self.assertNotIn('DNT', self.response)
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_operation import AzureOperationPoller
from .. import models
class NamespacesOperations(object):
"""NamespacesOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
:ivar api_version: Client API version. Constant value: "2017-04-01".
"""
models = models
    def __init__(self, client, config, serializer, deserializer):
        # Service client used to build and send the HTTP requests.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        # Client API version; pinned by the code generator.
        self.api_version = "2017-04-01"
        self.config = config
    def check_name_availability_method(
            self, name, custom_headers=None, raw=False, **operation_config):
        """Check the given namespace name availability.

        :param name: The Name to check the namespace name availability and
         The namespace name can contain only letters, numbers, and hyphens.
         The namespace must start with a letter, and it must end with a
         letter or number.
        :type name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: CheckNameAvailabilityResult or ClientRawResponse if raw=true
        :rtype: ~azure.mgmt.servicebus.models.CheckNameAvailabilityResult or
         ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`ErrorResponseException<azure.mgmt.servicebus.models.ErrorResponseException>`
        """
        parameters = models.CheckNameAvailability(name=name)

        # Construct URL
        url = '/subscriptions/{subscriptionId}/providers/Microsoft.ServiceBus/CheckNameAvailability'
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            # Correlation id so the request can be traced server-side.
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct body
        body_content = self._serialize.body(parameters, 'CheckNameAvailability')

        # Construct and send request
        request = self._client.post(url, query_parameters)
        response = self._client.send(
            request, header_parameters, body_content, **operation_config)

        if response.status_code not in [200]:
            raise models.ErrorResponseException(self._deserialize, response)

        deserialized = None

        if response.status_code == 200:
            deserialized = self._deserialize('CheckNameAvailabilityResult', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
    def list(
            self, custom_headers=None, raw=False, **operation_config):
        """Gets all the available namespaces within the subscription,
        irrespective of the resource groups.

        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of SBNamespace
        :rtype:
         ~azure.mgmt.servicebus.models.SBNamespacePaged[~azure.mgmt.servicebus.models.SBNamespace]
        :raises:
         :class:`ErrorResponseException<azure.mgmt.servicebus.models.ErrorResponseException>`
        """
        def internal_paging(next_link=None, raw=False):
            # Fetch one page: the first call builds the collection URL;
            # subsequent calls follow the service-supplied next_link as-is.
            if not next_link:
                # Construct URL
                url = '/subscriptions/{subscriptionId}/providers/Microsoft.ServiceBus/namespaces'
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)

                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

            else:
                url = next_link
                query_parameters = {}

            # Construct headers
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, **operation_config)

            if response.status_code not in [200]:
                raise models.ErrorResponseException(self._deserialize, response)

            return response

        # Deserialize response: the paged collection drives internal_paging
        # lazily as the caller iterates.
        deserialized = models.SBNamespacePaged(internal_paging, self._deserialize.dependencies)

        if raw:
            header_dict = {}
            client_raw_response = models.SBNamespacePaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response

        return deserialized
    def list_by_resource_group(
            self, resource_group_name, custom_headers=None, raw=False, **operation_config):
        """Gets the available namespaces within a resource group.

        :param resource_group_name: Name of the Resource group within the
         Azure subscription.
        :type resource_group_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of SBNamespace
        :rtype:
         ~azure.mgmt.servicebus.models.SBNamespacePaged[~azure.mgmt.servicebus.models.SBNamespace]
        :raises:
         :class:`ErrorResponseException<azure.mgmt.servicebus.models.ErrorResponseException>`
        """
        def internal_paging(next_link=None, raw=False):
            # Fetch one page: the first call builds the resource-group URL;
            # subsequent calls follow the service-supplied next_link as-is.
            if not next_link:
                # Construct URL
                url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces'
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)

                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

            else:
                url = next_link
                query_parameters = {}

            # Construct headers
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, **operation_config)

            if response.status_code not in [200]:
                raise models.ErrorResponseException(self._deserialize, response)

            return response

        # Deserialize response: paged collection iterated lazily.
        deserialized = models.SBNamespacePaged(internal_paging, self._deserialize.dependencies)

        if raw:
            header_dict = {}
            client_raw_response = models.SBNamespacePaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response

        return deserialized
    def create_or_update(
            self, resource_group_name, namespace_name, parameters, custom_headers=None, raw=False, **operation_config):
        """Creates or updates a service namespace. Once created, this namespace's
        resource manifest is immutable. This operation is idempotent.

        :param resource_group_name: Name of the Resource group within the
         Azure subscription.
        :type resource_group_name: str
        :param namespace_name: The namespace name.
        :type namespace_name: str
        :param parameters: Parameters supplied to create a namespace resource.
        :type parameters: ~azure.mgmt.servicebus.models.SBNamespace
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An instance of AzureOperationPoller that returns SBNamespace
         or ClientRawResponse if raw=true
        :rtype:
         ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.servicebus.models.SBNamespace]
         or ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`ErrorResponseException<azure.mgmt.servicebus.models.ErrorResponseException>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}'
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
            'namespaceName': self._serialize.url("namespace_name", namespace_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct body
        body_content = self._serialize.body(parameters, 'SBNamespace')

        # Construct and send request. The three closures below drive the
        # long-running-operation poller.
        def long_running_send():
            # Issue the initial PUT that starts the operation.
            request = self._client.put(url, query_parameters)
            return self._client.send(
                request, header_parameters, body_content, **operation_config)

        def get_long_running_status(status_link, headers=None):
            # Poll the status link the service returned.
            request = self._client.get(status_link)
            if headers:
                request.headers.update(headers)
            return self._client.send(
                request, header_parameters, **operation_config)

        def get_long_running_output(response):
            # Translate the final response into the deserialized model.
            if response.status_code not in [200, 201, 202]:
                raise models.ErrorResponseException(self._deserialize, response)

            deserialized = None

            if response.status_code == 200:
                deserialized = self._deserialize('SBNamespace', response)
            if response.status_code == 201:
                deserialized = self._deserialize('SBNamespace', response)

            if raw:
                client_raw_response = ClientRawResponse(deserialized, response)
                return client_raw_response

            return deserialized

        if raw:
            # raw=True skips polling and returns the first response.
            response = long_running_send()
            return get_long_running_output(response)

        long_running_operation_timeout = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        return AzureOperationPoller(
            long_running_send, get_long_running_output,
            get_long_running_status, long_running_operation_timeout)
    def delete(
            self, resource_group_name, namespace_name, custom_headers=None, raw=False, **operation_config):
        """Deletes an existing namespace. This operation also removes all
        associated resources under the namespace.

        :param resource_group_name: Name of the Resource group within the
         Azure subscription.
        :type resource_group_name: str
        :param namespace_name: The namespace name
        :type namespace_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An instance of AzureOperationPoller that returns None or
         ClientRawResponse if raw=true
        :rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
         ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`ErrorResponseException<azure.mgmt.servicebus.models.ErrorResponseException>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}'
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
            'namespaceName': self._serialize.url("namespace_name", namespace_name, 'str', max_length=50, min_length=6),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request. The closures below drive the
        # long-running-operation poller.
        def long_running_send():
            # Issue the initial DELETE that starts the operation.
            request = self._client.delete(url, query_parameters)
            return self._client.send(request, header_parameters, **operation_config)

        def get_long_running_status(status_link, headers=None):
            # Poll the status link the service returned.
            request = self._client.get(status_link)
            if headers:
                request.headers.update(headers)
            return self._client.send(
                request, header_parameters, **operation_config)

        def get_long_running_output(response):
            # Delete has no response body: returns None for the non-raw
            # case, or the raw response wrapper when raw=True.
            if response.status_code not in [200, 202, 204]:
                raise models.ErrorResponseException(self._deserialize, response)

            if raw:
                client_raw_response = ClientRawResponse(None, response)
                return client_raw_response

        if raw:
            # raw=True skips polling and returns the first response.
            response = long_running_send()
            return get_long_running_output(response)

        long_running_operation_timeout = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        return AzureOperationPoller(
            long_running_send, get_long_running_output,
            get_long_running_status, long_running_operation_timeout)
def get(
        self, resource_group_name, namespace_name, custom_headers=None, raw=False, **operation_config):
    """Gets a description for the specified namespace.

    :param resource_group_name: Name of the Resource group within the
     Azure subscription.
    :type resource_group_name: str
    :param namespace_name: The namespace name
    :type namespace_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: SBNamespace or ClientRawResponse if raw=true
    :rtype: ~azure.mgmt.servicebus.models.SBNamespace or
     ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`ErrorResponseException<azure.mgmt.servicebus.models.ErrorResponseException>`
    """
    # Fill in the URL path template with the serialized path arguments.
    url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}'
    path_args = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
        'namespaceName': self._serialize.url("namespace_name", namespace_name, 'str', max_length=50, min_length=6),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_args)

    # Query string carries only the API version.
    query_params = {
        'api-version': self._serialize.query("self.api_version", self.api_version, 'str'),
    }

    # Standard headers, optionally augmented by the caller.
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        headers['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        headers.update(custom_headers)
    if self.config.accept_language is not None:
        headers['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Issue the GET and validate the status code.
    request = self._client.get(url, query_params)
    response = self._client.send(request, headers, **operation_config)

    if response.status_code not in [200]:
        raise models.ErrorResponseException(self._deserialize, response)

    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('SBNamespace', response)

    if raw:
        return ClientRawResponse(deserialized, response)

    return deserialized
def update(
        self, resource_group_name, namespace_name, parameters, custom_headers=None, raw=False, **operation_config):
    """Updates a service namespace. Once created, this namespace's resource
    manifest is immutable. This operation is idempotent.

    :param resource_group_name: Name of the Resource group within the
     Azure subscription.
    :type resource_group_name: str
    :param namespace_name: The namespace name
    :type namespace_name: str
    :param parameters: Parameters supplied to update a namespace resource.
    :type parameters:
     ~azure.mgmt.servicebus.models.SBNamespaceUpdateParameters
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: SBNamespace or ClientRawResponse if raw=true
    :rtype: ~azure.mgmt.servicebus.models.SBNamespace or
     ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`ErrorResponseException<azure.mgmt.servicebus.models.ErrorResponseException>`
    """
    # Fill in the URL path template with the serialized path arguments.
    url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}'
    path_args = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
        'namespaceName': self._serialize.url("namespace_name", namespace_name, 'str', max_length=50, min_length=6),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_args)

    # Query string carries only the API version.
    query_params = {
        'api-version': self._serialize.query("self.api_version", self.api_version, 'str'),
    }

    # Standard headers, optionally augmented by the caller.
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        headers['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        headers.update(custom_headers)
    if self.config.accept_language is not None:
        headers['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Serialize the update payload and PATCH it.
    body = self._serialize.body(parameters, 'SBNamespaceUpdateParameters')

    request = self._client.patch(url, query_params)
    response = self._client.send(request, headers, body, **operation_config)

    if response.status_code not in [200, 201, 202]:
        raise models.ErrorResponseException(self._deserialize, response)

    # 200 and 201 both carry a namespace body; 202 (accepted) carries none.
    deserialized = None
    if response.status_code in (200, 201):
        deserialized = self._deserialize('SBNamespace', response)

    if raw:
        return ClientRawResponse(deserialized, response)

    return deserialized
def list_authorization_rules(
        self, resource_group_name, namespace_name, custom_headers=None, raw=False, **operation_config):
    """Gets the authorization rules for a namespace.

    :param resource_group_name: Name of the Resource group within the
     Azure subscription.
    :type resource_group_name: str
    :param namespace_name: The namespace name
    :type namespace_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: An iterator like instance of SBAuthorizationRule
    :rtype:
     ~azure.mgmt.servicebus.models.SBAuthorizationRulePaged[~azure.mgmt.servicebus.models.SBAuthorizationRule]
    :raises:
     :class:`ErrorResponseException<azure.mgmt.servicebus.models.ErrorResponseException>`
    """
    def fetch_page(next_link=None, raw=False):
        # First page: build the collection URL; subsequent pages: the
        # service hands back a complete next_link.
        if next_link:
            url = next_link
            query_params = {}
        else:
            url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/AuthorizationRules'
            path_args = {
                'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
                'namespaceName': self._serialize.url("namespace_name", namespace_name, 'str', max_length=50, min_length=6),
                'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
            }
            url = self._client.format_url(url, **path_args)
            query_params = {
                'api-version': self._serialize.query("self.api_version", self.api_version, 'str'),
            }

        headers = {'Content-Type': 'application/json; charset=utf-8'}
        if self.config.generate_client_request_id:
            headers['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            headers.update(custom_headers)
        if self.config.accept_language is not None:
            headers['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        request = self._client.get(url, query_params)
        response = self._client.send(request, headers, **operation_config)

        if response.status_code not in [200]:
            raise models.ErrorResponseException(self._deserialize, response)

        return response

    # Wrap the page fetcher in a lazily-evaluated paged collection.
    deserialized = models.SBAuthorizationRulePaged(fetch_page, self._deserialize.dependencies)

    if raw:
        header_dict = {}
        return models.SBAuthorizationRulePaged(fetch_page, self._deserialize.dependencies, header_dict)

    return deserialized
def create_or_update_authorization_rule(
        self, resource_group_name, namespace_name, authorization_rule_name, rights, custom_headers=None, raw=False, **operation_config):
    """Creates or updates an authorization rule for a namespace.

    :param resource_group_name: Name of the Resource group within the
     Azure subscription.
    :type resource_group_name: str
    :param namespace_name: The namespace name
    :type namespace_name: str
    :param authorization_rule_name: The authorizationrule name.
    :type authorization_rule_name: str
    :param rights: The rights associated with the rule.
    :type rights: list[str or ~azure.mgmt.servicebus.models.AccessRights]
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: SBAuthorizationRule or ClientRawResponse if raw=true
    :rtype: ~azure.mgmt.servicebus.models.SBAuthorizationRule or
     ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`ErrorResponseException<azure.mgmt.servicebus.models.ErrorResponseException>`
    """
    # The flat `rights` argument is wrapped into the request model.
    parameters = models.SBAuthorizationRule(rights=rights)

    # Fill in the URL path template with the serialized path arguments.
    url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/AuthorizationRules/{authorizationRuleName}'
    path_args = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
        'namespaceName': self._serialize.url("namespace_name", namespace_name, 'str', max_length=50, min_length=6),
        'authorizationRuleName': self._serialize.url("authorization_rule_name", authorization_rule_name, 'str', max_length=50, min_length=1),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_args)

    # Query string carries only the API version.
    query_params = {
        'api-version': self._serialize.query("self.api_version", self.api_version, 'str'),
    }

    # Standard headers, optionally augmented by the caller.
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        headers['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        headers.update(custom_headers)
    if self.config.accept_language is not None:
        headers['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Serialize the rule and PUT it.
    body = self._serialize.body(parameters, 'SBAuthorizationRule')

    request = self._client.put(url, query_params)
    response = self._client.send(request, headers, body, **operation_config)

    if response.status_code not in [200]:
        raise models.ErrorResponseException(self._deserialize, response)

    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('SBAuthorizationRule', response)

    if raw:
        return ClientRawResponse(deserialized, response)

    return deserialized
def delete_authorization_rule(
        self, resource_group_name, namespace_name, authorization_rule_name, custom_headers=None, raw=False, **operation_config):
    """Deletes a namespace authorization rule.

    :param resource_group_name: Name of the Resource group within the
     Azure subscription.
    :type resource_group_name: str
    :param namespace_name: The namespace name
    :type namespace_name: str
    :param authorization_rule_name: The authorizationrule name.
    :type authorization_rule_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: None or ClientRawResponse if raw=true
    :rtype: None or ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`ErrorResponseException<azure.mgmt.servicebus.models.ErrorResponseException>`
    """
    # Fill in the URL path template with the serialized path arguments.
    url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/AuthorizationRules/{authorizationRuleName}'
    path_args = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
        'namespaceName': self._serialize.url("namespace_name", namespace_name, 'str', max_length=50, min_length=6),
        'authorizationRuleName': self._serialize.url("authorization_rule_name", authorization_rule_name, 'str', max_length=50, min_length=1),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_args)

    # Query string carries only the API version.
    query_params = {
        'api-version': self._serialize.query("self.api_version", self.api_version, 'str'),
    }

    # Standard headers, optionally augmented by the caller.
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        headers['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        headers.update(custom_headers)
    if self.config.accept_language is not None:
        headers['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Issue the DELETE; 200 and 204 both count as success.
    request = self._client.delete(url, query_params)
    response = self._client.send(request, headers, **operation_config)

    if response.status_code not in [200, 204]:
        raise models.ErrorResponseException(self._deserialize, response)

    if raw:
        return ClientRawResponse(None, response)
def get_authorization_rule(
        self, resource_group_name, namespace_name, authorization_rule_name, custom_headers=None, raw=False, **operation_config):
    """Gets an authorization rule for a namespace by rule name.

    :param resource_group_name: Name of the Resource group within the
     Azure subscription.
    :type resource_group_name: str
    :param namespace_name: The namespace name
    :type namespace_name: str
    :param authorization_rule_name: The authorizationrule name.
    :type authorization_rule_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: SBAuthorizationRule or ClientRawResponse if raw=true
    :rtype: ~azure.mgmt.servicebus.models.SBAuthorizationRule or
     ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`ErrorResponseException<azure.mgmt.servicebus.models.ErrorResponseException>`
    """
    # Fill in the URL path template with the serialized path arguments.
    url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/AuthorizationRules/{authorizationRuleName}'
    path_args = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
        'namespaceName': self._serialize.url("namespace_name", namespace_name, 'str', max_length=50, min_length=6),
        'authorizationRuleName': self._serialize.url("authorization_rule_name", authorization_rule_name, 'str', max_length=50, min_length=1),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_args)

    # Query string carries only the API version.
    query_params = {
        'api-version': self._serialize.query("self.api_version", self.api_version, 'str'),
    }

    # Standard headers, optionally augmented by the caller.
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        headers['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        headers.update(custom_headers)
    if self.config.accept_language is not None:
        headers['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Issue the GET and validate the status code.
    request = self._client.get(url, query_params)
    response = self._client.send(request, headers, **operation_config)

    if response.status_code not in [200]:
        raise models.ErrorResponseException(self._deserialize, response)

    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('SBAuthorizationRule', response)

    if raw:
        return ClientRawResponse(deserialized, response)

    return deserialized
def list_keys(
        self, resource_group_name, namespace_name, authorization_rule_name, custom_headers=None, raw=False, **operation_config):
    """Gets the primary and secondary connection strings for the namespace.

    :param resource_group_name: Name of the Resource group within the
     Azure subscription.
    :type resource_group_name: str
    :param namespace_name: The namespace name
    :type namespace_name: str
    :param authorization_rule_name: The authorizationrule name.
    :type authorization_rule_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: AccessKeys or ClientRawResponse if raw=true
    :rtype: ~azure.mgmt.servicebus.models.AccessKeys or
     ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`ErrorResponseException<azure.mgmt.servicebus.models.ErrorResponseException>`
    """
    # Fill in the URL path template with the serialized path arguments.
    url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/AuthorizationRules/{authorizationRuleName}/listKeys'
    path_args = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
        'namespaceName': self._serialize.url("namespace_name", namespace_name, 'str', max_length=50, min_length=6),
        'authorizationRuleName': self._serialize.url("authorization_rule_name", authorization_rule_name, 'str', max_length=50, min_length=1),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_args)

    # Query string carries only the API version.
    query_params = {
        'api-version': self._serialize.query("self.api_version", self.api_version, 'str'),
    }

    # Standard headers, optionally augmented by the caller.
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        headers['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        headers.update(custom_headers)
    if self.config.accept_language is not None:
        headers['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # listKeys is a POST action with no request body.
    request = self._client.post(url, query_params)
    response = self._client.send(request, headers, **operation_config)

    if response.status_code not in [200]:
        raise models.ErrorResponseException(self._deserialize, response)

    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('AccessKeys', response)

    if raw:
        return ClientRawResponse(deserialized, response)

    return deserialized
def regenerate_keys(
        self, resource_group_name, namespace_name, authorization_rule_name, key_type, key=None, custom_headers=None, raw=False, **operation_config):
    """Regenerates the primary or secondary connection strings for the
    namespace.

    :param resource_group_name: Name of the Resource group within the
     Azure subscription.
    :type resource_group_name: str
    :param namespace_name: The namespace name
    :type namespace_name: str
    :param authorization_rule_name: The authorizationrule name.
    :type authorization_rule_name: str
    :param key_type: The access key to regenerate. Possible values
     include: 'PrimaryKey', 'SecondaryKey'
    :type key_type: str or ~azure.mgmt.servicebus.models.KeyType
    :param key: Optional, if the key value provided, is reset for KeyType
     value or autogenerate Key value set for keyType
    :type key: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: AccessKeys or ClientRawResponse if raw=true
    :rtype: ~azure.mgmt.servicebus.models.AccessKeys or
     ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`ErrorResponseException<azure.mgmt.servicebus.models.ErrorResponseException>`
    """
    # The flat arguments are wrapped into the request model.
    parameters = models.RegenerateAccessKeyParameters(key_type=key_type, key=key)

    # Fill in the URL path template with the serialized path arguments.
    url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/AuthorizationRules/{authorizationRuleName}/regenerateKeys'
    path_args = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
        'namespaceName': self._serialize.url("namespace_name", namespace_name, 'str', max_length=50, min_length=6),
        'authorizationRuleName': self._serialize.url("authorization_rule_name", authorization_rule_name, 'str', max_length=50, min_length=1),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_args)

    # Query string carries only the API version.
    query_params = {
        'api-version': self._serialize.query("self.api_version", self.api_version, 'str'),
    }

    # Standard headers, optionally augmented by the caller.
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        headers['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        headers.update(custom_headers)
    if self.config.accept_language is not None:
        headers['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Serialize the payload and POST the regenerate action.
    body = self._serialize.body(parameters, 'RegenerateAccessKeyParameters')

    request = self._client.post(url, query_params)
    response = self._client.send(request, headers, body, **operation_config)

    if response.status_code not in [200]:
        raise models.ErrorResponseException(self._deserialize, response)

    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('AccessKeys', response)

    if raw:
        return ClientRawResponse(deserialized, response)

    return deserialized
| |
#!/usr/bin/env python
# Copyright (C) 2014 Aldebaran Robotics
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# This file takes the official URDF aldebaran files and convert them
# to rep120 compliant urdf files. It also includes the meshes from nao_meshes
# package allowing to display the model in RVIZ
#
# authors: Mikael Arguedas [mikael DOT arguedas AT gmail DOT com]
# TODO Get motor information from documentation and generate transmission tags
# automatically
# TODO Generate automatically gazebo tags for every sensor
# TODO Add toe frames for romeo (not supported yet by NAOqi)
from __future__ import print_function
import sys
import argparse
from naoqi_tools.urdf import URDF
import copy
import naoqi_tools.gazeboUrdf
import naoqi_tools.urdf as ur
import naoqi_tools.nao_dictionaries as dico
import subprocess
import os
import math
from xml.dom.minidom import Document
# For each robot model: map a kinematic-chain name to the link used as
# the tip of that chain when generating per-chain xacro files.
NAO_XACRO_DICO = {
    'head': 'gaze',
    'legs': 'sole',
    'arms': 'gripper',
    'torso': 'torso',
}

ROMEO_XACRO_DICO = {
    'head': 'gaze',
    'legs': 'sole',
    'arms': 'gripper',
    'torso': 'body',
    'eyes': 'Eye',
}

PEPPER_XACRO_DICO = {
    'head': 'Head',
    'legs': 'base_footprint',
    'arms': 'gripper',
    'torso': 'torso',
}

# Suffix of the decimated collision meshes shipped with the meshes package.
COLLISION_SUFFIX = '_0.10.stl'

# Command-line interface of the converter.
parser = argparse.ArgumentParser(usage='Load an URDF file')
parser.add_argument('-i', '--input', default='', help='URDF file to load')
parser.add_argument('-r', '--REP120', choices=['true', 'false'],
                    default='true', help='Rename the links to be REP120 compliant')
parser.add_argument('-x', '--xacro', choices=['urdf', 'robot'],
                    default='robot', help='Chose robot part to generate. choosing urdf create a'
                    'single urdf file with the entire robot. robot will OUTPUT a xacro file '
                    'for every kinematic chain on the robot')
####################
##### FUNCTIONS ####
####################
def define_materials():
    """Register the named colors used by the visual tags.

    Adds a small grayscale palette of materials to the global ``robot``
    so geometrical shapes can be displayed in a given color.
    """
    global robot
    palette = [
        ('Black', (0.1, 0.1, 0.1, 1)),
        ('LightGrey', (0.9, 0.9, 0.9, 1)),
        ('Grey', (0.6, 0.6, 0.6, 1)),
        ('DarkGrey', (0.3, 0.3, 0.3, 1)),
    ]
    for name, rgba in palette:
        robot.add_material(ur.Material(name, ur.Color(*rgba)))
def REP120_compatibility():
    """Add frames defined by ROS for humanoid robots.

    (REP120): http://www.ros.org/reps/rep-0120.html

    Mutates the global ``robot``: strips NAOqi '_joint'/'_actuator'
    suffixes, remaps link names through ``LINKS_DICO``, then adds the
    robot-specific fixed frames (gaze, base_footprint, laser device and
    projected frames, camera optical frames) and the dummy physics tags
    Gazebo needs. Also sets the global ``MESH_VERSION``.
    """
    # TODO Add toe frames for ROMEO (not supported by NAOqi yet)
    global robot, NAME, MESH_VERSION, VERSION, LINKS_DICO, OFFSETS_DICO
    print('creating and renaming joints & links to comply to REP120')
    # Rename joints: drop NAOqi suffixes on the joint and on any mimic'd joint.
    for joint in robot.joints:
        if robot.joints[joint].name.endswith('_joint'):
            robot.joints[joint].name = robot.joints[joint].name[0:-6]
        if robot.joints[joint].name.endswith('_actuator'):
            robot.joints[joint].name = robot.joints[joint].name[0:-9]
        if robot.joints[joint].mimic is not None:
            if robot.joints[joint].mimic.joint_name.endswith('_actuator'):
                robot.joints[joint].mimic.joint_name = \
                    robot.joints[joint].mimic.joint_name[0:-9]
            if robot.joints[joint].mimic.joint_name.endswith('_joint'):
                robot.joints[joint].mimic.joint_name = \
                    robot.joints[joint].mimic.joint_name[0:-6]
        # Not every link has a REP120 alias; skip the ones that don't.
        try:
            robot.joints[joint].parent = LINKS_DICO[robot.joints[joint].parent]
        except KeyError:
            pass
        try:
            robot.joints[joint].child = LINKS_DICO[robot.joints[joint].child]
        except KeyError:
            pass
    # Iterate over a snapshot of the keys: rename_link mutates robot.links.
    for link in list(robot.links.keys()):
        try:
            robot.rename_link(link, LINKS_DICO[link])
        # BUG FIX: was `except KeyError, ValueError:`, which only caught
        # KeyError (binding it to the name ValueError) and is a syntax
        # error on Python 3. A tuple catches both exception types.
        except (KeyError, ValueError):
            pass
    if NAME == 'romeo':
        # Gaze frame hangs off the head, at the left camera offset.
        robot.add_link(ur.Link('gaze'))
        robot.add_joint(ur.Joint('gaze_joint', 'HeadRoll_link',
                        'gaze', 'fixed', None, ur.Pose(
                            (OFFSETS_DICO['CameraLeftEyeOffsetX'], 0,
                             OFFSETS_DICO['CameraLeftEyeOffsetZ']), (0, 0, 0))))
        MESH_VERSION = ''
    elif NAME == 'nao':
        robot.add_link(ur.Link('gaze'))
        robot.add_joint(ur.Joint('gaze_joint', 'Head',
                        'gaze', 'fixed', None, ur.Pose(
                            (OFFSETS_DICO['CameraTopV4OffsetX'], 0,
                             OFFSETS_DICO['CameraTopV4OffsetZ']), (0, 0, 0))))
        # Meshes only exist for V32 and V40; V33/V40/V50 share the V40 set.
        if VERSION == 'V32':
            MESH_VERSION = VERSION
        elif VERSION == 'V33' or VERSION == 'V40' or VERSION == 'V50':
            MESH_VERSION = 'V40'
    elif NAME == 'pepper':
        MESH_VERSION = VERSION
        # add base_footprint frame
        robot.add_link(ur.Link('base_footprint'))
        robot.add_joint(ur.Joint('base_footprint_joint', 'Tibia',
                        'base_footprint', 'fixed', None, ur.Pose(
                            (OFFSETS_DICO['BaseFootprintOffsetX'],
                             OFFSETS_DICO['BaseFootprintOffsetY'],
                             OFFSETS_DICO['BaseFootprintOffsetZ']),
                            (OFFSETS_DICO['BaseFootprintRotX'],
                             OFFSETS_DICO['BaseFootprintRotY'],
                             OFFSETS_DICO['BaseFootprintRotZ']))))
        # rename the laser frames to sensor frames
        # (they are actually not used for computation)
        laser_links = [c for c in robot.links.keys()
                       if 'surrounding' in c.lower()]
        for joint in robot.joints.values():
            if joint.child in laser_links:
                laser_frame = joint.child
                laser_device_frame = laser_frame[:-5] + 'device_frame'
                # get the old joint to have the device frame as a child
                joint.child = laser_device_frame
                # but also create a joint with the projected frame as a child
                robot.add_link(ur.Link(laser_device_frame))
                joint_new = copy.deepcopy(joint)
                joint_new.name = joint.name[:-17] + \
                    'projected_sensor_fixedjoint'
                joint_new.child = laser_frame
                joint_new.origin.rotation[0] = 0
                joint_new.origin.rotation[1] = 0
                # set it on the ground
                joint_new.origin.position[2] = -0.334
                if 'left' in laser_frame.lower():
                    # the following line is a temporary fix
                    # that should be fixed upstream
                    joint_new.origin.rotation[2] = math.pi/2.0 + \
                        0.1864836732051034
                elif 'right' in laser_frame.lower():
                    # the following line is a temporary fix
                    # that should be fixed upstream
                    joint.origin.position[0] = -0.018
                    joint_new.origin.position[0] = -0.018
                    # the following line is a temporary fix
                    # that should be fixed upstream
                    joint_new.origin.rotation[2] = -math.pi/2.0 \
                        - 0.1864836732051034
                elif 'front' in laser_frame.lower():
                    joint_new.origin.rotation[2] = 0
                robot.add_joint(joint_new)
    # Add a z-forward optical frame next to every camera frame
    # (REP 103/120 camera convention).
    camera_frames = [c for c in robot.links.keys() if 'camera' in c.lower()]
    for camera_frame in camera_frames:
        camera_optical_frame = camera_frame[:-6] + '_optical_frame'
        robot.add_link(ur.Link(camera_optical_frame))
        robot.add_joint(ur.Joint('%s_fixedjoint' % camera_optical_frame,
                        camera_frame, camera_optical_frame, 'fixed', None,
                        ur.Pose((0, 0, 0), (-math.pi/2.0, 0, -math.pi/2.0))))
    # add dummy physics for gazebo simulation
    add_dummy_inertia(['Finger', 'Thumb', 'gripper', 'Fsr'])
    add_dummy_collision(['Fsr'])
def add_transmission_tags():
    """Should instanciate all transmission tags.

    - hardware interface
    - mechanical reduction ratio for each motor
    - joint and actuator to which the transmission tag reference

    Currently a no-op: naoTransmission.xacro has been written by hand,
    based on the work of Konstantinos Chatzilygeroudis in his nao_dcm
    project (https://github.com/costashatz/nao_dcm).
    """
    global robot
    # TODO create all transmission elements: cannot get them from the lib for now
    return
def add_gazebo_tags():
    """Should instanciate all gazebo tags.

    - sensor plugins
    - mimic joints plugins
    - ros_control plugin
    - disable_links plugins
    - gazebo reference for every link (ok)

    Currently a no-op: naoGazebo.xacro has been written by hand, based
    on the work of Konstantinos Chatzilygeroudis in his nao_dcm project
    (https://github.com/costashatz/nao_dcm).
    """
    global robot
    # TODO instantiate plugins according to sensors present in input urdf
    return
def add_dummy_inertia(keywords):
    """Add a dummy Inertial tag to every link whose name contains a keyword.

    :param keywords: substrings to look for in link names; every link of
     the global ``robot`` whose name contains one of them gets a tiny
     placeholder inertia.
    """
    # NOTE: the parameter was previously named `list`, shadowing the builtin;
    # renamed (all visible call sites pass it positionally).
    global robot
    for keyword in keywords:
        for link in robot.links:
            # `in` replaces the old `.find(keyword) != -1` idiom.
            if keyword in robot.links[link].name:
                robot.links[link].inertial = ur.Inertial(
                    1.1e-9, 0.0, 0.0, 1.1e-9, 0.0, 1.1e-9, 2e-06)
def add_dummy_collision(list):
    """Attach a small box <collision> tag to matching links.

    Every link whose name contains one of the given keywords receives a
    1x1x0.5 cm box collision at the identity pose.

    :param : list, keywords to look for in link names
    """
    global robot
    # NOTE: the parameter shadows the ``list`` builtin; the name is kept
    # for backward compatibility with existing callers.
    for keyword in list:
        for key in robot.links:
            if keyword in robot.links[key].name:
                robot.links[key].collision = ur.Collision(
                    ur.Box([0.01, 0.01, 0.005]),
                    ur.Pose((0, 0, 0), (0, 0, 0)))
##################
##### Meshes #####
##################
def create_visual_xacro():
    """Create a <ROBOT>_visual_collisions.xacro file.

    The file holds one xacro macro per link carrying its visual and
    collision tags. For NAO, it also checks whether the meshes are
    installed on the computer and sets the 'meshes_installed' property
    accordingly, so a geometric fallback can be used without the meshes.
    """
    global robot
    global OUTPUT
    global MESH_VERSION
    global NAME
    global LINKS_DICO
    global VISU_DICO
    global OFFSETS_DICO
    global MESHPKG
    prefix = 'insert_visu_'
    doc = Document()
    root = doc.createElement('robot')
    doc.appendChild(root)
    root.setAttribute("xmlns:xacro", "http://www.ros.org/wiki/xacro")
    cmd = 'rospack find ' + NAME + MESHPKG
    try:
        path_mesh_pkg = subprocess.check_output(
            cmd, stderr=subprocess.STDOUT, shell=True)[:-1]
    # Narrowed from a bare ``except:`` which also swallowed
    # KeyboardInterrupt/SystemExit.
    except subprocess.CalledProcessError:
        print('unable to find ' + NAME + MESHPKG + ' package')
        sys.exit(0)
    # Declare the xacro properties used by the NAO-specific macros.
    if NAME == 'nao':
        node = ur.short(doc, 'xacro:property', 'name', 'PI_2')
        node.setAttribute('value', str(math.pi / 2.0))
        root.appendChild(node)
        node = ur.short(doc, 'xacro:property', 'name', 'meshes_installed')
        if os.path.isdir(os.path.join(path_mesh_pkg, 'meshes', MESH_VERSION)):
            node.setAttribute('value', 'true')
        else:
            node.setAttribute('value', 'false')
        root.appendChild(node)
    # Wrap each link's visual/collision pair in its own xacro macro.
    for link in robot.links:
        (tempVisu, tempCol) = adjustMeshPath(path_mesh_pkg, link)
        if robot.links[link].visual is not None:
            robot.links[link].xacro = 'xacro:' + prefix + \
                robot.links[link].name
            node = ur.short(doc, 'xacro:macro', 'name', prefix +
                            robot.links[link].name)
            if NAME == 'nao':
                # Geometric fallback used when the meshes are absent.
                node2 = ur.short(doc, 'xacro:unless', 'value',
                                 '${meshes_installed}')
                if tempVisu is not None:
                    node2.appendChild(tempVisu.to_xml(doc))
                if tempCol is not None:
                    node2.appendChild(tempCol.to_xml(doc))
                node.appendChild(node2)
                node3 = ur.short(doc, 'xacro:if', 'value',
                                 '${meshes_installed}')
                node3.appendChild(robot.links[link].visual.to_xml(doc))
                node3.appendChild(robot.links[link].collision.to_xml(doc))
                node.appendChild(node3)
            else:
                node.appendChild(robot.links[link].visual.to_xml(doc))
                node.appendChild(robot.links[link].collision.to_xml(doc))
            root.appendChild(node)
            # The tags now live in the macro, not in the link itself.
            robot.links[link].visual = None
            robot.links[link].collision = None
    filename = OUTPUT[0:OUTPUT.rfind('.')] + '_visual_collisions.xacro'
    write_comments_in_xacro(doc, filename)
#################################
######## XACRO FUNCTIONS ########
#################################
def export_robot_element(element):
    """
    Export the robot elements related to the keyword 'element'.

    :param : element, string in ['Transmission', 'Gazebo', 'material']

    The output file is <ROBOT>_<element>.xacro
    """
    global robot, OUTPUT
    doc = Document()
    root = doc.createElement('robot')
    doc.appendChild(root)
    root.setAttribute("xmlns:xacro", "http://www.ros.org/wiki/xacro")
    for i in robot.elements:
        try:
            if element == 'Transmission':
                if i.name.find(element) != -1:
                    root.appendChild(i.to_xml(doc))
            elif element == 'Gazebo':
                if i.reference is not None:
                    root.appendChild(i.to_xml(doc))
                elif i.plugins != []:
                    root.appendChild(i.to_xml(doc))
            elif element == 'material':
                # isinstance is the idiomatic, subclass-safe type check
                # (was ``type(i) == ...``).
                if isinstance(i, naoqi_tools.urdf.Material):
                    root.appendChild(i.to_xml(doc))
        except AttributeError:
            # Elements lacking the inspected attribute are simply skipped.
            pass
    filename = OUTPUT[0:OUTPUT.rfind('.')] + '_' + str(element) + '.xacro'
    print('exporting ' + element + ' xacro')
    write_comments_in_xacro(doc, filename)
def export_robot_to_xacro_files():
    """
    Export the entire 'robot' in several xacro files.

    One xacro file per kinematic chain (<ROBOT>_legs.xacro,
    <ROBOT>_arms.xacro, <ROBOT>_torso.xacro...)
    Xacro file for specific parts of the robot (<ROBOT>_fingers.xacro,
    <ROBOT>_sensors.xacro)
    One xacro file for visual elements (<ROBOT>_visual_collision.xacro,
    <ROBOT>_material.xacro)
    One xacro file per type of element needed for gazebo simulation
    (<ROBOT>_Gazebo.xacro, <ROBOT>_Transmission.xacro)
    One generic robot file which includes all the other ones
    (<ROBOT>_robot.xacro)
    """
    global robot, OUTPUT, NAME
    doc = Document()
    root = doc.createElement('robot')
    doc.appendChild(root)
    root.setAttribute("xmlns:xacro", "http://www.ros.org/wiki/xacro")
    root.setAttribute("name", robot.name)
    # The master file first includes the visual/collision macros.
    root.appendChild(ur.short(doc, 'xacro:include', 'filename', NAME +
                     '_visual_collisions.xacro'))
    create_visual_xacro()
    # One exported file plus one include entry per kinematic chain.
    for i in XACRO_DICO.keys():
        print('exporting ' + NAME + '_' + i + '.xacro')
        if i.find('eye') != -1:
            # Eye chains hang off the head, not the default torso chain.
            export_kinematic_chain_to_xacro(i, 'HeadRoll_link',
                                            'HeadRoll_link')
        else:
            export_kinematic_chain_to_xacro(i)
        filenamerobot = NAME + '_' + i + '.xacro'
        root.appendChild(ur.short(doc, 'xacro:include', 'filename',
                         filenamerobot))
    # Transmission elements not available from Aldebaran libraries yet
    export_robot_element('Transmission')
    root.appendChild(ur.short(doc, 'xacro:include', 'filename', NAME +
                     '_Transmission.xacro'))
    # Gazebo Plugin not available from Aldebaran libraries yet
    export_robot_element('Gazebo')
    root.appendChild(ur.short(doc, 'xacro:include', 'filename', NAME +
                     '_Gazebo.xacro'))
    root.appendChild(ur.short(doc, 'xacro:include', 'filename', NAME +
                     '_sensors.xacro'))
    export_list_to_xacro(['_frame'], OUTPUT[0:OUTPUT.rfind('.')] +
                         '_sensors.xacro')
    root.appendChild(ur.short(doc, 'xacro:include', 'filename', NAME +
                     '_fingers.xacro'))
    export_list_to_xacro(['Finger', 'Thumb'], OUTPUT[0:OUTPUT.rfind('.')] +
                         '_fingers.xacro')
    # Robot-specific extras: wheels for pepper, head cap for romeo.
    if NAME == 'pepper':
        root.appendChild(ur.short(doc, 'xacro:include', 'filename', NAME +
                         '_wheels.xacro'))
        export_list_to_xacro(['Wheel'], OUTPUT[0:OUTPUT.rfind('.')] +
                             '_wheels.xacro')
    if NAME == 'romeo':
        root.appendChild(ur.short(doc, 'xacro:include', 'filename',
                         'romeo_cap.xacro'))
    filename = OUTPUT[0:OUTPUT.rfind('.')] + '_robot.xacro'
    write_comments_in_xacro(doc, filename)
    print('output directory : ' + OUTPUT[0:OUTPUT.rfind('/') + 1])
def _append_chain_element(root, doc, name):
    """Append the link or joint called *name* to *root*; warn if unknown."""
    try:
        root.appendChild(robot.links[name].to_xml(doc))
    except KeyError:
        try:
            root.appendChild(robot.joints[name].to_xml(doc))
        except KeyError:
            # BUGFIX: the original message lacked the separating space.
            print('unknown element ' + name)


def _append_chain(root, doc, chain, chainRef, keep_duplicates=False):
    """Append every element of *chain* to *root*, skipping elements that
    already appear in the reference chain unless *keep_duplicates* is set."""
    for name in chain:
        # Membership test replaces the original O(n^2) duplicate scan.
        if name in chainRef and not keep_duplicates:
            continue
        _append_chain_element(root, doc, name)


def export_kinematic_chain_to_xacro(keyword, baseChain='base_link',
                                    tipRefChain='default'):
    """Export a specific kinematic chain to a xacro file.

    :param : keyword, string defining kinematic chains to export
             (legs, arms, head, torso)
    :param : baseChain, string representing the name of the link where
             the reference chain starts
    :param : tipRefChain, string representing the name of the link where
             the reference chain ends
    """
    global robot, OUTPUT
    if tipRefChain == 'default':
        print('applying torso to end of ref chain')
        tipRefChain = XACRO_DICO['torso']
    chainRef = robot.get_chain(baseChain, tipRefChain)
    print(chainRef)
    doc = Document()
    root = doc.createElement('robot')
    doc.appendChild(root)
    root.setAttribute('xmlns:xacro', 'http://www.ros.org/wiki/xacro')
    # Try the naming conventions for paired chains ('l_'/'r_', then
    # 'L'/'R'), falling back to a single unpaired chain. Both sides of a
    # pair must resolve for the pair to be accepted (this also fixes a
    # latent bug where a half-resolved pair could leak a stale chain).
    chain1 = None
    chain2 = None
    for left, right in (('l_', 'r_'), ('L', 'R'), ('', None)):
        try:
            first = robot.get_chain(baseChain, left + XACRO_DICO[keyword])
            second = None
            if right is not None:
                second = robot.get_chain(baseChain,
                                         right + XACRO_DICO[keyword])
        except KeyError:
            continue
        chain1, chain2 = first, second
        break
    else:
        print('the chain ' + keyword + ' cannot be found')
    if chain1 is not None:
        # The torso chain keeps elements shared with the reference chain.
        _append_chain(root, doc, chain1, chainRef,
                      keep_duplicates=(keyword == 'torso'))
    if chain2 is not None:
        _append_chain(root, doc, chain2, chainRef)
    filename = OUTPUT[0:OUTPUT.rfind('.')] + '_' + keyword + '.xacro'
    write_comments_in_xacro(doc, filename)
def write_comments_in_xacro(doc, filename):
    """
    Write the content of the XML Document doc to a file named filename.

    A banner comment is inserted right after the XML declaration so the
    generated files are clearly marked as auto-generated.

    :param : doc, minidom Document to write
    :param : filename, absolute path of the file to write to
    """
    # Create the target directory if needed (os.path.dirname replaces the
    # non-portable manual '/'-slicing of the original).
    directory = os.path.dirname(filename)
    if directory and not os.path.isdir(directory):
        os.makedirs(directory)
    # Render once in memory instead of the original write/read/rewrite
    # cycle (three opens, no context managers, ``file`` shadowing the
    # builtin).
    content = doc.toprettyxml()
    firstline, _, remaining = content.partition('\n')
    with open(filename, 'w') as output_file:
        output_file.write(firstline + '\n')
        output_file.write(
            '<!--**************************************************************\n'
            ' **** File automatically generated by generate_urdf.py script ****\n'
            ' **************************************************************-->\n')
        output_file.write(remaining)
def export_list_to_xacro(list, filename):
    """Export all links whose name contains one of the given strings,
    together with the joints whose child they are.

    :param : list, list of strings to look for
    :param : filename, absolute path of the file to write to
    """
    global robot, OUTPUT
    doc = Document()
    root = doc.createElement('robot')
    doc.appendChild(root)
    root.setAttribute("xmlns:xacro", "http://www.ros.org/wiki/xacro")
    print('exporting ' + os.path.basename(filename))
    for keyword in list:
        for key in robot.links:
            current = robot.links[key]
            if current.name.find(keyword) == -1:
                continue
            root.appendChild(current.to_xml(doc))
            # Also export the parent joint of each exported link.
            for joint in robot.joints:
                if robot.joints[joint].child == current.name:
                    root.appendChild(robot.joints[joint].to_xml(doc))
    write_comments_in_xacro(doc, filename)
def adjustMeshPath(path_mesh_pkg, link):
    """
    Find the path of a mesh according to the link definition.

    Set the visual and collision element of the given link.

    :param : path_mesh_pkg, absolute path of the package where the meshes
    should be located
    :param : link, dictionary key of the link we want to set
    visual and collision parameters
    :return : tempVisu, Visual element with the NAO visual geometrical shape
    for people who don't have the meshes
    :return : tempCol, Collision element with the NAO collision geometrical
    shape for people who don't have the meshes
    """
    global robot, SCALE, MESHPKG, MESH_VERSION
    tempVisu = None
    tempVisuMesh = None
    tempCol = None
    tempColMesh = None
    if robot.links[link].visual is not None:
        try:
            # Reverse-lookup the mesh name from the links dictionary.
            # BUGFIX: dict views are not indexable on Python 3; the
            # original ``LINKS_DICO.keys()[...]`` raised TypeError there,
            # which the old bare ``except:`` silently turned into the
            # fallback path below for every link.
            meshname = str(
                list(LINKS_DICO.keys())[
                    list(LINKS_DICO.values()).index(link)])
        except ValueError:
            # The link has no dictionary entry; fall back to its own name.
            meshname = link
        meshfile = meshname
        if meshfile.endswith('_link'):
            meshfile = meshfile[0:-5]
        tempVisuMesh = ur.Visual(ur.Mesh('', (SCALE, SCALE, SCALE)))
        tempColMesh = ur.Collision(ur.Mesh('', (SCALE, SCALE, SCALE)))
        tempVisuMesh.origin = robot.links[link].visual.origin
        tempColMesh.origin = robot.links[link].visual.origin
        # Base name of the mesh file referenced by the current visual tag.
        basename = robot.links[link].visual.geometry.filename[
            robot.links[link].visual.geometry.filename.rfind('/') + 1:]
        if os.path.isfile(os.path.join(path_mesh_pkg, 'meshes',
                                       MESH_VERSION, basename)):
            # The referenced mesh exists in the installed mesh package.
            tempVisuMesh.geometry.filename = os.path.join(
                'package://', NAME + MESHPKG, 'meshes', MESH_VERSION,
                basename)
        else:
            # Fall back to the dictionary-derived mesh name.
            tempVisuMesh.geometry.filename = os.path.join(
                'package://', NAME + MESHPKG, 'meshes', MESH_VERSION,
                meshfile + '.dae')
        tempColMesh.geometry.filename = \
            tempVisuMesh.geometry.filename[0:-4] + COLLISION_SUFFIX
        if NAME == 'nao':
            # Geometric fallback for users without the NAO meshes.
            try:
                tempVisu = ur.Visual(
                    VISU_DICO[meshname], ur.Material('LightGrey'),
                    dico.Nao_orig[meshname])
                tempCol = ur.Collision(VISU_DICO[meshname],
                                       dico.Nao_orig[meshname])
            except KeyError:
                # No fallback shape defined for this link.
                tempVisu = None
                tempCol = None
        robot.links[link].visual = tempVisuMesh
        robot.links[link].collision = tempColMesh
    return (tempVisu, tempCol)
##############
#### Main ####
##############
args = parser.parse_args()
# Load the robot model from the parameter server or from the given file.
# BUGFIX: the original used ``args.input is ''`` — an identity comparison
# whose result is implementation-dependent; '==' is the equality test.
if args.input == '':
    robot = URDF.from_parameter_server()
else:
    robot = URDF.load_xml_file(args.input)
# Derive the robot version from its name, else from the input file name.
if robot.name.find('V') != -1:
    VERSION = robot.name[robot.name.find('V'):]
else:
    VERSION = args.input[args.input.find('V'):args.input.find('V') + 3]
if robot.name.lower().find('nao') != -1:
    NAME = 'nao'
    try:
        import naoqi_tools.nao_dictionaries as dico
        print('import nao dictionaries')
    except ImportError:
        print('unable to import nao dictionaries')
        sys.exit(0)
    LINKS_DICO = dico.Nao_links
    VISU_DICO = dico.Nao_visu
    OFFSETS_DICO = dico.Nao_offsets
    XACRO_DICO = NAO_XACRO_DICO
    MESHPKG = '_meshes'
    SCALE = 0.1
elif robot.name.lower().find('romeo') != -1:
    NAME = 'romeo'
    try:
        import naoqi_tools.romeo_dictionaries as dico
    except ImportError:
        print('unable to import romeo dictionaries')
        sys.exit(0)
    LINKS_DICO = dico.Romeo_links
    OFFSETS_DICO = dico.Romeo_offsets
    VISU_DICO = ''
    XACRO_DICO = ROMEO_XACRO_DICO
    MESHPKG = '_description'
    SCALE = 1
# BUGFIX: str.find() returns -1 (truthy!) when the substring is absent, so
# the original bare ``find('juliette') or find('pepper')`` condition was
# true for almost any name. Compare against -1 explicitly.
elif (robot.name.lower().find('juliette') != -1 or
        robot.name.lower().find('pepper') != -1):
    NAME = 'pepper'
    try:
        import naoqi_tools.pepper_dictionaries as dico
    except ImportError:
        print('unable to import pepper dictionaries')
        sys.exit(0)
    LINKS_DICO = dico.Pepper_links
    OFFSETS_DICO = dico.Pepper_offsets
    VISU_DICO = ''
    XACRO_DICO = PEPPER_XACRO_DICO
    print('PROCESSING PEPPER ROBOT')
    MESHPKG = '_meshes'
    SCALE = 0.1
    VERSION = '1.0'
else:
    print('unknown robot type: ' + robot.name)
    sys.exit(0)
# Materials are exported to a dedicated xacro file; strip them from the
# model. Iterate over a copy because the list is mutated while looping.
for element in list(robot.elements):
    if isinstance(element, naoqi_tools.urdf.Material):
        robot.elements.remove(element)
cmd = 'rospack find ' + NAME + '_description'
try:
    pathdescription = subprocess.check_output(
        cmd, stderr=subprocess.STDOUT, shell=True)[:-1]
except subprocess.CalledProcessError:
    print('unable to find ' + NAME + '_description package')
    sys.exit(0)
OUTPUT = os.path.join(pathdescription, 'urdf', NAME + VERSION +
                      '_generated_urdf', NAME + '.urdf')
print('processing ' + NAME + ' (' + VERSION + ") robot's urdf file in " +
      OUTPUT)
cmd = 'rospack find ' + NAME + MESHPKG
try:
    path_mesh_pkg = subprocess.check_output(
        cmd, stderr=subprocess.STDOUT, shell=True)[:-1]
except subprocess.CalledProcessError:
    print('unable to find ' + NAME + MESHPKG + ' package')
    sys.exit(0)
define_materials()
if args.REP120 == 'true':
    REP120_compatibility()
for link in robot.links:
    adjustMeshPath(path_mesh_pkg, link)
# Choose the export mode requested on the command line.
if args.xacro == 'robot':
    export_robot_element('material')
    export_robot_to_xacro_files()
elif args.xacro == 'urdf':
    robot.write_xml(OUTPUT)
else:
    export_kinematic_chain_to_xacro(args.xacro)
| |
"""
Unittests for tinysort.tools
"""
from collections import defaultdict
from multiprocessing.pool import IMapUnorderedIterator
import os
import pickle
from types import GeneratorType
import pytest
import six
from tinysort import tools
def _icount_lines(path, minimum=1):
"""
Count lines by opening the file and iterating over the file.
"""
count = 0
with open(path) as f:
for l in f:
count += 1
assert count >= minimum
return count
def test_slicer_even():
    """100 items sliced by 10 give exactly 10 full, ordered tuples."""
    slices = tools.slicer(six.moves.xrange(100), 10)
    idx = -1
    for idx, chunk in enumerate(slices):
        assert isinstance(chunk, tuple)
        assert len(chunk) == 10
        # Each slice holds the next ten consecutive integers.
        assert chunk == tuple(range(idx * 10, idx * 10 + 10))
    assert idx == 9
def test_slicer_odd():
    """The final slice is short when the input does not divide evenly."""
    chunks = tools.slicer(range(5), 2)
    assert next(chunks) == (0, 1)
    assert next(chunks) == (2, 3)
    assert next(chunks) == (4,)
    with pytest.raises(StopIteration):
        next(chunks)
def _func(v):
"""
Can't pickle local functions.
"""
return v + 1
def test_runner_1job():
    """A single-job runner yields results in order via a plain generator."""
    # Renamed from ``input`` which shadowed the builtin.
    values = list(range(10))
    expected = tuple(v + 1 for v in values)
    j1 = tools.runner(_func, values, 1)
    assert isinstance(j1, tools.runner)
    assert isinstance(iter(j1), GeneratorType)
    assert tuple(j1) == expected
def test_runner_2job():
    """A multi-job runner distributes work; also exercises the context
    manager protocol."""
    # Renamed from ``input`` which shadowed the builtin.
    values = list(range(10))
    expected = tuple(v + 1 for v in values)
    with tools.runner(_func, values, 2) as j2:
        assert not j2._closed
        assert isinstance(j2, tools.runner)
        assert isinstance(iter(j2), IMapUnorderedIterator)
        # Parallel results arrive unordered, so sort before comparing.
        assert tuple(sorted(j2)) == expected
    assert j2._closed
def test_runner_next():
    """next() works on both single- and multi-job runners."""
    # Renamed from ``input`` which shadowed the builtin.
    values = list(range(10))
    expected = [v + 1 for v in values]
    r = tools.runner(_func, values, 1)
    assert next(r) == _func(values[0])
    # Multiple jobs - results are unordered, so drain everything and sort
    # before comparing.
    results = []
    with tools.runner(_func, values, 2) as proc:
        for _ in values:
            results.append(next(proc))
    assert sorted(results) == expected
def test_runner_attrs_and_exceptions():
    """Check repr() contents and argument validation."""
    r = tools.runner(_func, range(10), 2)
    text = repr(r)
    assert text.startswith(r.__class__.__name__)
    assert 'jobs=2' in text
    assert 'iterable={}'.format(repr(range(10))) in text
    # A negative job count is rejected.
    with pytest.raises(ValueError):
        tools.runner(None, None, -1)
def test_Orderable():
    """Exercise the configurable comparison behaviour of Orderable."""
    always_less = tools.Orderable(None)
    for other in (-1, 0, 1):
        assert always_less < other
        assert always_less <= other
        assert not always_less > other
        assert not always_less >= other
        assert always_less != other
    assert always_less.obj is None
    always_greater = tools.Orderable(None, lt=False, le=False,
                                     gt=True, ge=True)
    for other in (-1, 0, 1):
        assert always_greater > other
        assert always_greater >= other
        assert not always_greater < other
        assert not always_greater <= other
        assert always_greater != other
    assert always_greater.obj is None
    # Actually perform the equality test
    real_eq = tools.Orderable(None, eq=None)
    assert not real_eq is False
    assert not real_eq == 67
    # Never equal to any type
    never_eq = tools.Orderable(None, eq=False)
    assert not never_eq == 'True'
    assert not never_eq == 21
    # NOTE(review): the original comment said "always equal to any type"
    # yet asserted inequality — possibly a latent bug in this test;
    # behaviour preserved as-is.
    always_eq = tools.Orderable(None, eq=True)
    assert not always_eq == 'False'
def test_OrderableNone():
    """OrderableNone is the canonical _OrderableNone wrapping None."""
    assert isinstance(tools.OrderableNone, tools._OrderableNone)
    assert tools.OrderableNone.obj is None
    assert tools.OrderableNone != 1
def test_count_lines_exception(linecount_file):
    """
    Make sure known exceptions in `count_lines()` are raised.
    """
    with pytest.raises(ValueError):
        tools.count_lines(linecount_file(), linesep='too many chars')
@pytest.mark.parametrize("linesep", ["\n", "\r\n"])
def test_count_lines_small(linesep, linecount_file):
    """
    Count lines of a file that fits entirely in the read buffer.
    """
    path = linecount_file(linesep)
    # A buffer slightly larger than the file forces a single read.
    buffer_size = os.stat(path).st_size + 2
    assert tools.count_lines(
        path, linesep=linesep, buffer=buffer_size) == _icount_lines(path)
@pytest.mark.parametrize("linesep", ["\n", "\r\n"])
def test_count_lines_buffered(linesep, linecount_file):
    """
    Use the buffered method to count lines.
    """
    path = linecount_file(linesep)
    # A buffer of a quarter of the file size forces several reads.
    buffer_size = os.stat(path).st_size // 4
    assert tools.count_lines(
        path, linesep=linesep, buffer=buffer_size) == _icount_lines(path)
def test_count_lines_split_buffer(tmpdir):
    """
    A two-byte `linesep` split across read blocks must still be counted.
    """
    path = str(tmpdir.mkdir('test_count_lines').join('split_buffer'))
    with open(path, 'wb') as dst:
        dst.write(b'\r\nhey some words')
    # buffer=1 guarantees '\r' and '\n' land in different blocks.
    assert tools.count_lines(path, buffer=1, linesep='\r\n') == 1
def test_count_lines_literal_linesep(tmpdir):
    """
    Explicitly test a scenario where the input file contains a literal
    backslash right before a line separator.
    """
    path = str(tmpdir.mkdir('test_count_lines').join('literal_linesep'))
    with open(path, 'w') as f:
        f.write('first line with stuff' + os.linesep)
        # BUGFIX: the original used '\{}', an invalid escape sequence
        # (SyntaxWarning on modern Python) that only accidentally produced
        # the intended backslash; '\\{}' writes the same bytes explicitly.
        f.write('before \\{} after'.format(os.linesep) + os.linesep)
    assert tools.count_lines(path) == 3
def test_count_lines_empty(tmpdir):
    """
    A completely empty file has zero lines.
    """
    path = str(tmpdir.mkdir('test_count_lines').join('empty'))
    open(path, 'w').close()
    assert tools.count_lines(path) == 0
def test_count_lines_only_linesep(tmpdir):
    """
    A file holding nothing but a single `linesep` counts as one line.
    """
    path = str(tmpdir.mkdir('test_count_lines').join('only_linesep'))
    with open(path, 'w') as dst:
        dst.write(os.linesep)
    assert tools.count_lines(path) == 1
def test_count_lines_trailing_linesep(tmpdir):
    """
    Every line, including the last, ends with a `linesep`.
    """
    path = str(tmpdir.mkdir('test_count_lines').join('trailing_linesep'))
    with open(path, 'w') as dst:
        dst.writelines(
            'line{}{}'.format(i, os.linesep) for i in (1, 2, 3))
    assert tools.count_lines(path) == 3
def test_same_Orderable():
    """Two Orderables wrapping equal objects compare equal."""
    assert tools.Orderable(None) == tools.Orderable(None)
    assert tools.Orderable(1) == tools.Orderable(1)
def test_make_orderable():
    """make_orderable builds an Orderable honouring object equality."""
    assert tools.make_orderable(None) == tools.Orderable(None)
    assert tools.make_orderable(None) != tools.make_orderable(1)
def test_pickle_OrderableNone():
    """OrderableNone survives a pickle round trip with its type intact."""
    payload = pickle.dumps(tools.OrderableNone)
    assert isinstance(pickle.loads(payload), tools._OrderableNone)
def test_pickle_Orderable():
    """An Orderable survives a pickle round trip."""
    original = tools.make_orderable('stuff')
    restored = pickle.loads(pickle.dumps(original))
    assert isinstance(restored, tools.Orderable)
    assert restored.__class__.__dict__ == original.__class__.__dict__
    assert restored.obj == original.obj == 'stuff'
| |
# use this if you want to include modules from a subfolder.
# used for the unit tests to import the globs module
import os, sys, inspect
# Absolute path of the parent directory of this file, resolved through the
# current frame so it also works when the module is imported from elsewhere.
cmd_subfolder = os.path.realpath(os.path.abspath( os.path.join(os.path.split \
(inspect.getfile( inspect.currentframe() ))[0],"../")))
if cmd_subfolder not in sys.path:
    sys.path.insert(0, cmd_subfolder)
import re
import globs
class BlifIO:
    """A blif input or output port: the blif name plus a numeric io index."""
    def __init__(self,name,index):
        ##blif name of the input
        self.blifName = name
        ##io index: a clock gets index -1.
        ##It is assumed that when the reset signal modelname^reset is used it
        #gets index 0 and the rest of inputs get an index increased by 1.
        #otherwise the rest of input names start by index 0.
        self.ioIndex = index
class BlifLatch:
    """A blif .latch entry: the input and output net names."""
    def __init__(self,inputNet,outputNet):
        ##the blif name of the input net
        self.inputNet = inputNet
        ##the blif name of the output net
        self.outputNet = outputNet
class BlifNames:
    """A blif .names entry (LUT): input nets, output net and PLA content."""
    def __init__(self,inputNets,outputNet,content):
        ##list of blif input net names
        self.inputNets = inputNets
        ##the blif name of the output net
        self.outputNet = outputNet
        #ABC format of the content of a logic gate:
        #1) if the content consist of only a 1 or 0, then we have
        #   a constant 1 or 0 as the output
        #2) else we have at least one a PLA description line
        #   like 0101-1 1 or 01- 0
        #internal representation:
        # For 1) we store a tuple ('-', constant output value)
        # and for 2) we store the two given values:
        # (k-input,output value)
        self.content = content
#an object to collect all information gathered in the blif file.
class BlifFile:
    """Aggregate of everything parsed from a blif file: ios, latches,
    names (LUTs) and the circuit's model name."""
    def __init__(self):
        ##the input blif names: a list of BlifIO objects (blifname,ioIndex)
        self.inputs = []
        ##the output blif names: a list of BlifIO objects (blifname,ioIndex)
        self.outputs = []
        ##a list of BlifLatch objects
        self.latches = []
        ##a list of BlifNames objects
        self.names = []
        #the blif model name of the circuit.
        self.circuitName = ""
##needed by vpr > 8: extract only the modelname of a blif file
#@return the modelname, or None when no .model entry exists
def extractModelName(filename):
    """Return the model name from the first '.model' line of the given
    blif file, or None if the file contains no model entry."""
    # BUGFIX: the original never closed the file handle; ``with`` does.
    with open(filename,"r") as fh:
        for line in fh:
            if line.find(".model") > -1:
                #extract the modelname from the '.model <name>' line
                items = line.strip().split(' ')
                return items[1]
    return None
##parse the blif file
#@return a BlifFile object
def parseBlif(filename):
    """Parse the given blif file line by line and return a BlifFile object
    holding its inputs, outputs, latches and names (LUTs)."""
    #an object to collect all information gathered in the file.
    blifFile = BlifFile()
    # NOTE(review): fh is never closed; consider a ``with`` block.
    fh = open(filename,"r")
    line = fh.readline()
    # Parse the source blif file
    # create and add the blif objects to the blifFile object
    while len(line) > 0:
        if line.find(".model") > -1:
            #extract the modelname and save it in the blif struct
            items = line.strip().split(' ')
            #if vpr < 8 was used, the model has a name but only top is used
            #as a prefix for every signal instead the modelname
            #so we use top as the modulename here
            if globs.params.vprVersion == 8:
                blifFile.circuitName = items[1]
            else:
                blifFile.circuitName = "top"
            #read the next line
            line = fh.readline()
        #get the blif names of the inputs
        elif line.find(".inputs") > -1:
            inputoffset = 0
            #read items until no more backslashes appear
            #(a trailing backslash continues the list on the next line)
            items = line.strip().split(' ')[1:]
            while items[-1] == '\\':
                items = items[:-1]
                nextline = fh.readline()
                items = items + nextline.strip().split(' ')
            for item in items:
                # append the blif name to the global input list
                name = item.strip()
                index = -1
                if name == blifFile.circuitName + '^clock':
                    #the clock keeps the special index -1
                    index = -1
                elif name == blifFile.circuitName + '^reset':
                    # set reset to the first input pin; all following
                    # numbered inputs are shifted by one
                    index = 0
                    inputoffset += 1
                elif name == blifFile.circuitName + '^in':
                    # just one input
                    index = 0
                else:
                    #extract the index from the trailing number in the name
                    nums = re.findall(r'\d+', item)
                    nums = [int(i) for i in nums ]
                    index = nums[-1] + inputoffset
                #add the io to the blifFile object
                blifIO = BlifIO(name,index)
                blifFile.inputs.append(blifIO)
            #read the next line
            line = fh.readline()
        #get the blif names of the outputs
        elif line.find(".outputs") > -1:
            #read items until no more backslashes appear
            items = line.strip().split(' ')[1:]
            while items[-1] == '\\':
                items = items[:-1]
                nextline = fh.readline()
                items = items + nextline.strip().split(' ')
            for item in items:
                # append the blif name to the global output list
                name = item.strip()
                if name == blifFile.circuitName + '^out':
                    # just one output
                    index = 0
                else:
                    #extract the index from the trailing number in the name
                    nums = re.findall(r'\d+', item)
                    nums = [int(i) for i in nums ]
                    index = nums[-1]
                #add the io to the blifFile object
                blifIO = BlifIO(name,index)
                blifFile.outputs.append(blifIO)
            #read the next line
            line = fh.readline()
        #got a latch
        elif line.find(".latch") > -1:
            #read items until no more backslashes
            items = line.strip().split(' ',1)[1].strip().split(' ')
            while items[-1] == '\\':
                items = items[:-1]
                nextline = fh.readline()
                items = items + nextline.strip().split(' ')
            #get the net names
            inputNet = items[0]
            outputNet = items[1]
            #add the latch to the blifFile object
            blifLatch = BlifLatch(inputNet,outputNet)
            blifFile.latches.append(blifLatch)
            #read the next line
            line = fh.readline()
        #got a lut.
        elif line.find(".names") > -1:
            #first read the input nets and output net names, then parse the content
            #read items until no more backslashes appear
            items = line.strip().split(' ')[1:]
            while items[-1] == '\\':
                items = items[:-1]
                nextline = fh.readline()
                items = items + nextline.strip().split(' ')
            #parse input nets and output nets
            inputNets = items[:-1]
            outputNet = items[-1]
            #now read the content
            line = fh.readline()
            items = line.split()
            content = []
            #ABC format of the content of a logic gate:
            #1) if the content consist of only a 1 or 0, then we have
            #   a constant 1 or 0 as the output
            #2) else we have at least one a PLA description line
            #   like 0101-1 1 or 01- 0
            #internal representation:
            # For 1) we store a tuple ('-', constant output value)
            # and for 2) we store the two given values:
            # (k-input,output value)
            # NOTE(review): an empty line here would make items[-1] raise
            # IndexError; assumes content lines follow .names directly.
            while items[-1] == '1' or items[-1] == '0' :
                #option 1)
                #just a single output value
                if (len(items) < 2):
                    content.append(('-',items[0]))
                #option 2)
                else:
                    content.append((items[0],items[1]))
                line = fh.readline()
                items = line.split()
            #assign the content and other infos to a blifFile object
            blifNames = BlifNames(inputNets,outputNet,content)
            blifFile.names.append(blifNames)
        else:
            #read the next line
            line = fh.readline()
    #return the gathered infos
    return blifFile
def simpleTest():
    """Parse 'abc_out.blif' with vpr-7 settings and dump the parsed
    inputs, outputs, latches and names to stdout."""
    globs.init()
    globs.load_params()
    # force the vpr 7 naming scheme ('top' prefix instead of the modelname)
    globs.params.vprVersion = 7
    blif = parseBlif('abc_out.blif')
    print "\ninputs \n"
    for input in blif.inputs:
        print str(input.blifName) +" "+ str(input.ioIndex) + "\n"
    print "\noutputs \n"
    for output in blif.outputs:
        print str(output.blifName) +" "+ str(output.ioIndex) + "\n"
    print "\n latchces \n"
    for latch in blif.latches:
        print str(latch.inputNet) +" "+ str(latch.outputNet) + "\n"
    print "\n names \n"
    for name in blif.names:
        print str(name.inputNets) +" "+ str(name.outputNet) + "\n"
        print str(name.content)
def main():
    """Entry point: run the simple parser self test."""
    simpleTest()
if __name__ == '__main__':
    main()
| |
# Copyright (C) 2012 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# vim: tabstop=4 shiftwidth=4 softtabstop=4
import unittest
import logging
import struct
import array
import inspect
from nose.tools import *
from nose.plugins.skip import Skip, SkipTest
from ryu.ofproto import ether, inet
from ryu.lib.packet import *
from ryu.lib import addrconv
LOG = logging.getLogger('test_packet')
class TestPacket(unittest.TestCase):
""" Test case for packet
"""
dst_mac = 'aa:aa:aa:aa:aa:aa'
src_mac = 'bb:bb:bb:bb:bb:bb'
dst_mac_bin = addrconv.mac.text_to_bin(dst_mac)
src_mac_bin = addrconv.mac.text_to_bin(src_mac)
dst_ip = '192.168.128.10'
src_ip = '192.168.122.20'
dst_ip_bin = addrconv.ipv4.text_to_bin(dst_ip)
src_port = 50001
dst_port = 50002
src_ip_bin = addrconv.ipv4.text_to_bin(src_ip)
payload = '\x06\x06\x47\x50\x00\x00\x00\x00' \
+ '\xcd\xc5\x00\x00\x00\x00\x00\x00' \
+ '\x10\x11\x12\x13\x14\x15\x16\x17' \
+ '\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f'
def get_protocols(self, pkt):
protocols = {}
for p in pkt:
if hasattr(p, 'protocol_name'):
protocols[p.protocol_name] = p
else:
protocols['payload'] = p
return protocols
def setUp(self):
pass
def tearDown(self):
pass
def test_arp(self):
# buid packet
e = ethernet.ethernet(self.dst_mac, self.src_mac,
ether.ETH_TYPE_ARP)
a = arp.arp(1, ether.ETH_TYPE_IP, 6, 4, 2,
self.src_mac, self.src_ip, self.dst_mac,
self.dst_ip)
p = packet.Packet()
p.add_protocol(e)
p.add_protocol(a)
p.serialize()
# ethernet !6s6sH
e_buf = self.dst_mac_bin \
+ self.src_mac_bin \
+ '\x08\x06'
# arp !HHBBH6sI6sI
a_buf = '\x00\x01' \
+ '\x08\x00' \
+ '\x06' \
+ '\x04' \
+ '\x00\x02' \
+ self.src_mac_bin \
+ self.src_ip_bin \
+ self.dst_mac_bin \
+ self.dst_ip_bin
buf = e_buf + a_buf
eq_(buf, p.data)
# parse
pkt = packet.Packet(array.array('B', p.data))
protocols = self.get_protocols(pkt)
p_eth = protocols['ethernet']
p_arp = protocols['arp']
# ethernet
ok_(p_eth)
eq_(self.dst_mac, p_eth.dst)
eq_(self.src_mac, p_eth.src)
eq_(ether.ETH_TYPE_ARP, p_eth.ethertype)
# arp
ok_(p_arp)
eq_(1, p_arp.hwtype)
eq_(ether.ETH_TYPE_IP, p_arp.proto)
eq_(6, p_arp.hlen)
eq_(4, p_arp.plen)
eq_(2, p_arp.opcode)
eq_(self.src_mac, p_arp.src_mac)
eq_(self.src_ip, p_arp.src_ip)
eq_(self.dst_mac, p_arp.dst_mac)
eq_(self.dst_ip, p_arp.dst_ip)
# to string
eth_values = {'dst': self.dst_mac,
'src': self.src_mac,
'ethertype': ether.ETH_TYPE_ARP}
_eth_str = ','.join(['%s=%s' % (k, repr(eth_values[k]))
for k, v in inspect.getmembers(p_eth)
if k in eth_values])
eth_str = '%s(%s)' % (ethernet.ethernet.__name__, _eth_str)
arp_values = {'hwtype': 1,
'proto': ether.ETH_TYPE_IP,
'hlen': 6,
'plen': 4,
'opcode': 2,
'src_mac': self.src_mac,
'dst_mac': self.dst_mac,
'src_ip': self.src_ip,
'dst_ip': self.dst_ip}
_arp_str = ','.join(['%s=%s' % (k, repr(arp_values[k]))
for k, v in inspect.getmembers(p_arp)
if k in arp_values])
arp_str = '%s(%s)' % (arp.arp.__name__, _arp_str)
pkt_str = '%s, %s' % (eth_str, arp_str)
eq_(eth_str, str(p_eth))
eq_(eth_str, repr(p_eth))
eq_(arp_str, str(p_arp))
eq_(arp_str, repr(p_arp))
eq_(pkt_str, str(pkt))
eq_(pkt_str, repr(pkt))
    def test_vlan_arp(self):
        """Round-trip an Ethernet/802.1Q/ARP frame.

        Same shape as test_arp but with a VLAN tag (pcp=0b111, cfi=1,
        vid=3) inserted between the Ethernet and ARP headers.
        """
        # build packet
        e = ethernet.ethernet(self.dst_mac, self.src_mac,
                              ether.ETH_TYPE_8021Q)
        v = vlan.vlan(0b111, 0b1, 3, ether.ETH_TYPE_ARP)
        a = arp.arp(1, ether.ETH_TYPE_IP, 6, 4, 2,
                    self.src_mac, self.src_ip, self.dst_mac,
                    self.dst_ip)
        p = packet.Packet()
        p.add_protocol(e)
        p.add_protocol(v)
        p.add_protocol(a)
        p.serialize()
        # ethernet !6s6sH
        e_buf = self.dst_mac_bin \
            + self.src_mac_bin \
            + '\x81\x00'
        # vlan !HH
        v_buf = '\xF0\x03' \
            + '\x08\x06'
        # arp !HHBBH6sI6sI
        a_buf = '\x00\x01' \
            + '\x08\x00' \
            + '\x06' \
            + '\x04' \
            + '\x00\x02' \
            + self.src_mac_bin \
            + self.src_ip_bin \
            + self.dst_mac_bin \
            + self.dst_ip_bin
        buf = e_buf + v_buf + a_buf
        eq_(buf, p.data)
        # parse
        pkt = packet.Packet(array.array('B', p.data))
        protocols = self.get_protocols(pkt)
        p_eth = protocols['ethernet']
        p_vlan = protocols['vlan']
        p_arp = protocols['arp']
        # ethernet
        ok_(p_eth)
        eq_(self.dst_mac, p_eth.dst)
        eq_(self.src_mac, p_eth.src)
        eq_(ether.ETH_TYPE_8021Q, p_eth.ethertype)
        # vlan
        ok_(p_vlan)
        eq_(0b111, p_vlan.pcp)
        eq_(0b1, p_vlan.cfi)
        eq_(3, p_vlan.vid)
        eq_(ether.ETH_TYPE_ARP, p_vlan.ethertype)
        # arp
        ok_(p_arp)
        eq_(1, p_arp.hwtype)
        eq_(ether.ETH_TYPE_IP, p_arp.proto)
        eq_(6, p_arp.hlen)
        eq_(4, p_arp.plen)
        eq_(2, p_arp.opcode)
        eq_(self.src_mac, p_arp.src_mac)
        eq_(self.src_ip, p_arp.src_ip)
        eq_(self.dst_mac, p_arp.dst_mac)
        eq_(self.dst_ip, p_arp.dst_ip)
        # to string
        eth_values = {'dst': self.dst_mac,
                      'src': self.src_mac,
                      'ethertype': ether.ETH_TYPE_8021Q}
        _eth_str = ','.join(['%s=%s' % (k, repr(eth_values[k]))
                             for k, v in inspect.getmembers(p_eth)
                             if k in eth_values])
        eth_str = '%s(%s)' % (ethernet.ethernet.__name__, _eth_str)
        vlan_values = {'pcp': 0b111,
                       'cfi': 0b1,
                       'vid': 3,
                       'ethertype': ether.ETH_TYPE_ARP}
        _vlan_str = ','.join(['%s=%s' % (k, repr(vlan_values[k]))
                              for k, v in inspect.getmembers(p_vlan)
                              if k in vlan_values])
        vlan_str = '%s(%s)' % (vlan.vlan.__name__, _vlan_str)
        arp_values = {'hwtype': 1,
                      'proto': ether.ETH_TYPE_IP,
                      'hlen': 6,
                      'plen': 4,
                      'opcode': 2,
                      'src_mac': self.src_mac,
                      'dst_mac': self.dst_mac,
                      'src_ip': self.src_ip,
                      'dst_ip': self.dst_ip}
        _arp_str = ','.join(['%s=%s' % (k, repr(arp_values[k]))
                             for k, v in inspect.getmembers(p_arp)
                             if k in arp_values])
        arp_str = '%s(%s)' % (arp.arp.__name__, _arp_str)
        pkt_str = '%s, %s, %s' % (eth_str, vlan_str, arp_str)
        eq_(eth_str, str(p_eth))
        eq_(eth_str, repr(p_eth))
        eq_(vlan_str, str(p_vlan))
        eq_(vlan_str, repr(p_vlan))
        eq_(arp_str, str(p_arp))
        eq_(arp_str, repr(p_arp))
        eq_(pkt_str, str(pkt))
        eq_(pkt_str, repr(pkt))
    def test_ipv4_udp(self):
        """Round-trip an Ethernet/IPv4/UDP packet with payload.

        Verifies parsed header fields, validates the IPv4 and UDP
        checksums by re-summing the buffers, and checks str()/repr().
        """
        # build packet
        e = ethernet.ethernet(self.dst_mac, self.src_mac,
                              ether.ETH_TYPE_IP)
        ip = ipv4.ipv4(4, 5, 1, 0, 3, 1, 4, 64, inet.IPPROTO_UDP, 0,
                       self.src_ip, self.dst_ip)
        u = udp.udp(0x190F, 0x1F90, 0, 0)
        p = packet.Packet()
        p.add_protocol(e)
        p.add_protocol(ip)
        p.add_protocol(u)
        p.add_protocol(self.payload)
        p.serialize()
        # ethernet !6s6sH
        e_buf = self.dst_mac_bin \
            + self.src_mac_bin \
            + '\x08\x00'
        # ipv4 !BBHHHBBHII
        ip_buf = '\x45' \
            + '\x01' \
            + '\x00\x3C' \
            + '\x00\x03' \
            + '\x20\x04' \
            + '\x40' \
            + '\x11' \
            + '\x00\x00' \
            + self.src_ip_bin \
            + self.dst_ip_bin
        # udp !HHHH
        u_buf = '\x19\x0F' \
            + '\x1F\x90' \
            + '\x00\x28' \
            + '\x00\x00'
        # NOTE(review): buf carries zeroed checksum fields, so it is not
        # compared to p.data (serialize() fills the checksums); the
        # checksums are validated separately below — confirm intentional.
        buf = e_buf + ip_buf + u_buf + self.payload
        # parse
        pkt = packet.Packet(array.array('B', p.data))
        protocols = self.get_protocols(pkt)
        p_eth = protocols['ethernet']
        p_ipv4 = protocols['ipv4']
        p_udp = protocols['udp']
        # ethernet
        ok_(p_eth)
        eq_(self.dst_mac, p_eth.dst)
        eq_(self.src_mac, p_eth.src)
        eq_(ether.ETH_TYPE_IP, p_eth.ethertype)
        # ipv4
        ok_(p_ipv4)
        eq_(4, p_ipv4.version)
        eq_(5, p_ipv4.header_length)
        eq_(1, p_ipv4.tos)
        l = len(ip_buf) + len(u_buf) + len(self.payload)
        eq_(l, p_ipv4.total_length)
        eq_(3, p_ipv4.identification)
        eq_(1, p_ipv4.flags)
        eq_(64, p_ipv4.ttl)
        eq_(inet.IPPROTO_UDP, p_ipv4.proto)
        eq_(self.src_ip, p_ipv4.src)
        eq_(self.dst_ip, p_ipv4.dst)
        # splice the parsed checksum back in; a valid header re-sums to 0
        t = bytearray(ip_buf)
        struct.pack_into('!H', t, 10, p_ipv4.csum)
        eq_(packet_utils.checksum(t), 0)
        # udp
        ok_(p_udp)
        eq_(0x190f, p_udp.src_port)
        eq_(0x1F90, p_udp.dst_port)
        eq_(len(u_buf) + len(self.payload), p_udp.total_length)
        eq_(0x77b2, p_udp.csum)
        t = bytearray(u_buf)
        struct.pack_into('!H', t, 6, p_udp.csum)
        # prepend the IPv4 pseudo-header required by the UDP checksum
        ph = struct.pack('!4s4sBBH', self.src_ip_bin, self.dst_ip_bin, 0,
                         17, len(u_buf) + len(self.payload))
        t = ph + t + self.payload
        eq_(packet_utils.checksum(t), 0)
        # payload
        ok_('payload' in protocols)
        eq_(self.payload, protocols['payload'].tostring())
        # to string
        eth_values = {'dst': self.dst_mac,
                      'src': self.src_mac,
                      'ethertype': ether.ETH_TYPE_IP}
        _eth_str = ','.join(['%s=%s' % (k, repr(eth_values[k]))
                             for k, v in inspect.getmembers(p_eth)
                             if k in eth_values])
        eth_str = '%s(%s)' % (ethernet.ethernet.__name__, _eth_str)
        ipv4_values = {'version': 4,
                       'header_length': 5,
                       'tos': 1,
                       'total_length': l,
                       'identification': 3,
                       'flags': 1,
                       'offset': p_ipv4.offset,
                       'ttl': 64,
                       'proto': inet.IPPROTO_UDP,
                       'csum': p_ipv4.csum,
                       'src': self.src_ip,
                       'dst': self.dst_ip,
                       'option': None}
        _ipv4_str = ','.join(['%s=%s' % (k, repr(ipv4_values[k]))
                              for k, v in inspect.getmembers(p_ipv4)
                              if k in ipv4_values])
        ipv4_str = '%s(%s)' % (ipv4.ipv4.__name__, _ipv4_str)
        udp_values = {'src_port': 0x190f,
                      'dst_port': 0x1F90,
                      'total_length': len(u_buf) + len(self.payload),
                      'csum': 0x77b2}
        _udp_str = ','.join(['%s=%s' % (k, repr(udp_values[k]))
                             for k, v in inspect.getmembers(p_udp)
                             if k in udp_values])
        udp_str = '%s(%s)' % (udp.udp.__name__, _udp_str)
        pkt_str = '%s, %s, %s, %s' % (eth_str, ipv4_str, udp_str,
                                      repr(protocols['payload']))
        eq_(eth_str, str(p_eth))
        eq_(eth_str, repr(p_eth))
        eq_(ipv4_str, str(p_ipv4))
        eq_(ipv4_str, repr(p_ipv4))
        eq_(udp_str, str(p_udp))
        eq_(udp_str, repr(p_udp))
        eq_(pkt_str, str(pkt))
        eq_(pkt_str, repr(pkt))
    def test_ipv4_tcp(self):
        """Round-trip an Ethernet/IPv4/TCP packet with payload.

        Verifies parsed fields, TCP option padding, both checksums
        (via pseudo-header re-summing), and str()/repr() output.
        NOTE: the local name 't' is first the tcp header object and is
        later reused as a scratch bytearray for checksum validation.
        """
        # build packet
        e = ethernet.ethernet(self.dst_mac, self.src_mac,
                              ether.ETH_TYPE_IP)
        ip = ipv4.ipv4(4, 5, 0, 0, 0, 0, 0, 64, inet.IPPROTO_TCP, 0,
                       self.src_ip, self.dst_ip)
        t = tcp.tcp(0x190F, 0x1F90, 0x123, 1, 6, 0b101010, 2048, 0, 0x6f,
                    '\x01\x02')
        p = packet.Packet()
        p.add_protocol(e)
        p.add_protocol(ip)
        p.add_protocol(t)
        p.add_protocol(self.payload)
        p.serialize()
        # ethernet !6s6sH
        e_buf = self.dst_mac_bin \
            + self.src_mac_bin \
            + '\x08\x00'
        # ipv4 !BBHHHBBHII
        ip_buf = '\x45' \
            + '\x00' \
            + '\x00\x4C' \
            + '\x00\x00' \
            + '\x00\x00' \
            + '\x40' \
            + '\x06' \
            + '\x00\x00' \
            + self.src_ip_bin \
            + self.dst_ip_bin
        # tcp !HHIIBBHHH + option
        t_buf = '\x19\x0F' \
            + '\x1F\x90' \
            + '\x00\x00\x01\x23' \
            + '\x00\x00\x00\x01' \
            + '\x60' \
            + '\x2A' \
            + '\x08\x00' \
            + '\x00\x00' \
            + '\x00\x6F' \
            + '\x01\x02\x00\x00'
        # NOTE(review): buf has zeroed checksum fields, so it is never
        # compared to p.data; checksums are validated below instead.
        buf = e_buf + ip_buf + t_buf + self.payload
        # parse
        pkt = packet.Packet(array.array('B', p.data))
        protocols = self.get_protocols(pkt)
        p_eth = protocols['ethernet']
        p_ipv4 = protocols['ipv4']
        p_tcp = protocols['tcp']
        # ethernet
        ok_(p_eth)
        eq_(self.dst_mac, p_eth.dst)
        eq_(self.src_mac, p_eth.src)
        eq_(ether.ETH_TYPE_IP, p_eth.ethertype)
        # ipv4
        ok_(p_ipv4)
        eq_(4, p_ipv4.version)
        eq_(5, p_ipv4.header_length)
        eq_(0, p_ipv4.tos)
        l = len(ip_buf) + len(t_buf) + len(self.payload)
        eq_(l, p_ipv4.total_length)
        eq_(0, p_ipv4.identification)
        eq_(0, p_ipv4.flags)
        eq_(64, p_ipv4.ttl)
        eq_(inet.IPPROTO_TCP, p_ipv4.proto)
        eq_(self.src_ip, p_ipv4.src)
        eq_(self.dst_ip, p_ipv4.dst)
        t = bytearray(ip_buf)
        struct.pack_into('!H', t, 10, p_ipv4.csum)
        eq_(packet_utils.checksum(t), 0)
        # tcp
        ok_(p_tcp)
        eq_(0x190f, p_tcp.src_port)
        eq_(0x1F90, p_tcp.dst_port)
        eq_(0x123, p_tcp.seq)
        eq_(1, p_tcp.ack)
        eq_(6, p_tcp.offset)
        eq_(0b101010, p_tcp.bits)
        eq_(2048, p_tcp.window_size)
        eq_(0x6f, p_tcp.urgent)
        eq_(len(t_buf), len(p_tcp))
        t = bytearray(t_buf)
        struct.pack_into('!H', t, 16, p_tcp.csum)
        # prepend the IPv4 pseudo-header required by the TCP checksum
        ph = struct.pack('!4s4sBBH', self.src_ip_bin, self.dst_ip_bin, 0,
                         6, len(t_buf) + len(self.payload))
        t = ph + t + self.payload
        eq_(packet_utils.checksum(t), 0)
        # payload
        ok_('payload' in protocols)
        eq_(self.payload, protocols['payload'].tostring())
        # to string
        eth_values = {'dst': self.dst_mac,
                      'src': self.src_mac,
                      'ethertype': ether.ETH_TYPE_IP}
        _eth_str = ','.join(['%s=%s' % (k, repr(eth_values[k]))
                             for k, v in inspect.getmembers(p_eth)
                             if k in eth_values])
        eth_str = '%s(%s)' % (ethernet.ethernet.__name__, _eth_str)
        ipv4_values = {'version': 4,
                       'header_length': 5,
                       'tos': 0,
                       'total_length': l,
                       'identification': 0,
                       'flags': 0,
                       'offset': p_ipv4.offset,
                       'ttl': 64,
                       'proto': inet.IPPROTO_TCP,
                       'csum': p_ipv4.csum,
                       'src': self.src_ip,
                       'dst': self.dst_ip,
                       'option': None}
        _ipv4_str = ','.join(['%s=%s' % (k, repr(ipv4_values[k]))
                              for k, v in inspect.getmembers(p_ipv4)
                              if k in ipv4_values])
        ipv4_str = '%s(%s)' % (ipv4.ipv4.__name__, _ipv4_str)
        tcp_values = {'src_port': 0x190f,
                      'dst_port': 0x1F90,
                      'seq': 0x123,
                      'ack': 1,
                      'offset': 6,
                      'bits': 0b101010,
                      'window_size': 2048,
                      'csum': p_tcp.csum,
                      'urgent': 0x6f,
                      'option': p_tcp.option}
        _tcp_str = ','.join(['%s=%s' % (k, repr(tcp_values[k]))
                             for k, v in inspect.getmembers(p_tcp)
                             if k in tcp_values])
        tcp_str = '%s(%s)' % (tcp.tcp.__name__, _tcp_str)
        pkt_str = '%s, %s, %s, %s' % (eth_str, ipv4_str, tcp_str,
                                      repr(protocols['payload']))
        eq_(eth_str, str(p_eth))
        eq_(eth_str, repr(p_eth))
        eq_(ipv4_str, str(p_ipv4))
        eq_(ipv4_str, repr(p_ipv4))
        eq_(tcp_str, str(p_tcp))
        eq_(tcp_str, repr(p_tcp))
        eq_(pkt_str, str(pkt))
        eq_(pkt_str, repr(pkt))
    def test_ipv4_sctp(self):
        """Round-trip an Ethernet/IPv4/SCTP packet built with the '/'
        operator, verifying default header fields, the DATA chunk
        contents, the IPv4 checksum, and str()/repr() output."""
        # build packet
        e = ethernet.ethernet()
        ip = ipv4.ipv4(proto=inet.IPPROTO_SCTP)
        s = sctp.sctp(chunks=[sctp.chunk_data(payload_data=self.payload)])
        p = e / ip / s
        p.serialize()
        ipaddr = addrconv.ipv4.text_to_bin('0.0.0.0')
        # ethernet !6s6sH
        e_buf = '\xff\xff\xff\xff\xff\xff' \
            + '\x00\x00\x00\x00\x00\x00' \
            + '\x08\x00'
        # ipv4 !BBHHHBBHII
        ip_buf = '\x45' \
            + '\x00' \
            + '\x00\x50' \
            + '\x00\x00' \
            + '\x00\x00' \
            + '\xff' \
            + '\x84' \
            + '\x00\x00' \
            + ipaddr \
            + ipaddr
        # sctp !HHII + chunk_data !BBHIHHI + payload
        s_buf = '\x00\x00' \
            + '\x00\x00' \
            + '\x00\x00\x00\x00' \
            + '\x00\x00\x00\x00' \
            + '\x00' \
            + '\x00' \
            + '\x00\x00' \
            + '\x00\x00\x00\x00' \
            + '\x00\x00' \
            + '\x00\x00' \
            + '\x00\x00\x00\x00' \
            + self.payload
        # NOTE(review): buf has zeroed checksum fields and is never
        # compared against p.data; field checks below cover parsing.
        buf = e_buf + ip_buf + s_buf
        # parse
        pkt = packet.Packet(array.array('B', p.data))
        protocols = self.get_protocols(pkt)
        p_eth = protocols['ethernet']
        p_ipv4 = protocols['ipv4']
        p_sctp = protocols['sctp']
        # ethernet
        ok_(p_eth)
        eq_('ff:ff:ff:ff:ff:ff', p_eth.dst)
        eq_('00:00:00:00:00:00', p_eth.src)
        eq_(ether.ETH_TYPE_IP, p_eth.ethertype)
        # ipv4
        ok_(p_ipv4)
        eq_(4, p_ipv4.version)
        eq_(5, p_ipv4.header_length)
        eq_(0, p_ipv4.tos)
        l = len(ip_buf) + len(s_buf)
        eq_(l, p_ipv4.total_length)
        eq_(0, p_ipv4.identification)
        eq_(0, p_ipv4.flags)
        eq_(255, p_ipv4.ttl)
        eq_(inet.IPPROTO_SCTP, p_ipv4.proto)
        eq_('0.0.0.0', p_ipv4.src)
        eq_('0.0.0.0', p_ipv4.dst)
        t = bytearray(ip_buf)
        struct.pack_into('!H', t, 10, p_ipv4.csum)
        eq_(packet_utils.checksum(t), 0)
        # sctp
        ok_(p_sctp)
        eq_(0, p_sctp.src_port)
        eq_(0, p_sctp.dst_port)
        eq_(0, p_sctp.vtag)
        assert isinstance(p_sctp.chunks[0], sctp.chunk_data)
        eq_(0, p_sctp.chunks[0]._type)
        eq_(0, p_sctp.chunks[0].unordered)
        eq_(0, p_sctp.chunks[0].begin)
        eq_(0, p_sctp.chunks[0].end)
        eq_(16 + len(self.payload), p_sctp.chunks[0].length)
        eq_(0, p_sctp.chunks[0].tsn)
        eq_(0, p_sctp.chunks[0].sid)
        eq_(0, p_sctp.chunks[0].seq)
        eq_(0, p_sctp.chunks[0].payload_id)
        eq_(self.payload, p_sctp.chunks[0].payload_data)
        eq_(len(s_buf), len(p_sctp))
        # to string
        eth_values = {'dst': 'ff:ff:ff:ff:ff:ff',
                      'src': '00:00:00:00:00:00',
                      'ethertype': ether.ETH_TYPE_IP}
        _eth_str = ','.join(['%s=%s' % (k, repr(eth_values[k]))
                             for k, v in inspect.getmembers(p_eth)
                             if k in eth_values])
        eth_str = '%s(%s)' % (ethernet.ethernet.__name__, _eth_str)
        ipv4_values = {'version': 4,
                       'header_length': 5,
                       'tos': 0,
                       'total_length': l,
                       'identification': 0,
                       'flags': 0,
                       'offset': 0,
                       'ttl': 255,
                       'proto': inet.IPPROTO_SCTP,
                       'csum': p_ipv4.csum,
                       'src': '0.0.0.0',
                       'dst': '0.0.0.0',
                       'option': None}
        _ipv4_str = ','.join(['%s=%s' % (k, repr(ipv4_values[k]))
                              for k, v in inspect.getmembers(p_ipv4)
                              if k in ipv4_values])
        ipv4_str = '%s(%s)' % (ipv4.ipv4.__name__, _ipv4_str)
        data_values = {'unordered': 0,
                       'begin': 0,
                       'end': 0,
                       'length': 16 + len(self.payload),
                       'tsn': 0,
                       'sid': 0,
                       'seq': 0,
                       'payload_id': 0,
                       'payload_data': self.payload}
        _data_str = ','.join(['%s=%s' % (k, repr(data_values[k]))
                              for k in sorted(data_values.keys())])
        data_str = '[%s(%s)]' % (sctp.chunk_data.__name__, _data_str)
        # chunks is a pre-rendered string, so no repr() here
        sctp_values = {'src_port': 0,
                       'dst_port': 0,
                       'vtag': 0,
                       'csum': p_sctp.csum,
                       'chunks': data_str}
        _sctp_str = ','.join(['%s=%s' % (k, sctp_values[k])
                              for k, _ in inspect.getmembers(p_sctp)
                              if k in sctp_values])
        sctp_str = '%s(%s)' % (sctp.sctp.__name__, _sctp_str)
        pkt_str = '%s, %s, %s' % (eth_str, ipv4_str, sctp_str)
        eq_(eth_str, str(p_eth))
        eq_(eth_str, repr(p_eth))
        eq_(ipv4_str, str(p_ipv4))
        eq_(ipv4_str, repr(p_ipv4))
        eq_(sctp_str, str(p_sctp))
        eq_(sctp_str, repr(p_sctp))
        eq_(pkt_str, str(pkt))
        eq_(pkt_str, repr(pkt))
    def test_ipv4_icmp(self):
        """Round-trip an Ethernet/IPv4/ICMP echo-request built with the
        '/' operator, verifying fields, both checksums, and str()/repr()."""
        # build packet
        e = ethernet.ethernet()
        ip = ipv4.ipv4(proto=inet.IPPROTO_ICMP)
        ic = icmp.icmp()
        p = e / ip / ic
        p.serialize()
        ipaddr = addrconv.ipv4.text_to_bin('0.0.0.0')
        # ethernet !6s6sH
        e_buf = '\xff\xff\xff\xff\xff\xff' \
            + '\x00\x00\x00\x00\x00\x00' \
            + '\x08\x00'
        # ipv4 !BBHHHBBHII
        ip_buf = '\x45' \
            + '\x00' \
            + '\x00\x1c' \
            + '\x00\x00' \
            + '\x00\x00' \
            + '\xff' \
            + '\x01' \
            + '\x00\x00' \
            + ipaddr \
            + ipaddr
        # icmp !BBH + echo !HH
        ic_buf = '\x08' \
            + '\x00' \
            + '\x00\x00' \
            + '\x00\x00' \
            + '\x00\x00'
        # NOTE(review): buf has zeroed checksum fields and is never
        # compared against p.data; checksums are validated below.
        buf = e_buf + ip_buf + ic_buf
        # parse
        pkt = packet.Packet(array.array('B', p.data))
        protocols = self.get_protocols(pkt)
        p_eth = protocols['ethernet']
        p_ipv4 = protocols['ipv4']
        p_icmp = protocols['icmp']
        # ethernet
        ok_(p_eth)
        eq_('ff:ff:ff:ff:ff:ff', p_eth.dst)
        eq_('00:00:00:00:00:00', p_eth.src)
        eq_(ether.ETH_TYPE_IP, p_eth.ethertype)
        # ipv4
        ok_(p_ipv4)
        eq_(4, p_ipv4.version)
        eq_(5, p_ipv4.header_length)
        eq_(0, p_ipv4.tos)
        l = len(ip_buf) + len(ic_buf)
        eq_(l, p_ipv4.total_length)
        eq_(0, p_ipv4.identification)
        eq_(0, p_ipv4.flags)
        eq_(255, p_ipv4.ttl)
        eq_(inet.IPPROTO_ICMP, p_ipv4.proto)
        eq_('0.0.0.0', p_ipv4.src)
        eq_('0.0.0.0', p_ipv4.dst)
        t = bytearray(ip_buf)
        struct.pack_into('!H', t, 10, p_ipv4.csum)
        eq_(packet_utils.checksum(t), 0)
        # icmp
        ok_(p_icmp)
        eq_(8, p_icmp.type)
        eq_(0, p_icmp.code)
        eq_(0, p_icmp.data.id)
        eq_(0, p_icmp.data.seq)
        eq_(len(ic_buf), len(p_icmp))
        t = bytearray(ic_buf)
        struct.pack_into('!H', t, 2, p_icmp.csum)
        eq_(packet_utils.checksum(t), 0)
        # to string
        eth_values = {'dst': 'ff:ff:ff:ff:ff:ff',
                      'src': '00:00:00:00:00:00',
                      'ethertype': ether.ETH_TYPE_IP}
        _eth_str = ','.join(['%s=%s' % (k, repr(eth_values[k]))
                             for k, _ in inspect.getmembers(p_eth)
                             if k in eth_values])
        eth_str = '%s(%s)' % (ethernet.ethernet.__name__, _eth_str)
        ipv4_values = {'version': 4,
                       'header_length': 5,
                       'tos': 0,
                       'total_length': l,
                       'identification': 0,
                       'flags': 0,
                       'offset': p_ipv4.offset,
                       'ttl': 255,
                       'proto': inet.IPPROTO_ICMP,
                       'csum': p_ipv4.csum,
                       'src': '0.0.0.0',
                       'dst': '0.0.0.0',
                       'option': None}
        _ipv4_str = ','.join(['%s=%s' % (k, repr(ipv4_values[k]))
                              for k, _ in inspect.getmembers(p_ipv4)
                              if k in ipv4_values])
        ipv4_str = '%s(%s)' % (ipv4.ipv4.__name__, _ipv4_str)
        echo_values = {'id': 0,
                       'seq': 0,
                       'data': None}
        _echo_str = ','.join(['%s=%s' % (k, repr(echo_values[k]))
                              for k in sorted(echo_values.keys())])
        echo_str = '%s(%s)' % (icmp.echo.__name__, _echo_str)
        # 'data' is the pre-rendered echo string, so no repr() here
        icmp_values = {'type': 8,
                       'code': 0,
                       'csum': p_icmp.csum,
                       'data': echo_str}
        _icmp_str = ','.join(['%s=%s' % (k, icmp_values[k])
                              for k, _ in inspect.getmembers(p_icmp)
                              if k in icmp_values])
        icmp_str = '%s(%s)' % (icmp.icmp.__name__, _icmp_str)
        pkt_str = '%s, %s, %s' % (eth_str, ipv4_str, icmp_str)
        eq_(eth_str, str(p_eth))
        eq_(eth_str, repr(p_eth))
        eq_(ipv4_str, str(p_ipv4))
        eq_(ipv4_str, repr(p_ipv4))
        eq_(icmp_str, str(p_icmp))
        eq_(icmp_str, repr(p_icmp))
        eq_(pkt_str, str(pkt))
        eq_(pkt_str, repr(pkt))
    def test_ipv6_udp(self):
        """Round-trip an Ethernet/IPv6/UDP packet with payload,
        validating the UDP checksum against the IPv6 pseudo-header
        and checking str()/repr() output."""
        # build packet
        e = ethernet.ethernet(ethertype=ether.ETH_TYPE_IPV6)
        ip = ipv6.ipv6(nxt=inet.IPPROTO_UDP)
        u = udp.udp()
        p = e / ip / u / self.payload
        p.serialize()
        ipaddr = addrconv.ipv6.text_to_bin('::')
        # ethernet !6s6sH
        e_buf = '\xff\xff\xff\xff\xff\xff' \
            + '\x00\x00\x00\x00\x00\x00' \
            + '\x86\xdd'
        # ipv6 !IHBB16s16s'
        ip_buf = '\x60\x00\x00\x00' \
            + '\x00\x00' \
            + '\x11' \
            + '\xff' \
            + '\x00\x00' \
            + ipaddr \
            + ipaddr
        # udp !HHHH
        u_buf = '\x00\x00' \
            + '\x00\x00' \
            + '\x00\x28' \
            + '\x00\x00'
        # NOTE(review): buf has a zeroed UDP checksum and is never
        # compared against p.data; the checksum is validated below.
        buf = e_buf + ip_buf + u_buf + self.payload
        # parse
        pkt = packet.Packet(array.array('B', p.data))
        protocols = self.get_protocols(pkt)
        p_eth = protocols['ethernet']
        p_ipv6 = protocols['ipv6']
        p_udp = protocols['udp']
        # ethernet
        ok_(p_eth)
        eq_('ff:ff:ff:ff:ff:ff', p_eth.dst)
        eq_('00:00:00:00:00:00', p_eth.src)
        eq_(ether.ETH_TYPE_IPV6, p_eth.ethertype)
        # ipv6
        ok_(p_ipv6)
        eq_(6, p_ipv6.version)
        eq_(0, p_ipv6.traffic_class)
        eq_(0, p_ipv6.flow_label)
        eq_(len(u_buf) + len(self.payload), p_ipv6.payload_length)
        eq_(inet.IPPROTO_UDP, p_ipv6.nxt)
        eq_(255, p_ipv6.hop_limit)
        eq_('::', p_ipv6.src)
        eq_('::', p_ipv6.dst)
        # udp
        ok_(p_udp)
        eq_(0, p_udp.src_port)
        eq_(0, p_udp.dst_port)
        eq_(len(u_buf) + len(self.payload), p_udp.total_length)
        eq_(0x2bc2, p_udp.csum)
        t = bytearray(u_buf)
        struct.pack_into('!H', t, 6, p_udp.csum)
        # prepend the IPv6 pseudo-header required by the UDP checksum
        ph = struct.pack('!16s16sI3xB', ipaddr, ipaddr,
                         len(u_buf) + len(self.payload), 17)
        t = ph + t + self.payload
        eq_(packet_utils.checksum(t), 0)
        # payload
        ok_('payload' in protocols)
        eq_(self.payload, protocols['payload'].tostring())
        # to string
        eth_values = {'dst': 'ff:ff:ff:ff:ff:ff',
                      'src': '00:00:00:00:00:00',
                      'ethertype': ether.ETH_TYPE_IPV6}
        _eth_str = ','.join(['%s=%s' % (k, repr(eth_values[k]))
                             for k, v in inspect.getmembers(p_eth)
                             if k in eth_values])
        eth_str = '%s(%s)' % (ethernet.ethernet.__name__, _eth_str)
        ipv6_values = {'version': 6,
                       'traffic_class': 0,
                       'flow_label': 0,
                       'payload_length': len(u_buf) + len(self.payload),
                       'nxt': inet.IPPROTO_UDP,
                       'hop_limit': 255,
                       'src': '::',
                       'dst': '::',
                       'ext_hdrs': []}
        _ipv6_str = ','.join(['%s=%s' % (k, repr(ipv6_values[k]))
                              for k, v in inspect.getmembers(p_ipv6)
                              if k in ipv6_values])
        ipv6_str = '%s(%s)' % (ipv6.ipv6.__name__, _ipv6_str)
        udp_values = {'src_port': 0,
                      'dst_port': 0,
                      'total_length': len(u_buf) + len(self.payload),
                      'csum': 0x2bc2}
        _udp_str = ','.join(['%s=%s' % (k, repr(udp_values[k]))
                             for k, v in inspect.getmembers(p_udp)
                             if k in udp_values])
        udp_str = '%s(%s)' % (udp.udp.__name__, _udp_str)
        pkt_str = '%s, %s, %s, %s' % (eth_str, ipv6_str, udp_str,
                                      repr(protocols['payload']))
        eq_(eth_str, str(p_eth))
        eq_(eth_str, repr(p_eth))
        eq_(ipv6_str, str(p_ipv6))
        eq_(ipv6_str, repr(p_ipv6))
        eq_(udp_str, str(p_udp))
        eq_(udp_str, repr(p_udp))
        eq_(pkt_str, str(pkt))
        eq_(pkt_str, repr(pkt))
    def test_ipv6_tcp(self):
        """Round-trip an Ethernet/IPv6/TCP packet with payload,
        validating the TCP checksum against the IPv6 pseudo-header
        and checking str()/repr() output.
        NOTE: the local name 't' is first the tcp header object and is
        later reused as a scratch bytearray for checksum validation.
        """
        # build packet
        e = ethernet.ethernet(ethertype=ether.ETH_TYPE_IPV6)
        ip = ipv6.ipv6()
        t = tcp.tcp(option='\x01\x02')
        p = e / ip / t / self.payload
        p.serialize()
        ipaddr = addrconv.ipv6.text_to_bin('::')
        # ethernet !6s6sH
        e_buf = '\xff\xff\xff\xff\xff\xff' \
            + '\x00\x00\x00\x00\x00\x00' \
            + '\x86\xdd'
        # ipv6 !IHBB16s16s'
        ip_buf = '\x60\x00\x00\x00' \
            + '\x00\x00' \
            + '\x06' \
            + '\xff' \
            + '\x00\x00' \
            + ipaddr \
            + ipaddr
        # tcp !HHIIBBHHH + option
        t_buf = '\x00\x00' \
            + '\x00\x00' \
            + '\x00\x00\x00\x00' \
            + '\x00\x00\x00\x00' \
            + '\x60' \
            + '\x00' \
            + '\x00\x00' \
            + '\x00\x00' \
            + '\x00\x00' \
            + '\x01\x02\x00\x00'
        # NOTE(review): buf has a zeroed TCP checksum and is never
        # compared against p.data; the checksum is validated below.
        buf = e_buf + ip_buf + t_buf + self.payload
        # parse
        pkt = packet.Packet(array.array('B', p.data))
        protocols = self.get_protocols(pkt)
        p_eth = protocols['ethernet']
        p_ipv6 = protocols['ipv6']
        p_tcp = protocols['tcp']
        # ethernet
        ok_(p_eth)
        eq_('ff:ff:ff:ff:ff:ff', p_eth.dst)
        eq_('00:00:00:00:00:00', p_eth.src)
        eq_(ether.ETH_TYPE_IPV6, p_eth.ethertype)
        # ipv6
        ok_(p_ipv6)
        eq_(6, p_ipv6.version)
        eq_(0, p_ipv6.traffic_class)
        eq_(0, p_ipv6.flow_label)
        eq_(len(t_buf) + len(self.payload), p_ipv6.payload_length)
        eq_(inet.IPPROTO_TCP, p_ipv6.nxt)
        eq_(255, p_ipv6.hop_limit)
        eq_('::', p_ipv6.src)
        eq_('::', p_ipv6.dst)
        # tcp
        ok_(p_tcp)
        eq_(0, p_tcp.src_port)
        eq_(0, p_tcp.dst_port)
        eq_(0, p_tcp.seq)
        eq_(0, p_tcp.ack)
        eq_(6, p_tcp.offset)
        eq_(0, p_tcp.bits)
        eq_(0, p_tcp.window_size)
        eq_(0, p_tcp.urgent)
        eq_(len(t_buf), len(p_tcp))
        t = bytearray(t_buf)
        struct.pack_into('!H', t, 16, p_tcp.csum)
        # prepend the IPv6 pseudo-header required by the TCP checksum
        ph = struct.pack('!16s16sI3xB', ipaddr, ipaddr,
                         len(t_buf) + len(self.payload), 6)
        t = ph + t + self.payload
        eq_(packet_utils.checksum(t), 0)
        # payload
        ok_('payload' in protocols)
        eq_(self.payload, protocols['payload'].tostring())
        # to string
        eth_values = {'dst': 'ff:ff:ff:ff:ff:ff',
                      'src': '00:00:00:00:00:00',
                      'ethertype': ether.ETH_TYPE_IPV6}
        _eth_str = ','.join(['%s=%s' % (k, repr(eth_values[k]))
                             for k, v in inspect.getmembers(p_eth)
                             if k in eth_values])
        eth_str = '%s(%s)' % (ethernet.ethernet.__name__, _eth_str)
        ipv6_values = {'version': 6,
                       'traffic_class': 0,
                       'flow_label': 0,
                       'payload_length': len(t_buf) + len(self.payload),
                       'nxt': inet.IPPROTO_TCP,
                       'hop_limit': 255,
                       'src': '::',
                       'dst': '::',
                       'ext_hdrs': []}
        _ipv6_str = ','.join(['%s=%s' % (k, repr(ipv6_values[k]))
                              for k, v in inspect.getmembers(p_ipv6)
                              if k in ipv6_values])
        ipv6_str = '%s(%s)' % (ipv6.ipv6.__name__, _ipv6_str)
        tcp_values = {'src_port': 0,
                      'dst_port': 0,
                      'seq': 0,
                      'ack': 0,
                      'offset': 6,
                      'bits': 0,
                      'window_size': 0,
                      'csum': p_tcp.csum,
                      'urgent': 0,
                      'option': p_tcp.option}
        _tcp_str = ','.join(['%s=%s' % (k, repr(tcp_values[k]))
                             for k, v in inspect.getmembers(p_tcp)
                             if k in tcp_values])
        tcp_str = '%s(%s)' % (tcp.tcp.__name__, _tcp_str)
        pkt_str = '%s, %s, %s, %s' % (eth_str, ipv6_str, tcp_str,
                                      repr(protocols['payload']))
        eq_(eth_str, str(p_eth))
        eq_(eth_str, repr(p_eth))
        eq_(ipv6_str, str(p_ipv6))
        eq_(ipv6_str, repr(p_ipv6))
        eq_(tcp_str, str(p_tcp))
        eq_(tcp_str, repr(p_tcp))
        eq_(pkt_str, str(pkt))
        eq_(pkt_str, repr(pkt))
    def test_ipv6_sctp(self):
        """Round-trip an Ethernet/IPv6/SCTP packet built with the '/'
        operator, verifying default fields, the DATA chunk contents,
        and str()/repr() output."""
        # build packet
        e = ethernet.ethernet(ethertype=ether.ETH_TYPE_IPV6)
        ip = ipv6.ipv6(nxt=inet.IPPROTO_SCTP)
        s = sctp.sctp(chunks=[sctp.chunk_data(payload_data=self.payload)])
        p = e / ip / s
        p.serialize()
        ipaddr = addrconv.ipv6.text_to_bin('::')
        # ethernet !6s6sH
        e_buf = '\xff\xff\xff\xff\xff\xff' \
            + '\x00\x00\x00\x00\x00\x00' \
            + '\x86\xdd'
        # ipv6 !IHBB16s16s'
        ip_buf = '\x60\x00\x00\x00' \
            + '\x00\x00' \
            + '\x84' \
            + '\xff' \
            + '\x00\x00' \
            + ipaddr \
            + ipaddr
        # sctp !HHII + chunk_data !BBHIHHI + payload
        s_buf = '\x00\x00' \
            + '\x00\x00' \
            + '\x00\x00\x00\x00' \
            + '\x00\x00\x00\x00' \
            + '\x00' \
            + '\x00' \
            + '\x00\x00' \
            + '\x00\x00\x00\x00' \
            + '\x00\x00' \
            + '\x00\x00' \
            + '\x00\x00\x00\x00' \
            + self.payload
        # NOTE(review): buf has zeroed checksum fields and is never
        # compared against p.data; field checks below cover parsing.
        buf = e_buf + ip_buf + s_buf
        # parse
        pkt = packet.Packet(array.array('B', p.data))
        protocols = self.get_protocols(pkt)
        p_eth = protocols['ethernet']
        p_ipv6 = protocols['ipv6']
        p_sctp = protocols['sctp']
        # ethernet
        ok_(p_eth)
        eq_('ff:ff:ff:ff:ff:ff', p_eth.dst)
        eq_('00:00:00:00:00:00', p_eth.src)
        eq_(ether.ETH_TYPE_IPV6, p_eth.ethertype)
        # ipv6
        ok_(p_ipv6)
        eq_(6, p_ipv6.version)
        eq_(0, p_ipv6.traffic_class)
        eq_(0, p_ipv6.flow_label)
        eq_(len(s_buf), p_ipv6.payload_length)
        eq_(inet.IPPROTO_SCTP, p_ipv6.nxt)
        eq_(255, p_ipv6.hop_limit)
        eq_('::', p_ipv6.src)
        eq_('::', p_ipv6.dst)
        # sctp
        ok_(p_sctp)
        eq_(0, p_sctp.src_port)
        eq_(0, p_sctp.dst_port)
        eq_(0, p_sctp.vtag)
        assert isinstance(p_sctp.chunks[0], sctp.chunk_data)
        eq_(0, p_sctp.chunks[0]._type)
        eq_(0, p_sctp.chunks[0].unordered)
        eq_(0, p_sctp.chunks[0].begin)
        eq_(0, p_sctp.chunks[0].end)
        eq_(16 + len(self.payload), p_sctp.chunks[0].length)
        eq_(0, p_sctp.chunks[0].tsn)
        eq_(0, p_sctp.chunks[0].sid)
        eq_(0, p_sctp.chunks[0].seq)
        eq_(0, p_sctp.chunks[0].payload_id)
        eq_(self.payload, p_sctp.chunks[0].payload_data)
        eq_(len(s_buf), len(p_sctp))
        # to string
        eth_values = {'dst': 'ff:ff:ff:ff:ff:ff',
                      'src': '00:00:00:00:00:00',
                      'ethertype': ether.ETH_TYPE_IPV6}
        _eth_str = ','.join(['%s=%s' % (k, repr(eth_values[k]))
                             for k, v in inspect.getmembers(p_eth)
                             if k in eth_values])
        eth_str = '%s(%s)' % (ethernet.ethernet.__name__, _eth_str)
        ipv6_values = {'version': 6,
                       'traffic_class': 0,
                       'flow_label': 0,
                       'payload_length': len(s_buf),
                       'nxt': inet.IPPROTO_SCTP,
                       'hop_limit': 255,
                       'src': '::',
                       'dst': '::',
                       'ext_hdrs': []}
        _ipv6_str = ','.join(['%s=%s' % (k, repr(ipv6_values[k]))
                              for k, v in inspect.getmembers(p_ipv6)
                              if k in ipv6_values])
        ipv6_str = '%s(%s)' % (ipv6.ipv6.__name__, _ipv6_str)
        data_values = {'unordered': 0,
                       'begin': 0,
                       'end': 0,
                       'length': 16 + len(self.payload),
                       'tsn': 0,
                       'sid': 0,
                       'seq': 0,
                       'payload_id': 0,
                       'payload_data': self.payload}
        _data_str = ','.join(['%s=%s' % (k, repr(data_values[k]))
                              for k in sorted(data_values.keys())])
        data_str = '[%s(%s)]' % (sctp.chunk_data.__name__, _data_str)
        # chunks is a pre-rendered string, so no repr() here
        sctp_values = {'src_port': 0,
                       'dst_port': 0,
                       'vtag': 0,
                       'csum': p_sctp.csum,
                       'chunks': data_str}
        _sctp_str = ','.join(['%s=%s' % (k, sctp_values[k])
                              for k, _ in inspect.getmembers(p_sctp)
                              if k in sctp_values])
        sctp_str = '%s(%s)' % (sctp.sctp.__name__, _sctp_str)
        pkt_str = '%s, %s, %s' % (eth_str, ipv6_str, sctp_str)
        eq_(eth_str, str(p_eth))
        eq_(eth_str, repr(p_eth))
        eq_(ipv6_str, str(p_ipv6))
        eq_(ipv6_str, repr(p_ipv6))
        eq_(sctp_str, str(p_sctp))
        eq_(sctp_str, repr(p_sctp))
        eq_(pkt_str, str(pkt))
        eq_(pkt_str, repr(pkt))
    def test_ipv6_icmpv6(self):
        """Round-trip an Ethernet/IPv6/ICMPv6 packet built with the '/'
        operator, validating the ICMPv6 checksum against the IPv6
        pseudo-header and checking str()/repr() output."""
        # build packet
        e = ethernet.ethernet(ethertype=ether.ETH_TYPE_IPV6)
        ip = ipv6.ipv6(nxt=inet.IPPROTO_ICMPV6)
        ic = icmpv6.icmpv6()
        p = e / ip / ic
        p.serialize()
        ipaddr = addrconv.ipv6.text_to_bin('::')
        # ethernet !6s6sH
        e_buf = '\xff\xff\xff\xff\xff\xff' \
            + '\x00\x00\x00\x00\x00\x00' \
            + '\x86\xdd'
        # ipv6 !IHBB16s16s'
        ip_buf = '\x60\x00\x00\x00' \
            + '\x00\x00' \
            + '\x3a' \
            + '\xff' \
            + '\x00\x00' \
            + ipaddr \
            + ipaddr
        # icmpv6 !BBH
        ic_buf = '\x00' \
            + '\x00' \
            + '\x00\x00'
        # NOTE(review): buf has a zeroed checksum and is never compared
        # against p.data; the checksum is validated below.
        buf = e_buf + ip_buf + ic_buf
        # parse
        pkt = packet.Packet(array.array('B', p.data))
        protocols = self.get_protocols(pkt)
        p_eth = protocols['ethernet']
        p_ipv6 = protocols['ipv6']
        p_icmpv6 = protocols['icmpv6']
        # ethernet
        ok_(p_eth)
        eq_('ff:ff:ff:ff:ff:ff', p_eth.dst)
        eq_('00:00:00:00:00:00', p_eth.src)
        eq_(ether.ETH_TYPE_IPV6, p_eth.ethertype)
        # ipv6
        ok_(p_ipv6)
        eq_(6, p_ipv6.version)
        eq_(0, p_ipv6.traffic_class)
        eq_(0, p_ipv6.flow_label)
        eq_(len(ic_buf), p_ipv6.payload_length)
        eq_(inet.IPPROTO_ICMPV6, p_ipv6.nxt)
        eq_(255, p_ipv6.hop_limit)
        eq_('::', p_ipv6.src)
        eq_('::', p_ipv6.dst)
        # icmpv6
        ok_(p_icmpv6)
        eq_(0, p_icmpv6.type_)
        eq_(0, p_icmpv6.code)
        eq_(len(ic_buf), len(p_icmpv6))
        t = bytearray(ic_buf)
        struct.pack_into('!H', t, 2, p_icmpv6.csum)
        # prepend the IPv6 pseudo-header (next header 58 = ICMPv6)
        ph = struct.pack('!16s16sI3xB', ipaddr, ipaddr, len(ic_buf), 58)
        t = ph + t
        eq_(packet_utils.checksum(t), 0)
        # to string
        eth_values = {'dst': 'ff:ff:ff:ff:ff:ff',
                      'src': '00:00:00:00:00:00',
                      'ethertype': ether.ETH_TYPE_IPV6}
        _eth_str = ','.join(['%s=%s' % (k, repr(eth_values[k]))
                             for k, _ in inspect.getmembers(p_eth)
                             if k in eth_values])
        eth_str = '%s(%s)' % (ethernet.ethernet.__name__, _eth_str)
        ipv6_values = {'version': 6,
                       'traffic_class': 0,
                       'flow_label': 0,
                       'payload_length': len(ic_buf),
                       'nxt': inet.IPPROTO_ICMPV6,
                       'hop_limit': 255,
                       'src': '::',
                       'dst': '::',
                       'ext_hdrs': []}
        _ipv6_str = ','.join(['%s=%s' % (k, repr(ipv6_values[k]))
                              for k, _ in inspect.getmembers(p_ipv6)
                              if k in ipv6_values])
        ipv6_str = '%s(%s)' % (ipv6.ipv6.__name__, _ipv6_str)
        icmpv6_values = {'type_': 0,
                         'code': 0,
                         'csum': p_icmpv6.csum,
                         'data': None}
        _icmpv6_str = ','.join(['%s=%s' % (k, repr(icmpv6_values[k]))
                                for k, _ in inspect.getmembers(p_icmpv6)
                                if k in icmpv6_values])
        icmpv6_str = '%s(%s)' % (icmpv6.icmpv6.__name__, _icmpv6_str)
        pkt_str = '%s, %s, %s' % (eth_str, ipv6_str, icmpv6_str)
        eq_(eth_str, str(p_eth))
        eq_(eth_str, repr(p_eth))
        eq_(ipv6_str, str(p_ipv6))
        eq_(ipv6_str, repr(p_ipv6))
        eq_(icmpv6_str, str(p_icmpv6))
        eq_(icmpv6_str, repr(p_icmpv6))
        eq_(pkt_str, str(pkt))
        eq_(pkt_str, repr(pkt))
    def test_llc_bpdu(self):
        """Round-trip an IEEE 802.3 Ethernet/LLC/Configuration-BPDU
        frame, verifying parsed fields and str()/repr() output.

        NOTE(review): unlike the other tests, e_buf concatenates the
        text-form MACs (self.dst_mac / self.src_mac) rather than the
        *_bin values; since buf is never compared against p.data this
        does not fail — confirm whether the comparison was intended.
        """
        # build packet
        e = ethernet.ethernet(self.dst_mac, self.src_mac,
                              ether.ETH_TYPE_IEEE802_3)
        llc_control = llc.ControlFormatU(0, 0, 0)
        l = llc.llc(llc.SAP_BPDU, llc.SAP_BPDU, llc_control)
        b = bpdu.ConfigurationBPDUs(flags=0,
                                    root_priority=32768,
                                    root_system_id_extension=0,
                                    root_mac_address=self.src_mac,
                                    root_path_cost=0,
                                    bridge_priority=32768,
                                    bridge_system_id_extension=0,
                                    bridge_mac_address=self.dst_mac,
                                    port_priority=128,
                                    port_number=4,
                                    message_age=1,
                                    max_age=20,
                                    hello_time=2,
                                    forward_delay=15)
        p = packet.Packet()
        p.add_protocol(e)
        p.add_protocol(l)
        p.add_protocol(b)
        p.serialize()
        # ethernet !6s6sH
        e_buf = self.dst_mac + self.src_mac + '\x05\xdc'
        # llc !BBB
        l_buf = ('\x42'
                 '\x42'
                 '\x03')
        # bpdu !HBBBQIQHHHHH
        b_buf = ('\x00\x00'
                 '\x00'
                 '\x00'
                 '\x00'
                 '\x80\x64\xaa\xaa\xaa\xaa\xaa\xaa'
                 '\x00\x00\x00\x04'
                 '\x80\x64\xbb\xbb\xbb\xbb\xbb\xbb'
                 '\x80\x04'
                 '\x01\x00'
                 '\x14\x00'
                 '\x02\x00'
                 '\x0f\x00')
        buf = e_buf + l_buf + b_buf
        # parse
        pkt = packet.Packet(array.array('B', p.data))
        protocols = self.get_protocols(pkt)
        p_eth = protocols['ethernet']
        p_llc = protocols['llc']
        p_bpdu = protocols['ConfigurationBPDUs']
        # ethernet
        ok_(p_eth)
        eq_(self.dst_mac, p_eth.dst)
        eq_(self.src_mac, p_eth.src)
        eq_(ether.ETH_TYPE_IEEE802_3, p_eth.ethertype)
        # llc
        ok_(p_llc)
        eq_(llc.SAP_BPDU, p_llc.dsap_addr)
        eq_(llc.SAP_BPDU, p_llc.ssap_addr)
        eq_(0, p_llc.control.modifier_function1)
        eq_(0, p_llc.control.pf_bit)
        eq_(0, p_llc.control.modifier_function2)
        # bpdu
        ok_(p_bpdu)
        eq_(bpdu.PROTOCOL_IDENTIFIER, p_bpdu._protocol_id)
        eq_(bpdu.PROTOCOLVERSION_ID_BPDU, p_bpdu._version_id)
        eq_(bpdu.TYPE_CONFIG_BPDU, p_bpdu._bpdu_type)
        eq_(0, p_bpdu.flags)
        eq_(32768, p_bpdu.root_priority)
        eq_(0, p_bpdu.root_system_id_extension)
        eq_(self.src_mac, p_bpdu.root_mac_address)
        eq_(0, p_bpdu.root_path_cost)
        eq_(32768, p_bpdu.bridge_priority)
        eq_(0, p_bpdu.bridge_system_id_extension)
        eq_(self.dst_mac, p_bpdu.bridge_mac_address)
        eq_(128, p_bpdu.port_priority)
        eq_(4, p_bpdu.port_number)
        eq_(1, p_bpdu.message_age)
        eq_(20, p_bpdu.max_age)
        eq_(2, p_bpdu.hello_time)
        eq_(15, p_bpdu.forward_delay)
        # to string
        eth_values = {'dst': self.dst_mac,
                      'src': self.src_mac,
                      'ethertype': ether.ETH_TYPE_IEEE802_3}
        _eth_str = ','.join(['%s=%s' % (k, repr(eth_values[k]))
                             for k, v in inspect.getmembers(p_eth)
                             if k in eth_values])
        eth_str = '%s(%s)' % (ethernet.ethernet.__name__, _eth_str)
        ctrl_values = {'modifier_function1': 0,
                       'pf_bit': 0,
                       'modifier_function2': 0}
        _ctrl_str = ','.join(['%s=%s' % (k, repr(ctrl_values[k]))
                              for k, v in inspect.getmembers(p_llc.control)
                              if k in ctrl_values])
        ctrl_str = '%s(%s)' % (llc.ControlFormatU.__name__, _ctrl_str)
        llc_values = {'dsap_addr': repr(llc.SAP_BPDU),
                      'ssap_addr': repr(llc.SAP_BPDU),
                      'control': ctrl_str}
        _llc_str = ','.join(['%s=%s' % (k, llc_values[k])
                             for k, v in inspect.getmembers(p_llc)
                             if k in llc_values])
        llc_str = '%s(%s)' % (llc.llc.__name__, _llc_str)
        # Python 2 only: long()/float() match the types the bpdu parser
        # produces, so the repr() strings compare equal.
        bpdu_values = {'flags': 0,
                       'root_priority': long(32768),
                       'root_system_id_extension': long(0),
                       'root_mac_address': self.src_mac,
                       'root_path_cost': 0,
                       'bridge_priority': long(32768),
                       'bridge_system_id_extension': long(0),
                       'bridge_mac_address': self.dst_mac,
                       'port_priority': 128,
                       'port_number': 4,
                       'message_age': float(1),
                       'max_age': float(20),
                       'hello_time': float(2),
                       'forward_delay': float(15)}
        _bpdu_str = ','.join(['%s=%s' % (k, repr(bpdu_values[k]))
                              for k, v in inspect.getmembers(p_bpdu)
                              if k in bpdu_values])
        bpdu_str = '%s(%s)' % (bpdu.ConfigurationBPDUs.__name__, _bpdu_str)
        pkt_str = '%s, %s, %s' % (eth_str, llc_str, bpdu_str)
        eq_(eth_str, str(p_eth))
        eq_(eth_str, repr(p_eth))
        eq_(llc_str, str(p_llc))
        eq_(llc_str, repr(p_llc))
        eq_(bpdu_str, str(p_bpdu))
        eq_(bpdu_str, repr(p_bpdu))
        eq_(pkt_str, str(pkt))
        eq_(pkt_str, repr(pkt))
def test_div_api(self):
e = ethernet.ethernet(self.dst_mac, self.src_mac, ether.ETH_TYPE_IP)
i = ipv4.ipv4()
u = udp.udp(self.src_port, self.dst_port)
pkt = e / i / u
ok_(isinstance(pkt, packet.Packet))
ok_(isinstance(pkt.protocols[0], ethernet.ethernet))
ok_(isinstance(pkt.protocols[1], ipv4.ipv4))
ok_(isinstance(pkt.protocols[2], udp.udp))
| |
# Copyright (c) 2017 Dell Inc. or its subsidiaries.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
ISCSI Drivers for Dell EMC VMAX arrays based on REST.
"""
from oslo_log import log as logging
import six
from cinder import exception
from cinder.i18n import _
from cinder import interface
from cinder.volume import driver
from cinder.volume.drivers.dell_emc.vmax import common
# Module-level logger for this driver.
LOG = logging.getLogger(__name__)
@interface.volumedriver
class VMAXISCSIDriver(driver.ISCSIDriver):
    """ISCSI Drivers for VMAX using Rest.

    Thin wrapper around :class:`common.VMAXCommon`, which implements the
    actual array operations; this class adapts the Cinder iSCSI driver
    interface onto that common layer.

    Version history:

    .. code-block:: none

        1.0.0 - Initial driver
        1.1.0 - Multiple pools and thick/thin provisioning,
                performance enhancement.
        2.0.0 - Add driver requirement functions
        2.1.0 - Add consistency group functions
        2.1.1 - Fixed issue with mismatched config (bug #1442376)
        2.1.2 - Clean up failed clones (bug #1440154)
        2.1.3 - Fixed a problem with FAST support (bug #1435069)
        2.2.0 - Add manage/unmanage
        2.2.1 - Support for SE 8.0.3
        2.2.2 - Update Consistency Group
        2.2.3 - Pool aware scheduler(multi-pool) support
        2.2.4 - Create CG from CG snapshot
        2.3.0 - Name change for MV and SG for FAST (bug #1515181)
              - Fix for randomly choosing port group. (bug #1501919)
              - get_short_host_name needs to be called in find_device_number
                (bug #1520635)
              - Proper error handling for invalid SLOs (bug #1512795)
              - Extend Volume for VMAX3, SE8.1.0.3
                https://blueprints.launchpad.net/cinder/+spec/vmax3-extend-volume
              - Incorrect SG selected on an attach (#1515176)
              - Cleanup Zoning (bug #1501938) NOTE: FC only
              - Last volume in SG fix
              - _remove_last_vol_and_delete_sg is not being called
                for VMAX3 (bug #1520549)
              - necessary updates for CG changes (#1534616)
              - Changing PercentSynced to CopyState (bug #1517103)
              - Getting iscsi ip from port in existing masking view
              - Replacement of EMCGetTargetEndpoints api (bug #1512791)
              - VMAX3 snapvx improvements (bug #1522821)
              - Operations and timeout issues (bug #1538214)
        2.4.0 - EMC VMAX - locking SG for concurrent threads (bug #1554634)
              - SnapVX licensing checks for VMAX3 (bug #1587017)
              - VMAX oversubscription Support (blueprint vmax-oversubscription)
              - QoS support (blueprint vmax-qos)
              - VMAX2/VMAX3 iscsi multipath support (iscsi only)
                https://blueprints.launchpad.net/cinder/+spec/vmax-iscsi-multipath
        2.5.0 - Attach and detach snapshot (blueprint vmax-attach-snapshot)
              - MVs and SGs not reflecting correct protocol (bug #1640222)
              - Storage assisted volume migration via retype
                (bp vmax-volume-migration)
              - Support for compression on All Flash
              - Volume replication 2.1 (bp add-vmax-replication)
              - rename and restructure driver (bp vmax-rename-dell-emc)
        3.0.0 - REST based driver
              - Retype (storage-assisted migration)
              - QoS support
              - Support for compression on All Flash
              - Support for volume replication
              - Support for live migration
              - Support for Generic Volume Group
    """
    VERSION = "3.0.0"
    # ThirdPartySystems wiki
    CI_WIKI_NAME = "EMC_VMAX_CI"
    def __init__(self, *args, **kwargs):
        super(VMAXISCSIDriver, self).__init__(*args, **kwargs)
        # Remembered so the common layer can resume against the failed-over
        # backend after a failover_host call.
        self.active_backend_id = kwargs.get('active_backend_id', None)
        self.common = (
            common.VMAXCommon(
                'iSCSI',
                self.VERSION,
                configuration=self.configuration,
                active_backend_id=self.active_backend_id))
    def check_for_setup_error(self):
        """No extra setup validation; configuration is checked elsewhere."""
        pass
    def create_volume(self, volume):
        """Creates a VMAX volume.

        :param volume: the cinder volume object
        :returns: provider location dict
        """
        return self.common.create_volume(volume)
    def create_volume_from_snapshot(self, volume, snapshot):
        """Creates a volume from a snapshot.

        :param volume: the cinder volume object
        :param snapshot: the cinder snapshot object
        :returns: provider location dict
        """
        return self.common.create_volume_from_snapshot(
            volume, snapshot)
    def create_cloned_volume(self, volume, src_vref):
        """Creates a cloned volume.

        :param volume: the cinder volume object
        :param src_vref: the source volume reference
        :returns: provider location dict
        """
        return self.common.create_cloned_volume(volume, src_vref)
    def delete_volume(self, volume):
        """Deletes a VMAX volume.

        :param volume: the cinder volume object
        """
        self.common.delete_volume(volume)
    def create_snapshot(self, snapshot):
        """Creates a snapshot.

        :param snapshot: the cinder snapshot object
        :returns: provider location dict
        """
        # The common layer needs the parent volume as well as the snapshot.
        src_volume = snapshot.volume
        return self.common.create_snapshot(snapshot, src_volume)
    def delete_snapshot(self, snapshot):
        """Deletes a snapshot.

        :param snapshot: the cinder snapshot object
        """
        src_volume = snapshot.volume
        self.common.delete_snapshot(snapshot, src_volume)
    def ensure_export(self, context, volume):
        """Driver entry point to get the export info for an existing volume.

        No-op: exports are handled during initialize_connection.

        :param context: the context
        :param volume: the cinder volume object
        """
        pass
    def create_export(self, context, volume, connector):
        """Driver entry point to get the export info for a new volume.

        No-op: exports are handled during initialize_connection.

        :param context: the context
        :param volume: the cinder volume object
        :param connector: the connector object
        """
        pass
    def remove_export(self, context, volume):
        """Driver entry point to remove an export for a volume.

        No-op: unexport is handled during terminate_connection.

        :param context: the context
        :param volume: the cinder volume object
        """
        pass
    @staticmethod
    def check_for_export(context, volume_id):
        """Make sure volume is exported.

        :param context: the context
        :param volume_id: the volume id
        """
        pass
    def initialize_connection(self, volume, connector):
        """Initializes the connection and returns connection info.

        The iscsi driver returns a driver_volume_type of 'iscsi'.
        the format of the driver data is defined in smis_get_iscsi_properties.
        Example return value:

        .. code-block:: default

            {
                'driver_volume_type': 'iscsi',
                'data': {
                    'target_discovered': True,
                    'target_iqn': 'iqn.2010-10.org.openstack:volume-00000001',
                    'target_portal': '127.0.0.0.1:3260',
                    'volume_id': '12345678-1234-4321-1234-123456789012'
                }
            }

        Example return value (multipath is enabled):

        .. code-block:: default

            {
                'driver_volume_type': 'iscsi',
                'data': {
                    'target_discovered': True,
                    'target_iqns': ['iqn.2010-10.org.openstack:volume-00001',
                                    'iqn.2010-10.org.openstack:volume-00002'],
                    'target_portals': ['127.0.0.1:3260', '127.0.1.1:3260'],
                    'target_luns': [1, 1]
                }
            }

        :param volume: the cinder volume object
        :param connector: the connector object
        :returns: dict -- the iscsi dict
        """
        device_info = self.common.initialize_connection(
            volume, connector)
        return self.get_iscsi_dict(device_info, volume)
    def get_iscsi_dict(self, device_info, volume):
        """Populate iscsi dict to pass to nova.

        :param device_info: device info dict
        :param volume: volume object
        :returns: iscsi dict
        :raises: VolumeBackendAPIException if device_info is missing any of
            the required keys
        """
        try:
            ip_and_iqn = device_info['ip_and_iqn']
            is_multipath = device_info['is_multipath']
            host_lun_id = device_info['hostlunid']
        except KeyError as e:
            exception_message = (_("Cannot get iSCSI ipaddresses, multipath "
                                   "flag, or hostlunid. Exception is %(e)s.")
                                 % {'e': six.text_type(e)})
            raise exception.VolumeBackendAPIException(data=exception_message)
        iscsi_properties = self.vmax_get_iscsi_properties(
            volume, ip_and_iqn, is_multipath, host_lun_id)
        LOG.info("iSCSI properties are: %(props)s",
                 {'props': iscsi_properties})
        return {'driver_volume_type': 'iscsi',
                'data': iscsi_properties}
    @staticmethod
    def vmax_get_iscsi_properties(volume, ip_and_iqn,
                                  is_multipath, host_lun_id):
        """Gets iscsi configuration.

        We ideally get saved information in the volume entity, but fall back
        to discovery if need be. Discovery may be completely removed in future
        The properties are:

        :target_discovered: boolean indicating whether discovery was used
        :target_iqn: the IQN of the iSCSI target
        :target_portal: the portal of the iSCSI target
        :target_lun: the lun of the iSCSI target
        :volume_id: the UUID of the volume
        :auth_method:, :auth_username:, :auth_password:
            the authentication details. Right now, either auth_method is not
            present meaning no authentication, or auth_method == `CHAP`
            meaning use CHAP with the specified credentials.

        :param volume: the cinder volume object
        :param ip_and_iqn: list of ip and iqn dicts
        :param is_multipath: flag for multipath
        :param host_lun_id: the host lun id of the device
        :returns: properties
        """
        properties = {}
        # Multipath keys are only populated when there is more than one
        # portal AND the connector asked for multipath; port 3260 is the
        # standard iSCSI port.
        if len(ip_and_iqn) > 1 and is_multipath:
            properties['target_portals'] = ([t['ip'] + ":3260" for t in
                                             ip_and_iqn])
            properties['target_iqns'] = ([t['iqn'].split(",")[0] for t in
                                          ip_and_iqn])
            properties['target_luns'] = [host_lun_id] * len(ip_and_iqn)
        # Single-path keys are always set (first portal wins).
        properties['target_discovered'] = True
        properties['target_iqn'] = ip_and_iqn[0]['iqn'].split(",")[0]
        properties['target_portal'] = ip_and_iqn[0]['ip'] + ":3260"
        properties['target_lun'] = host_lun_id
        properties['volume_id'] = volume.id
        LOG.info("ISCSI properties: %(properties)s.",
                 {'properties': properties})
        LOG.info("ISCSI volume is: %(volume)s.", {'volume': volume})
        # CHAP credentials, if stored on the volume as
        # "<method> <username> <secret>".
        if hasattr(volume, 'provider_auth'):
            auth = volume.provider_auth
            if auth is not None:
                (auth_method, auth_username, auth_secret) = auth.split()
                properties['auth_method'] = auth_method
                properties['auth_username'] = auth_username
                properties['auth_password'] = auth_secret
        return properties
    def terminate_connection(self, volume, connector, **kwargs):
        """Disallow connection from connector.

        NOTE(review): an earlier version of this docstring described
        FibreChannel zoning and a target_wwns return value; that applies to
        the FC driver only.  This iSCSI method delegates to the common layer
        and returns None.

        :param volume: the volume object
        :param connector: the connector object
        """
        self.common.terminate_connection(volume, connector)
    def extend_volume(self, volume, new_size):
        """Extend an existing volume.

        :param volume: the cinder volume object
        :param new_size: the required new size
        """
        self.common.extend_volume(volume, new_size)
    def get_volume_stats(self, refresh=False):
        """Get volume stats.

        :param refresh: boolean -- If True, run update the stats first.
        :returns: dict -- the stats dict
        """
        if refresh:
            self.update_volume_stats()
        return self._stats
    def update_volume_stats(self):
        """Retrieve stats info from volume group."""
        LOG.debug("Updating volume stats")
        data = self.common.update_volume_stats()
        data['storage_protocol'] = 'iSCSI'
        data['driver_version'] = self.VERSION
        self._stats = data
    def manage_existing(self, volume, external_ref):
        """Manages an existing VMAX Volume (import to Cinder).

        Renames the Volume to match the expected name for the volume.
        Also need to consider things like QoS, Emulation, account/tenant.
        """
        return self.common.manage_existing(volume, external_ref)
    def manage_existing_get_size(self, volume, external_ref):
        """Return size of an existing VMAX volume to manage_existing.

        :param self: reference to class
        :param volume: the volume object including the volume_type_id
        :param external_ref: reference to the existing volume
        :returns: size of the volume in GB
        """
        return self.common.manage_existing_get_size(volume, external_ref)
    def unmanage(self, volume):
        """Export VMAX volume from Cinder.

        Leave the volume intact on the backend array.
        """
        return self.common.unmanage(volume)
    def retype(self, ctxt, volume, new_type, diff, host):
        """Migrate volume to another host using retype.

        :param ctxt: context
        :param volume: the volume object including the volume_type_id
        :param new_type: the new volume type.
        :param diff: difference between old and new volume types.
            Unused in driver.
        :param host: the host dict holding the relevant
            target(destination) information
        :returns: boolean -- True if retype succeeded, False if error
        """
        return self.common.retype(volume, new_type, host)
    def failover_host(self, context, volumes, secondary_id=None, groups=None):
        """Failover volumes to a secondary host/ backend.

        :param context: the context
        :param volumes: the list of volumes to be failed over
        :param secondary_id: the backend to be failed over to, is 'default'
            if fail back
        :param groups: replication groups
        :returns: secondary_id, volume_update_list, group_update_list
        """
        return self.common.failover_host(volumes, secondary_id, groups)
    def create_group(self, context, group):
        """Creates a generic volume group.

        :param context: the context
        :param group: the group object
        """
        self.common.create_group(context, group)
    def delete_group(self, context, group, volumes):
        """Deletes a generic volume group.

        :param context: the context
        :param group: the group object
        :param volumes: the member volumes
        """
        return self.common.delete_group(
            context, group, volumes)
    def create_group_snapshot(self, context, group_snapshot, snapshots):
        """Creates a group snapshot.

        :param context: the context
        :param group_snapshot: the group snapshot
        :param snapshots: snapshots list
        """
        return self.common.create_group_snapshot(context,
                                                 group_snapshot, snapshots)
    def delete_group_snapshot(self, context, group_snapshot, snapshots):
        """Deletes a group snapshot.

        :param context: the context
        :param group_snapshot: the group snapshot
        :param snapshots: snapshots list
        """
        return self.common.delete_group_snapshot(context,
                                                 group_snapshot, snapshots)
    def update_group(self, context, group,
                     add_volumes=None, remove_volumes=None):
        """Updates LUNs in group.

        :param context: the context
        :param group: the group object
        :param add_volumes: volumes to add (passed through to the common
            layer)
        :param remove_volumes: volumes to remove (passed through to the
            common layer)
        """
        return self.common.update_group(group, add_volumes,
                                        remove_volumes)
    def create_group_from_src(
            self, context, group, volumes, group_snapshot=None,
            snapshots=None, source_group=None, source_vols=None):
        """Creates the volume group from source.

        :param context: the context
        :param group: the consistency group object to be created
        :param volumes: volumes in the group
        :param group_snapshot: the source volume group snapshot
        :param snapshots: snapshots of the source volumes
        :param source_group: the dictionary of a volume group as source.
        :param source_vols: a list of volume dictionaries in the source_group.
        """
        return self.common.create_group_from_src(
            context, group, volumes, group_snapshot, snapshots, source_group,
            source_vols)
| |
"""
Requirements file parsing
"""
from __future__ import absolute_import
import os
import re
import shlex
import optparse
import warnings
from pip._vendor.six.moves.urllib import parse as urllib_parse
from pip._vendor.six.moves import filterfalse
import pip
from pip.download import get_file_content
from pip.req.req_install import InstallRequirement
from pip.exceptions import (RequirementsFileParseError)
from pip.utils.deprecation import RemovedInPip10Warning
from pip import cmdoptions
__all__ = ['parse_requirements']
# Matches requirement-file locations that are URLs rather than local paths.
SCHEME_RE = re.compile(r'^(http|https|file):', re.I)
# Matches a '#' comment at start of line or preceded by whitespace.
COMMENT_RE = re.compile(r'(^|\s)+#.*$')
# Options that may appear on a non-requirement line; they configure the
# finder / parsing session rather than an individual requirement.
SUPPORTED_OPTIONS = [
    cmdoptions.constraints,
    cmdoptions.editable,
    cmdoptions.requirements,
    cmdoptions.no_index,
    cmdoptions.index_url,
    cmdoptions.find_links,
    cmdoptions.extra_index_url,
    cmdoptions.allow_external,
    cmdoptions.allow_all_external,
    cmdoptions.no_allow_external,
    cmdoptions.allow_unsafe,
    cmdoptions.no_allow_unsafe,
    cmdoptions.use_wheel,
    cmdoptions.no_use_wheel,
    cmdoptions.always_unzip,
    cmdoptions.no_binary,
    cmdoptions.only_binary,
    cmdoptions.pre,
    cmdoptions.process_dependency_links,
    cmdoptions.trusted_host,
    cmdoptions.require_hashes,
]
# options to be passed to requirements
SUPPORTED_OPTIONS_REQ = [
    cmdoptions.install_options,
    cmdoptions.global_options,
    cmdoptions.hash,
]
# the 'dest' string values
SUPPORTED_OPTIONS_REQ_DEST = [o().dest for o in SUPPORTED_OPTIONS_REQ]
def parse_requirements(filename, finder=None, comes_from=None, options=None,
                       session=None, constraint=False, wheel_cache=None):
    """Parse a requirements file and yield InstallRequirement instances.

    :param filename: Path or url of requirements file.
    :param finder: Instance of pip.index.PackageFinder.
    :param comes_from: Origin description of requirements.
    :param options: cli options.
    :param session: Instance of pip.download.PipSession.
    :param constraint: If true, parsing a constraint file rather than
        requirements file.
    :param wheel_cache: Instance of pip.wheel.WheelCache
    """
    # A session is mandatory; fail early with a clear message.
    if session is None:
        raise TypeError(
            "parse_requirements() missing 1 required keyword argument: "
            "'session'"
        )
    _, content = get_file_content(
        filename, comes_from=comes_from, session=session
    )
    # Normalize raw text (join continuations, strip comments, apply
    # --skip-requirements-regex), then expand each logical line into zero or
    # more requirements.
    for line_number, line in preprocess(content, options):
        for req in process_line(line, filename, line_number, finder,
                                comes_from, options, session, wheel_cache,
                                constraint=constraint):
            yield req
def preprocess(content, options):
    """Split, filter, and join lines, and return a line iterator

    :param content: the content of the requirements file
    :param options: cli options
    """
    # Generator pipeline; every stage preserves (line_number, line) pairs.
    numbered = enumerate(content.splitlines(), start=1)
    joined = join_lines(numbered)
    uncommented = ignore_comments(joined)
    return skip_regex(uncommented, options)
def process_line(line, filename, line_number, finder=None, comes_from=None,
                 options=None, session=None, wheel_cache=None,
                 constraint=False):
    """Process a single requirements line; This can result in creating/yielding
    requirements, or updating the finder.

    For lines that contain requirements, the only options that have an effect
    are from SUPPORTED_OPTIONS_REQ, and they are scoped to the
    requirement. Other options from SUPPORTED_OPTIONS may be present, but are
    ignored.

    For lines that do not contain requirements, the only options that have an
    effect are from SUPPORTED_OPTIONS. Options from SUPPORTED_OPTIONS_REQ may
    be present, but are ignored. These lines may contain multiple options
    (although our docs imply only one is supported), and all are parsed and
    affect the finder.

    :param constraint: If True, parsing a constraints file.
    :param options: OptionParser options that we may update
    """
    parser = build_parser()
    defaults = parser.get_default_values()
    # Unset so we can tell "explicitly given on this line" from the default.
    defaults.index_url = None
    if finder:
        # `finder.format_control` will be updated during parsing
        defaults.format_control = finder.format_control
    # Only the options half is shlex'd; args may contain markers that shlex
    # would corrupt.
    args_str, options_str = break_args_options(line)
    opts, _ = parser.parse_args(shlex.split(options_str), defaults)
    # preserve for the nested code path
    line_comes_from = '%s %s (line %s)' % (
        '-c' if constraint else '-r', filename, line_number)
    # yield a line requirement
    if args_str:
        isolated = options.isolated_mode if options else False
        if options:
            cmdoptions.check_install_build_global(options, opts)
        # get the options that apply to requirements
        req_options = {}
        for dest in SUPPORTED_OPTIONS_REQ_DEST:
            if dest in opts.__dict__ and opts.__dict__[dest]:
                req_options[dest] = opts.__dict__[dest]
        yield InstallRequirement.from_line(
            args_str, line_comes_from, constraint=constraint,
            isolated=isolated, options=req_options, wheel_cache=wheel_cache
        )
    # yield an editable requirement
    elif opts.editables:
        isolated = options.isolated_mode if options else False
        default_vcs = options.default_vcs if options else None
        yield InstallRequirement.from_editable(
            opts.editables[0], comes_from=line_comes_from,
            constraint=constraint, default_vcs=default_vcs, isolated=isolated,
            wheel_cache=wheel_cache
        )
    # parse a nested requirements file
    elif opts.requirements or opts.constraints:
        if opts.requirements:
            req_path = opts.requirements[0]
            nested_constraint = False
        else:
            req_path = opts.constraints[0]
            nested_constraint = True
        # original file is over http
        if SCHEME_RE.search(filename):
            # do a url join so relative paths work
            req_path = urllib_parse.urljoin(filename, req_path)
        # original file and nested file are paths
        elif not SCHEME_RE.search(req_path):
            # do a join so relative paths work
            req_path = os.path.join(os.path.dirname(filename), req_path)
        # TODO: Why not use `comes_from='-r {} (line {})'` here as well?
        parser = parse_requirements(
            req_path, finder, comes_from, options, session,
            constraint=nested_constraint, wheel_cache=wheel_cache
        )
        for req in parser:
            yield req
    # percolate hash-checking option upward
    elif opts.require_hashes:
        options.require_hashes = opts.require_hashes
    # set finder options
    elif finder:
        if opts.allow_external:
            warnings.warn(
                "--allow-external has been deprecated and will be removed in "
                "the future. Due to changes in the repository protocol, it no "
                "longer has any effect.",
                RemovedInPip10Warning,
            )
        if opts.allow_all_external:
            warnings.warn(
                "--allow-all-external has been deprecated and will be removed "
                "in the future. Due to changes in the repository protocol, it "
                "no longer has any effect.",
                RemovedInPip10Warning,
            )
        if opts.allow_unverified:
            warnings.warn(
                "--allow-unverified has been deprecated and will be removed "
                "in the future. Due to changes in the repository protocol, it "
                "no longer has any effect.",
                RemovedInPip10Warning,
            )
        if opts.index_url:
            finder.index_urls = [opts.index_url]
        if opts.use_wheel is False:
            finder.use_wheel = False
            pip.index.fmt_ctl_no_use_wheel(finder.format_control)
        if opts.no_index is True:
            finder.index_urls = []
        if opts.extra_index_urls:
            finder.index_urls.extend(opts.extra_index_urls)
        if opts.find_links:
            # FIXME: it would be nice to keep track of the source
            # of the find_links: support a find-links local path
            # relative to a requirements file.
            value = opts.find_links[0]
            req_dir = os.path.dirname(os.path.abspath(filename))
            relative_to_reqs_file = os.path.join(req_dir, value)
            if os.path.exists(relative_to_reqs_file):
                value = relative_to_reqs_file
            finder.find_links.append(value)
        if opts.pre:
            finder.allow_all_prereleases = True
        if opts.process_dependency_links:
            finder.process_dependency_links = True
        if opts.trusted_hosts:
            finder.secure_origins.extend(
                ("*", host, "*") for host in opts.trusted_hosts)
def break_args_options(line):
    """Break up the line into an args and options string. We only want to shlex
    (and then optparse) the options, not the args. args can contain markers
    which are corrupted by shlex.

    :param line: one (logical) requirements-file line
    :returns: tuple ``(args_string, options_string)`` -- everything before
        the first ``-``/``--`` token, and the rest of the line
    """
    tokens = line.split(' ')
    args = []
    options = tokens[:]
    for token in tokens:
        # Any token beginning with '-' starts the options part; this also
        # covers '--' (the original redundantly tested both prefixes).
        if token.startswith('-'):
            break
        else:
            args.append(token)
            options.pop(0)
    return ' '.join(args), ' '.join(options)
def build_parser():
    """
    Return a parser for parsing requirement lines
    """
    # No help option: these lines are never typed interactively.
    parser = optparse.OptionParser(add_help_option=False)
    for make_option in SUPPORTED_OPTIONS + SUPPORTED_OPTIONS_REQ:
        parser.add_option(make_option())

    # By default optparse sys.exits on parsing errors. We want to wrap
    # that in our own exception.
    def parser_exit(self, msg):
        raise RequirementsFileParseError(msg)
    parser.exit = parser_exit

    return parser
def join_lines(lines_enum):
    """Joins a line ending in '\' with the previous line (except when following
    comments).  The joined line takes on the index of the first line.
    """
    first_line_number = None
    pending = []
    for line_number, line in lines_enum:
        if line.endswith('\\') and not COMMENT_RE.match(line):
            # Continuation: buffer it, remembering where the run started.
            if not pending:
                first_line_number = line_number
            pending.append(line.strip('\\'))
        else:
            if COMMENT_RE.match(line):
                # this ensures comments are always matched later
                line = ' ' + line
            if pending:
                pending.append(line)
                yield first_line_number, ''.join(pending)
                pending = []
            else:
                yield line_number, line
    # last line contains \
    if pending:
        yield first_line_number, ''.join(pending)

    # TODO: handle space after '\'.
def ignore_comments(lines_enum):
    """
    Strips comments and filter empty lines.
    """
    for line_number, line in lines_enum:
        # Drop the comment part, then anything that is only whitespace.
        stripped = COMMENT_RE.sub('', line).strip()
        if stripped:
            yield line_number, stripped
def skip_regex(lines_enum, options):
    """
    Skip lines that match '--skip-requirements-regex' pattern

    Note: the regex pattern is only built once
    """
    skip_regex = options.skip_requirements_regex if options else None
    if not skip_regex:
        # Nothing to filter; hand the iterator back untouched.
        return lines_enum
    pattern = re.compile(skip_regex)
    return filterfalse(lambda e: pattern.search(e[1]), lines_enum)
| |
# Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test class for Management Interface used by iLO modules."""
import mock
from oslo_config import cfg
from oslo_utils import importutils
from ironic.common import boot_devices
from ironic.common import exception
from ironic.conductor import task_manager
from ironic.drivers.modules.ilo import common as ilo_common
from ironic.drivers.modules.ilo import management as ilo_management
from ironic.drivers.modules import ipmitool
from ironic.tests.conductor import utils as mgr_utils
from ironic.tests.db import base as db_base
from ironic.tests.db import utils as db_utils
from ironic.tests.objects import utils as obj_utils
# proliantutils may be absent in some test environments; import leniently.
ilo_error = importutils.try_import('proliantutils.exception')
# Canned iLO driver_info used to build the test node.
INFO_DICT = db_utils.get_test_ilo_info()
CONF = cfg.CONF
class IloManagementTestCase(db_base.DbTestCase):
    def setUp(self):
        """Create a fake_ilo test node for every test."""
        super(IloManagementTestCase, self).setUp()
        mgr_utils.mock_the_extension_manager(driver="fake_ilo")
        self.node = obj_utils.create_test_node(
            self.context, driver='fake_ilo', driver_info=INFO_DICT)
def test_get_properties(self):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
expected = ilo_management.MANAGEMENT_PROPERTIES
self.assertEqual(expected,
task.driver.management.get_properties())
    @mock.patch.object(ilo_common, 'parse_driver_info', spec_set=True,
                       autospec=True)
    def test_validate(self, driver_info_mock):
        """validate() delegates to ilo_common.parse_driver_info."""
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            task.driver.management.validate(task)
            driver_info_mock.assert_called_once_with(task.node)
def test_get_supported_boot_devices(self):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
expected = [boot_devices.PXE, boot_devices.DISK,
boot_devices.CDROM]
self.assertEqual(
sorted(expected),
sorted(task.driver.management.
get_supported_boot_devices(task)))
    @mock.patch.object(ilo_common, 'get_ilo_object', spec_set=True,
                       autospec=True)
    def test_get_boot_device_next_boot(self, get_ilo_object_mock):
        """A one-time boot device is reported as non-persistent."""
        ilo_object_mock = get_ilo_object_mock.return_value
        ilo_object_mock.get_one_time_boot.return_value = 'CDROM'
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            expected_device = boot_devices.CDROM
            expected_response = {'boot_device': expected_device,
                                 'persistent': False}
            self.assertEqual(expected_response,
                             task.driver.management.get_boot_device(task))
            ilo_object_mock.get_one_time_boot.assert_called_once_with()
    @mock.patch.object(ilo_common, 'get_ilo_object', spec_set=True,
                       autospec=True)
    def test_get_boot_device_persistent(self, get_ilo_object_mock):
        """'Normal' one-time boot falls back to the persistent device."""
        ilo_mock = get_ilo_object_mock.return_value
        ilo_mock.get_one_time_boot.return_value = 'Normal'
        ilo_mock.get_persistent_boot_device.return_value = 'NETWORK'
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            expected_device = boot_devices.PXE
            expected_response = {'boot_device': expected_device,
                                 'persistent': True}
            self.assertEqual(expected_response,
                             task.driver.management.get_boot_device(task))
            ilo_mock.get_one_time_boot.assert_called_once_with()
            ilo_mock.get_persistent_boot_device.assert_called_once_with()
    @mock.patch.object(ilo_common, 'get_ilo_object', spec_set=True,
                       autospec=True)
    def test_get_boot_device_fail(self, get_ilo_object_mock):
        """IloError from get_one_time_boot surfaces as IloOperationError."""
        ilo_mock_object = get_ilo_object_mock.return_value
        exc = ilo_error.IloError('error')
        ilo_mock_object.get_one_time_boot.side_effect = exc
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            self.assertRaises(exception.IloOperationError,
                              task.driver.management.get_boot_device,
                              task)
        ilo_mock_object.get_one_time_boot.assert_called_once_with()
    @mock.patch.object(ilo_common, 'get_ilo_object', spec_set=True,
                       autospec=True)
    def test_get_boot_device_persistent_fail(self, get_ilo_object_mock):
        """IloError from get_persistent_boot_device is wrapped."""
        ilo_mock_object = get_ilo_object_mock.return_value
        ilo_mock_object.get_one_time_boot.return_value = 'Normal'
        exc = ilo_error.IloError('error')
        ilo_mock_object.get_persistent_boot_device.side_effect = exc
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            self.assertRaises(exception.IloOperationError,
                              task.driver.management.get_boot_device,
                              task)
        ilo_mock_object.get_one_time_boot.assert_called_once_with()
        ilo_mock_object.get_persistent_boot_device.assert_called_once_with()
    @mock.patch.object(ilo_common, 'get_ilo_object', spec_set=True,
                       autospec=True)
    def test_set_boot_device_ok(self, get_ilo_object_mock):
        """Non-persistent CDROM maps to set_one_time_boot('CDROM')."""
        ilo_object_mock = get_ilo_object_mock.return_value
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            task.driver.management.set_boot_device(task, boot_devices.CDROM,
                                                   False)
            get_ilo_object_mock.assert_called_once_with(task.node)
            ilo_object_mock.set_one_time_boot.assert_called_once_with('CDROM')
    @mock.patch.object(ilo_common, 'get_ilo_object', spec_set=True,
                       autospec=True)
    def test_set_boot_device_persistent_true(self, get_ilo_object_mock):
        """Persistent PXE maps to update_persistent_boot(['NETWORK'])."""
        ilo_mock = get_ilo_object_mock.return_value
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            task.driver.management.set_boot_device(task, boot_devices.PXE,
                                                   True)
            get_ilo_object_mock.assert_called_once_with(task.node)
            ilo_mock.update_persistent_boot.assert_called_once_with(
                ['NETWORK'])
    @mock.patch.object(ilo_common, 'get_ilo_object', spec_set=True,
                       autospec=True)
    def test_set_boot_device_fail(self, get_ilo_object_mock):
        """IloError from set_one_time_boot surfaces as IloOperationError."""
        ilo_mock_object = get_ilo_object_mock.return_value
        exc = ilo_error.IloError('error')
        ilo_mock_object.set_one_time_boot.side_effect = exc
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            self.assertRaises(exception.IloOperationError,
                              task.driver.management.set_boot_device,
                              task, boot_devices.PXE)
        ilo_mock_object.set_one_time_boot.assert_called_once_with('NETWORK')
    @mock.patch.object(ilo_common, 'get_ilo_object', spec_set=True,
                       autospec=True)
    def test_set_boot_device_persistent_fail(self, get_ilo_object_mock):
        """IloError from update_persistent_boot is wrapped."""
        ilo_mock_object = get_ilo_object_mock.return_value
        exc = ilo_error.IloError('error')
        ilo_mock_object.update_persistent_boot.side_effect = exc
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            self.assertRaises(exception.IloOperationError,
                              task.driver.management.set_boot_device,
                              task, boot_devices.PXE, True)
        ilo_mock_object.update_persistent_boot.assert_called_once_with(
            ['NETWORK'])
def test_set_boot_device_invalid_device(self):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
self.assertRaises(exception.InvalidParameterValue,
task.driver.management.set_boot_device,
task, 'fake-device')
    @mock.patch.object(ilo_common, 'update_ipmi_properties', spec_set=True,
                       autospec=True)
    @mock.patch.object(ipmitool.IPMIManagement, 'get_sensors_data',
                       spec_set=True, autospec=True)
    def test_get_sensor_data(self, get_sensors_data_mock, update_ipmi_mock):
        """Sensor data is fetched via IPMI after refreshing IPMI properties."""
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            task.driver.management.get_sensors_data(task)
            update_ipmi_mock.assert_called_once_with(task)
            get_sensors_data_mock.assert_called_once_with(mock.ANY, task)
    @mock.patch.object(ilo_common, 'get_ilo_object', spec_set=True,
                       autospec=True)
    def test__execute_ilo_clean_step_ok(self, get_ilo_object_mock):
        """_execute_ilo_clean_step forwards args/kwargs to the ilo object."""
        ilo_mock = get_ilo_object_mock.return_value
        clean_step_mock = getattr(ilo_mock, 'fake-step')
        ilo_management._execute_ilo_clean_step(
            self.node, 'fake-step', 'args', kwarg='kwarg')
        clean_step_mock.assert_called_once_with('args', kwarg='kwarg')
    @mock.patch.object(ilo_management, 'LOG', spec_set=True, autospec=True)
    @mock.patch.object(ilo_common, 'get_ilo_object', spec_set=True,
                       autospec=True)
    def test__execute_ilo_clean_step_not_supported(self, get_ilo_object_mock,
                                                   log_mock):
        """Unsupported clean steps are logged as a warning, not raised."""
        ilo_mock = get_ilo_object_mock.return_value
        exc = ilo_error.IloCommandNotSupportedError("error")
        clean_step_mock = getattr(ilo_mock, 'fake-step')
        clean_step_mock.side_effect = exc
        ilo_management._execute_ilo_clean_step(
            self.node, 'fake-step', 'args', kwarg='kwarg')
        clean_step_mock.assert_called_once_with('args', kwarg='kwarg')
        self.assertTrue(log_mock.warn.called)
    @mock.patch.object(ilo_common, 'get_ilo_object', spec_set=True,
                       autospec=True)
    def test__execute_ilo_clean_step_fail(self, get_ilo_object_mock):
        """IloError in a clean step raises NodeCleaningFailure."""
        ilo_mock = get_ilo_object_mock.return_value
        exc = ilo_error.IloError("error")
        clean_step_mock = getattr(ilo_mock, 'fake-step')
        clean_step_mock.side_effect = exc
        self.assertRaises(exception.NodeCleaningFailure,
                          ilo_management._execute_ilo_clean_step,
                          self.node, 'fake-step', 'args', kwarg='kwarg')
        clean_step_mock.assert_called_once_with('args', kwarg='kwarg')
    @mock.patch.object(ilo_management, '_execute_ilo_clean_step',
                       spec_set=True, autospec=True)
    def test_reset_ilo(self, clean_step_mock):
        """reset_ilo delegates to the 'reset_ilo' clean-step helper."""
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            task.driver.management.reset_ilo(task)
        clean_step_mock.assert_called_once_with(task.node, 'reset_ilo')
    @mock.patch.object(ilo_management, '_execute_ilo_clean_step',
                       spec_set=True, autospec=True)
    def test_reset_ilo_credential_ok(self, clean_step_mock):
        """The pending password is applied and moved from
        'ilo_change_password' to 'ilo_password' in driver_info."""
        info = self.node.driver_info
        info['ilo_change_password'] = "fake-password"
        self.node.driver_info = info
        self.node.save()
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            task.driver.management.reset_ilo_credential(task)
            clean_step_mock.assert_called_once_with(
                task.node, 'reset_ilo_credential', 'fake-password')
            self.assertIsNone(
                task.node.driver_info.get('ilo_change_password'))
            self.assertEqual(task.node.driver_info['ilo_password'],
                             'fake-password')
    @mock.patch.object(ilo_management, 'LOG', spec_set=True, autospec=True)
    @mock.patch.object(ilo_management, '_execute_ilo_clean_step',
                       spec_set=True, autospec=True)
    def test_reset_ilo_credential_no_password(self, clean_step_mock,
                                              log_mock):
        """Without a pending password the step is a logged no-op."""
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            task.driver.management.reset_ilo_credential(task)
            self.assertFalse(clean_step_mock.called)
            self.assertTrue(log_mock.info.called)
    @mock.patch.object(ilo_management, '_execute_ilo_clean_step',
                       spec_set=True, autospec=True)
    def test_reset_bios_to_default(self, clean_step_mock):
        """reset_bios_to_default delegates to the matching clean step."""
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            task.driver.management.reset_bios_to_default(task)
            clean_step_mock.assert_called_once_with(task.node,
                                                    'reset_bios_to_default')
    @mock.patch.object(ilo_management, '_execute_ilo_clean_step',
                       spec_set=True, autospec=True)
    def test_reset_secure_boot_keys_to_default(self, clean_step_mock):
        """reset_secure_boot_keys_to_default maps to 'reset_secure_boot_keys'."""
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            task.driver.management.reset_secure_boot_keys_to_default(task)
            clean_step_mock.assert_called_once_with(task.node,
                                                    'reset_secure_boot_keys')
    @mock.patch.object(ilo_management, '_execute_ilo_clean_step',
                       spec_set=True, autospec=True)
    def test_clear_secure_boot_keys(self, clean_step_mock):
        """clear_secure_boot_keys delegates to the matching clean step."""
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            task.driver.management.clear_secure_boot_keys(task)
            clean_step_mock.assert_called_once_with(task.node,
                                                    'clear_secure_boot_keys')
| |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Operators that integrat with Google Cloud Build service."""
import re
from copy import deepcopy
from typing import Any, Dict, Iterable, Optional
from urllib.parse import unquote, urlparse
from airflow import AirflowException
from airflow.gcp.hooks.cloud_build import CloudBuildHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
# Matches the "/p/<project_id>/r/<repo_name>" path of a Cloud Source repo URL.
REGEX_REPO_PATH = re.compile(r"^/p/(?P<project_id>[^/]+)/r/(?P<repo_name>[^/]+)")
class BuildProcessor:
    """
    Normalizes a Cloud Build request body before it is sent to the API.

    Two conveniences are layered on top of the raw API body:

    * exactly one source type (``storageSource`` or ``repoSource``) must be
      present, and this is validated up front,
    * either source may be given as a URL string, which is expanded into the
      dict representation the API expects.

    :param body: The request body.
        See: https://cloud.google.com/cloud-build/docs/api/reference/rest/Shared.Types/Build
    :type body: dict
    """

    def __init__(self, body: Dict) -> None:
        # Work on a copy so the caller's dict is never mutated.
        self.body = deepcopy(body)

    def _verify_source(self):
        # Exactly one of the two source types may be present.
        source = self.body["source"]
        provided = ("storageSource" in source, "repoSource" in source)
        if sum(provided) != 1:
            raise AirflowException(
                "The source could not be determined. Please choose one data source from: "
                "storageSource and repoSource."
            )

    def _reformat_source(self):
        self._reformat_repo_source()
        self._reformat_storage_source()

    def _reformat_repo_source(self):
        # Expand a repository URL string into the API's dict form.
        repo = self.body["source"].get("repoSource")
        if isinstance(repo, str):
            self.body["source"]["repoSource"] = self._convert_repo_url_to_dict(repo)

    def _reformat_storage_source(self):
        # Expand a GCS URL string into the API's dict form.
        storage = self.body["source"].get("storageSource")
        if isinstance(storage, str):
            self.body["source"]["storageSource"] = self._convert_storage_url_to_dict(storage)

    def process_body(self):
        """
        Validate and normalize the body passed to the constructor.

        :return: the body.
        :type: dict
        """
        self._verify_source()
        self._reformat_source()
        return self.body

    @staticmethod
    def _convert_repo_url_to_dict(source):
        """
        Convert a Google Cloud Source repository URL to the API's dict form.

        Example valid input:

        .. code-block:: none

            https://source.developers.google.com/p/airflow-project/r/airflow-repo#branch-name

        """
        url_parts = urlparse(source)
        match = REGEX_REPO_PATH.search(url_parts.path)

        well_formed = (
            url_parts.scheme == "https"
            and url_parts.hostname == "source.developers.google.com"
            and match
        )
        if not well_formed:
            raise AirflowException(
                "Invalid URL. You must pass the URL in the format: "
                "https://source.developers.google.com/p/airflow-project/r/airflow-repo#branch-name"
            )

        # The URL fragment, when present, selects the branch (default: master).
        return {
            "projectId": unquote(match.group("project_id")),
            "repoName": unquote(match.group("repo_name")),
            "branchName": url_parts.fragment or "master",
        }

    @staticmethod
    def _convert_storage_url_to_dict(storage_url: str) -> Dict[str, Any]:
        """
        Convert a Google Cloud Storage object URL to the API's dict form.

        Example valid input:

        .. code-block:: none

            gs://bucket-name/object-name.tar.gz

        """
        url_parts = urlparse(storage_url)

        well_formed = (
            url_parts.scheme == "gs"
            and url_parts.hostname
            and url_parts.path
            and url_parts.path != "/"
        )
        if not well_formed:
            raise AirflowException(
                "Invalid URL. You must pass the URL in the format: "
                "gs://bucket-name/object-name.tar.gz#24565443"
            )

        # The URL fragment, when present, pins a specific object generation.
        result = {"bucket": url_parts.hostname, "object": url_parts.path[1:]}
        if url_parts.fragment:
            result["generation"] = url_parts.fragment
        return result
class CloudBuildCreateBuildOperator(BaseOperator):
    """
    Starts a build with the specified configuration.

    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:CloudBuildCreateBuildOperator`

    :param body: The request body.
        See: https://cloud.google.com/cloud-build/docs/api/reference/rest/Shared.Types/Build
    :type body: dict
    :param project_id: ID of the Google Cloud project if None then
        default project_id is used.
    :type project_id: str
    :param gcp_conn_id: The connection ID to use to connect to Google Cloud Platform.
    :type gcp_conn_id: str
    :param api_version: API version used (for example v1 or v1beta1).
    :type api_version: str
    """

    template_fields = ("body", "gcp_conn_id", "api_version")  # type: Iterable[str]

    @apply_defaults
    def __init__(self,
                 body: dict,
                 project_id: Optional[str] = None,
                 gcp_conn_id: str = "google_cloud_default",
                 api_version: str = "v1",
                 *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)
        self.api_version = api_version
        self.gcp_conn_id = gcp_conn_id
        self.project_id = project_id
        self.body = body
        # Fail fast at DAG-parse time rather than at execution time.
        self._validate_inputs()

    def _validate_inputs(self):
        # The body is mandatory; everything else has a usable default.
        if not self.body:
            raise AirflowException("The required parameter 'body' is missing")

    def execute(self, context):
        # Normalize the (possibly URL-based) source before calling the API.
        build_body = BuildProcessor(body=self.body).process_body()
        hook = CloudBuildHook(gcp_conn_id=self.gcp_conn_id, api_version=self.api_version)
        return hook.create_build(body=build_body, project_id=self.project_id)
| |
"""Extract reference documentation from the NumPy source tree.
"""
import collections
import collections.abc
import copy
import inspect
import pydoc
import re
import textwrap
from warnings import warn
def strip_blank_lines(l):
    """Trim blank lines from both ends of *l*, in place, and return it."""
    while l and not l[0].strip():
        l.pop(0)
    while l and not l[-1].strip():
        l.pop()
    return l
class Reader:
    """A cursor-based reader over a list of text lines."""

    def __init__(self, data):
        """
        Parameters
        ----------
        data : str
            String with lines separated by '\n'.

        """
        # A list is used as-is; a string is split into its lines.
        self._str = data if isinstance(data, list) else data.split('\n')
        self.reset()

    def __getitem__(self, n):
        return self._str[n]

    def reset(self):
        # Rewind the cursor to the first line.
        self._l = 0

    def read(self):
        """Return the current line and advance, or '' at end of input."""
        if self.eof():
            return ''
        line = self._str[self._l]
        self._l += 1
        return line

    def seek_next_non_empty_line(self):
        """Advance the cursor past any blank lines."""
        while self._l < len(self._str) and not self._str[self._l].strip():
            self._l += 1

    def eof(self):
        return self._l >= len(self._str)

    def read_to_condition(self, condition_func):
        """Consume lines until condition_func(line) holds; return them."""
        start = self._l
        while self._l < len(self._str):
            if condition_func(self._str[self._l]):
                return self._str[start:self._l]
            self._l += 1
        # Hit EOF without meeting the condition: return everything consumed.
        return self._str[start:self._l + 1]

    def read_to_next_empty_line(self):
        """Skip leading blanks, then read up to the next blank line."""
        self.seek_next_non_empty_line()
        return self.read_to_condition(lambda line: not line.strip())

    def read_to_next_unindented_line(self):
        """Read up to the next non-blank line with no leading whitespace."""
        def _unindented(line):
            return bool(line.strip()) and len(line.lstrip()) == len(line)
        return self.read_to_condition(_unindented)

    def peek(self, n=0):
        """Return the line *n* ahead of the cursor ('' when past EOF)."""
        idx = self._l + n
        return self._str[idx] if idx < len(self._str) else ''

    def is_empty(self):
        """True when the whole input contains only whitespace."""
        return not ''.join(self._str).strip()
class ParseError(Exception):
    """Raised when a docstring cannot be parsed.

    A ``docstring`` attribute may be attached afterwards; when present it
    is appended to the message to show where the failure came from.
    """

    def __str__(self):
        try:
            return "{} in {!r}".format(self.args[0], self.docstring)
        except AttributeError:
            return self.args[0]
class NumpyDocString(collections.abc.Mapping):
    """Parses a numpydoc string to an abstract representation

    Instances define a mapping from section title to structured data.

    """

    # BUG FIX: this class previously inherited from ``collections.Mapping``.
    # The ABC aliases in ``collections`` were removed in Python 3.10; the
    # canonical home is ``collections.abc``.

    # Recognized section titles and the empty value each one defaults to.
    sections = {
        'Signature': '',
        'Summary': [''],
        'Extended Summary': [],
        'Parameters': [],
        'Returns': [],
        'Yields': [],
        'Raises': [],
        'Warns': [],
        'Other Parameters': [],
        'Attributes': [],
        'Methods': [],
        'See Also': [],
        'Notes': [],
        'Warnings': [],
        'References': '',
        'Examples': '',
        'index': {}
    }

    def __init__(self, docstring, config={}):
        """Parse *docstring*.

        ``config`` is unused here but kept for interface compatibility with
        subclasses; the mutable default is safe because it is never mutated.
        """
        orig_docstring = docstring
        docstring = textwrap.dedent(docstring).split('\n')

        self._doc = Reader(docstring)
        # deepcopy so instances never share the mutable section defaults.
        self._parsed_data = copy.deepcopy(self.sections)

        try:
            self._parse()
        except ParseError as e:
            # Attach the original docstring for a more helpful message.
            e.docstring = orig_docstring
            raise

    def __getitem__(self, key):
        return self._parsed_data[key]

    def __setitem__(self, key, val):
        # Unknown section titles are reported (as a warning), not stored.
        if key not in self._parsed_data:
            self._error_location("Unknown section %s" % key, error=False)
        else:
            self._parsed_data[key] = val

    def __iter__(self):
        return iter(self._parsed_data)

    def __len__(self):
        return len(self._parsed_data)

    def _is_at_section(self):
        """Return True if the reader sits on a section header."""
        self._doc.seek_next_non_empty_line()

        if self._doc.eof():
            return False

        l1 = self._doc.peek().strip()  # e.g. Parameters

        if l1.startswith('.. index::'):
            return True

        # A header is a title underlined with '-' or '=' of the same length.
        l2 = self._doc.peek(1).strip()  # ---------- or ==========
        return l2.startswith('-'*len(l1)) or l2.startswith('='*len(l1))

    def _strip(self, doc):
        """Strip leading and trailing blank lines from a list of lines."""
        i = 0
        j = 0
        for i, line in enumerate(doc):
            if line.strip():
                break

        for j, line in enumerate(doc[::-1]):
            if line.strip():
                break

        return doc[i:len(doc)-j]

    def _read_to_next_section(self):
        """Read lines up to (but not including) the next section header."""
        section = self._doc.read_to_next_empty_line()

        while not self._is_at_section() and not self._doc.eof():
            if not self._doc.peek(-1).strip():    # previous line was empty
                section += ['']

            section += self._doc.read_to_next_empty_line()

        return section

    def _read_sections(self):
        """Yield (section name, content lines) pairs."""
        while not self._doc.eof():
            data = self._read_to_next_section()
            name = data[0].strip()

            if name.startswith('..'):  # index section
                yield name, data[1:]
            elif len(data) < 2:
                # NOTE(review): yielding StopIteration looks like a latent
                # bug -- the caller unpacks 2-tuples and would fail here.
                # Left unchanged to preserve behavior; TODO confirm whether
                # this branch is ever reached and handle it explicitly.
                yield StopIteration
            else:
                yield name, self._strip(data[2:])

    def _parse_param_list(self, content):
        """Parse 'name : type' entries with indented descriptions."""
        r = Reader(content)
        params = []
        while not r.eof():
            header = r.read().strip()
            if ' : ' in header:
                arg_name, arg_type = header.split(' : ')[:2]
            else:
                arg_name, arg_type = header, ''

            # The description is the following indented block.
            desc = r.read_to_next_unindented_line()
            desc = dedent_lines(desc)
            desc = strip_blank_lines(desc)

            params.append((arg_name, arg_type, desc))

        return params

    # Matches ':role:`name`' or a bare 'name' item in a See Also list.
    _name_rgx = re.compile(r"^\s*(:(?P<role>\w+):"
                           r"`(?P<name>(?:~\w+\.)?[a-zA-Z0-9_.-]+)`|"
                           r" (?P<name2>[a-zA-Z0-9_.-]+))\s*", re.X)

    def _parse_see_also(self, content):
        """
        func_name : Descriptive text
            continued text
        another_func_name : Descriptive text
        func_name1, func_name2, :meth:`func_name`, func_name3

        """
        items = []

        def parse_item_name(text):
            """Match ':role:`name`' or 'name'"""
            m = self._name_rgx.match(text)
            if m:
                g = m.groups()
                if g[1] is None:
                    return g[3], None
                else:
                    return g[2], g[1]
            raise ParseError("%s is not a item name" % text)

        def push_item(name, rest):
            # Record the finished item and reset its description buffer.
            if not name:
                return
            name, role = parse_item_name(name)
            items.append((name, list(rest), role))
            del rest[:]

        current_func = None
        rest = []

        for line in content:
            if not line.strip():
                continue

            m = self._name_rgx.match(line)
            if m and line[m.end():].strip().startswith(':'):
                # "name : description" on one line.
                push_item(current_func, rest)
                current_func, line = line[:m.end()], line[m.end():]
                rest = [line.split(':', 1)[1].strip()]
                if not rest[0]:
                    rest = []
            elif not line.startswith(' '):
                # Unindented: either a comma list of names or a single name.
                push_item(current_func, rest)
                current_func = None
                if ',' in line:
                    for func in line.split(','):
                        if func.strip():
                            push_item(func, [])
                elif line.strip():
                    current_func = line
            elif current_func is not None:
                # Indented continuation of the current description.
                rest.append(line.strip())
        push_item(current_func, rest)
        return items

    def _parse_index(self, section, content):
        """
        .. index: default
           :refguide: something, else, and more

        """
        def strip_each_in(lst):
            return [s.strip() for s in lst]

        out = {}
        section = section.split('::')
        if len(section) > 1:
            out['default'] = strip_each_in(section[1].split(','))[0]
        for line in content:
            line = line.split(':')
            if len(line) > 2:
                out[line[1]] = strip_each_in(line[2].split(','))
        return out

    def _parse_summary(self):
        """Grab signature (if given) and summary"""
        if self._is_at_section():
            return

        # If several signatures present, take the last one
        while True:
            summary = self._doc.read_to_next_empty_line()
            summary_str = " ".join([s.strip() for s in summary]).strip()
            if re.compile(r'^([\w., ]+=)?\s*[\w\.]+\(.*\)$').match(summary_str):
                self['Signature'] = summary_str
                if not self._is_at_section():
                    continue
            break

        if summary is not None:
            self['Summary'] = summary

        if not self._is_at_section():
            self['Extended Summary'] = self._read_to_next_section()

    def _parse(self):
        """Top-level driver: summary first, then each titled section."""
        self._doc.reset()
        self._parse_summary()

        sections = list(self._read_sections())
        section_names = {section for section, content in sections}

        has_returns = 'Returns' in section_names
        has_yields = 'Yields' in section_names
        # We could do more tests, but we are not. Arbitrarily.
        if has_returns and has_yields:
            msg = 'Docstring contains both a Returns and Yields section.'
            raise ValueError(msg)

        for (section, content) in sections:
            if not section.startswith('..'):
                # Normalize the title, e.g. "other parameters" -> "Other Parameters".
                section = (s.capitalize() for s in section.split(' '))
                section = ' '.join(section)
                if self.get(section):
                    self._error_location("The section %s appears twice"
                                         % section)

            if section in ('Parameters', 'Returns', 'Yields', 'Raises',
                           'Warns', 'Other Parameters', 'Attributes',
                           'Methods'):
                self[section] = self._parse_param_list(content)
            elif section.startswith('.. index::'):
                self['index'] = self._parse_index(section, content)
            elif section == 'See Also':
                self['See Also'] = self._parse_see_also(content)
            else:
                self[section] = content

    def _error_location(self, msg, error=True):
        """Raise or warn with *msg*, adding the doc's origin when known."""
        if hasattr(self, '_obj'):
            # we know where the docs came from:
            try:
                filename = inspect.getsourcefile(self._obj)
            except TypeError:
                filename = None
            msg = msg + (" in the docstring of %s in %s."
                         % (self._obj, filename))
        if error:
            raise ValueError(msg)
        else:
            warn(msg)

    # string conversion routines

    def _str_header(self, name, symbol='-'):
        return [name, len(name)*symbol]

    def _str_indent(self, doc, indent=4):
        out = []
        for line in doc:
            out += [' '*indent + line]
        return out

    def _str_signature(self):
        if self['Signature']:
            return [self['Signature'].replace('*', r'\*')] + ['']
        else:
            return ['']

    def _str_summary(self):
        if self['Summary']:
            return self['Summary'] + ['']
        else:
            return []

    def _str_extended_summary(self):
        if self['Extended Summary']:
            return self['Extended Summary'] + ['']
        else:
            return []

    def _str_param_list(self, name):
        """Render a parameter-style section as reST lines."""
        out = []
        if self[name]:
            out += self._str_header(name)
            for param, param_type, desc in self[name]:
                if param_type:
                    out += ['{} : {}'.format(param, param_type)]
                else:
                    out += [param]
                if desc and ''.join(desc).strip():
                    out += self._str_indent(desc)
                out += ['']
        return out

    def _str_section(self, name):
        """Render a free-text section as reST lines."""
        out = []
        if self[name]:
            out += self._str_header(name)
            out += self[name]
            out += ['']
        return out

    def _str_see_also(self, func_role):
        """Render the See Also section, grouping undescribed items."""
        if not self['See Also']:
            return []
        out = []
        out += self._str_header("See Also")
        last_had_desc = True
        for func, desc, role in self['See Also']:
            if role:
                link = ':{}:`{}`'.format(role, func)
            elif func_role:
                link = ':{}:`{}`'.format(func_role, func)
            else:
                link = "`%s`_" % func
            if desc or last_had_desc:
                out += ['']
                out += [link]
            else:
                # Chain description-less items onto the previous line.
                out[-1] += ", %s" % link
            if desc:
                out += self._str_indent([' '.join(desc)])
                last_had_desc = True
            else:
                last_had_desc = False
        out += ['']
        return out

    def _str_index(self):
        idx = self['index']
        out = []
        out += ['.. index:: %s' % idx.get('default', '')]
        for section, references in idx.items():
            if section == 'default':
                continue
            out += ['   :{}: {}'.format(section, ', '.join(references))]
        return out

    def __str__(self, func_role=''):
        """Render the whole parsed docstring back to reST text."""
        out = []
        out += self._str_signature()
        out += self._str_summary()
        out += self._str_extended_summary()
        for param_list in ('Parameters', 'Returns', 'Yields',
                           'Other Parameters', 'Raises', 'Warns'):
            out += self._str_param_list(param_list)
        out += self._str_section('Warnings')
        out += self._str_see_also(func_role)
        for s in ('Notes', 'References', 'Examples'):
            out += self._str_section(s)
        for param_list in ('Attributes', 'Methods'):
            out += self._str_param_list(param_list)
        out += self._str_index()
        return '\n'.join(out)
def indent(str, indent=4):
    """Indent every line of *str* by *indent* spaces (None -> bare indent)."""
    prefix = ' ' * indent
    if str is None:
        return prefix
    return '\n'.join(prefix + line for line in str.split('\n'))
def dedent_lines(lines):
    """Deindent a list of lines maximally"""
    joined = "\n".join(lines)
    return textwrap.dedent(joined).split("\n")
def header(text, style='-'):
    """Return *text* underlined with *style* characters, newline-terminated."""
    rule = style * len(text)
    return '{}\n{}\n'.format(text, rule)
class FunctionDoc(NumpyDocString):
    """NumpyDocString for a function or method, with signature discovery."""
    def __init__(self, func, role='func', doc=None, config={}):
        self._f = func
        self._role = role  # e.g. "func" or "meth"

        if doc is None:
            if func is None:
                raise ValueError("No function or docstring given")
            doc = inspect.getdoc(func) or ''
        NumpyDocString.__init__(self, doc)

        # When the docstring carries no explicit signature, derive one by
        # introspection of the callable itself.
        if not self['Signature'] and func is not None:
            func, func_name = self.get_func()
            try:
                try:
                    signature = str(inspect.signature(func))
                except (AttributeError, ValueError):
                    # try to read signature, backward compat for older Python
                    # NOTE(review): inspect.formatargspec was removed in
                    # Python 3.11, so this fallback would raise there --
                    # confirm the minimum supported Python version.
                    argspec = inspect.getfullargspec(func)
                    signature = inspect.formatargspec(*argspec)
                signature = '{}{}'.format(func_name, signature.replace('*', r'\*'))
            except TypeError:
                signature = '%s()' % func_name
            self['Signature'] = signature

    def get_func(self):
        """Return (callable, display name) for the wrapped object.

        For classes, the documented callable is __call__ (or __init__).
        """
        func_name = getattr(self._f, '__name__', self.__class__.__name__)
        if inspect.isclass(self._f):
            func = getattr(self._f, '__call__', self._f.__init__)
        else:
            func = self._f
        return func, func_name

    def __str__(self):
        """Render as reST, wrapped in a function/method directive."""
        out = ''

        func, func_name = self.get_func()
        # NOTE(review): `signature` is computed here but never used below.
        signature = self['Signature'].replace('*', r'\*')

        roles = {'func': 'function',
                 'meth': 'method'}

        if self._role:
            if self._role not in roles:
                print("Warning: invalid role %s" % self._role)
            out += '.. {}:: {}\n \n\n'.format(roles.get(self._role, ''),
                                              func_name)

        out += super().__str__(func_role=self._role)
        return out
class ClassDoc(NumpyDocString):
    """NumpyDocString for a class: also collects its methods and properties."""

    # Dunder methods that are still documented as public API.
    extra_public_methods = ['__call__']

    def __init__(self, cls, doc=None, modulename='', func_doc=FunctionDoc,
                 config={}):
        if not inspect.isclass(cls) and cls is not None:
            raise ValueError("Expected a class or None, but got %r" % cls)
        self._cls = cls

        self.show_inherited_members = config.get(
                    'show_inherited_class_members', True)

        if modulename and not modulename.endswith('.'):
            modulename += '.'
        self._mod = modulename

        if doc is None:
            if cls is None:
                raise ValueError("No class or documentation string given")
            doc = pydoc.getdoc(cls)

        NumpyDocString.__init__(self, doc)

        if config.get('show_class_members', True):
            def splitlines_x(s):
                # Split a docstring into lines; None/'' becomes [].
                if not s:
                    return []
                else:
                    return s.splitlines()

            # Auto-populate Methods/Attributes from introspection when the
            # docstring did not list them explicitly.
            for field, items in [('Methods', self.methods),
                                 ('Attributes', self.properties)]:
                if not self[field]:
                    doc_list = []
                    for name in sorted(items):
                        try:
                            doc_item = pydoc.getdoc(getattr(self._cls, name))
                            doc_list.append((name, '', splitlines_x(doc_item)))
                        except AttributeError:
                            pass  # method doesn't exist
                    self[field] = doc_list

    @property
    def methods(self):
        """Public (or whitelisted dunder) callable members to document."""
        if self._cls is None:
            return []
        return [name for name, func in inspect.getmembers(self._cls)
                if ((not name.startswith('_')
                     or name in self.extra_public_methods)
                    # BUG FIX: was isinstance(func, collections.Callable);
                    # that alias was removed in Python 3.10. The builtin
                    # callable() performs the equivalent check portably.
                    and callable(func)
                    and self._is_show_member(name))]

    @property
    def properties(self):
        """Public data descriptors / properties to document."""
        if self._cls is None:
            return []
        return [name for name, func in inspect.getmembers(self._cls)
                if (not name.startswith('_') and
                    (func is None or isinstance(func, property) or
                     inspect.isdatadescriptor(func))
                    and self._is_show_member(name))]

    def _is_show_member(self, name):
        # When inherited members are hidden, only names defined directly on
        # the class itself are shown.
        if self.show_inherited_members:
            return True  # show all class members
        if name not in self._cls.__dict__:
            return False  # class member is inherited, we do not show it
        return True
| |
#!/usr/bin/python
# Create passphrase from dice rolls
from Tkinter import Tk, Frame, Button, LEFT, TOP, Label
from sys import exit
from json import load
# empty is a string of spaces to make the width of the labels good
EMPTY = ' '
DICE = 6 # number of dice rolled per word (digits of the base-6 word index)
DIE = 6 # faces on a single die
WORDS = 6 # number of words in the generated passphrase
CAESAR = 13 # ROT13 constant
ALPHABET = 26 # Alphabet size
def fold(base, c):
    """Rotate character code *c* by CAESAR places within the 26-letter range starting at *base*."""
    offset = ((c - base) + CAESAR) % ALPHABET
    return base + offset
def caesar(c):
    """Return the ROT13 counterpart of the letter *c*, preserving case."""
    # BUG FIX: the bare `print c` statement is Python-2-only syntax; the
    # parenthesized single-argument form behaves identically on Python 2
    # and is valid Python 3. (This looks like leftover debug output.)
    print(c)
    o = ord(c)
    # Codes below ord('a') are treated as upper-case letters.
    if o < ord('a'):
        result = fold(ord('A'), o)
    else:
        result = fold(ord('a'), o)
    return chr(result)
# The class Connect holds a die value and connects it to the corresponding label
class Connect(Label):
    """Holds a die value and keeps it in sync with its on-screen label."""
    # NOTE(review): Label.__init__ is never called, so instances are not
    # usable Label widgets themselves -- they only wrap the `label` passed
    # in. Plain composition (no inheritance) would express the intent more
    # clearly; confirm nothing relies on the subclass relationship.
    def __init__(self, label):
        self.label = label
        self.value = -1  # -1 means "no roll entered yet"
    def clear(self):
        # Forget the stored roll and blank the display.
        self.value = -1
        self.label.config(text=' ')
    def set(self, value):
        # Store the 0-based roll; display it 1-based, as on a real die.
        self.value = value
        self.label.config(text = str(value+1))
    def setPick(self):
        # Highlight this cell as the one awaiting input.
        self.label.config(bg = 'red')
    def clearPick(self):
        # Remove the input highlight.
        self.label.config(bg = 'white')
# the class Switch is the central data repository
class Switch():
    """Central data store: the WORDS x DICE grid of rolls and derived words."""
    def __init__(self):
        # rowIndex/columnIndex track the cell awaiting input; they are
        # allowed to run past the grid once every roll has been entered.
        self.columnIndex = 0
        self.rowIndex = 0
        self.connectList = []   # WORDS rows of DICE Connect cells
        self.wordLabels = []    # one label widget per generated word
        self.resultWords = []   # the generated words themselves
        # FIX: use a context manager so the file is closed even on error.
        with open('randomWords.json', 'r') as wordFile:
            self.wordList = load(wordFile)
    def add(self, row, column, connect):
        """Append *connect* to the grid, starting a new row at column 0."""
        # BUG FIX: the original compared `column is 0`, which relies on
        # CPython's small-integer caching; `==` is the correct value test.
        if column == 0:
            rowList = []
            self.connectList.append(rowList)
        else:
            rowList = self.connectList[row]
        rowList.append(connect)
    def iterateDice(self, function):
        # Apply *function* to every Connect cell in the grid.
        for row in self.connectList:
            for item in row:
                function(item)
    def iterateWords(self, function):
        # Apply *function* to every word label.
        for label in self.wordLabels:
            function(label)
    def clear(self):
        """Reset every cell, word, and the input cursor."""
        self.iterateDice(lambda item: item.clear())
        self.iterateWords(lambda label: label.config(text = EMPTY))
        for index in range(len(self.resultWords)):
            self.resultWords[index] = ''
        self.rowIndex = 0
        self.columnIndex = 0
        self.setPick()
    def clearPick(self):
        # Remove the highlight from every cell.
        self.iterateDice(lambda item: item.clearPick())
    def inRange(self):
        # True while the cursor still points inside the grid.
        return 0 <= self.rowIndex < WORDS and 0 <= self.columnIndex < DICE
    # row and column must be in range when this is called
    def getConnect(self):
        return self.connectList[self.rowIndex][self.columnIndex]
    def setPick(self):
        # Highlight only the cell currently awaiting input.
        self.clearPick()
        if self.inRange():
            self.getConnect().setPick()
    def set(self, value):
        """Record a roll at the cursor, refresh the word, advance the cursor."""
        if self.inRange():
            self.getConnect().set(value)
            self.setWord()
            self.columnIndex += 1
            if self.columnIndex >= DICE:
                self.rowIndex += 1
                self.columnIndex = 0
            self.setPick()
    def changePick(self, rowArg, columnArg):
        """Move the input cursor to an arbitrary cell (for editing)."""
        self.rowIndex = rowArg
        self.columnIndex = columnArg
        self.setPick()
    def setWord(self):
        """Recompute the current row's word once all its rolls are present."""
        if 0 <= self.rowIndex < WORDS:
            row = self.connectList[self.rowIndex]
            result = 0
            multiply = 1
            # this is in reverse order because the high order digit is on the left
            if all(item.value >= 0 for item in row):
                for index in range(DICE-1, -1, -1):
                    result += multiply * row[index].value
                    multiply *= DIE
                word = self.wordList[result]
                self.resultWords[self.rowIndex] = word
                self.wordLabels[self.rowIndex].config(text = word)
    def join(self):
        """Return the plain passphrase: words space-joined, outer blanks trimmed."""
        return (' '.join(self.resultWords)).strip()
    def obscure(self):
        """Return the passphrase with every letter ROT13-obscured."""
        result = map((lambda word: map (caesar, word)), self.resultWords)
        # FIX: parenthesized print works on both Python 2 and 3
        # (single-argument form); the bare statement is Python-2-only.
        print(result)
        return (' '.join(map((lambda l: ''.join(l)), result))).strip()
# Passphrase is the UI class, it also initializes the Switch class
class Passphrase(Frame, Switch):
    """Tkinter UI for entering dice rolls and showing the resulting words.

    NOTE(review): the class inherits from both Frame and Switch but also
    receives a Switch instance (`switch`) that it delegates to; the Switch
    base class appears unused -- confirm before removing it.
    """
    def __init__(self, parent, switch):
        Frame.__init__(self, parent, background="white")
        self.parent = parent
        self.switch = switch
        self.initUI()
        self.switch.clear()
    def initUI(self):
        # Build all widgets: instructions, number keys, keyboard bindings,
        # the dice grid with its word column, and the action buttons.
        self.parent.title("Create Passphrase")
        self.makeInstructions()
        self.makeKeys()
        self.makeKeyboard()
        diceContainer = Frame(self)
        self.makeDice(diceContainer)
        self.makeWords(diceContainer)
        diceContainer.grid(row = 2, column = 0)
        self.makeButtons()
        self.grid()
    def makeInstructions(self):
        # Static help text shown at the top of the window.
        instructions = "Press a numbered button or enter 1-6 to enter a dice roll. "
        instructions += "When the row is full the word will appear.\n"
        instructions += "Click on a cell to edit entered rolls\n"
        instructions += "Text copied to clipboard may disappear when the program exits."
        label = Label(self, text = instructions, wraplength=300)
        label.grid(row = 0, column = 0)
    def makeKeyboard(self):
        # Keyboard input: digits 1-6 map to 0-based die values 0-5.
        keyset = frozenset(["1", "2", "3", "4", "5", "6"])
        def key(event):
            if event.char in keyset:
                self.switch.set(ord(event.char) - ord("1"))
        self.bind("<Key>", key)
        self.focus_set()
    def makePress(self, number):
        # Handler factory: binds `number` now, avoiding the late-binding
        # closure trap when created inside a loop.
        return lambda: self.switch.set(number)
    def makeKeys(self):
        # One clickable button per die face.
        keyContainer = Frame(self)
        for number in range (0, DICE):
            button = Button(keyContainer, text = str(number+1), command = self.makePress(number))
            button.grid(row = 1, column=number)
        keyContainer.grid(row = 1, column = 0)
    def makeClick(self, rowIndex, columnIndex):
        # Handler factory for cell clicks (binds indices at creation time).
        return lambda event: self.switch.changePick(rowIndex, columnIndex)
    def makeDice(self, container):
        # The WORDS x DICE grid of roll cells, each wrapped in a Connect.
        for row in range (0, WORDS):
            for column in range (0, DICE):
                label = Label(container, text = ' ', borderwidth = 1, relief = 'solid')
                label.grid(row = row, column = column)
                label.bind("<Button-1>", self.makeClick(row, column))
                self.switch.add(row, column, Connect(label))
    def makeWords(self, container):
        # One label per generated word, in a column right of the dice grid.
        for number in range (0, WORDS):
            label = Label(container, text=EMPTY, borderwidth=1, relief='solid')
            label.grid(row = number, column = DICE)
            self.switch.wordLabels.append(label)
            self.switch.resultWords.append('')
    def copy(self):
        # Copy the plain passphrase to the clipboard.
        string = self.switch.join()
        self.parent.clipboard_clear()
        self.parent.clipboard_append(string)
    def copyObscure(self):
        # Copy the ROT13-obscured passphrase to the clipboard.
        string = self.switch.obscure()
        self.parent.clipboard_clear()
        self.parent.clipboard_append(string)
    def makeButtons(self):
        # Action row: copy plain, copy obscured, clear everything, exit.
        container = Frame(self)
        copy = Button(container, text = 'copy\nplain', command=self.copy)
        copy.grid(row = 0, column = 0, rowspan = 2)
        copy = Button(container, text = 'copy\nobscure', command=self.copyObscure)
        copy.grid(row = 0, column = 1, rowspan = 2)
        copy = Button(container, text = 'clear', command = self.switch.clear)
        copy.grid(row = 0, column = 2, rowspan = 2)
        leave = Button(container, text = 'exit', command = exit)
        leave.grid(row = 0, column = 3, rowspan = 2)
        container.grid(row = 3, column = 0)
def main():
    """Create the main window, highlight the first cell, and run the UI."""
    window = Tk()
    state = Switch()
    Passphrase(window, state)
    state.setPick()
    window.mainloop()
# Start the UI only when run as a script, not on import.
if __name__ == '__main__':
    main()
| |
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unit tests for per project aggregation
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module contains tests for per project aggregation of
aggregator.projectcounts.
"""
import aggregator
import testcases
import os
import datetime
import nose
class DailyProjectAggregationTestCase(testcases.ProjectcountsDataTestCase):
"""TestCase for 'daily' project aggregation functions"""
    def test_daily_csv_non_existing_csv_empty_data(self):
        """A requested date with no csv_data at all must raise RuntimeError."""
        date = datetime.date(2014, 7, 4)
        csv_data = {}
        nose.tools.assert_raises(
            RuntimeError,
            aggregator.update_daily_csv,
            self.data_dir_abs,
            'enwiki',
            csv_data,
            date,
            date)
    def test_daily_csv_non_existing_csv_existing_data_single(self):
        """A single in-range day is written verbatim to a fresh csv file."""
        enwiki_file_abs = os.path.join(self.daily_dir_abs, 'enwiki.csv')
        date = datetime.date(2014, 5, 16)
        csv_data = {'2014-05-16': '2014-05-16,1,2,3'}
        aggregator.update_daily_csv(self.data_dir_abs, 'enwiki', csv_data,
                                    date, date)
        self.assert_file_content_equals(enwiki_file_abs, [
            '2014-05-16,1,2,3'
        ])
    def test_daily_csv_non_existing_csv_existing_data_multiple(self):
        """Only days within [first_date, last_date] are written; the rest
        of csv_data is ignored."""
        enwiki_file_abs = os.path.join(self.daily_dir_abs, 'enwiki.csv')
        first_date = datetime.date(2014, 5, 13)
        last_date = datetime.date(2014, 5, 15)
        csv_data = {
            '2014-05-12': '2014-05-12,1,2,3',
            '2014-05-13': '2014-05-13,4,5,6',
            '2014-05-14': '2014-05-14,7,8,9',
            '2014-05-15': '2014-05-15,10,11,12',
            '2014-05-16': '2014-05-16,13,14,15',
        }
        aggregator.update_daily_csv(self.data_dir_abs, 'enwiki', csv_data,
                                    first_date, last_date)
        self.assert_file_content_equals(enwiki_file_abs, [
            '2014-05-13,4,5,6',
            '2014-05-14,7,8,9',
            '2014-05-15,10,11,12',
        ])
    def test_daily_csv_non_existing_csv_existing_data_outside_date(self):
        """csv_data that only covers days outside the range raises RuntimeError."""
        date = datetime.date(2014, 5, 17)
        csv_data = {'2014-05-16': '2014-05-16,1,2,3'}
        nose.tools.assert_raises(
            RuntimeError,
            aggregator.update_daily_csv,
            self.data_dir_abs,
            'enwiki',
            csv_data,
            date,
            date)
    def test_daily_csv_existing_csv_existing_data_without_force(self):
        """Without force_recomputation, an existing row wins over csv_data."""
        enwiki_file_abs = os.path.join(self.daily_dir_abs, 'enwiki.csv')
        self.create_file(enwiki_file_abs, [
            '2014-05-16,47,500'
        ])
        date = datetime.date(2014, 5, 16)
        csv_data = {'2014-05-16': '2014-05-16,47,167'}
        aggregator.update_daily_csv(self.data_dir_abs, 'enwiki', csv_data,
                                    date, date)
        self.assert_file_content_equals(enwiki_file_abs, [
            '2014-05-16,47,500'
        ])
    def test_daily_csv_existing_csv_existing_data_with_force(self):
        """force_recomputation=True overwrites the existing row with csv_data."""
        enwiki_file_abs = os.path.join(self.daily_dir_abs, 'enwiki.csv')
        self.create_file(enwiki_file_abs, [
            '2014-05-16,47,500'
        ])
        date = datetime.date(2014, 5, 16)
        csv_data = {'2014-05-16': '2014-05-16,47,167'}
        aggregator.update_daily_csv(self.data_dir_abs, 'enwiki', csv_data,
                                    date, date, force_recomputation=True)
        self.assert_file_content_equals(enwiki_file_abs, [
            '2014-05-16,47,167'
        ])
    def test_daily_csv_existing_csv_existing_data_multiple_with_force(self):
        """Forced recomputation rewrites only rows inside [first_date, last_date].

        Rows for 05-12 and 05-16 are outside the range and keep their old
        values even though fresh data is available for them.
        """
        enwiki_file_abs = os.path.join(self.daily_dir_abs, 'enwiki.csv')
        self.create_file(enwiki_file_abs, [
            '2014-05-12,47,501',
            '2014-05-13,47,502',
            '2014-05-14,47,503',
            '2014-05-15,47,504',
            '2014-05-16,47,505',
        ])
        first_date = datetime.date(2014, 5, 13)
        last_date = datetime.date(2014, 5, 15)
        csv_data = {
            '2014-05-12': '2014-05-12,1,2,3',
            '2014-05-13': '2014-05-13,4,5,6',
            '2014-05-14': '2014-05-14,7,8,9',
            '2014-05-15': '2014-05-15,10,11,12',
            '2014-05-16': '2014-05-16,13,14,15',
        }
        aggregator.update_daily_csv(self.data_dir_abs, 'enwiki', csv_data,
                                    first_date, last_date,
                                    force_recomputation=True)
        self.assert_file_content_equals(enwiki_file_abs, [
            '2014-05-12,47,501',
            '2014-05-13,4,5,6',
            '2014-05-14,7,8,9',
            '2014-05-15,10,11,12',
            '2014-05-16,47,505',
        ])
    def test_daily_csv_bad_dates_outside_data_without_force(self):
        """A bad date outside the requested range has no effect without forcing.

        All rows already exist and forcing is off, so the file is unchanged.
        """
        enwiki_file_abs = os.path.join(self.daily_dir_abs, 'enwiki.csv')
        self.create_file(enwiki_file_abs, [
            '2014-05-12,47,501',
            '2014-05-13,47,502',
            '2014-05-14,47,503',
            '2014-05-15,47,504',
            '2014-05-16,47,505',
        ])
        first_date = datetime.date(2014, 5, 13)
        last_date = datetime.date(2014, 5, 15)
        bad_dates = [
            datetime.date(2014, 5, 4)
        ]
        csv_data = {
            '2014-05-12': '2014-05-12,1,2,3',
            '2014-05-13': '2014-05-13,4,5,6',
            '2014-05-14': '2014-05-14,7,8,9',
            '2014-05-15': '2014-05-15,10,11,12',
            '2014-05-16': '2014-05-16,13,14,15',
        }
        aggregator.update_daily_csv(self.data_dir_abs, 'enwiki', csv_data,
                                    first_date, last_date, bad_dates=bad_dates)
        self.assert_file_content_equals(enwiki_file_abs, [
            '2014-05-12,47,501',
            '2014-05-13,47,502',
            '2014-05-14,47,503',
            '2014-05-15,47,504',
            '2014-05-16,47,505',
        ])
    def test_daily_csv_bad_dates_outside_data_with_force(self):
        """A bad date outside the range does not block forced recomputation.

        In-range rows (05-13..05-15) are rewritten; out-of-range rows keep
        their old values.
        """
        enwiki_file_abs = os.path.join(self.daily_dir_abs, 'enwiki.csv')
        self.create_file(enwiki_file_abs, [
            '2014-05-12,47,501',
            '2014-05-13,47,502',
            '2014-05-14,47,503',
            '2014-05-15,47,504',
            '2014-05-16,47,505',
        ])
        first_date = datetime.date(2014, 5, 13)
        last_date = datetime.date(2014, 5, 15)
        bad_dates = [
            datetime.date(2014, 5, 4)
        ]
        csv_data = {
            '2014-05-12': '2014-05-12,1,2,3',
            '2014-05-13': '2014-05-13,4,5,6',
            '2014-05-14': '2014-05-14,7,8,9',
            '2014-05-15': '2014-05-15,10,11,12',
            '2014-05-16': '2014-05-16,13,14,15',
        }
        aggregator.update_daily_csv(self.data_dir_abs, 'enwiki', csv_data,
                                    first_date, last_date, bad_dates=bad_dates,
                                    force_recomputation=True)
        self.assert_file_content_equals(enwiki_file_abs, [
            '2014-05-12,47,501',
            '2014-05-13,4,5,6',
            '2014-05-14,7,8,9',
            '2014-05-15,10,11,12',
            '2014-05-16,47,505',
        ])
    def test_daily_csv_bad_dates_without_force(self):
        """Bad dates inside the range are dropped; other rows keep old values.

        05-13 and 05-14 are bad and in range, so they disappear from the
        output; 05-16 is bad but out of range and is therefore kept.
        """
        enwiki_file_abs = os.path.join(self.daily_dir_abs, 'enwiki.csv')
        self.create_file(enwiki_file_abs, [
            '2014-05-12,47,501',
            '2014-05-13,47,502',
            '2014-05-15,47,504',
            '2014-05-16,47,505',
        ])
        first_date = datetime.date(2014, 5, 13)
        last_date = datetime.date(2014, 5, 15)
        bad_dates = [
            datetime.date(2014, 5, 13),
            datetime.date(2014, 5, 14),
            datetime.date(2014, 5, 16)
        ]
        csv_data = {
            '2014-05-12': '2014-05-12,1,2,3',
            '2014-05-14': '2014-05-14,7,8,9',
            '2014-05-15': '2014-05-15,10,11,12',
            '2014-05-16': '2014-05-16,13,14,15',
        }
        aggregator.update_daily_csv(self.data_dir_abs, 'enwiki', csv_data,
                                    first_date, last_date, bad_dates=bad_dates)
        self.assert_file_content_equals(enwiki_file_abs, [
            '2014-05-12,47,501',
            '2014-05-15,47,504',
            '2014-05-16,47,505',
        ])
def test_daily_csv_bad_dates_with_force(self):
enwiki_file_abs = os.path.join(self.daily_dir_abs, 'enwiki.csv')
self.create_file(enwiki_file_abs, [
'2014-05-12,47,501',
'2014-05-13,47,502',
'2014-05-15,47,504',
'2014-05-16,47,505',
])
first_date = datetime.date(2014, 5, 13)
last_date = datetime.date(2014, 5, 15)
bad_dates = [
datetime.date(2014, 5, 13),
datetime.date(2014, 5, 14),
datetime.date(2014, 5, 16)
]
csv_data = {
'2014-05-12': '2014-05-12,1,2,3',
'2014-05-13': '2014-05-12,4,5,6',
'2014-05-14': '2014-05-14,7,8,9',
'2014-05-15': '2014-05-15,10,11,12',
'2014-05-16': '2014-05-16,13,14,15',
}
aggregator.update_daily_csv(self.data_dir_abs, 'enwiki', csv_data,
first_date, last_date, bad_dates=bad_dates,
force_recomputation=True)
self.assert_file_content_equals(enwiki_file_abs, [
'2014-05-12,47,501',
'2014-05-15,10,11,12',
'2014-05-16,47,505',
])
| |
#!/usr/bin/env python
#
# Copyright (c) 2011 GhostBSD
#
# See COPYING for license terms.
#
# ginstall.py v 1.2 Friday, September 2012 12:27:53 Kamil Kajczynski
# Imports of the tools needed by the installer.
import os
import os.path
from subprocess import Popen, PIPE, STDOUT, call
import getpass
from time import sleep
# Paths and commands used throughout the installer.
PATH = "/tmp"
PC_SYS = "sudo pc-sysinstall"
CFG = "%s/pcinstall.cfg" % PATH
# Scratch directory for installer state.
tmp = "/tmp/.gbi"
installer = "/usr/local/etc/gbi/"
# Prefix for the pc-sysinstall backend query helper scripts.
query = "sh /usr/local/etc/gbi/backend-query/"
if not os.path.exists(tmp):
    os.makedirs(tmp)
disk_part = '%sdisk-part.sh' % query
disk_label = '%sdisk-label.sh' % query
# NOTE(review): add_part, disk_label and installer appear unused in this
# script — possibly leftovers; confirm before removing.
add_part = 'gpart add'
memory = 'sysctl hw.physmem'
disk_list = '%sdisk-list.sh' % query
FIN = """ Installation is complete. You need to restart the
computer in order to use the new installation.
You can continue to use this live CD, although
any changes you make or documents you save will
not be preserved on reboot."""
# Remove stale state left behind by a previous installer run.
if os.path.exists(CFG):
    os.remove(CFG)
if os.path.exists('%s/label' % PATH):
    os.remove('%s/label' % PATH)
if os.path.exists('%s/left' % PATH):
    os.remove('%s/left' % PATH)
# Some commonly used pieces of text:
bootMenu = """ Boot manager option\n---------------------------------
1: BSD boot Manager\n---------------------------------
2: No boot manager\n---------------------------------"""
# The beginning of the installer.
class Ginstall:
def __init__(self):
call('clear', shell=True)
cfg = open(CFG, 'w')
cfg.writelines('installMode=fresh\n')
cfg.writelines('installInteractive=no\n')
cfg.writelines('installType=FreeBSD\n')
cfg.writelines('installMedium=dvd\n')
cfg.writelines('packageType=livecd\n')
# choosing disk part.
print ' Disk Disk Name'
p = Popen(disk_list, shell=True, stdout=PIPE, close_fds=True)
for line in p.stdout:
print '-----------------------------------'
print ' %s' % line.rstrip()
print '-----------------------------------'
DISK = raw_input("\n\n Select a disk to install GhostBSD: ")
cfg.writelines('disk0=%s\n' % DISK)
# Install option part.
while True:
call('clear', shell=True)
print ' Installation Options'
print '---------------------------------------------'
print ' 1: Use the entire disk %s' % DISK
print '---------------------------------------------'
print ' 2: Partition disk %s with auto labeling' % DISK
print '---------------------------------------------'
print ' 3: Customize disk %s partition (Advanced)' % DISK
print '---------------------------------------------'
INSTALL = raw_input('\n\nChoose an option(1, 2, 3): ')
call('clear', shell=True)
# First option: installing in the entire disk.
if INSTALL == "1":
while True:
# Boot manger selection
call('clear', shell=True)
print bootMenu
BOOT = raw_input('\n\nChose an option(1, 2): ')
if BOOT == '1':
BMANAGER = 'bootManager=bsd\n'
break
elif BOOT == '2':
BMANAGER = 'bootManager=none\n'
break
else:
print "Chose 1 or 2."
sleep(1)
cfg.writelines('partition=all\n')
cfg.writelines(BMANAGER)
cfg.writelines('commitDiskPart\n')
DINFO = '%sdisk-info.sh %s' % (query, DISK)
p = Popen(DINFO, shell=True, stdout=PIPE, close_fds=True)
for line in p.stdout:
NUMBER = int(line.rstrip())
ram = Popen(memory, shell=True, stdout=PIPE, close_fds=True)
mem = ram.stdout.read()
SWAP = int(mem.partition(':')[2].strip()) / (1024 * 1024)
ROOT = NUMBER - SWAP
cfg.writelines('disk0-part=UFS %s /\n' % ROOT)
cfg.writelines('disk0-part=SWAP 0 none\n')
cfg.writelines('commitDiskLabel\n')
break
# Second option slice partition With auto labels.
elif INSTALL == "2":
while True:
call('clear', shell=True)
print bootMenu
BOOT = raw_input('\n\nChoose an option(1, 2): ')
if BOOT == '1':
BMANAGER = 'bootManager=bsd\n'
break
elif BOOT == '2':
BMANAGER = 'bootManager=none\n'
break
else:
print "Choose 1 or 2."
sleep(1)
while True:
call('clear', shell=True)
print ' Slice Size System'
print '---------------------------------'
#print '---------------------------------'
DLIST = '%s %s' % (disk_part, DISK)
p = Popen(DLIST, shell=True, stdout=PIPE, close_fds=True)
for line in p.stdout:
#print '---------------------------------'
print ' %s' % line.rstrip()
print '---------------------------------'
DPART = raw_input("(d)elete (c)reate (n)ext: ")
if DPART == "d":
DELPART = raw_input('Select the slice to delete(s1, s2, s3 or s4): ')
call('gpart delete -i %s %s' % (DELPART[-1], DISK), shell=True)
#call('%s delete-part %s%s' % (PC_SYS, DISK, DELPART), shell=True)
print "delete " + DISK + DELPART
elif DPART == "c":
CPART = int(raw_input('Enter size of partition to create: '))
call('%s create-part %s %s' % (PC_SYS, DISK, CPART))
elif DPART == "n":
while True:
SLICE = raw_input("Select the slice you wish to install GhostBSD to (s1' s2' s3 or s4): ")
if SLICE == 's1' or SLICE == 's2' or SLICE == 's3' or SLICE == 's4':
cfg.writelines('partition=%s' % SLICE)
break
cfg.writelines(BMANAGER)
cfg.writelines('commitDiskPart\n')
PART = int(raw_input("Enter the size of partition %s%s: " % (DISK, SLICE)))
ram = Popen(memory, shell=True, stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=True)
mem = ram.stdout.read()
SWAP = int(mem.partition(':')[2].strip()) / (1024 * 1024)
ROOT = PART - SWAP
cfg.writelines('disk0-part=UFS %s /\n' % ROOT)
cfg.writelines('disk0-part=SWAP 0 none\n')
cfg.writelines('commitDiskLabel\n')
break
else:
print 'choose (d)elete (c)reate (n)ext'
sleep(1)
break
# 3rd option advanced partitioning.
elif INSTALL == '3':
while True:
call('clear', shell=True)
print bootMenu
BOOT = raw_input('\n\nChose an option(1, 2): ')
if BOOT == '1':
BMANAGER = 'bootManager=bsd\n'
break
elif BOOT == '2':
BMANAGER = 'bootManager=none\n'
break
while True:
call('clear', shell=True)
print ' Slice Size System'
print '---------------------------------'
#print '---------------------------------'
DLIST = '%s %s' % (disk_part, DISK)
p = Popen(DLIST, shell=True, stdout=PIPE, close_fds=True)
for line in p.stdout:
#print '---------------------------------'
print ' %s' % line.rstrip()
print '---------------------------------'
DPART = raw_input("(d)elete (c)reate (n)ext: ")
if DPART == "d":
DELPART = raw_input('Select the slice to delete(s1, s2, s3 or s4): ')
call('gpart delete -i %s %s' % (DELPART[-1], DISK), shell=True)
#call('%s delete-part %s%s' % (PC_SYS, DISK, DELPART), shell=True)
print "delete " + DISK + DELPART
elif DPART == "c":
CPART = int(raw_input('Enter size of partition to create: '))
call('%s create-part %s %s' % (PC_SYS, DISK, CPART))
elif DPART == "n":
while True:
SLICE = raw_input("Select the slice you wish to install GhostBSD to (s1' s2' s3 or s4): ")
if SLICE == 's1' or SLICE == 's2' or SLICE == 's3' or SLICE == 's4':
cfg.writelines('partition=%s' % SLICE)
break
cfg.writelines(BMANAGER)
cfg.writelines('commitDiskPart\n')
PART = int(raw_input("Enter the partition size of %s%s: " % (DISK, SLICE)))
break
while True:
call('clear', shell=True)
print ' Partition label'
print ' fs size label'
print '------------------------'
if os.path.exists('%s/label' % PATH):
r = open('%s/label' % PATH, 'r')
line = r.read()
print ' %s' % line
print '\n'
print '--------------------------'
ARN = raw_input("\n(a)dd (r)eset (n)ext: ")
else:
print '\n\n\n\n\n-------------------------'
ARN = raw_input("(a)dd (r)eset (n)ext: ")
if ARN == 'a':
call('clear', shell=True)
print 'File System'
print '1: UFS'
print '2: UFS+S'
print '3: UFS+J'
print '4: ZFS'
print '5: SWAP'
FS = raw_input("Choose an File System(1, 2, 3, 4 or 5): ")
if FS == '1':
FSYS = 'UFS'
elif FS == '2':
FSYS = 'UFS+S'
elif FS == '3':
FSYS = 'UFS+J'
elif FS == '4':
FSYS = 'ZFS'
elif FS == '5':
FSYS = 'SWAP'
call('clear', shell=True)
print 'Partition Label Size'
print '\n'
if os.path.exists('%s/left' % PATH):
r = open('%s/left' % PATH, 'r')
left = r.read()
print 'Size: %sMB left' % left
else:
print 'Size: %sMB left' % PART
print '\n'
SIZE = int(raw_input("Enter the size: "))
LEFT = PART - SIZE
lf = open('%s/left' % PATH, 'w')
lf.writelines('%s' % LEFT)
lf.close()
call('clear', shell=True)
print 'Mount Point List'
print '/ /home /root'
print '/tmp /usr /var'
print 'none for swap.'
MP = raw_input('Enter a mount point: ')
f = open('%s/label' % PATH, 'a')
f.writelines('%s %s %s \n' % (FSYS, SIZE, MP))
f.close()
elif ARN == 'r':
if os.path.exists('%s/label' % PATH):
os.remove('%s/label' % PATH)
if os.path.exists('%s/left' % PATH):
os.remove('%s/left' % PATH)
elif ARN == 'n':
r = open('%s/label' % PATH, 'r')
i = r.readlines()
linecounter = 0
for line in i:
#print line
cfg.writelines('disk0-part=%s\n' % i[linecounter].rstrip())
linecounter += 1
cfg.writelines('commitDiskLabel\n')
break
break
# Hostname and network setting.
HOSTNAME = raw_input('Type in your hostname: ')
cfg.writelines('hostname=%s\n' % HOSTNAME)
cfg.writelines('netDev=AUTO-DHCP\n')
cfg.writelines('netSaveDev=AUTO-DHCP\n')
# Root Password.
call('clear', shell=True)
print ' Root Password'
print '----------------'
while True:
RPASS = getpass.getpass("\n Password: ")
RRPASS = getpass.getpass("\n Confirm Password: ")
if RPASS == RRPASS:
cfg.writelines('rootPass=%s\n' % RPASS)
break
else:
print "Password and password confirmation don't match. Try again!"
sleep(1)
# User setting.
call('clear', shell=True)
USER = raw_input(" Username: ")
cfg.writelines('userName=%s\n' % USER)
NAME = raw_input(" Real Name: ")
cfg.writelines('userComment=%s\n' % NAME)
while True:
UPASS = getpass.getpass(" Password: ")
RUPASS = getpass.getpass(" Confirm Password: ")
if UPASS == RUPASS:
cfg.writelines('userPass=%s\n' % UPASS)
break
else:
print "Password and password confirmation don't match. Try again!"
sleep(1)
SHELL = raw_input("Shell(sh csh, tcsh, bash, rbash)- if you don't know just press Enter: ")
if SHELL == 'sh':
cfg.writelines('userShell=/bin/sh\n')
elif SHELL == 'csh':
cfg.writelines('userShell=/bin/csh\n')
elif SHELL == 'tcsh':
cfg.writelines('userShell=/bin/tcsh\n')
elif SHELL == 'bash':
cfg.writelines('userShell=/usr/local/bin/bash\n')
elif SHELL == 'rbash':
cfg.writelines('userShell=/usr/local/bin/rbash\n')
else:
cfg.writelines('userShell=/usr/local/bin/bash\n')
cfg.writelines('userHome=/home/%s\n' % USER)
cfg.writelines('userGroups=wheel,operator\n')
cfg.writelines('commitUser\n')
cfg.close()
# Starting the installation.
call('clear', shell=True)
GINSTALL = raw_input("Ready To install GhostBSD now?(yes or no): ")
if GINSTALL == "yes":
print "install"
#call("sudo umount -f /media", shell=True)
call("%s -c %s" % (PC_SYS, CFG), shell=True)
elif GINSTALL == "no":
quit()
call('clear', shell=True)
print FIN
RESTART = raw_input('Restart(yes or no): ')
if RESTART == 'yes' or RESTART == 'YES' or RESTART == 'y' or RESTART == 'Y':
call('sudo reboot', shell=True)
if RESTART == 'no' or RESTART == 'NO' or RESTART == 'n' or RESTART == 'N':
quit()
Ginstall()
| |
"""
pyboard interface
This module provides the Pyboard class, used to communicate with and
control the pyboard over a serial USB connection.
Example usage:
import pyboard
pyb = pyboard.Pyboard('/dev/ttyACM0')
pyb.enter_raw_repl()
pyb.exec('pyb.LED(1).on()')
pyb.exit_raw_repl()
To run a script from the local machine on the board and print out the results:
import pyboard
pyboard.execfile('test.py', device='/dev/ttyACM0')
This script can also be run directly. To execute a local script, use:
python pyboard.py test.py
"""
import time
import serial
import pkg_resources
from contextlib import contextmanager
import posixpath
class PyboardError(Exception):
    """Raised when communication with the pyboard fails.

    Derives from Exception (not BaseException) so generic ``except
    Exception`` handlers and cleanup code see it, per Python convention;
    BaseException is reserved for exit-style signals.
    """
    pass
class Pyboard:
    """Serial connection to a MicroPython pyboard.

    Wraps a raw-REPL session over a serial device and layers simple
    remote-execution and file-transfer helpers on top of it.
    """

    def __init__(self, serial_device):
        self.serial_device = serial_device
        self.serial = serial.Serial(serial_device)
        self.in_raw_repl = False
        self.pybkick_lib_active = False

    def __repr__(self):
        return "{}(serial_device={})".format(self.__class__.__name__, repr(self.serial_device))

    def close(self):
        self.serial.close()

    def read_until(self, min_num_bytes, ending, timeout=10):
        """Read at least *min_num_bytes*, then keep reading until *ending*
        is seen or roughly *timeout* seconds pass with no new data.
        """
        data = self.serial.read(min_num_bytes)
        timeout_count = 0
        while True:
            if self.serial.inWaiting() > 0:
                data = data + self.serial.read(self.serial.inWaiting())
                time.sleep(0.01)
                timeout_count = 0
            elif data.endswith(ending):
                break
            else:
                timeout_count += 1
                if timeout_count >= 10 * timeout:
                    break
                time.sleep(0.1)
        return data

    def enter_raw_repl(self):
        """Interrupt the board and switch it into raw-REPL mode."""
        assert not self.in_raw_repl, "raw_repl is already active!"
        self.serial.write(b'\r\x03')  # ctrl-C: interrupt any running program
        self.serial.write(b'\r\x01')  # ctrl-A: enter raw REPL
        self.serial.write(b'\x04')    # ctrl-D: soft reset
        data = self.read_until(1, b'to exit\r\n>')
        if not data.endswith(b'raw REPL; CTRL-B to exit\r\n>'):
            print(data)
            raise PyboardError('could not enter raw repl')
        self.in_raw_repl = True

    def exit_raw_repl(self):
        """Switch the board back to the friendly REPL."""
        assert self.in_raw_repl, "raw_repl was not active!"
        self.serial.write(b'\r\x02')  # ctrl-B: enter friendly REPL
        self.in_raw_repl = False

    @contextmanager
    def raw_repl(self):
        """Context manager for a raw-REPL session.

        BUGFIX: the raw REPL is now exited even when the body raises,
        so the board is not left stuck in raw mode.
        """
        self.enter_raw_repl()
        try:
            yield
        finally:
            self.exit_raw_repl()

    def eval(self, expression):
        """Execute an expression on the pyboard, returning its value back to Python.

        Only works for expressions whose repr can be eval'd locally.
        """
        assert self.in_raw_repl, "raw_repl must be active!"
        eval_expression = '__import__(\'sys\').stdout.write(repr({}))'.format(expression)
        returned_expression = self.exec(eval_expression)
        try:
            return eval(returned_expression)
        except SyntaxError:
            # BUGFIX: removed a leftover `import pdb; pdb.set_trace()`
            # debugging hook that froze non-interactive callers.
            raise RuntimeError("Invalid python returned: %s" % returned_expression)

    def exec(self, command):
        """Run *command* on the board, returning its raw stdout bytes.

        The command is streamed in 32-byte chunks; ctrl-D submits it.
        """
        command_bytes = bytes(command, encoding='ascii')
        for i in range(0, len(command_bytes), 32):
            self.serial.write(command_bytes[i:min(i + 32, len(command_bytes))])
            time.sleep(0.01)
        self.serial.write(b'\x04')
        data = self.serial.read(2)
        if data != b'OK':
            raise PyboardError('could not exec command')
        data = self.read_until(2, b'\x04>')
        if not data.endswith(b'\x04>'):
            print(data)
            raise PyboardError('timeout waiting for EOF reception')
        if data.startswith(b'Traceback') or data.startswith(b' File '):
            raise PyboardError(data, command)
        return data[:-2]

    def execfile(self, filename):
        """Run the local script *filename* on the board."""
        with open(filename) as f:
            pyfile = f.read()
        return self.exec(pyfile)

    def get_time(self):
        """Return the board's RTC time as seconds since midnight."""
        t = str(self.eval('pyb.RTC().datetime()'), encoding='ascii')[1:-1].split(', ')
        return int(t[4]) * 3600 + int(t[5]) * 60 + int(t[6])

    def ls(self, dir='.'):
        """List the contents of *dir* on the board."""
        statement = '__import__("os").listdir("{dir}")'.format(dir=dir)
        return self.eval(statement)

    def read_file(self, file_path):
        """Return the contents of a file on the board."""
        expression = "open({file_path}).read()".format(
            file_path=repr(file_path)
        )
        return self.eval(expression)

    def write_file(self, file_path, data='', mode='w'):
        """Write *data* to *file_path* on the board; return bytes written.

        Intentionally avoids context managers or auto-closing constructs so
        the write itself stays a single eval-able expression that returns
        the number of bytes written.
        """
        self.exec('tmpfile = open({file_path},{mode})'.format(
            file_path=repr(file_path),
            mode=repr(mode))
        )
        expression = "tmpfile.write({data})".format(
            data=repr(data)
        )
        result = self.eval(expression)
        self.exec('tmpfile.close()')
        return result

    def mkdir(self, dir_path):
        """Make a directory on the board if it does not exist."""
        if not self.file_exists(dir_path):
            statement = 'import os;os.mkdir({dir_path})'.format(dir_path=repr(dir_path))
            self.exec(statement)

    def file_exists(self, file_path):
        """Return True if *file_path* exists on the board."""
        dir_path, file_name = posixpath.split(file_path)
        try:
            listing = self.ls(dir_path)
        except PyboardError:
            return False
        return file_name in listing

    def rm(self, file_path):
        """Delete a file on the board."""
        statement = '__import__("os").unlink({file_path})'.format(file_path=repr(file_path))
        return self.exec(statement)

    def rmdir(self, dir_path):
        # Not implemented yet; kept for interface symmetry with rm().
        pass

    @contextmanager
    def pybkick_lib(self, lib_file='pybkick_lib.py'):
        """Push a helpful library of code that makes remote file operations
        much easier. Also activates a raw_repl. The library file is removed
        from the board again on exit.
        """
        from pybkick import __version__ as pybkick_version
        lib_content = pkg_resources.resource_string('pybkick.micropython', lib_file).decode('utf-8').format(
            version=pybkick_version,
        )
        with self.raw_repl():
            try:
                self.write_file(lib_file, lib_content)
                self.pybkick_lib_active = True
                yield
            finally:
                self.rm(lib_file)
                self.pybkick_lib_active = False
def execfile(filename, device='/dev/ttyACM0'):
    """Run the local script *filename* on the board at *device* and echo its output."""
    board = Pyboard(device)
    board.enter_raw_repl()
    result = board.execfile(filename)
    print(str(result, encoding='ascii'), end='')
    board.exit_raw_repl()
    board.close()
def run_test(device):
    """Interactive hardware smoke test for a connected pyboard.

    Requires a physical board and an operator: the USR switch must be
    pressed twice and the board tilted in four directions while the LEDs
    track progress.
    """
    pyb = Pyboard(device)
    pyb.enter_raw_repl()
    print('opened device {}'.format(device))
    pyb.exec('import pyb') # module pyb no longer imported by default, required for pyboard tests
    print('seconds since boot:', pyb.get_time())
    pyb.exec('def apply(l, f):\r\n for item in l:\r\n f(item)\r\n')
    pyb.exec('leds=[pyb.LED(l) for l in range(1, 5)]')
    pyb.exec('apply(leds, lambda l:l.off())')
    ## USR switch test
    pyb.exec('switch = pyb.Switch()')
    for i in range(2):
        print("press USR button")
        # Wait for release, then for the next press.
        pyb.exec('while switch(): pyb.delay(10)')
        pyb.exec('while not switch(): pyb.delay(10)')
    print('USR switch passed')
    ## accel test
    # NOTE(review): `if True:` looks like a leftover debug toggle for
    # skipping the accelerometer section — confirm before removing.
    if True:
        print("hold level")
        pyb.exec('accel = pyb.Accel()')
        pyb.exec('while abs(accel.x()) > 10 or abs(accel.y()) > 10: pyb.delay(10)')
        print("tilt left")
        pyb.exec('while accel.x() > -10: pyb.delay(10)')
        pyb.exec('leds[0].on()')
        print("tilt forward")
        pyb.exec('while accel.y() < 10: pyb.delay(10)')
        pyb.exec('leds[1].on()')
        print("tilt right")
        pyb.exec('while accel.x() < 10: pyb.delay(10)')
        pyb.exec('leds[2].on()')
        print("tilt backward")
        pyb.exec('while accel.y() > -10: pyb.delay(10)')
        pyb.exec('leds[3].on()')
        print('accel passed')
    print('seconds since boot:', pyb.get_time())
    pyb.exec('apply(leds, lambda l:l.off())')
    pyb.exit_raw_repl()
    pyb.close()
def main():
    """Command-line entry point: optionally self-test, then run the given scripts."""
    import argparse
    parser = argparse.ArgumentParser(description='Run scripts on the pyboard.')
    parser.add_argument('--device', default='/dev/ttyACM0', help='the serial device of the pyboard')
    parser.add_argument('--test', action='store_true', help='run a small test suite on the pyboard')
    parser.add_argument('files', nargs='*', help='input files')
    args = parser.parse_args()
    if args.test:
        run_test(device=args.device)
    for script in args.files:
        execfile(script, device=args.device)
# Allow use both as an importable module and as a command-line tool.
if __name__ == "__main__":
    main()
| |
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Holger Nahrstaedt
# See COPYING for license details.
"""
Helper function for wavelet denoising
"""
from __future__ import division, print_function, absolute_import
import numpy as np
import sys as sys
from ._pyyawt import *
__all__ = ['orthfilt', 'biorfilt', 'dbwavf', 'coifwavf', 'symwavf', 'legdwavf', 'biorwavf', 'rbiorwavf', 'wfilters', 'wmaxlev', 'dwtmode']
def orthfilt(w):
    """
    orthfilt is an utility function for obtaining the analysis and synthesis filter set of a given orthogonal wavelet, including haar, daubechies, coiflets and symlets.

    Parameters
    ----------
    w: array_like
        scaling filter

    Returns
    -------
    Lo_D: array_like
        lowpass analysis filter
    Hi_D: array_like
        highpass analysis filter
    Lo_R: array_like
        lowpass synthesis filter
    Hi_R: array_like
        highpass synthesis filter

    Examples
    --------
    F = dbwavf("db2")
    [lo_d,hi_d,lo_r,hi_r]=orthfilt(F)
    """
    # All four filters share the scaling filter's length; the C backend
    # fills the pre-allocated arrays in place. (Removed unused local m1.)
    n1 = w.shape[0]
    Lo_D = np.zeros(n1, dtype=np.float64)
    Hi_D = np.zeros(n1, dtype=np.float64)
    Lo_R = np.zeros(n1, dtype=np.float64)
    Hi_R = np.zeros(n1, dtype=np.float64)
    _orthfilt(w, Lo_D, Hi_D, Lo_R, Hi_R)
    return Lo_D, Hi_D, Lo_R, Hi_R
def biorfilt(df, rf):
    """
    biorfilt is an utility function for obtaining the analysis and synthesis filter set of given bi-orthogonal spline wavelets. DF and RF should be outputs of biorwavf with the same length.

    Parameters
    ----------
    df: array_like
        analysis scaling filter
    rf: array_like
        synthesis scaling filter

    Returns
    -------
    Lo_D: array_like
        lowpass analysis filter
    Hi_D: array_like
        highpass analysis filter
    Lo_R: array_like
        lowpass synthesis filter
    Hi_R: array_like
        highpass synthesis filter

    Examples
    --------
    RF,DF = biorwavf('bior3.3')
    [lo_d,hi_d,lo_r,hi_r]=biorfilt(DF,RF)
    """
    # The C backend fills the pre-allocated arrays in place.
    # (Removed unused local m1.)
    n1 = df.shape[0]
    Lo_D = np.zeros(n1, dtype=np.float64)
    Hi_D = np.zeros(n1, dtype=np.float64)
    Lo_R = np.zeros(n1, dtype=np.float64)
    Hi_R = np.zeros(n1, dtype=np.float64)
    _biorfilt(df, rf, Lo_D, Hi_D, Lo_R, Hi_R)
    return Lo_D, Hi_D, Lo_R, Hi_R
def dbwavf(wname):
    """
    dbwavf is an utility function for obtaining the scaling filter of a daubechies wavelet.

    Parameters
    ----------
    wname: str
        wavelet name, 'db1' to 'db36'

    Returns
    -------
    F: array_like
        scaling filter

    Examples
    --------
    F = dbwavf("db2")
    """
    wname_b = wname.encode()  # encode once instead of three times
    ret = _wavelet_parser(wname_b)
    if ret[0] != PYYAWT_DAUBECHIES:
        raise Exception("Wrong wavelet name!")
    lowPass = np.zeros(_wfilters_length(wname_b), dtype=np.float64)
    _dbwavf(wname_b, b'Lo_R', lowPass)
    return lowPass
def coifwavf(wname):
    """
    coifwavf is an utility function for obtaining the scaling filter of a coiflets wavelet.

    Parameters
    ----------
    wname: str
        wavelet name, 'coif1' to 'coif5'

    Returns
    -------
    F: array_like
        scaling filter

    Examples
    --------
    F = coifwavf('coif3')
    """
    name = wname.encode()
    if _wavelet_parser(name)[0] != PYYAWT_COIFLETS:
        raise Exception("Wrong wavelet name!")
    coeffs = np.zeros(_wfilters_length(name), dtype=np.float64)
    _coifwavf(name, b'Lo_R', coeffs)
    return coeffs
def symwavf(wname):
    """
    symwavf is an utility function for obtaining the scaling filter of a symlets wavelet.

    Parameters
    ----------
    wname: str
        wavelet name, 'sym2' to 'sym20'

    Returns
    -------
    F: array_like
        scaling filter

    Examples
    --------
    F = symwavf('sym7')
    """
    name = wname.encode()
    if _wavelet_parser(name)[0] != PYYAWT_SYMLETS:
        raise Exception("Wrong wavelet name!")
    coeffs = np.zeros(_wfilters_length(name), dtype=np.float64)
    _symwavf(name, b'Lo_R', coeffs)
    return coeffs
def legdwavf(wname):
    """
    legdwavf is an utility function for obtaining the scaling filter of a legendre wavelet.

    Parameters
    ----------
    wname: str
        wavelet name, 'legd1' to 'legd9'

    Returns
    -------
    F: array_like
        scaling filter

    Examples
    --------
    F = legdwavf('legd7')
    """
    # Docstring example fixed: it previously showed legdwavf('sym7'),
    # which is not a legendre wavelet name.
    wname_b = wname.encode()  # encode once instead of three times
    ret = _wavelet_parser(wname_b)
    if ret[0] != PYYAWT_LEGENDRE:
        raise Exception("Wrong wavelet name!")
    lowPass = np.zeros(_wfilters_length(wname_b), dtype=np.float64)
    _legdwavf(wname_b, b'Lo_R', lowPass)
    return lowPass
def biorwavf(wname):
    """
    biorwavf is an utility function for obtaining the twin scaling filters of a bi-orthogonal spline wavelet, including bior1.1, bior1.3, bior1.5, bior2.2, bior2.4, bior2.6, bior2.8, bior3.1, bior3.3, bior3.5, bior3.7, bior3.9, bior4.4, bior5.5 and bior6.8. Although the twin filters have different length, zeros have been appended to keep the two filters the same length.

    Parameters
    ----------
    wname: str
        wavelet name, 'bior1.1' to 'bior6.8'

    Returns
    -------
    RF: array_like
        synthesis scaling filter
    DF: array_like
        analysis scaling filter

    Examples
    --------
    RF,DF = biorwavf('bior3.3');
    """
    encoded = wname.encode()
    if _wavelet_parser(encoded)[0] != PYYAWT_SPLINE_BIORTH:
        raise Exception("Wrong wavelet name!")
    length = _wfilters_length(encoded)
    RF = np.zeros(length, dtype=np.float64)
    DF = np.zeros(length, dtype=np.float64)
    _biorwavf(encoded, b'Lo_R', False, RF)
    _biorwavf(encoded, b'Lo_D', True, DF)
    return RF, DF
def rbiorwavf(wname):
    """
    rbiorwavf is an utility function for obtaining the twin scaling filters of a reverse bi-orthogonal spline wavelet. Although the twin filters have different length, zeros have been appended to keep the two filters the same length. rbiorwavf reverses the results of biorwavf.

    Parameters
    ----------
    wname: str
        wavelet name, 'rbior1.1' to 'rbior6.8'

    Returns
    -------
    RF: array_like
        synthesis scaling filter
    DF: array_like
        analysis scaling filter

    Examples
    --------
    [RF,DF]=rbiorwavf('rbior3.3')
    """
    wname_b = wname.encode()
    ret = _wavelet_parser(wname_b)
    if ret[0] != PYYAWT_SPLINE_RBIORTH:
        raise Exception("Wrong wavelet name!")
    # Compute the common filter length once (consistent with biorwavf,
    # which previously did this while rbiorwavf queried the backend twice).
    filterLength = _wfilters_length(wname_b)
    RF = np.zeros(filterLength, dtype=np.float64)
    DF = np.zeros(filterLength, dtype=np.float64)
    _rbiorwavf(wname_b, b'Lo_R', False, RF)
    _rbiorwavf(wname_b, b'Lo_D', True, DF)
    return RF, DF
def wfilters(wname, filterType=None):
    """wfilters is an utility function for obtaining an analysis and synthesis filter set.

    Calling Sequence
    ----------------
    [Lo_D,Hi_D,Lo_R,Hi_R]=wfilters(wname)
    [Lo_D,Hi_D]=wfilters(wname,'d')
    [Lo_R,Hi_R]=wfilters(wname,'r')
    [Lo_D,Lo_R]=wfilters(wname,'l')
    [Hi_D,Hi_R]=wfilters(wname,'h')

    Parameters
    ----------
    wname: str
        wavelet name: haar ("haar"), daubechies ("db1" to "db20"), coiflets ("coif1" to "coif5"), symlets ("sym2" to "sym20"), legendre ("leg1" to "leg9"), bathlets ("bath4.0" to "bath4.15" and "bath6.0" to "bath6.15"), dmey ("dmey"), beylkin ("beylkin"), vaidyanathan ("vaidyanathan"), biorthogonal B-spline wavelets ("bior1.1" to "bior6.8"), "rbior1.1" to "rbior6.8"
    filterType: str, optional
        None (all four filters), 'd' (analysis), 'r' (synthesis),
        'l' (lowpass) or 'h' (highpass)

    Returns
    -------
    Lo_D: array_like
        lowpass analysis filter
    Hi_D: array_like
        highpass analysis filter
    Lo_R: array_like
        lowpass synthesis filter
    Hi_R: array_like
        highpass synthesis filter

    Examples
    --------
    [lo_d,hi_d,lo_r,hi_r]=wfilters('db2')
    """
    wname_b = wname.encode()  # encode once; reused throughout
    ret_family, ret_member = _wavelet_parser(wname_b)
    if (np.any(ret_family == np.array([PYYAWT_FARRAS, PYYAWT_KINGSBURYQ, PYYAWT_NOT_DEFINED]))):
        raise Exception("Wrong wavelet name!")
    filterLength = _wfilters_length(wname_b)
    Lo_D = np.zeros(filterLength, dtype=np.float64)
    Hi_D = np.zeros(filterLength, dtype=np.float64)
    Lo_R = np.zeros(filterLength, dtype=np.float64)
    Hi_R = np.zeros(filterLength, dtype=np.float64)
    # Dispatch table replacing a long elif chain: family -> (generator,
    # whether the generator takes an extra bool argument). The unreachable
    # FARRAS/KINGSBURYQ branches were dropped (both raise above), and the
    # unused `flow` bookkeeping variable was removed.
    generators = {
        PYYAWT_DAUBECHIES: (_dbwavf, False),
        PYYAWT_COIFLETS: (_coifwavf, False),
        PYYAWT_SYMLETS: (_symwavf, False),
        PYYAWT_SPLINE_BIORTH: (_biorwavf, True),
        PYYAWT_BEYLKIN: (_beylkinwavf, False),
        PYYAWT_VAIDYANATHAN: (_vaidyanathanwavf, False),
        PYYAWT_DMEY: (_dmeywavf, False),
        PYYAWT_BATHLETS: (_bathletswavf, False),
        PYYAWT_LEGENDRE: (_legendrewavf, False),
        PYYAWT_SPLINE_RBIORTH: (_rbiorwavf, True),
        PYYAWT_HAAR: (_haarwavf, False),
    }
    if ret_family in generators:
        gen, has_flag = generators[ret_family]
        for tag, arr in ((b'Lo_D', Lo_D), (b'Hi_D', Hi_D),
                         (b'Lo_R', Lo_R), (b'Hi_R', Hi_R)):
            if has_flag:
                gen(wname_b, tag, False, arr)
            else:
                gen(wname_b, tag, arr)
    if (filterType is None):
        return Lo_D, Hi_D, Lo_R, Hi_R
    elif (filterType == 'd'):
        return Lo_D, Hi_D
    elif (filterType == 'r'):
        return Lo_R, Hi_R
    elif (filterType == 'l'):
        return Lo_D, Lo_R
    elif (filterType == 'h'):
        return Hi_D, Hi_R
    else:
        raise Exception("Wrong input!")
def wmaxlev(signalLength, wname):
    """
    Maximum wavelet decomposition level calculation utility.

    Parameters
    ----------
    signalLength: int or pair of int
        Length of a 1-D signal, or (rows, cols) of a 2-D matrix.
    wname: str
        Wavelet name, e.g. haar ("haar"), daubechies ("db1" to "db20"),
        coiflets ("coif1" to "coif5"), symlets ("sym2" to "sym20"),
        legendre ("leg1" to "leg9"), bathlets ("bath4.0" to "bath4.15" and
        "bath6.0" to "bath6.15"), dmey ("dmey"), beylkin ("beylkin"),
        vaidyanathan ("vaidyanathan"), biorthogonal B-spline wavelets
        ("bior1.1" to "bior6.8"), "rbior1.1" to "rbior6.8".

    Returns
    -------
    L: int
        Maximum decomposition level (0 when signalLength has more than
        two dimensions).

    Examples
    --------
    L=wmaxlev(100,'db5')
    """
    n_dims = np.size(signalLength)
    if n_dims == 1:
        filter_len = _wfilters_length(wname.encode())
        level, ok = _wave_len_validate(signalLength, filter_len)
        if not ok:
            raise Exception("Unrecognized Input Pattern or parameter not valid for the algorithm! Please refer to help pages!\n")
        return level
    if n_dims == 2:
        filter_len = _wfilters_length(wname.encode())
        # Validate each matrix dimension independently and take the smaller level.
        row_level, row_ok = _wave_len_validate(signalLength[0], filter_len)
        if not row_ok:
            raise Exception("The wavelet you select is not appropriate for that row size of the matrix!\n")
        col_level, col_ok = _wave_len_validate(signalLength[1], filter_len)
        if not col_ok:
            raise Exception("The wavelet you select is not appropriate for that column size of the matrix!\n")
        return np.min([row_level, col_level])
    return 0
def dwtmode(mode=None, nodisp=None):
    """
    Display or change the DWT signal-extension mode.

    Parameters
    ----------
    mode: str or int, optional
        None or 'status' (with nodisp=None) to display the current mode;
        one of 'zpd', 'symh'('sym'), 'symw', 'asymh', 'asymw', 'sp0',
        'sp1', 'ppd', 'per' — or the matching PYYAWT_* constant — to
        change the mode.
    nodisp: str, optional
        None, or 'nodisp' together with mode='status' to return the
        current mode name instead of printing it.

    Returns
    -------
    mode: str
        Current extension mode name (only when called as
        dwtmode('status', 'nodisp')).

    Examples
    --------
    dwtmode()
    dwtmode('status')
    mode=dwtmode('status','nodisp')
    dwtmode(mode)
    """
    mode_strings = np.array(['zpd','symh', 'symw', 'asymh', 'asymw', 'sp0', 'sp1', 'ppd', 'per'])
    modes = np.array([PYYAWT_ZPD, PYYAWT_SYMH, PYYAWT_SYMW, PYYAWT_ASYMH, PYYAWT_ASYMW, PYYAWT_SP0,
                      PYYAWT_SP1, PYYAWT_PPD, PYYAWT_PER])
    if (mode is None or mode == "status" and nodisp is None):
        # Query the active mode and pretty-print it.
        dwtmode = _getdwtMode()
        if (dwtmode == PYYAWT_ZPD):
            print("** DWT Extension Mode: Zero Padding **\n")
        elif (dwtmode == PYYAWT_SYMH):
            print("** DWT Extension Mode: Half Symmetrization **\n")
        elif (dwtmode == PYYAWT_SYMW):
            print("** DWT Extension Mode: Whole Symmetrization **\n")
        elif (dwtmode == PYYAWT_ASYMH):
            print("** DWT Extension Mode: Half Asymmetrization **\n")
        elif (dwtmode == PYYAWT_ASYMW):
            print("** DWT Extension Mode: Whole Asymmetrization **\n")
        elif (dwtmode == PYYAWT_SP0):
            print("** DWT Extension Mode: order 0 smooth padding **\n")
        elif (dwtmode == PYYAWT_SP1):
            print("** DWT Extension Mode: order 1 smooth padding **\n")
        elif (dwtmode == PYYAWT_PPD):
            print("** DWT Extension Mode: Periodic Padding**\n")
        elif (dwtmode == PYYAWT_PER):
            print("** DWT Extension Mode: Periodization **\n")
    elif (np.any(mode == modes)):
        # mode given as a PYYAWT_* constant: translate to its string name.
        # Bug fix: np.where(...)[0] is an index *array*; indexing with it
        # returned a length-1 ndarray, which has no .encode().  Take the
        # first index so a scalar numpy string (a str subclass) is extracted.
        errCode = _dwtWrite(mode_strings[np.where(mode == modes)[0][0]].encode())
        if (errCode > 0):
            raise Exception("DWT Extension Mode error")
    elif (np.any(mode == mode_strings)):
        # mode given as a mode-name string: pass it through directly.
        errCode = _dwtWrite(mode.encode())
        if (errCode > 0):
            raise Exception("DWT Extension Mode error")
    elif (mode == "status" and nodisp == "nodisp"):
        # Silent status query: map the active constant back to its name.
        dwtmode = _getdwtMode()
        modestr = ""
        if (dwtmode == PYYAWT_ZPD):
            modestr = 'zpd'
        elif (dwtmode == PYYAWT_SYMH):
            modestr = 'symh'
        elif (dwtmode == PYYAWT_SYMW):
            modestr = 'symw'
        elif (dwtmode == PYYAWT_ASYMH):
            modestr = 'asymh'
        elif (dwtmode == PYYAWT_ASYMW):
            modestr = 'asymw'
        elif (dwtmode == PYYAWT_SP0):
            modestr = 'sp0'
        elif (dwtmode == PYYAWT_SP1):
            modestr = 'sp1'
        elif (dwtmode == PYYAWT_PPD):
            modestr = 'ppd'
        elif (dwtmode == PYYAWT_PER):
            modestr = 'per'
        return modestr
    # NOTE: an unrecognized mode is silently ignored (original behaviour kept).
| |
from __future__ import division, print_function, absolute_import
import time
import sys
# Verify curses module for Windows and Notebooks Support.
# clear_output only exists under IPython/Jupyter; its absence is detected
# later via a NameError probe in TermLogger.__init__.
try:
    from IPython.core.display import clear_output
except Exception:
    # Bug fix: was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt raised during import.  Keep the best-effort intent,
    # but only catch ordinary exceptions.
    pass
CURSES_SUPPORTED = True
try:
    import curses
except Exception:
    print("curses is not supported on this machine (please install/reinstall curses for an optimal experience)")
    CURSES_SUPPORTED = False
class Callback(object):
    """ Callback base class.

    Declares every hook the trainer invokes around the training loop; each
    hook receives the trainer's mutable ``training_state`` object.  All
    default implementations are no-ops, so subclasses override only the
    hooks they need.
    """
    def __init__(self):
        pass
    def on_train_begin(self, training_state):
        # Invoked once, before the first epoch.
        pass
    def on_epoch_begin(self, training_state):
        pass
    def on_batch_begin(self, training_state):
        pass
    def on_sub_batch_begin(self, training_state):
        pass
    def on_sub_batch_end(self, training_state, train_index=0):
        # train_index identifies which training op finished its sub-batch.
        pass
    def on_batch_end(self, training_state, snapshot=False):
        # snapshot=True marks a step whose state should be persisted/logged.
        pass
    def on_epoch_end(self, training_state):
        pass
    def on_train_end(self, training_state):
        pass
class ChainCallback(Callback):
    """Fans every callback hook out to an ordered list of child callbacks."""
    def __init__(self, callbacks=None):
        # Bug fix: the original signature was `callbacks=[]` — a mutable
        # default argument, so every ChainCallback constructed without
        # arguments shared ONE list (adding a callback to one chain silently
        # added it to all).  A None sentinel keeps the call sites identical
        # while giving each instance its own list.
        self.callbacks = callbacks if callbacks is not None else []
    def on_train_begin(self, training_state):
        for callback in self.callbacks:
            callback.on_train_begin(training_state)
    def on_epoch_begin(self, training_state):
        for callback in self.callbacks:
            callback.on_epoch_begin(training_state)
    def on_batch_begin(self, training_state):
        for callback in self.callbacks:
            callback.on_batch_begin(training_state)
    def on_sub_batch_begin(self, training_state):
        for callback in self.callbacks:
            callback.on_sub_batch_begin(training_state)
    def on_sub_batch_end(self, training_state, train_index=0):
        for callback in self.callbacks:
            callback.on_sub_batch_end(training_state, train_index)
    def on_batch_end(self, training_state, snapshot=False):
        for callback in self.callbacks:
            callback.on_batch_end(training_state, snapshot)
    def on_epoch_end(self, training_state):
        for callback in self.callbacks:
            callback.on_epoch_end(training_state)
    def on_train_end(self, training_state):
        for callback in self.callbacks:
            callback.on_train_end(training_state)
    def add(self, callback):
        """Append *callback*, rejecting anything that is not a Callback."""
        if not isinstance(callback, Callback):
            raise Exception(str(callback) + " is an invalid Callback object")
        self.callbacks.append(callback)
class TermLogger(Callback):
    """Live terminal/notebook progress display.

    Keeps one metrics dict per registered training op in ``self.data`` and
    repaints a multi-line status block in place: with ANSI escape sequences
    when curses is available, or IPython's ``clear_output`` inside notebooks.
    """
    def __init__(self):
        self.data = []                   # one metrics dict per training op (see add())
        self.has_ipython = True
        self.display_type = "multi"      # "single" suppresses the avg-acc column
        self.global_data_size = 0        # total training samples across all ops
        self.global_val_data_size = 0    # total validation samples across all ops
        self.snapped = False             # True once a permanent snapshot line was printed
        global CURSES_SUPPORTED
        if CURSES_SUPPORTED:
            try:
                curses.setupterm()
                # 'civis' hides the terminal cursor while the block repaints.
                sys.stdout.write(curses.tigetstr('civis').decode())
            except Exception:
                CURSES_SUPPORTED = False
        # clear_output only exists if the module-level IPython import worked.
        try:
            clear_output
        except NameError:
            self.has_ipython = False
    def add(self, data_size, val_size=0, metric_name=None, name=None):
        """Register one training op whose progress should be displayed."""
        if not metric_name: metric_name = 'acc'
        self.data.append({
            'name': name if name else "Train op. " + str(len(self.data)),
            'metric_name': metric_name,
            'data_size': data_size,
            'epoch': 0,
            'step': 0,
            'val_size': val_size,
            'loss': None,
            'acc': None,
            'val_loss': None,
            'val_acc': None
        })
        self.global_data_size += data_size
        self.global_val_data_size += val_size
    def on_epoch_begin(self, training_state):
        # Reset per-epoch timing accumulators.
        training_state.step_time = time.time()
        training_state.step_time_total = 0.
    def on_epoch_end(self, training_state):
        pass
    def on_batch_begin(self, training_state):
        training_state.step_time = time.time()
    def on_batch_end(self, training_state, snapshot=False):
        training_state.step_time_total += time.time() - training_state.step_time
        if snapshot:
            # Permanent log line (no cursor rewind afterwards).
            self.snapshot_termlogs(training_state)
        else:
            # In-place repaint of the status block.
            self.print_termlogs(training_state)
    def on_sub_batch_start(self, training_state):
        # NOTE(review): the Callback protocol's hook is named
        # on_sub_batch_begin; this "_start" spelling is never invoked by the
        # trainer — confirm whether it is dead code.
        pass
    def on_sub_batch_end(self, training_state, train_index=0):
        # Copy the op-specific metrics out of the shared training state.
        self.data[train_index]['loss'] = training_state.loss_value
        self.data[train_index]['acc'] = training_state.acc_value
        self.data[train_index]['val_loss'] = training_state.val_loss
        self.data[train_index]['val_acc'] = training_state.val_acc
        self.data[train_index]['epoch'] = training_state.epoch
        self.data[train_index]['step'] = training_state.current_iter
    def on_train_begin(self, training_state):
        print("---------------------------------")
        print("Training samples: " + str(self.global_data_size))
        print("Validation samples: " + str(self.global_val_data_size))
        print("--")
        if len(self.data) == 1:
            self.display_type = "single"
    def on_train_end(self, training_state):
        # Reset caret to last position
        to_be_printed = ""
        if CURSES_SUPPORTED: #if not self.has_ipython #TODO:check bug here
            # "\033[B" moves the cursor one line down, past the status block.
            for i in range(len(self.data) + 2):
                to_be_printed += "\033[B"
        if not self.snapped:
            to_be_printed += "--\n"
        sys.stdout.write(to_be_printed)
        sys.stdout.flush()
        # Set caret visible if possible
        if CURSES_SUPPORTED:
            sys.stdout.write(curses.tigetstr('cvvis').decode())
    def termlogs(self, step=0, global_loss=None, global_acc=None, step_time=None):
        """Build the complete multi-line status string for the current state."""
        termlogs = "Training Step: " + str(step) + "  "
        if global_loss:
            termlogs += " | total loss: \033[1m\033[32m" + \
                "%.5f" % global_loss + "\033[0m\033[0m"
        if global_acc and not self.display_type == "single":
            termlogs += " - avg acc: %.4f" % float(global_acc)
        if step_time:
            termlogs += " | time: %.3fs" % step_time
        termlogs += "\n"
        # One line per training op, each showing its own metrics.
        for i, data in enumerate(self.data):
            print_loss = ""
            print_acc = ""
            print_val_loss = ""
            print_val_acc = ""
            if data['loss'] is not None:
                print_loss = " | loss: " + "%.5f" % data['loss']
            if data['acc'] is not None:
                print_acc = " - " + data['metric_name'] + ": " + \
                            "%.4f" % data['acc']
            if data['val_loss'] is not None:
                print_val_loss = " | val_loss: " + "%.5f" % data['val_loss']
            if data['val_acc'] is not None:
                print_val_acc = " - val_acc: " + "%.4f" % data['val_acc']
            # fix diplay, if step reached the whole epoch, display epoch - 1, as epoch has been updated
            print_epoch = data['epoch']
            # Smoothing display, so we show display at step + 1 to show data_size/data_size at end
            # Step counter is zero-padded to the width of data_size.
            print_step = " -- iter: " + \
                         ("%0" + str(len(str(data['data_size']))) +
                          "d") % data['step'] + "/" + str(data['data_size'])
            if data['step'] == 0:
                print_epoch = data['epoch']
                # print_step = ""
                print_step = " -- iter: " + ("%0" + str(
                    len(str(data['data_size']))) + "d") % 0 \
                    + "/" + str(data['data_size'])
            # "\x1b[2K\r" erases the line and returns the carriage so repeated
            # prints repaint the same line in place.
            termlogs += "\x1b[2K\r| " + data['name'] + " | epoch: " + \
                        "%03d" % print_epoch + print_loss + print_acc + \
                        print_val_loss + print_val_acc + print_step + "\n"
        return termlogs
    def print_termlogs(self, training_state):
        """Repaint the status block in place (ANSI rewind or clear_output)."""
        termlogs = self.termlogs(
            step=training_state.step,
            global_loss=training_state.global_loss,
            global_acc=training_state.global_acc,
            step_time=training_state.step_time_total)
        if self.has_ipython and not CURSES_SUPPORTED:
            clear_output(wait=True)
        else:
            # "\033[A" moves the cursor back up over the block just written.
            for i in range(len(self.data) + 1):
                termlogs += "\033[A"
        sys.stdout.write(termlogs)
        sys.stdout.flush()
    def snapshot_termlogs(self, training_state):
        """Write the status block permanently, followed by a separator."""
        termlogs = self.termlogs(
            step=training_state.step,
            global_loss=training_state.global_loss,
            global_acc=training_state.global_acc,
            step_time=training_state.step_time_total)
        termlogs += "--\n"
        sys.stdout.write(termlogs)
        sys.stdout.flush()
        self.snapped = True
class ModelSaver(Callback):
    """Persists model snapshots during training.

    Calls *save_func* at each epoch end (when *snapshot_epoch* is set), at
    snapshot batches (when *snapshot_step* is set), and writes a separate
    "best" snapshot whenever validation accuracy improves.
    """
    def __init__(self, save_func, snapshot_path, best_snapshot_path,
                 best_val_accuracy, snapshot_step, snapshot_epoch):
        self.save_func = save_func
        self.snapshot_path = snapshot_path
        self.snapshot_epoch = snapshot_epoch
        self.best_snapshot_path = best_snapshot_path
        self.best_val_accuracy = best_val_accuracy
        self.snapshot_step = snapshot_step
    def on_epoch_begin(self, training_state):
        pass
    def on_epoch_end(self, training_state):
        if self.snapshot_epoch:
            self.save(training_state.step)
    def on_batch_begin(self, training_state):
        pass
    def on_batch_end(self, training_state, snapshot=False):
        # Idiom fix: was `snapshot & (...)` — bitwise AND on booleans.  The
        # result was the same for bools, but logical `and` expresses the
        # intent and short-circuits.
        if snapshot and (self.snapshot_step is not None):
            self.save(training_state.step)
        # Track the best validation accuracy seen and snapshot on improvement.
        if None not in (self.best_snapshot_path, self.best_val_accuracy, training_state.val_acc):
            if training_state.val_acc > self.best_val_accuracy:
                self.best_val_accuracy = training_state.val_acc
                # Encode accuracy into the filename as an integer, e.g. 0.9713 -> 9713.
                self.save_best(int(10000 * round(training_state.val_acc, 4)))
    def on_sub_batch_begin(self, training_state):
        pass
    def on_sub_batch_end(self, training_state, train_index=0):
        pass
    def on_train_begin(self, training_state):
        pass
    def on_train_end(self, training_state):
        pass
    def save(self, training_step=0):
        """Snapshot to *snapshot_path* (no-op when no path is configured)."""
        if self.snapshot_path:
            self.save_func(self.snapshot_path, training_step)
    def save_best(self, val_accuracy):
        """Snapshot to *best_snapshot_path* suffixed with *val_accuracy*.

        NOTE(review): unlike save(), no training step is passed to save_func
        here — confirm save_func tolerates the single-argument call.
        """
        if self.best_snapshot_path:
            snapshot_path = self.best_snapshot_path + str(val_accuracy)
            self.save_func(snapshot_path)
| |
# coding=utf-8
import json
from collections import OrderedDict
import certifi
import ssl
import os
import socket
import logging
import requests
import xmlrpclib
import dns.resolver
import ipaddress
import re
from requests import exceptions
from urllib3.util import connection
from retry.api import retry_call
from exceptions import APIThrottled
from dogpile.cache.api import NO_VALUE
from subliminal.cache import region
from subliminal_patch.pitcher import pitchers
from cloudscraper import CloudScraper
try:
import brotli
except:
pass
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse
from subzero.lib.io import get_viable_encoding
logger = logging.getLogger(__name__)
# Absolute, normalized path to certifi's CA bundle, resolved relative to this
# module's real location so it also works from bundled installs.
# NOTE: Python 2 only — relies on the `unicode` builtin.
pem_file = os.path.normpath(os.path.join(os.path.dirname(os.path.realpath(unicode(__file__, get_viable_encoding()))),
                                         "..", certifi.where()))
class TimeoutSession(requests.Session):
    """requests.Session that applies a default timeout to every request."""
    # Class-level fallback used when no timeout is given at construction.
    timeout = 10

    def __init__(self, timeout=None):
        super(TimeoutSession, self).__init__()
        self.timeout = timeout if timeout else self.timeout

    def request(self, method, url, *args, **kwargs):
        # Inject the session default only when the caller did not supply one.
        if kwargs.get('timeout') is None:
            kwargs['timeout'] = self.timeout
        return super(TimeoutSession, self).request(method, url, *args, **kwargs)
class CertifiSession(TimeoutSession):
    """TimeoutSession that verifies TLS against the bundled certifi CA file."""

    def __init__(self, verify=None):
        super(CertifiSession, self).__init__()
        if not verify:
            # Any falsy value falls back to the module-level pem_file path.
            verify = pem_file
        self.verify = verify
class NeedsCaptchaException(Exception):
    """Signals that a Cloudflare captcha must be solved before proceeding."""
    pass
class CFSession(CloudScraper):
    """CloudScraper session with external captcha solving and clearance caching.

    Hands interactive Cloudflare captchas to a configured "pitcher" solver and
    persists cf_clearance cookies/headers per domain in the subliminal cache
    region.  Python 2 only (``except ValueError, e`` syntax, ``iteritems``).
    """
    def __init__(self, *args, **kwargs):
        super(CFSession, self).__init__(*args, **kwargs)
        self.debug = os.environ.get("CF_DEBUG", False)
    def _request(self, method, url, *args, **kwargs):
        # Skip CloudScraper.request and call requests.Session.request directly;
        # challenge detection/handling is done manually below.
        ourSuper = super(CloudScraper, self)
        resp = ourSuper.request(method, url, *args, **kwargs)
        if resp.headers.get('Content-Encoding') == 'br':
            if self.allow_brotli and resp._content:
                resp._content = brotli.decompress(resp.content)
            else:
                logging.warning('Brotli content detected, But option is disabled, we will not continue.')
                return resp
        # Debug request
        if self.debug:
            self.debugRequest(resp)
        # Check if Cloudflare anti-bot is on
        try:
            if self.isChallengeRequest(resp):
                if resp.request.method != 'GET':
                    # Work around if the initial request is not a GET,
                    # Supersede with a GET then re-request the original METHOD.
                    CloudScraper.request(self, 'GET', resp.url)
                    resp = ourSuper.request(method, url, *args, **kwargs)
                else:
                    # Solve Challenge
                    resp = self.sendChallengeResponse(resp, **kwargs)
        except ValueError, e:
            # CloudScraper signals an interactive captcha via ValueError("Captcha").
            if e.message == "Captcha":
                parsed_url = urlparse(url)
                domain = parsed_url.netloc
                # solve the captcha
                site_key = re.search(r'data-sitekey="(.+?)"', resp.content).group(1)
                challenge_s = re.search(r'type="hidden" name="s" value="(.+?)"', resp.content).group(1)
                challenge_ray = re.search(r'data-ray="(.+?)"', resp.content).group(1)
                if not all([site_key, challenge_s, challenge_ray]):
                    raise Exception("cf: Captcha site-key not found!")
                # Delegate to the configured external captcha-solving service.
                pitcher = pitchers.get_pitcher()("cf: %s" % domain, resp.request.url, site_key,
                                                 user_agent=self.headers["User-Agent"],
                                                 cookies=self.cookies.get_dict(),
                                                 is_invisible=True)
                parsed_url = urlparse(resp.url)
                logger.info("cf: %s: Solving captcha", domain)
                result = pitcher.throw()
                if not result:
                    raise Exception("cf: Couldn't solve captcha!")
                # Submit the solved captcha back to Cloudflare's check endpoint.
                submit_url = '{}://{}/cdn-cgi/l/chk_captcha'.format(parsed_url.scheme, domain)
                method = resp.request.method
                cloudflare_kwargs = {
                    'allow_redirects': False,
                    'headers': {'Referer': resp.url},
                    'params': OrderedDict(
                        [
                            ('s', challenge_s),
                            ('g-recaptcha-response', result)
                        ]
                    )
                }
                return CloudScraper.request(self, method, submit_url, **cloudflare_kwargs)
        return resp
    def request(self, method, url, *args, **kwargs):
        """Issue a request, restoring/persisting per-domain clearance data."""
        parsed_url = urlparse(url)
        domain = parsed_url.netloc
        cache_key = "cf_data3_%s" % domain
        # Re-use previously cached clearance cookies/headers for this domain.
        if not self.cookies.get("cf_clearance", "", domain=domain):
            cf_data = region.get(cache_key)
            if cf_data is not NO_VALUE:
                cf_cookies, hdrs = cf_data
                logger.debug("Trying to use old cf data for %s: %s", domain, cf_data)
                for cookie, value in cf_cookies.iteritems():
                    self.cookies.set(cookie, value, domain=domain)
                self.headers = hdrs
        ret = self._request(method, url, *args, **kwargs)
        try:
            cf_data = self.get_cf_live_tokens(domain)
        except:
            # Best-effort: missing clearance cookies are not an error here.
            pass
        else:
            # Persist fresh clearance data only when it actually changed.
            if cf_data and "cf_clearance" in cf_data[0] and cf_data[0]["cf_clearance"]:
                if cf_data != region.get(cache_key):
                    logger.debug("Storing cf data for %s: %s", domain, cf_data)
                    region.set(cache_key, cf_data)
                elif cf_data[0]["cf_clearance"]:
                    logger.debug("CF Live tokens not updated")
        return ret
    def get_cf_live_tokens(self, domain):
        """Return (clearance cookies, headers) currently held for *domain*."""
        # Locate the dot-prefixed cookie domain covering *domain*.
        for d in self.cookies.list_domains():
            if d.startswith(".") and d in ("." + domain):
                cookie_domain = d
                break
        else:
            raise ValueError(
                "Unable to find Cloudflare cookies. Does the site actually have "
                "Cloudflare IUAM (\"I'm Under Attack Mode\") enabled?")
        return (OrderedDict(filter(lambda x: x[1], [
            ("__cfduid", self.cookies.get("__cfduid", "", domain=cookie_domain)),
            ("cf_clearance", self.cookies.get("cf_clearance", "", domain=cookie_domain))
        ])),
            self.headers
        )
class RetryingSession(CertifiSession):
    """CertifiSession whose get/post calls retry transient network errors.

    Failed GET/POST calls are retried up to 3 times with a 5-second delay.
    When the SZ_HTTP_PROXY environment variable is set, all traffic goes
    through that proxy and caller-supplied timeouts are tripled to allow
    for the extra hop.
    """
    proxied_functions = ("get", "post")

    def __init__(self):
        super(RetryingSession, self).__init__()
        proxy_url = os.environ.get('SZ_HTTP_PROXY')
        if proxy_url:
            self.proxies = {"http": proxy_url, "https": proxy_url}

    def retry_method(self, method, *args, **kwargs):
        """Invoke the parent session's *method* through retry_call."""
        if self.proxies:
            # fixme: may be a little loud
            logger.debug("Using proxy %s for: %s", self.proxies["http"], args[0])
        bound = getattr(super(RetryingSession, self), method)
        transient = (exceptions.ConnectionError,
                     exceptions.ProxyError,
                     exceptions.SSLError,
                     exceptions.Timeout,
                     exceptions.ConnectTimeout,
                     exceptions.ReadTimeout,
                     socket.timeout)
        return retry_call(bound, fargs=args, fkwargs=kwargs, tries=3, delay=5,
                          exceptions=transient)

    def get(self, *args, **kwargs):
        if self.proxies and kwargs.get("timeout"):
            # Proxied requests get triple the caller's timeout.
            kwargs["timeout"] = kwargs["timeout"] * 3
        return self.retry_method("get", *args, **kwargs)

    def post(self, *args, **kwargs):
        if self.proxies and kwargs.get("timeout"):
            kwargs["timeout"] = kwargs["timeout"] * 3
        return self.retry_method("post", *args, **kwargs)
class RetryingCFSession(RetryingSession, CFSession):
    """Retrying session combined with Cloudflare handling.

    Via the combined MRO, the retried get/post calls from RetryingSession
    resolve through CFSession, so each attempt gets the Cloudflare
    challenge/captcha treatment.
    """
    pass
class SubZeroRequestsTransport(xmlrpclib.SafeTransport):
    """
    Drop in Transport for xmlrpclib that uses Requests instead of httplib
    Based on: https://gist.github.com/chrisguitarguy/2354951#gistcomment-2388906

    Adds per-host caching of session cookies/headers (keyed on xm_ver) and
    optional proxying via the SZ_HTTP_PROXY environment variable.
    """
    # change our user agent to reflect Requests
    user_agent = "Python XMLRPC with Requests (python-requests.org)"
    proxies = None
    # Version prefix for the per-host cookie/header cache keys.
    xm_ver = 1
    # Name of the server-side session cookie we persist between calls.
    session_var = "PHPSESSID"
    def __init__(self, use_https=True, verify=None, user_agent=None, timeout=10, *args, **kwargs):
        self.verify = pem_file if verify is None else verify
        self.use_https = use_https
        self.user_agent = user_agent if user_agent is not None else self.user_agent
        self.timeout = timeout
        self.session = requests.Session()
        self.session.headers['User-Agent'] = self.user_agent
        # if 'requests' in self.session.headers['User-Agent']:
        #     # Set a random User-Agent if no custom User-Agent has been set
        #     self.session.headers = User_Agent(allow_brotli=False).headers
        proxy = os.environ.get('SZ_HTTP_PROXY')
        if proxy:
            self.proxies = {
                "http": proxy,
                "https": proxy
            }
        xmlrpclib.SafeTransport.__init__(self, *args, **kwargs)
    def request(self, host, handler, request_body, verbose=0):
        """
        Make an xmlrpc request.
        """
        url = self._build_url(host, handler)
        cache_key = "xm%s_%s" % (self.xm_ver, host)
        old_sessvar = self.session.cookies.get(self.session_var, "")
        # Restore cached cookies/headers when we hold no live session cookie.
        if not old_sessvar:
            data = region.get(cache_key)
            if data is not NO_VALUE:
                logger.debug("Trying to re-use headers/cookies for %s" % host)
                self.session.cookies, self.session.headers = data
                old_sessvar = self.session.cookies.get(self.session_var, "")
        try:
            resp = self.session.post(url, data=request_body,
                                     stream=True, timeout=self.timeout, proxies=self.proxies,
                                     verify=self.verify)
            # Persist only when the server rotated the session cookie.
            if self.session_var in resp.cookies and resp.cookies[self.session_var] != old_sessvar:
                logger.debug("Storing %s cookies" % host)
                region.set(cache_key, [self.session.cookies, self.session.headers])
        except ValueError:
            logger.debug("Wiping cookies/headers cache (VE) for %s" % host)
            region.delete(cache_key)
            raise
        except Exception:
            logger.debug("Wiping cookies/headers cache (EX) for %s" % host)
            region.delete(cache_key)
            raise  # something went wrong
        else:
            try:
                resp.raise_for_status()
            except requests.exceptions.HTTPError:
                logger.debug("Wiping cookies/headers cache (RE) for %s" % host)
                region.delete(cache_key)
                raise
        try:
            # Back off proactively before the provider hard-throttles us;
            # the ValueError guard covers an unparsable header value.
            if 'x-ratelimit-remaining' in resp.headers and int(resp.headers['x-ratelimit-remaining']) <= 2:
                raise APIThrottled()
        except ValueError:
            logger.info('Couldn\'t parse "x-ratelimit-remaining": %r' % resp.headers['x-ratelimit-remaining'])
        self.verbose = verbose
        try:
            return self.parse_response(resp.raw)
        except:
            # NOTE(review): this bare except returns None on a parse failure,
            # hiding the error from callers — confirm this is intentional.
            logger.debug("Bad response data: %r", resp.raw)
    def _build_url(self, host, handler):
        """
        Build a url for our request based on the host, handler and use_http
        property
        """
        scheme = 'https' if self.use_https else 'http'
        # Avoid a double slash when the handler already starts with one.
        handler = handler[1:] if handler and handler[0] == "/" else handler
        return '%s://%s/%s' % (scheme, host, handler)
# Original urllib3 create_connection, kept so the patched wrapper can delegate.
_orig_create_connection = connection.create_connection
# host -> resolved IP memo for the custom resolver (cleared when resolver IPs change).
dns_cache = {}
# Lazily-built dns.resolver.Resolver and the raw JSON string of nameserver IPs
# it was configured from.
_custom_resolver = None
_custom_resolver_ips = None
def patch_create_connection():
    """Monkey-patch urllib3 so hostnames resolve through custom DNS servers.

    Resolver IPs come from the ``dns_resolvers`` environment variable (a JSON
    list of IPs); successful lookups are memoized in module-level ``dns_cache``.
    Python 2 only (uses the ``unicode`` builtin).
    """
    # Idempotence guard.
    # NOTE(review): the marker below is set on patch_create_connection itself,
    # not on patched_create_connection, so after patching this hasattr() check
    # on connection.create_connection never sees it — confirm intent.
    if hasattr(connection.create_connection, "_sz_patched"):
        return
    def patched_create_connection(address, *args, **kwargs):
        """Wrap urllib3's create_connection to resolve the name elsewhere"""
        # resolve hostname to an ip address; use your own
        # resolver here, as otherwise the system resolver will be used.
        global _custom_resolver, _custom_resolver_ips, dns_cache
        host, port = address
        try:
            # Literal IP addresses need no resolution.
            ipaddress.ip_address(unicode(host))
        except (ipaddress.AddressValueError, ValueError):
            # Not an IP literal.  The double-underscore local holds the env
            # value as of *this* call; the single-underscore global holds the
            # value the current resolver was built from.
            __custom_resolver_ips = os.environ.get("dns_resolvers", None)
            # resolver ips changed in the meantime?
            if __custom_resolver_ips != _custom_resolver_ips:
                _custom_resolver = None
                _custom_resolver_ips = __custom_resolver_ips
                dns_cache = {}
            custom_resolver = _custom_resolver
            if not custom_resolver:
                if _custom_resolver_ips:
                    logger.debug("DNS: Trying to use custom DNS resolvers: %s", _custom_resolver_ips)
                    custom_resolver = dns.resolver.Resolver(configure=False)
                    # NOTE(review): os.environ.get returns a *string* when the
                    # timeout variable is set — confirm Resolver.lifetime
                    # tolerates that.
                    custom_resolver.lifetime = os.environ.get("dns_resolvers_timeout", 8.0)
                    try:
                        custom_resolver.nameservers = json.loads(_custom_resolver_ips)
                    except:
                        logger.debug("DNS: Couldn't load custom DNS resolvers: %s", _custom_resolver_ips)
                    else:
                        _custom_resolver = custom_resolver
            if custom_resolver:
                if host in dns_cache:
                    ip = dns_cache[host]
                    logger.debug("DNS: Using %s=%s from cache", host, ip)
                    return _orig_create_connection((ip, port), *args, **kwargs)
                else:
                    try:
                        ip = custom_resolver.query(host)[0].address
                        logger.debug("DNS: Resolved %s to %s using %s", host, ip, custom_resolver.nameservers)
                        dns_cache[host] = ip
                        return _orig_create_connection((ip, port), *args, **kwargs)
                    except dns.exception.DNSException:
                        logger.warning("DNS: Couldn't resolve %s with DNS: %s", host, custom_resolver.nameservers)
        # Fall through: IP literal, no custom resolver, or DNS lookup failed.
        logger.debug("DNS: Falling back to default DNS or IP on %s", host)
        return _orig_create_connection((host, port), *args, **kwargs)
    patch_create_connection._sz_patched = True
    connection.create_connection = patched_create_connection
# Apply the patch once at import time.
patch_create_connection()
| |
from array import array
from collections import abc
import sys
_marker = object()
class istr(str):
    """Case insensitive str: the stored value is canonicalized via str.title().

    Construction is idempotent — passing an existing istr back in returns
    the very same object.
    """
    # Probed with getattr() in __new__; cheaper than an isinstance check.
    __is_istr__ = True

    def __new__(cls, val='',
                encoding=sys.getdefaultencoding(), errors='strict'):
        # Faster than instance check
        if getattr(val, '__is_istr__', False):
            return val
        if type(val) is not str:
            val = str(val)
        return str.__new__(cls, val.title())

    def title(self):
        # Already stored in canonical title-case.
        return self


upstr = istr  # for relaxing backward compatibility problems
def getversion(md):
    """Return the mutation counter of *md*'s underlying storage."""
    if isinstance(md, _Base):
        return md._impl._version
    raise TypeError("Parameter should be multidict or proxy")
_version = array('Q', [0])
class _Impl:
    """Storage shared between a multidict and its proxies.

    ``_items`` holds (identity, key, value) triples; ``_version`` is bumped
    on every mutation so views can detect concurrent modification.
    """
    __slots__ = ('_items', '_version')

    def __init__(self):
        self._items = []
        self.incr_version()

    def incr_version(self):
        # Advance the module-wide counter and record the new value locally.
        global _version
        _version[0] += 1
        self._version = _version[0]
class _Base:
    """Shared read-only behaviour for MultiDict and its proxies.

    Entries live in ``self._impl._items`` as (identity, key, value) triples,
    where *identity* is the lookup form of the key produced by ``_title``
    (the key itself here; title-cased in the CI variants).
    """

    def _title(self, key):
        # Case-sensitive identity by default; CI variants override.
        return key

    def getall(self, key, default=_marker):
        """Return a list of all values matching the key."""
        identity = self._title(key)
        res = [v for i, k, v in self._impl._items if i == identity]
        if res:
            return res
        # Simplified: `res` is known falsy here, so the old `not res and`
        # guard was redundant.
        if default is not _marker:
            return default
        raise KeyError('Key not found: %r' % key)

    def getone(self, key, default=_marker):
        """Get first value matching the key."""
        identity = self._title(key)
        for i, k, v in self._impl._items:
            if i == identity:
                return v
        if default is not _marker:
            return default
        raise KeyError('Key not found: %r' % key)

    # Mapping interface #

    def __getitem__(self, key):
        return self.getone(key)

    def get(self, key, default=None):
        """Get first value matching the key.
        The method is alias for .getone().
        """
        return self.getone(key, default)

    def __iter__(self):
        return iter(self.keys())

    def __len__(self):
        return len(self._impl._items)

    def keys(self):
        """Return a new view of the dictionary's keys."""
        return _KeysView(self._impl)

    def items(self):
        """Return a new view of the dictionary's items *(key, value) pairs)."""
        return _ItemsView(self._impl)

    def values(self):
        """Return a new view of the dictionary's values."""
        return _ValuesView(self._impl)

    def __eq__(self, other):
        if not isinstance(other, abc.Mapping):
            return NotImplemented
        if isinstance(other, _Base):
            # Fast path: both sides expose the internal items list; compare
            # identities and values positionally (original key case is
            # deliberately ignored).
            lft = self._impl._items
            rht = other._impl._items
            if len(lft) != len(rht):
                return False
            # Bug fix: the original unpacked BOTH keys into the same name
            # (`k2` twice), silently shadowing the left-hand key.  Keys are
            # unused here, but the duplicate name was a latent trap; use
            # distinct names.
            for (i1, k1, v1), (i2, k2, v2) in zip(lft, rht):
                if i1 != i2 or v1 != v2:
                    return False
            return True
        # Generic mapping: equal length and first-value-per-key equality.
        if len(self._impl._items) != len(other):
            return False
        for k, v in self.items():
            nv = other.get(k, _marker)
            if v != nv:
                return False
        return True

    def __contains__(self, key):
        identity = self._title(key)
        for i, k, v in self._impl._items:
            if i == identity:
                return True
        return False

    def __repr__(self):
        body = ', '.join("'{}': {!r}".format(k, v) for k, v in self.items())
        return '<{}({})>'.format(self.__class__.__name__, body)
class MultiDictProxy(_Base, abc.Mapping):
    """Read-only mapping view sharing its storage with a MultiDict."""

    def __init__(self, arg):
        if isinstance(arg, (MultiDict, MultiDictProxy)):
            self._impl = arg._impl
            return
        raise TypeError(
            'ctor requires MultiDict or MultiDictProxy instance'
            ', not {}'.format(type(arg)))

    def __reduce__(self):
        # Proxies are unpicklable by design; pickle the owning MultiDict.
        msg = "can't pickle {} objects".format(self.__class__.__name__)
        raise TypeError(msg)

    def copy(self):
        """Return a copy of itself."""
        return MultiDict(self.items())
class CIMultiDictProxy(MultiDictProxy):
    """Read-only, case-insensitive view sharing storage with a CIMultiDict."""

    def __init__(self, arg):
        if isinstance(arg, (CIMultiDict, CIMultiDictProxy)):
            self._impl = arg._impl
            return
        raise TypeError(
            'ctor requires CIMultiDict or CIMultiDictProxy instance'
            ', not {}'.format(type(arg)))

    def _title(self, key):
        # Title-cased form doubles as the case-insensitive identity.
        return key.title()

    def copy(self):
        """Return a copy of itself."""
        return CIMultiDict(self.items())
class MultiDict(_Base, abc.MutableMapping):
    """An ordered, mutable mapping that may hold several values per key.

    Entries live in shared ``_Impl`` storage as (identity, key, value)
    triples; ``_title`` computes the identity (overridden by CIMultiDict).
    """
    def __init__(self, *args, **kwargs):
        self._impl = _Impl()
        self._extend(args, kwargs, self.__class__.__name__, self.add)
    def __reduce__(self):
        # Pickle as (class, list-of-pairs) so duplicate keys round-trip.
        return (self.__class__, (list(self.items()),))
    def _title(self, key):
        # Identity function here; CIMultiDict overrides with str.title().
        return key
    def _key(self, key):
        # Normalize str subclasses down to plain str; reject anything else.
        if isinstance(key, str):
            return str(key)
        else:
            raise TypeError("MultiDict keys should be either str "
                            "or subclasses of str")
    def add(self, key, value):
        # Always appends; existing pairs with the same key are kept.
        identity = self._title(key)
        self._impl._items.append((identity, self._key(key), value))
        self._impl.incr_version()
    def copy(self):
        """Return a copy of itself."""
        cls = self.__class__
        return cls(self.items())
    def extend(self, *args, **kwargs):
        """Extend current MultiDict with more values.
        This method must be used instead of update.
        """
        self._extend(args, kwargs, 'extend', self.add)
    def _extend(self, args, kwargs, name, method):
        # Shared worker for __init__/extend/update: normalize the single
        # positional argument to (identity, key, value)-ish items and feed
        # each pair (and each kwarg) through *method* (add or _replace).
        if len(args) > 1:
            raise TypeError("{} takes at most 1 positional argument"
                            " ({} given)".format(name, len(args)))
        if args:
            arg = args[0]
            if isinstance(args[0], MultiDictProxy):
                items = arg._impl._items
            elif isinstance(args[0], MultiDict):
                items = arg._impl._items
            elif hasattr(arg, 'items'):
                items = [(k, k, v) for k, v in arg.items()]
            else:
                # Plain iterable of (key, value) pairs.
                items = []
                for item in arg:
                    if not len(item) == 2:
                        raise TypeError(
                            "{} takes either dict or list of (key, value) "
                            "tuples".format(name))
                    items.append((item[0], item[0], item[1]))
            for identity, key, value in items:
                method(key, value)
        for key, value in kwargs.items():
            method(key, value)
    def clear(self):
        """Remove all items from MultiDict."""
        self._impl._items.clear()
        self._impl.incr_version()
    # Mapping interface #
    def __setitem__(self, key, value):
        # Replaces the last matching entry and drops earlier duplicates.
        key = self._title(key)
        self._replace(key, value)
    def __delitem__(self, key):
        # Removes ALL entries matching the key; iterate backwards so index
        # deletion does not skip elements.
        key = self._title(key)
        items = self._impl._items
        found = False
        for i in range(len(items) - 1, -1, -1):
            if items[i][0] == key:
                del items[i]
                found = True
        if not found:
            raise KeyError(key)
        else:
            self._impl.incr_version()
    def setdefault(self, key, default=None):
        """Return value for key, set value to default if key is not present."""
        key = self._title(key)
        for i, k, v in self._impl._items:
            if i == key:
                return v
        # NOTE(review): the titled key (identity) is stored as the key here,
        # unlike add() which preserves the caller's original case — confirm.
        self.add(key, default)
        return default
    def popone(self, key, default=_marker):
        """Remove specified key and return the corresponding value.
        If key is not found, d is returned if given, otherwise
        KeyError is raised.
        """
        key = self._title(key)
        for i in range(len(self._impl._items)):
            if self._impl._items[i][0] == key:
                value = self._impl._items[i][2]
                del self._impl._items[i]
                self._impl.incr_version()
                return value
        if default is _marker:
            raise KeyError(key)
        else:
            return default
    # dict-compatible alias: pop removes only the first occurrence.
    pop = popone
    def popall(self, key, default=_marker):
        """Remove all occurrences of key and return the list of corresponding
        values.
        If key is not found, default is returned if given, otherwise
        KeyError is raised.
        """
        found = False
        identity = self._title(key)
        ret = []
        # Backwards scan so deletions do not disturb remaining indices.
        for i in range(len(self._impl._items)-1, -1, -1):
            item = self._impl._items[i]
            if item[0] == identity:
                ret.append(item[2])
                del self._impl._items[i]
                self._impl.incr_version()
                found = True
        if not found:
            if default is _marker:
                raise KeyError(key)
            else:
                return default
        else:
            # Collected in reverse; restore insertion order.
            ret.reverse()
            return ret
    def popitem(self):
        """Remove and return an arbitrary (key, value) pair."""
        if self._impl._items:
            # "Arbitrary" is in fact the FIRST (oldest) entry.
            i = self._impl._items.pop(0)
            self._impl.incr_version()
            return i[1], i[2]
        else:
            raise KeyError("empty multidict")
    def update(self, *args, **kwargs):
        """Update the dictionary from *other*, overwriting existing keys."""
        self._extend(args, kwargs, 'update', self._replace)
    def _replace(self, key, value):
        # Overwrite the LAST entry matching the key, then delete all earlier
        # duplicates; append when the key is absent.
        key = self._key(key)
        identity = self._title(key)
        items = self._impl._items
        for i in range(len(items)-1, -1, -1):
            item = items[i]
            if item[0] == identity:
                items[i] = (identity, key, value)
                # i points to last found item
                rgt = i
                self._impl.incr_version()
                break
        else:
            self._impl._items.append((identity, key, value))
            self._impl.incr_version()
            return
        # remove all preceding items
        i = 0
        while i < rgt:
            item = items[i]
            if item[0] == identity:
                del items[i]
                rgt -= 1
            else:
                i += 1
class CIMultiDict(MultiDict):
    """Case-insensitive MultiDict: keys are matched by their title-cased form."""
    def _title(self, key):
        # The title-cased key is the case-insensitive lookup identity.
        return key.title()
class _ViewBase:
def __init__(self, impl):
self._impl = impl
self._version = impl._version
def __len__(self):
return len(self._impl._items)
class _ItemsView(_ViewBase, abc.ItemsView):
    """View over the stored (key, value) pairs, in insertion order."""

    def __contains__(self, item):
        assert isinstance(item, (tuple, list))
        assert len(item) == 2
        for entry in self._impl._items:
            if item[0] == entry[1] and item[1] == entry[2]:
                return True
        return False

    def __iter__(self):
        for entry in self._impl._items:
            # Fail loudly if the dict mutated since this view was created.
            if self._version != self._impl._version:
                raise RuntimeError("Dictionary changed during iteration")
            yield entry[1], entry[2]

    def __repr__(self):
        body = ', '.join("{!r}: {!r}".format(entry[1], entry[2])
                         for entry in self._impl._items)
        return '{}({})'.format(self.__class__.__name__, body)
class _ValuesView(_ViewBase, abc.ValuesView):
    """View over the stored values, in insertion order."""

    def __contains__(self, value):
        return any(entry[2] == value for entry in self._impl._items)

    def __iter__(self):
        for entry in self._impl._items:
            # Fail loudly if the dict mutated since this view was created.
            if self._version != self._impl._version:
                raise RuntimeError("Dictionary changed during iteration")
            yield entry[2]

    def __repr__(self):
        body = ', '.join("{!r}".format(entry[2]) for entry in self._impl._items)
        return '{}({})'.format(self.__class__.__name__, body)
class _KeysView(_ViewBase, abc.KeysView):
    """View over the stored keys (original case), in insertion order."""

    def __contains__(self, key):
        return any(entry[1] == key for entry in self._impl._items)

    def __iter__(self):
        for entry in self._impl._items:
            # Fail loudly if the dict mutated since this view was created.
            if self._version != self._impl._version:
                raise RuntimeError("Dictionary changed during iteration")
            yield entry[1]

    def __repr__(self):
        body = ', '.join("{!r}".format(entry[1]) for entry in self._impl._items)
        return '{}({})'.format(self.__class__.__name__, body)
| |
#
# This file is part of GreatFET
#
import time
class GreatFETInterface(object):
    """Base class shared by all GreatFET peripheral interfaces."""

    def __init__(self, device):
        """Store a reference to the GreatFET board hosting this peripheral."""
        self.device = device
class PirateCompatibleInterface(GreatFETInterface):
    """ Mix-in for interfaces that support bus-pirate style commands.

    Subclasses override the ``_handle_pirate_*`` hooks to map the generic
    command language onto their bus (SPI, I2C, ...).
    """

    def run_pirate_commands(self, command_string):
        """ Runs a bus-pirate style command on the current interface.

        Parses *command_string* character by character, batching consecutive
        writes and reads into single GreatFET operations, and returns the
        list of values read back from the bus.
        """
        # Command characters for Bus Pirate commands.
        _START_CHARS = "[{"
        _STOP_CHARS = "]}"
        _READ_CHARS = "Rr"
        _DELAY_CHARS = "&"
        _DELIMITERS = " ,"
        # Numeric definitions for buspirate.
        _CHARS_VALID_TO_START_NUMBER="0123456789"
        _CHARS_VALID_IN_NUMBER="0123456789abcdefxh"
        # NOTE(review): parser state lives on the instance, so this method is
        # not re-entrant; confirm callers never invoke it concurrently.
        self._result = []
        self._commands = list(command_string)
        # Simple performance enhancement: we'll gather any consecutive reads/writes and issue
        # them as single commands to the GreatFET.
        self._pending_write_data = []
        self._pending_read_length = 0

        def issue_pending_writes(ends_transaction=False):
            """ Issues any writes pending; used when performing a non-write operation."""
            if not self._pending_write_data:
                return
            # Perform all of our pending writes.
            self._result.extend(self._handle_pirate_write(self._pending_write_data, ends_transaction=ends_transaction))
            self._pending_write_data = []

        def perform_pending_reads(ends_transaction=False):
            """ Performs any reads pending; used when performing a non-read operation."""
            # If we don't have any pending reads, don't do anything.
            if not self._pending_read_length:
                return
            # Perform all of our pending reads.
            self._result.extend(self._handle_pirate_read(self._pending_read_length, ends_transaction=ends_transaction))
            self._pending_read_length = 0

        def handle_pending_io(ends_transaction=False):
            """ Convenience method that handles any pending I/O."""
            issue_pending_writes(ends_transaction=ends_transaction)
            perform_pending_reads(ends_transaction=ends_transaction)

        def extract_number(char=None):
            """
            Extracts a number from the current command stream. Should only be called when the command stream
            starts with a number.

            Returns the parsed integer (base auto-detected via int(..., 0)),
            or None when no numeric characters were consumed.
            """
            # NOTE(review): the character that terminates the number is popped
            # and discarded rather than pushed back, so a command character
            # immediately following a number (e.g. the ']' in "[0x10]") is
            # silently lost -- verify whether that is intended.
            # Start building our number.
            number = []
            try:
                # If we don't have a starting character, read one.
                if char is None:
                    char = self._commands.pop(0)
                # Grab all characters from the string until we run out of numbers.
                while char in _CHARS_VALID_IN_NUMBER:
                    # Quirk: the bus pirate accepts 'h' as a synonym for 'x' in number prefixes.
                    # We'll convert it to 'x' to match python's prefix format.
                    char = 'x' if (char == 'h') else char
                    number.append(char)
                    char = self._commands.pop(0)
            except IndexError:
                # If we've run out of characters to parse, this is a de-facto delimiter.
                # Convert it to one so we can handle the pending number below.
                char = ' '
            # If we don't have a number, return None.
            if len(number) == 0:
                return None
            # Once we no longer have a valid numeric character, parse the number we've built.
            number = ''.join(number)
            return int(number, 0)

        def get_repeat_count():
            """
            Checks to see if the given stream has a bus-pirate repeat operator (:<number>), and if so, returns it.
            If it doesn't, returns a default value of 1.

            NOTE(review): if ':' is followed by a non-numeric character,
            extract_number() returns None, which later makes
            `self._pending_read_length += length` raise TypeError -- confirm
            whether malformed input should fail more gracefully.
            """
            if len(self._commands) and self._commands[0] == ':':
                # Discard our colon...
                del self._commands[0]
                # ... and extract the relevant number.
                return extract_number()
            else:
                return 1

        # Handle each byte in the command string.
        while self._commands:
            # Start off with no repeat modifier, and no pending operation.
            length = None
            # Grab the next command-character in the queue.
            char = self._commands.pop(0)
            # If this character starts a number, we have a write operation.
            if char in _CHARS_VALID_TO_START_NUMBER:
                byte = extract_number(char)
                if byte > 255:
                    raise ValueError("Bus pirate commands must provide only byte values to write!")
                # Reads queued so far must complete before this write is queued.
                # Schedule the write.
                perform_pending_reads()
                self._pending_write_data.append(byte)
            # Handle our core commands.
            elif char in _START_CHARS:
                handle_pending_io()
                self._handle_pirate_start()
            elif char in _STOP_CHARS:
                handle_pending_io(ends_transaction=True)
                self._handle_pirate_stop()
            elif char in _READ_CHARS:
                issue_pending_writes()
                length = get_repeat_count()
                self._pending_read_length += length
            elif char in _DELAY_CHARS:
                handle_pending_io()
                # Compute the total length of time we want to delay...
                duration_us = get_repeat_count()
                time.sleep(duration_us / 1000000)
            elif char in _DELIMITERS:
                # Delimiters are no-ops; they only separate tokens.
                pass
            else:
                raise ValueError("Unsupported command character {}".format(char))
            # TODO: support 'A/a/@'?
            # TODO: support 'D/d'?
            # TODO: message on 'Ww'?
        return self._result

    #
    # Default (do-nothing) implementations of our support functions.
    # Subclasses typically should implement these.
    #

    def _handle_pirate_read(self, length, ends_transaction=False):
        """ Performs a bus-pirate read of the given length, and returns a list of numeric values. """
        return []

    def _handle_pirate_write(self, data, ends_transaction=False):
        """ Performs a bus-pirate send of the relevant list of data, and returns a list of any data received. """
        return []

    def _handle_pirate_start(self):
        """ Starts a given communication by performing any start conditions present on the interface. """
        pass

    def _handle_pirate_stop(self):
        """ Ends a given communication by performing any stop conditions present on the interface. """
        pass
| |
from django.conf import settings
from django.test import TestCase
from unittest import SkipTest, skipIf
from django.db import connection
from django.contrib.gis.geos import MultiLineString, LineString, Point
from django.utils import translation
from geotrek.core.models import Path, Topology
from geotrek.core.tests.factories import TopologyFactory
from geotrek.altimetry.helpers import AltimetryHelper
class ElevationTest(TestCase):
    """Elevation attributes (ascent/descent/min/max/3D geometry) computed
    against a small hand-built DEM raster."""

    @classmethod
    def setUpTestData(cls):
        # Create a simple fake DEM
        with connection.cursor() as cur:
            cur.execute('INSERT INTO altimetry_dem (rast) VALUES (ST_MakeEmptyRaster(100, 125, 0, 125, 25, -25, 0, 0, %s))', [settings.SRID])
            cur.execute('UPDATE altimetry_dem SET rast = ST_AddBand(rast, \'16BSI\')')
            demvalues = [[0, 0, 3, 5], [2, 2, 10, 15], [5, 15, 20, 25], [20, 25, 30, 35], [30, 35, 40, 45]]
            # Fill the 4x5 raster cell by cell (ST_SetValue is 1-indexed).
            for y in range(0, 5):
                for x in range(0, 4):
                    cur.execute('UPDATE altimetry_dem SET rast = ST_SetValue(rast, %s, %s, %s::float)', [x + 1, y + 1, demvalues[y][x]])
        if settings.TREKKING_TOPOLOGY_ENABLED:
            cls.path = Path.objects.create(geom=LineString((78, 117), (3, 17)))

    @skipIf(not settings.TREKKING_TOPOLOGY_ENABLED, 'Test with dynamic segmentation only')
    def test_elevation_path(self):
        self.assertEqual(self.path.ascent, 16)
        self.assertEqual(self.path.descent, 0)
        self.assertEqual(self.path.min_elevation, 6)
        self.assertEqual(self.path.max_elevation, 22)
        self.assertEqual(len(self.path.geom_3d.coords), 7)

    @skipIf(not settings.TREKKING_TOPOLOGY_ENABLED, 'Test with dynamic segmentation only')
    def test_elevation_profile(self):
        profile = self.path.get_elevation_profile()
        self.assertAlmostEqual(len(profile), 7)
        self.assertAlmostEqual(profile[0][0], 0.0)
        self.assertAlmostEqual(profile[-1][0], 125.0)
        self.assertAlmostEqual(profile[0][3], 6.0)
        self.assertAlmostEqual(profile[1][3], 8.0)
        self.assertAlmostEqual(profile[2][3], 10.0)
        self.assertAlmostEqual(profile[3][3], 13.0)
        self.assertAlmostEqual(profile[4][3], 18.0)
        self.assertAlmostEqual(profile[5][3], 20.0)
        self.assertAlmostEqual(profile[6][3], 22.0)

    @skipIf(not settings.TREKKING_TOPOLOGY_ENABLED, 'Test with dynamic segmentation only')
    def test_elevation_limits(self):
        limits = self.path.get_elevation_limits()
        self.assertEqual(limits[0], 1106)
        self.assertEqual(limits[1], -94)

    @skipIf(not settings.TREKKING_TOPOLOGY_ENABLED, 'Test with dynamic segmentation only')
    def test_elevation_topology_line(self):
        topo = TopologyFactory.create(paths=[(self.path, 0.2, 0.8)])
        topo.save()
        topo.get_elevation_profile()
        self.assertEqual(topo.ascent, 7)
        self.assertEqual(topo.descent, 0)
        self.assertEqual(topo.min_elevation, 10)
        self.assertEqual(topo.max_elevation, 17)
        self.assertEqual(len(topo.geom_3d.coords), 5)

    @skipIf(settings.TREKKING_TOPOLOGY_ENABLED, 'Test without dynamic segmentation only')
    def test_elevation_topology_line_nds(self):
        """
        Same scenario as the topology test above, but built from a raw
        geometry (no dynamic segmentation).
        """
        topo = TopologyFactory.create(geom="SRID=2154;LINESTRING(63 97, 18 37)")
        topo.get_elevation_profile()
        self.assertEqual(topo.ascent, 5)
        self.assertEqual(topo.descent, 0)
        self.assertEqual(topo.min_elevation, 12)
        self.assertEqual(topo.max_elevation, 17)
        self.assertEqual(len(topo.geom_3d.coords), 5)

    @skipIf(settings.TREKKING_TOPOLOGY_ENABLED, 'Test without dynamic segmentation only')
    def test_elevation_topology_point(self):
        topo = TopologyFactory.create(geom="SRID=2154;POINT(33 57)")
        self.assertEqual(topo.geom_3d.coords[2], 15)
        self.assertEqual(topo.ascent, 0)
        self.assertEqual(topo.descent, 0)
        self.assertEqual(topo.min_elevation, 15)
        self.assertEqual(topo.max_elevation, 15)

    @skipIf(not settings.TREKKING_TOPOLOGY_ENABLED, 'Test with dynamic segmentation only')
    def test_elevation_topology_point_offset(self):
        # A point topology offset sideways from the path still samples the DEM.
        topo = TopologyFactory.create(paths=[(self.path, 0.5, 0.5)], offset=1)
        self.assertEqual(topo.geom_3d.coords[2], 15)
        self.assertEqual(topo.ascent, 0)
        self.assertEqual(topo.descent, 0)
        self.assertEqual(topo.min_elevation, 15)
        self.assertEqual(topo.max_elevation, 15)

    def test_elevation_topology_outside_dem(self):
        # Geometries outside the raster extent fall back to elevation 0.
        if settings.TREKKING_TOPOLOGY_ENABLED:
            outside_path = Path.objects.create(geom=LineString((200, 200), (300, 300)))
            topo = TopologyFactory.create(paths=[(outside_path, 0.5, 0.5)])
        else:
            topo = TopologyFactory.create(geom="SRID=2154;POINT(250 250)")
        self.assertEqual(topo.geom_3d.coords[2], 0)
        self.assertEqual(topo.ascent, 0)
        self.assertEqual(topo.descent, 0)
        self.assertEqual(topo.min_elevation, 0)
        self.assertEqual(topo.max_elevation, 0)
class ElevationProfileTest(TestCase):
    """AltimetryHelper.elevation_profile / profile_svg / altimetry_limits
    on plain 3D geometries (no DEM required)."""

    def test_elevation_profile_multilinestring(self):
        geom = MultiLineString(LineString((1.5, 2.5, 8), (2.5, 2.5, 10)),
                               LineString((2.5, 2.5, 6), (2.5, 0, 7)),
                               srid=settings.SRID)
        profile = AltimetryHelper.elevation_profile(geom)
        self.assertEqual(len(profile), 4)

    def test_elevation_profile_point(self):
        # A single point yields a one-entry profile at distance 0.
        geom = Point(1.5, 2.5, 8, srid=settings.SRID)
        profile = AltimetryHelper.elevation_profile(geom)
        self.assertEqual(profile, [[0, 1.5, 2.5, 8.0]])

    def test_elevation_svg_output(self):
        geom = LineString((1.5, 2.5, 8), (2.5, 2.5, 10),
                          srid=settings.SRID)
        profile = AltimetryHelper.elevation_profile(geom)
        language = translation.get_language()
        svg = AltimetryHelper.profile_svg(profile, language)
        # SVG is bytes; check the pygal signature and configured colours.
        self.assertIn('Generated with pygal'.encode(), svg)
        self.assertIn(settings.ALTIMETRIC_PROFILE_BACKGROUND.encode(), svg)
        self.assertIn(settings.ALTIMETRIC_PROFILE_COLOR.encode(), svg)

    def test_elevation_altimetry_limits(self):
        geom = LineString((1.5, 2.5, 8), (2.5, 2.5, 10),
                          srid=settings.SRID)
        profile = AltimetryHelper.elevation_profile(geom)
        limits = AltimetryHelper.altimetry_limits(profile)
        self.assertEqual(limits[0], 1108)
        self.assertEqual(limits[1], -92)
def fill_raster():
    """Insert the shared 4x5 test DEM used by the elevation-area tests."""
    with connection.cursor() as cur:
        cur.execute('INSERT INTO altimetry_dem (rast) VALUES (ST_MakeEmptyRaster(100, 125, 0, 125, 25, -25, 0, 0, %s))', [settings.SRID])
        cur.execute('UPDATE altimetry_dem SET rast = ST_AddBand(rast, \'16BSI\')')
        grid = [[0, 0, 3, 5], [2, 2, 10, 15], [5, 15, 20, 25], [20, 25, 30, 35], [30, 35, 40, 45]]
        # ST_SetValue uses 1-based column/row indices.
        for row in range(5):
            for col in range(4):
                cur.execute('UPDATE altimetry_dem SET rast = ST_SetValue(rast, %s, %s, %s::float)', [col + 1, row + 1, grid[row][col]])
class ElevationAreaTest(TestCase):
    """AltimetryHelper.elevation_area for a horizontal line over the test DEM:
    size ratio, altitude matrix, resolution, center and extents."""

    @classmethod
    def setUpTestData(cls):
        fill_raster()
        cls.geom = LineString((100, 370), (1100, 370), srid=settings.SRID)
        cls.area = AltimetryHelper.elevation_area(cls.geom)

    def test_area_has_nice_ratio_if_horizontal(self):
        self.assertEqual(self.area['size']['x'], 1300.0)
        self.assertEqual(self.area['size']['y'], 800.0)

    def test_area_provides_altitudes_as_matrix(self):
        self.assertEqual(len(self.area['altitudes']), 33)
        self.assertEqual(len(self.area['altitudes'][0]), 53)
        self.assertEqual(len(self.area['altitudes'][-1]), 53)

    def test_area_provides_resolution(self):
        self.assertEqual(self.area['resolution']['x'], 53)
        self.assertEqual(self.area['resolution']['y'], 33)

    def test_resolution_step_depends_on_geometry_size(self):
        self.assertEqual(self.area['resolution']['step'], 25)
        # A much longer geometry must coarsen the sampling step.
        geom = LineString((100, 370), (100100, 370), srid=settings.SRID)
        area = AltimetryHelper.elevation_area(geom)
        self.assertEqual(area['resolution']['step'], 866)

    def test_area_provides_center_as_latlng(self):
        self.assertAlmostEqual(self.area['center']['lng'], -1.3594758650394245)
        self.assertAlmostEqual(self.area['center']['lat'], -5.981351702397734)

    def test_area_provides_center_as_xy(self):
        self.assertEqual(self.area['center']['x'], 600.0)
        self.assertEqual(self.area['center']['y'], 369.0)

    def test_area_provides_extent_as_xy(self):
        extent = self.area['extent']
        self.assertEqual(extent['northwest']['x'], -50.0)
        self.assertEqual(extent['northwest']['y'], 769.0)
        self.assertEqual(extent['southeast']['x'], 1250.0)
        self.assertEqual(extent['southeast']['y'], -31.0)

    def test_area_provides_extent_as_latlng(self):
        extent = self.area['extent']
        self.assertAlmostEqual(extent['northeast']['lat'], -5.9786368380250385)
        self.assertAlmostEqual(extent['northeast']['lng'], -1.35556992351484)
        self.assertAlmostEqual(extent['southwest']['lat'], -5.9840665893459875)
        self.assertAlmostEqual(extent['southwest']['lng'], -1.3633815583740085)

    def test_area_provides_altitudes_extent(self):
        extent = self.area['extent']
        self.assertEqual(extent['altitudes']['max'], 45)
        self.assertEqual(extent['altitudes']['min'], 0)
class ElevationOtherGeomAreaTest(TestCase):
    """elevation_area edge cases: tiny geometries and size ratios for
    vertical/square orientations."""

    @classmethod
    def setUpTestData(cls):
        fill_raster()

    def test_area_small_geom(self):
        # A geometry within a single DEM cell yields a flat altitude range.
        geom = LineString((10, 10), (10, 5), srid=settings.SRID)
        area = AltimetryHelper.elevation_area(geom)
        extent = area['extent']
        self.assertEqual(extent['altitudes']['max'], 30)
        self.assertEqual(extent['altitudes']['min'], 30)

    def test_area_has_nice_ratio_if_vertical(self):
        geom = LineString((0, 0), (0, 1000), srid=settings.SRID)
        area = AltimetryHelper.elevation_area(geom)
        self.assertEqual(area['size']['x'], 800.0)
        self.assertEqual(area['size']['y'], 1300.0)

    def test_area_has_nice_ratio_if_square_enough(self):
        geom = LineString((0, 0), (1000, 1000), srid=settings.SRID)
        area = AltimetryHelper.elevation_area(geom)
        self.assertEqual(area['size']['x'], 1300.0)
        self.assertEqual(area['size']['y'], 1300.0)
def fill_raster_order():
    """Insert a 10x10 test DEM whose altitude at (col, row) is row*2 + col,
    making row/column ordering mistakes visible in the output matrix."""
    with connection.cursor() as cur:
        cur.execute('INSERT INTO altimetry_dem (rast) VALUES (ST_MakeEmptyRaster(250, 250, 0, 250, 25, -25, 0, 0, %s))',
                    [settings.SRID])
        cur.execute('UPDATE altimetry_dem SET rast = ST_AddBand(rast, \'16BSI\')')
        grid = [list(range(row * 2, row * 2 + 10)) for row in range(10)]
        # ST_SetValue uses 1-based column/row indices.
        for row in range(10):
            for col in range(10):
                cur.execute('UPDATE altimetry_dem SET rast = ST_SetValue(rast, %s, %s, %s::float)',
                            [col + 1, row + 1, grid[row][col]])
class ElevationRightOrderAreaTest(TestCase):
    @classmethod
    def setUpTestData(cls):
        fill_raster_order()

    def test_area_order_lines_columns(self):
        """
        We check that the order is always the same not depending on the database.
        Firstly we iterate on lines then columns. And it should be always the same order.
        """
        # Vertical geometry: the matrix is listed north-to-south.
        geom = LineString((125, 240), (125, 50), srid=settings.SRID)
        area_1 = AltimetryHelper.elevation_area(geom)
        self.assertEqual(area_1['altitudes'], [[18, 19, 20, 21, 22, 23, 24],
                                               [16, 17, 18, 19, 20, 21, 22],
                                               [14, 15, 16, 17, 18, 19, 20],
                                               [12, 13, 14, 15, 16, 17, 18],
                                               [10, 11, 12, 13, 14, 15, 16],
                                               [8, 9, 10, 11, 12, 13, 14],
                                               [6, 7, 8, 9, 10, 11, 12],
                                               [4, 5, 6, 7, 8, 9, 10],
                                               [2, 3, 4, 5, 6, 7, 8],
                                               [0, 1, 2, 3, 4, 5, 6]])
        # Horizontal geometry: same ordering rule, transposed window.
        geom = LineString((240, 125), (50, 125), srid=settings.SRID)
        area_2 = AltimetryHelper.elevation_area(geom)
        self.assertEqual(area_2['altitudes'], [[12, 13, 14, 15, 16, 17, 18, 19, 20, 21],
                                               [10, 11, 12, 13, 14, 15, 16, 17, 18, 19],
                                               [8, 9, 10, 11, 12, 13, 14, 15, 16, 17],
                                               [6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
                                               [4, 5, 6, 7, 8, 9, 10, 11, 12, 13],
                                               [2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
                                               [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]])
@skipIf(settings.TREKKING_TOPOLOGY_ENABLED, 'Test without dynamic segmentation only')
class LengthTest(TestCase):
    """2D vs 3D length of a path draped over the test DEM."""

    @classmethod
    def setUpTestData(cls):
        # Create a simple fake DEM
        with connection.cursor() as cur:
            cur.execute('INSERT INTO altimetry_dem (rast) VALUES (ST_MakeEmptyRaster(100, 125, 0, 125, 25, -25, 0, 0, %s))', [settings.SRID])
            cur.execute('UPDATE altimetry_dem SET rast = ST_AddBand(rast, \'16BSI\')')
            demvalues = [[0, 0, 3, 5], [2, 2, 10, 15], [5, 15, 20, 25], [20, 25, 30, 35], [30, 35, 40, 45]]
            for y in range(0, 5):
                for x in range(0, 4):
                    cur.execute('UPDATE altimetry_dem SET rast = ST_SetValue(rast, %s, %s, %s::float)', [x + 1, y + 1, demvalues[y][x]])
        cls.path = Path.objects.create(geom=LineString((1, 101), (81, 101), (81, 99)))

    def test_2dlength_is_preserved(self):
        # Draping must not change the planimetric length.
        self.assertEqual(self.path.geom_3d.length, self.path.geom.length)

    def test_3dlength(self):
        # before smoothing: (1 101 0, 21 101 0, 41 101 0, 61 101 3, 81 101 5, 81 99 15)
        # after smoothing: (1 101 0, 21 101 0, 41 101 0, 61 101 1, 81 101 3, 81 99 9)
        # length: 20 + 20 + (20 ** 2 + 1) ** .5 + (20 ** 2 + 2 ** 2) ** .5 + (2 ** 2 + 6 ** 2) ** .5
        self.assertEqual(round(self.path.length, 9), 83.127128724)
@skipIf(not settings.TREKKING_TOPOLOGY_ENABLED, 'Test with dynamic segmentation only')
class SamplingTestPath(TestCase):
    """Check how many vertices DEM sampling inserts into Path geometries,
    around multiples of the ALTIMETRIC_PROFILE_PRECISION step."""
    model = Path
    step = settings.ALTIMETRIC_PROFILE_PRECISION

    @classmethod
    def setUpTestData(cls):
        if cls.model is None:
            # Fix: SkipTest was previously only instantiated, never raised
            # (and Exception rejects the `reason` keyword, so the old call
            # would itself TypeError). Raising it properly skips the class.
            raise SkipTest("No model")
        # Create a fake empty DEM to prevent trigger optimisation to skip sampling
        with connection.cursor() as cur:
            cur.execute('INSERT INTO altimetry_dem (rast) VALUES (ST_AddBand(ST_MakeEmptyRaster(100, 100, 0, 100, 25, -25, 0, 0, %s), \'16BSI\'))',
                        [settings.SRID])

    def test_0_first(self):
        # Zero-length leading segment keeps its duplicate vertex.
        path = self.model.objects.create(geom=LineString((0, 0), (0, 0), (0, 1)))
        self.assertEqual(len(path.geom_3d.coords), 3)

    def test_0_last(self):
        path = self.model.objects.create(geom=LineString((0, 0), (0, 1), (0, 1)))
        self.assertEqual(len(path.geom_3d.coords), 3)

    def test_1(self):
        path = self.model.objects.create(geom=LineString((0, 0), (0, 1)))
        self.assertEqual(len(path.geom_3d.coords), 2)

    def test_24(self):
        # Just under one sampling step: no extra vertex.
        path = self.model.objects.create(geom=LineString((0, 0), (0, self.step - 1)))
        self.assertEqual(len(path.geom_3d.coords), 2)

    def test_25(self):
        # Exactly one step: one vertex inserted.
        path = self.model.objects.create(geom=LineString((0, 0), (0, self.step)))
        self.assertEqual(len(path.geom_3d.coords), 3)

    def test_26(self):
        path = self.model.objects.create(geom=LineString((0, 0), (0, self.step + 1)))
        self.assertEqual(len(path.geom_3d.coords), 3)

    def test_49(self):
        path = self.model.objects.create(geom=LineString((0, 0), (0, self.step * 2 - 1)))
        self.assertEqual(len(path.geom_3d.coords), 3)

    def test_50(self):
        path = self.model.objects.create(geom=LineString((0, 0), (0, self.step * 2)))
        self.assertEqual(len(path.geom_3d.coords), 4)

    def test_51(self):
        path = self.model.objects.create(geom=LineString((0, 0), (0, self.step * 2 + 1)))
        self.assertEqual(len(path.geom_3d.coords), 4)

    def test_1m(self):
        # Multi-segment variants of the same boundaries.
        path = self.model.objects.create(geom=LineString((0, 0), (0, 1), (1, 1)))
        self.assertEqual(len(path.geom_3d.coords), 3)

    def test_24m(self):
        path = self.model.objects.create(geom=LineString((0, 0), (0, self.step - 1), (0, self.step * 2 - 2)))
        self.assertEqual(len(path.geom_3d.coords), 3)

    def test_25m(self):
        path = self.model.objects.create(geom=LineString((0, 0), (0, self.step), (0, self.step * 2)))
        self.assertEqual(len(path.geom_3d.coords), 5)

    def test_26m(self):
        path = self.model.objects.create(geom=LineString((0, 0), (0, self.step + 1), (0, self.step * 2 + 2)))
        self.assertEqual(len(path.geom_3d.coords), 5)

    def test_49m(self):
        path = self.model.objects.create(geom=LineString((0, 0), (0, self.step * 2 - 1), (0, self.step * 4 - 2)))
        self.assertEqual(len(path.geom_3d.coords), 5)

    def test_50m(self):
        path = self.model.objects.create(geom=LineString((0, 0), (0, self.step * 2), (0, self.step * 4)))
        self.assertEqual(len(path.geom_3d.coords), 7)

    def test_51m(self):
        path = self.model.objects.create(geom=LineString((0, 0), (0, self.step * 2 + 1), (0, self.step * 4 + 2)))
        self.assertEqual(len(path.geom_3d.coords), 7)
@skipIf(settings.TREKKING_TOPOLOGY_ENABLED, 'Test without dynamic segmentation only')
class SamplingTestTopology(TestCase):
    """Same vertex-sampling checks as SamplingTestPath, but on Topology
    geometries when dynamic segmentation is disabled."""
    model = Topology
    step = settings.ALTIMETRIC_PROFILE_PRECISION

    @classmethod
    def setUpTestData(cls):
        if cls.model is None:
            # Fix: SkipTest was previously only instantiated, never raised
            # (and Exception rejects the `reason` keyword, so the old call
            # would itself TypeError). Raising it properly skips the class.
            raise SkipTest("No model")
        # Create a fake empty DEM to prevent trigger optimisation to skip sampling
        with connection.cursor() as cur:
            cur.execute('INSERT INTO altimetry_dem (rast) VALUES (ST_MakeEmptyRaster(100, 125, 0, 125, 25, -25, 0, 0, %s))',
                        [settings.SRID])
            cur.execute('UPDATE altimetry_dem SET rast = ST_AddBand(rast, \'16BSI\')')

    def test_0_first(self):
        path = self.model.objects.create(geom=LineString((0, 0), (0, 0), (0, 1)))
        self.assertEqual(len(path.geom_3d.coords), 3)

    def test_0_last(self):
        path = self.model.objects.create(geom=LineString((0, 0), (0, 1), (0, 1)))
        self.assertEqual(len(path.geom_3d.coords), 3)

    def test_1(self):
        path = self.model.objects.create(geom=LineString((0, 0), (0, 1)))
        self.assertEqual(len(path.geom_3d.coords), 2)

    def test_24(self):
        path = self.model.objects.create(geom=LineString((0, 0), (0, self.step - 1)))
        self.assertEqual(len(path.geom_3d.coords), 2)

    def test_25(self):
        path = self.model.objects.create(geom=LineString((0, 0), (0, self.step)))
        self.assertEqual(len(path.geom_3d.coords), 3)

    def test_26(self):
        path = self.model.objects.create(geom=LineString((0, 0), (0, self.step + 1)))
        self.assertEqual(len(path.geom_3d.coords), 3)

    def test_49(self):
        path = self.model.objects.create(geom=LineString((0, 0), (0, self.step * 2 - 1)))
        self.assertEqual(len(path.geom_3d.coords), 3)

    def test_50(self):
        path = self.model.objects.create(geom=LineString((0, 0), (0, self.step * 2)))
        self.assertEqual(len(path.geom_3d.coords), 4)

    def test_51(self):
        path = self.model.objects.create(geom=LineString((0, 0), (0, self.step * 2 + 1)))
        self.assertEqual(len(path.geom_3d.coords), 4)

    def test_1m(self):
        path = self.model.objects.create(geom=LineString((0, 0), (0, 1), (1, 1)))
        self.assertEqual(len(path.geom_3d.coords), 3)

    def test_24m(self):
        path = self.model.objects.create(geom=LineString((0, 0), (0, self.step - 1), (0, self.step * 2 - 2)))
        self.assertEqual(len(path.geom_3d.coords), 3)

    def test_25m(self):
        path = self.model.objects.create(geom=LineString((0, 0), (0, self.step), (0, self.step * 2)))
        self.assertEqual(len(path.geom_3d.coords), 5)

    def test_26m(self):
        path = self.model.objects.create(geom=LineString((0, 0), (0, self.step + 1), (0, self.step * 2 + 2)))
        self.assertEqual(len(path.geom_3d.coords), 5)

    def test_49m(self):
        path = self.model.objects.create(geom=LineString((0, 0), (0, self.step * 2 - 1), (0, self.step * 4 - 2)))
        self.assertEqual(len(path.geom_3d.coords), 5)

    def test_50m(self):
        path = self.model.objects.create(geom=LineString((0, 0), (0, self.step * 2), (0, self.step * 4)))
        self.assertEqual(len(path.geom_3d.coords), 7)

    def test_51m(self):
        path = self.model.objects.create(geom=LineString((0, 0), (0, self.step * 2 + 1), (0, self.step * 4 + 2)))
        self.assertEqual(len(path.geom_3d.coords), 7)
| |
import os
import sys
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
ROOT_DIR = os.path.dirname(BASE_DIR)
# Make the bundled apps importable as top-level packages.
sys.path.append(
    os.path.join(BASE_DIR, 'apps')
)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): 'a' is a placeholder -- override from the environment or a
# secrets store before any non-local deployment.
SECRET_KEY = 'a'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Allow any origin to reach the API -- development convenience only.
CORS_ORIGIN_ALLOW_ALL = True
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Third-party: API framework + swagger docs, UI helpers, CORS support.
    'rest_framework_swagger',
    'rest_framework',
    'bootstrap3',
    'jquery',
    'corsheaders',
    # Project apps (importable via the sys.path entry added above).
    'apps.core',
]
# Request/response middleware stack (old-style MIDDLEWARE_CLASSES setting).
# Fixes: 'django.middleware.common.CommonMiddleware' was listed twice (it
# would run twice per request), and CorsMiddleware was placed last although
# django-cors-headers requires it before CommonMiddleware so CORS headers
# are attached to responses CommonMiddleware generates (e.g. redirects).
MIDDLEWARE_CLASSES = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'corsheaders.middleware.CorsMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
# Root URLconf module for the project.
ROOT_URLCONF = 'urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # Project-level template directory, plus per-app 'templates/' dirs.
        'DIRS': [
            os.path.normpath(os.path.join(BASE_DIR, 'templates')),
        ],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.template.context_processors.media',
                'django.template.context_processors.static',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
# NOTE(review): file-based SQLite -- fine for development, not for production.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db/development.db'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
# Store datetimes timezone-aware (UTC) in the database.
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
# STATIC FILE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
# collectstatic target, outside the source tree.
STATIC_ROOT = os.path.join(ROOT_DIR, 'assets')
# See:
# https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = (
    os.path.join(BASE_DIR, 'static'),
)
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# END STATIC FILE CONFIGURATION
# MEDIA CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = os.path.normpath(os.path.join(ROOT_DIR, 'media'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
# END MEDIA CONFIGURATION
# Console logging (incl. SQL) while DEBUG; file logging for request errors
# when not DEBUG.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        'require_debug_true': {
            '()': 'django.utils.log.RequireDebugTrue',
        },
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse',
        },
    },
    'formatters': {
        'verbose': {
            'format': "[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s",
            'datefmt': "%d/%b/%Y %H:%M:%S"
        },
        'simple': {
            'format': '%(levelname)s %(message)s'
        },
    },
    'handlers': {
        'console': {
            'level': 'DEBUG',
            'filters': ['require_debug_true'],
            'class': 'logging.StreamHandler',
        },
        'file': {
            'level': 'ERROR',
            'class': 'logging.FileHandler',
            'filters': ['require_debug_false'],
            # NOTE(review): relative path -- resolved against the process
            # CWD; the 'log/' directory must exist or startup fails.
            'filename': 'log/error.log',
            'formatter': 'verbose'
        },
    },
    'loggers': {
        # Echoes every SQL query to the console when DEBUG is on.
        'django.db.backends': {
            'level': 'DEBUG',
            'handlers': ['console'],
        },
        'django.request': {
            'handlers': ['file'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}
# Rest framework access rules
REST_FRAMEWORK = {
    # Anonymous users get read-only access; writes require authentication.
    'DEFAULT_PERMISSION_CLASSES': (
        'rest_framework.permissions.IsAuthenticatedOrReadOnly',
    ),
    'DEFAULT_AUTHENTICATION_CLASSES': (
        'rest_framework.authentication.SessionAuthentication',
        'rest_framework.authentication.BasicAuthentication',
        # NOTE(review): rest_framework_jwt is referenced but not listed in
        # INSTALLED_APPS; TokenAuthentication needs the
        # 'rest_framework.authtoken' app for its token table -- confirm both
        # are actually installed/configured.
        'rest_framework_jwt.authentication.JSONWebTokenAuthentication',
        'rest_framework.authentication.TokenAuthentication',
    ),
}
| |
# Copyright (c) 2014 Openstack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import collections
import uuid
from oslo_config import cfg
from oslo_utils import importutils
import six
# Map of user-selectable OVSDB backend names to their implementation classes
# (imported lazily by API.get()).
interface_map = {
    'vsctl': 'neutron.agent.ovsdb.impl_vsctl.OvsdbVsctl',
    'native': 'neutron.agent.ovsdb.impl_idl.OvsdbIdl',
}
# Configuration option selecting which backend API.get() instantiates.
OPTS = [
    cfg.StrOpt('ovsdb_interface',
               choices=interface_map.keys(),
               default='vsctl',
               # NOTE(review): `_` is assumed to be an installed gettext
               # translation hook (oslo.i18n); it is not defined here.
               help=_('The interface for interacting with the OVSDB')),
]
cfg.CONF.register_opts(OPTS, 'OVS')
@six.add_metaclass(abc.ABCMeta)
class Command(object):
    """An OVSDB command that can be executed in a transaction

    :attr result: The result of executing the command in a transaction
    """

    @abc.abstractmethod
    def execute(self, **transaction_options):
        """Immediately execute an OVSDB command

        This implicitly creates a transaction with the passed options and then
        executes it, returning the value of the executed transaction

        :param transaction_options: Options to pass to the transaction
        """
@six.add_metaclass(abc.ABCMeta)
class Transaction(object):
    """Context-managed batch of OVSDB commands.

    Used as ``with api.transaction() as txn: txn.add(cmd)``; the batch is
    committed automatically when the block exits cleanly, and the commit's
    return value is stored on ``self.result``.
    """

    @abc.abstractmethod
    def commit(self):
        """Commit the transaction to OVSDB"""

    @abc.abstractmethod
    def add(self, command):
        """Append an OVSDB operation to the transaction"""

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, tb):
        # Skip the commit entirely when the managed block raised; the
        # exception is not suppressed (implicit None return).
        if exc_type is not None:
            return
        self.result = self.commit()
@six.add_metaclass(abc.ABCMeta)
class API(object):
    """Abstract OVSDB API.

    Each method builds a :class:`Command`-style operation object; commands
    are executed by adding them to a :class:`Transaction` and committing it.
    """

    def __init__(self, context):
        self.context = context

    @staticmethod
    def get(context, iface_name=None):
        """Return the configured OVSDB API implementation"""
        # iface_name overrides the ovsdb_interface config option; the map
        # from name to implementation class lives in module-level
        # interface_map.
        iface = importutils.import_class(
            interface_map[iface_name or cfg.CONF.OVS.ovsdb_interface])
        return iface(context)

    @abc.abstractmethod
    def transaction(self, check_error=False, log_errors=True, **kwargs):
        """Create a transaction
        :param check_error: Allow the transaction to raise an exception?
        :type check_error: bool
        :param log_errors: Log an error if the transaction fails?
        :type log_errors: bool
        :returns: A new transaction
        :rtype: :class:`Transaction`
        """

    @abc.abstractmethod
    def add_br(self, name, may_exist=True):
        """Create an command to add an OVS bridge
        :param name: The name of the bridge
        :type name: string
        :param may_exist: Do not fail if bridge already exists
        :type may_exist: bool
        :returns: :class:`Command` with no result
        """

    @abc.abstractmethod
    def del_br(self, name, if_exists=True):
        """Create a command to delete an OVS bridge
        :param name: The name of the bridge
        :type name: string
        :param if_exists: Do not fail if the bridge does not exist
        :type if_exists: bool
        :returns: :class:`Command` with no result
        """

    @abc.abstractmethod
    def br_exists(self, name):
        """Create a command to check if an OVS bridge exists
        :param name: The name of the bridge
        :type name: string
        :returns: :class:`Command` with bool result
        """

    @abc.abstractmethod
    def port_to_br(self, name):
        """Create a command to return the name of the bridge with the port
        :param name: The name of the OVS port
        :type name: string
        :returns: :class:`Command` with bridge name result
        """

    @abc.abstractmethod
    def iface_to_br(self, name):
        """Create a command to return the name of the bridge with the interface
        :param name: The name of the OVS interface
        :type name: string
        :returns: :class:`Command` with bridge name result
        """

    @abc.abstractmethod
    def list_br(self):
        """Create a command to return the current list of OVS bridge names
        :returns: :class:`Command` with list of bridge names result
        """

    @abc.abstractmethod
    def br_get_external_id(self, name, field):
        """Create a command to return a field from the Bridge's external_ids
        :param name: The name of the OVS Bridge
        :type name: string
        :param field: The external_ids field to return
        :type field: string
        :returns: :class:`Command` with field value result
        """

    @abc.abstractmethod
    def db_set(self, table, record, *col_values):
        """Create a command to set fields in a record
        :param table: The OVS table containing the record to be modified
        :type table: string
        :param record: The record id (name/uuid) to be modified
        :type record: string
        :param col_values: The columns and their associated values
        :type col_values: Tuples of (column, value). Values may be atomic
                          values or unnested sequences/mappings
        :returns: :class:`Command` with no result
        """
        # TODO(twilson) Consider handling kwargs for arguments where order
        # doesn't matter. Though that would break the assert_called_once_with
        # unit tests

    @abc.abstractmethod
    def db_clear(self, table, record, column):
        """Create a command to clear a field's value in a record
        :param table: The OVS table containing the record to be modified
        :type table: string
        :param record: The record id (name/uuid) to be modified
        :type record: string
        :param column: The column whose value should be cleared
        :type column: string
        :returns: :class:`Command` with no result
        """

    @abc.abstractmethod
    def db_get(self, table, record, column):
        """Create a command to return a field's value in a record
        :param table: The OVS table containing the record to be queried
        :type table: string
        :param record: The record id (name/uuid) to be queried
        :type record: string
        :param column: The column whose value should be returned
        :type column: string
        :returns: :class:`Command` with the field's value result
        """

    @abc.abstractmethod
    def db_list(self, table, records=None, columns=None, if_exists=False):
        """Create a command to return a list of OVSDB records
        :param table: The OVS table to query
        :type table: string
        :param records: The records to return values from
        :type records: list of record ids (names/uuids)
        :param columns: Limit results to only columns, None means all columns
        :type columns: list of column names or None
        :param if_exists: Do not fail if the record(s) do not exist
        :type if_exists: bool
        :returns: :class:`Command` with [{'column', value}, ...] result
        """

    @abc.abstractmethod
    def db_find(self, table, *conditions, **kwargs):
        """Create a command to find OVSDB records matching conditions
        :param table: The OVS table to query
        :type table: string
        :param conditions: The conditions to satisfy the query
        :type conditions: 3-tuples containing (column, operation, match)
                     Examples:
                         atomic: ('tag', '=', 7)
                         map: ('external_ids', '=', {'iface-id': 'xxx'})
                         field exists?
                             ('external_ids', '!=', {'iface-id', ''})
                         set contains?:
                             ('protocols', '{>=}', 'OpenFlow13')
                     See the ovs-vsctl man page for more operations
        :param columns: Limit results to only columns, None means all columns
        :type columns: list of column names or None
        :returns: :class:`Command` with [{'column', value}, ...] result
        """

    @abc.abstractmethod
    def set_controller(self, bridge, controllers):
        """Create a command to set an OVS bridge's OpenFlow controllers
        :param bridge: The name of the bridge
        :type bridge: string
        :param controllers: The controller strings
        :type controllers: list of strings, see ovs-vsctl manpage for format
        :returns: :class:`Command` with no result
        """

    @abc.abstractmethod
    def del_controller(self, bridge):
        """Create a command to clear an OVS bridge's OpenFlow controllers
        :param bridge: The name of the bridge
        :type bridge: string
        :returns: :class:`Command` with no result
        """

    @abc.abstractmethod
    def get_controller(self, bridge):
        """Create a command to return an OVS bridge's OpenFlow controllers
        :param bridge: The name of the bridge
        :type bridge: string
        :returns: :class:`Command` with list of controller strings result
        """

    @abc.abstractmethod
    def set_fail_mode(self, bridge, mode):
        """Create a command to set an OVS bridge's failure mode
        :param bridge: The name of the bridge
        :type bridge: string
        :param mode: The failure mode
        :type mode: "secure" or "standalone"
        :returns: :class:`Command` with no result
        """

    @abc.abstractmethod
    def add_port(self, bridge, port, may_exist=True):
        """Create a command to add a port to an OVS bridge
        :param bridge: The name of the bridge
        :type bridge: string
        :param port: The name of the port
        :type port: string
        :param may_exist: Do not fail if the port already exists
        :type may_exist: bool
        :returns: :class:`Command` with no result
        """

    @abc.abstractmethod
    def del_port(self, port, bridge=None, if_exists=True):
        """Create a command to delete an OVS port
        :param port: The name of the port
        :type port: string
        :param bridge: Only delete port if it is attached to this bridge
        :type bridge: string
        :param if_exists: Do not fail if the port does not exist
        :type if_exists: bool
        :returns: :class:`Command` with no result
        """

    @abc.abstractmethod
    def list_ports(self, bridge):
        """Create a command to list the names of ports on a bridge
        :param bridge: The name of the bridge
        :type bridge: string
        :returns: :class:`Command` with list of port names result
        """

    @abc.abstractmethod
    def list_ifaces(self, bridge):
        """Create a command to list the names of interfaces on a bridge
        :param bridge: The name of the bridge
        :type bridge: string
        :returns: :class:`Command` with list of interfaces names result
        """
def val_to_py(val):
    """Convert a json ovsdb return value to a native python object.

    OVSDB's JSON wire format encodes non-atomic values as 2-element
    ``["<type>", <payload>]`` pairs, which are unwrapped recursively:

      * ``["uuid", "<hex>"]``       -> :class:`uuid.UUID`
      * ``["set",  [v, ...]]``      -> list of converted values
      * ``["map",  [[k, v], ...]]`` -> dict of converted keys/values

    Anything else (atoms, unrecognized pairs) is returned unchanged.

    :param val: A decoded-JSON OVSDB value
    :returns: The equivalent native python object
    """
    # collections.Sequence moved to collections.abc in Python 3.3 and was
    # removed from the collections top level in 3.10; keep a py2 fallback
    # since this module still supports six-based py2/py3 code.
    try:
        from collections import abc as _abc  # Python 3
    except ImportError:  # Python 2: ABCs live directly in collections
        _abc = collections
    if isinstance(val, _abc.Sequence) and len(val) == 2:
        if val[0] == "uuid":
            return uuid.UUID(val[1])
        elif val[0] == "set":
            return [val_to_py(x) for x in val[1]]
        elif val[0] == "map":
            return {val_to_py(x): val_to_py(y) for x, y in val[1]}
    return val
def py_to_val(pyval):
    """Convert python value to ovs-vsctl value argument.

    Booleans become the literal strings ``'true'``/``'false'``, the empty
    string becomes a quoted empty string, and everything else passes
    through untouched.
    """
    if isinstance(pyval, bool):
        return 'true' if pyval is True else 'false'
    if pyval == '':
        return '""'
    return pyval